├── .envrc ├── version.sbt ├── project ├── build.properties └── plugins.sbt ├── src ├── test │ ├── resources │ │ ├── lua │ │ │ └── test.lua │ │ ├── application.conf │ │ └── log4j2.xml │ └── scala │ │ └── redis │ │ ├── RedisVersion.scala │ │ ├── issues │ │ └── Issue26Spec.scala │ │ ├── protocol │ │ ├── ParseNumberScalaCheck.scala │ │ ├── RedisProtocolRequestSpec.scala │ │ ├── ParseNumberSpec.scala │ │ ├── ParseParse.scala │ │ └── RedisProtocolReplySpec.scala │ │ ├── commands │ │ ├── ConnectionSpec.scala │ │ ├── HyperLogLogSpec.scala │ │ ├── TransactionsSpec.scala │ │ ├── GeoSpec.scala │ │ ├── ServerSpec.scala │ │ ├── ScriptingSpec.scala │ │ ├── BListsSpec.scala │ │ ├── HashesSpec.scala │ │ └── ListsSpec.scala │ │ ├── TestBase.scala │ │ ├── SentinelMutablePoolSpec.scala │ │ ├── RedisTest.scala │ │ ├── SentinelMonitoredRedisClientMasterSlavesSpec.scala │ │ ├── RedisPoolSpec.scala │ │ ├── ConverterSpec.scala │ │ ├── SentinelSpec.scala │ │ ├── RedisProcess.scala │ │ ├── actors │ │ ├── RedisSubscriberActorSpec.scala │ │ └── RedisClientActorSpec.scala │ │ ├── RedisPubSubSpec.scala │ │ └── RedisClusterTest.scala ├── main │ ├── scala │ │ └── redis │ │ │ ├── commands │ │ │ ├── Publish.scala │ │ │ ├── Clusters.scala │ │ │ ├── HyperLogLog.scala │ │ │ ├── Connection.scala │ │ │ ├── BLists.scala │ │ │ ├── Sentinel.scala │ │ │ ├── Geo.scala │ │ │ ├── Scripting.scala │ │ │ ├── Hashes.scala │ │ │ ├── Server.scala │ │ │ ├── Sets.scala │ │ │ ├── Lists.scala │ │ │ ├── Strings.scala │ │ │ ├── Keys.scala │ │ │ ├── Transactions.scala │ │ │ └── SortedSets.scala │ │ │ ├── api │ │ │ ├── Publish.scala │ │ │ ├── geo │ │ │ │ ├── DistUnits.scala │ │ │ │ ├── GeoOptions.scala │ │ │ │ └── Geo.scala │ │ │ ├── Transactions.scala │ │ │ ├── HyperLogLog.scala │ │ │ ├── pubsub │ │ │ │ └── pubsub.scala │ │ │ ├── Connection.scala │ │ │ ├── Sentinel.scala │ │ │ ├── BLists.scala │ │ │ ├── api.scala │ │ │ ├── Scripting.scala │ │ │ ├── Clusters.scala │ │ │ ├── Lists.scala │ │ │ ├── Servers.scala │ │ │ ├── Hashes.scala │ │ │ └── Sets.scala │ │ │ ├── Operation.scala │ │ │ ├── Request.scala │ │ │ ├── protocol │ │ │ ├── RedisProtocolRequest.scala │ │ │ └── ParseNumber.scala │ │ │ ├── util │ │ │ └── CRC16.java │ │ │ ├── actors │ │ │ ├── RedisReplyDecoder.scala │ │ │ ├── RedisClientActor.scala │ │ │ ├── RedisSubscriberActor.scala │ │ │ └── RedisWorkerIO.scala │ │ │ └── RedisCommand.scala │ └── resources │ │ └── reference.conf └── bench │ └── src │ └── test │ └── scala │ └── rediscala │ └── benchmark │ ├── ByteStringBench.scala │ ├── RedisBenchProtocol.scala │ ├── RedisBench.scala │ └── RedisBenchPool.scala ├── shell.nix ├── .gitlab-ci.yml ├── .gitignore ├── .travis.yml └── benchmark └── src └── main └── scala └── redis ├── commands ├── Ping.scala ├── Get.scala └── Hgetall.scala ├── RedisState.scala └── protocol └── ParseNumberBench.scala /.envrc: -------------------------------------------------------------------------------- 1 | use nix 2 | -------------------------------------------------------------------------------- /version.sbt: -------------------------------------------------------------------------------- 1 | version := "1.9.2" 2 | -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.4.7 2 | -------------------------------------------------------------------------------- /src/test/resources/lua/test.lua: 
-------------------------------------------------------------------------------- 1 | return "test" 2 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | { pkgs ? import { } }: 2 | 3 | pkgs.mkShell { 4 | name = "rediscala-shell"; 5 | buildInputs = with pkgs; [ redis scala sbt ]; 6 | } 7 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | image: hseeberger/scala-sbt:8u181_2.12.6_1.2.3 2 | 3 | stages: 4 | - test 5 | 6 | test: 7 | stage: test 8 | script: sbt clean coverage test 9 | -------------------------------------------------------------------------------- /src/test/resources/application.conf: -------------------------------------------------------------------------------- 1 | akka { 2 | loglevel = "DEBUG" 3 | test { 4 | timefactor = 10 5 | timefactor = ${?TEST_TIME_FACTOR} 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | .idea* 3 | *.iml 4 | *.ipr 5 | .classpath* 6 | .project* 7 | .settings* 8 | .worksheet/ 9 | 10 | .*.s[a-w][a-z] 11 | *.un~ 12 | Session.vim 13 | .netrwhist 14 | *~ 15 | .DS_Store 16 | *.rdb 17 | *.aof 18 | redis-3.2.0/ 19 | redis-3.2.0.tar.gz 20 | 21 | .direnv 22 | .bsp/ 23 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | env: 2 | TEST_TIME_FACTOR=100 3 | dist: xenial 4 | language: scala 5 | 6 | jdk: 7 | - openjdk8 8 | 9 | services: 10 | - redis-server 11 | 12 | script: 13 | - export SBT_OPTS="${SBT_OPTS} -Xmx2G -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -Xss2M" 14 | - sbt clean coverage +test 15 | after_success: "sbt coverageReport coveralls" 16 | -------------------------------------------------------------------------------- /src/test/scala/redis/RedisVersion.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | case class RedisVersion(major: Int, minor: Int, patch: Int) extends Ordered[RedisVersion] { 4 | 5 | import scala.math.Ordered.orderingToOrdered 6 | 7 | override def compare(that: RedisVersion): Int = 8 | (this.major, this.minor, this.patch) 9 | .compare((that.major, that.minor, that.patch)) 10 | } 11 | -------------------------------------------------------------------------------- /src/main/scala/redis/commands/Publish.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis.{ByteStringSerializer, Request} 4 | import scala.concurrent.Future 5 | import redis.api.publish.{Publish => PublishCommand} 6 | 7 | trait Publish extends Request { 8 | def publish[V: ByteStringSerializer](channel: String, value: V): Future[Long] = 9 | send(PublishCommand(channel, value)) 10 | } 11 | -------------------------------------------------------------------------------- /src/test/resources/log4j2.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /src/main/scala/redis/api/Publish.scala: 
-------------------------------------------------------------------------------- 1 | package redis.api.publish 2 | 3 | import redis.{RedisCommandIntegerLong, ByteStringSerializer} 4 | import akka.util.ByteString 5 | 6 | case class Publish[A](channel: String, value: A)(implicit convert: ByteStringSerializer[A]) extends RedisCommandIntegerLong { 7 | val isMasterOnly = true 8 | val encodedRequest: ByteString = encode("PUBLISH", Seq(ByteString(channel), convert.serialize(value))) 9 | } 10 | -------------------------------------------------------------------------------- /src/main/resources/reference.conf: -------------------------------------------------------------------------------- 1 | rediscala { 2 | loglevel = "DEBUG" 3 | stdout-loglevel = "ON" 4 | loggers = ["akka.event.slf4j.Slf4jLogger"] 5 | rediscala-client-worker-dispatcher { 6 | mailbox-type = "akka.dispatch.SingleConsumerOnlyUnboundedMailbox" 7 | # Throughput defines the maximum number of messages to be 8 | # processed per actor before the thread jumps to the next actor. 9 | # Set to 1 for as fair as possible. 10 | throughput = 512 11 | } 12 | } -------------------------------------------------------------------------------- /src/main/scala/redis/commands/Clusters.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis.Request 4 | import redis.api.clusters._ 5 | 6 | import scala.concurrent.Future 7 | /** 8 | * Commands operating on the Redis Cluster (CLUSTER SLOTS, CLUSTER INFO, CLUSTER NODES) 9 | */ 10 | trait Clusters extends Request { 11 | 12 | def clusterSlots(): Future[Seq[ClusterSlot]] = send(ClusterSlots()) 13 | 14 | def clusterInfo(): Future[Map[String, String]] = send(ClusterInfo()) 15 | 16 | def clusterNodes(): Future[Array[ClusterNodeInfo]] = send(ClusterNodes()) 17 | } -------------------------------------------------------------------------------- /src/test/scala/redis/issues/Issue26Spec.scala: -------------------------------------------------------------------------------- 1 | package redis.issues 2 | 3 | import org.scalatest.RecoverMethods._ 4 | import org.scalatest.Succeeded 5 | import redis._ 6 | 7 | class Issue26Spec extends RedisStandaloneServer { 8 | 9 | "Deserializer exceptions" should { 10 | "be propagated to resulting future" in { 11 | redis.set("key", "value").futureValue shouldBe true 12 | recoverToSucceededIf[NumberFormatException](redis.get[Double]("key")).futureValue shouldBe Succeeded 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /src/main/scala/redis/api/geo/DistUnits.scala: -------------------------------------------------------------------------------- 1 | package redis.api.geo 2 | 3 | object DistUnits{ 4 | sealed trait Measurement{ 5 | def value:String = { 6 | this match { 7 | case Meter => "m" 8 | case Kilometer => "km" 9 | case Mile => "mi" 10 | case Feet => "ft" 11 | } 12 | } 13 | } 14 | case object Meter extends Measurement 15 | case object Kilometer extends Measurement 16 | case object Mile extends Measurement 17 | case object Feet extends Measurement 18 | } 19 | -------------------------------------------------------------------------------- /src/main/scala/redis/api/geo/GeoOptions.scala: -------------------------------------------------------------------------------- 1 | package redis.api.geo 2 | 3 | /** 4 | * Created by avilevi on 08/12/2016. 
5 | */ 6 | object GeoOptions{ 7 | sealed trait WithOption{ 8 | def value:String = { 9 | this match { 10 | case WithDist => "WITHDIST" 11 | case WithCoord => "WITHCOORD" 12 | case WithHash => "WITHHASH" 13 | } 14 | } 15 | } 16 | case object WithDist extends WithOption 17 | case object WithCoord extends WithOption 18 | case object WithHash extends WithOption 19 | 20 | } 21 | -------------------------------------------------------------------------------- /src/test/scala/redis/protocol/ParseNumberScalaCheck.scala: -------------------------------------------------------------------------------- 1 | package redis.protocol 2 | 3 | import akka.util.ByteString 4 | import org.scalacheck.Properties 5 | import org.scalacheck.Prop.forAll 6 | 7 | object ParseNumberScalaCheck extends Properties("ParseNumber") { 8 | property("parse long") = forAll { (a: Long) => 9 | val s = a.toString 10 | ParseNumber.parseLong(ByteString(s)) == s.toLong 11 | } 12 | 13 | property("parse int") = forAll { (a: Int) => 14 | val s = a.toString 15 | ParseNumber.parseInt(ByteString(s)) == s.toInt 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /src/main/scala/redis/commands/HyperLogLog.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis.api.hyperloglog._ 4 | import redis.{ByteStringSerializer, Request} 5 | 6 | import scala.concurrent.Future 7 | 8 | trait HyperLogLog extends Request { 9 | def pfadd[V: ByteStringSerializer](key: String, values: V*): Future[Long] = 10 | send(Pfadd(key, values)) 11 | 12 | def pfcount(keys: String*): Future[Long] = 13 | send(Pfcount(keys)) 14 | 15 | def pfmerge(destKey: String, sourceKeys: String*): Future[Boolean] = 16 | send(Pfmerge(destKey, sourceKeys)) 17 | } 18 | -------------------------------------------------------------------------------- /src/test/scala/redis/protocol/RedisProtocolRequestSpec.scala: -------------------------------------------------------------------------------- 1 | package redis.protocol 2 | 3 | import akka.util.ByteString 4 | import redis.TestBase 5 | 6 | class RedisProtocolRequestSpec extends TestBase { 7 | 8 | "Encode request" should { 9 | "inline" in { 10 | RedisProtocolRequest.inline("PING") shouldBe ByteString("PING\r\n") 11 | } 12 | "multibulk" in { 13 | val encoded = RedisProtocolRequest.multiBulk("SET", Seq(ByteString("mykey"), ByteString("myvalue"))) 14 | encoded shouldBe ByteString("*3\r\n$3\r\nSET\r\n$5\r\nmykey\r\n$7\r\nmyvalue\r\n") 15 | } 16 | } 17 | 18 | } 19 | -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | 2 | resolvers += "jgit-repo" at "https://download.eclipse.org/jgit/maven" 3 | 4 | addSbtPlugin("com.typesafe.sbt" % "sbt-site" % "1.3.2") 5 | 6 | addSbtPlugin("com.typesafe.sbt" % "sbt-ghpages" % "0.6.2") 7 | 8 | addSbtPlugin("com.eed3si9n" % "sbt-unidoc" % "0.4.1") 9 | 10 | resolvers += Classpaths.sbtPluginReleases 11 | 12 | addSbtPlugin("org.scoverage" % "sbt-scoverage" % "1.6.1") 13 | 14 | addSbtPlugin("org.scoverage" % "sbt-coveralls" % "1.2.7") 15 | 16 | resolvers += Resolver.url( 17 | "bintray-sbt-plugin-releases", 18 | url("https://dl.bintray.com/content/sbt/sbt-plugin-releases"))( 19 | Resolver.ivyStylePatterns) 20 | 21 | addSbtPlugin("org.xerial.sbt" % "sbt-sonatype" % "2.5") 22 | 23 | addSbtPlugin("com.jsuereth" % "sbt-pgp" % "2.0.1") 24 | 
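The HyperLogLog command trait listed above (src/main/scala/redis/commands/HyperLogLog.scala) maps PFADD, PFCOUNT and PFMERGE onto Future-returning methods. A minimal usage sketch follows; it is editorial, not a file from this repository, and assumes the RedisClient entry point (which the test and benchmark sources create with an implicit ActorSystem) mixes in this trait:

import akka.actor.ActorSystem
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import redis.RedisClient

object HyperLogLogExample extends App {
  implicit val system: ActorSystem = ActorSystem("hll-example")
  val client = RedisClient() // assumed to mix in the HyperLogLog command trait

  // Track approximate unique visitors per day, then merge the day sketches into a total.
  val approxTotal = for {
    _ <- client.pfadd("visitors:day1", "alice", "bob", "carol")
    _ <- client.pfadd("visitors:day2", "carol", "dave")
    _ <- client.pfmerge("visitors:total", "visitors:day1", "visitors:day2")
    n <- client.pfcount("visitors:total")
  } yield n

  println(Await.result(approxTotal, 5.seconds)) // approximately 4
  client.stop()
  system.terminate()
}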
-------------------------------------------------------------------------------- /src/main/scala/redis/api/Transactions.scala: -------------------------------------------------------------------------------- 1 | package redis.api.transactions 2 | 3 | import redis.{RedisCommandMultiBulk, RedisCommandStatusBoolean} 4 | import akka.util.ByteString 5 | import redis.protocol.MultiBulk 6 | 7 | case object Multi extends RedisCommandStatusBoolean { 8 | val isMasterOnly = true 9 | val encodedRequest: ByteString = encode("MULTI") 10 | } 11 | 12 | case object Exec extends RedisCommandMultiBulk[MultiBulk] { 13 | val isMasterOnly = true 14 | val encodedRequest: ByteString = encode("EXEC") 15 | 16 | def decodeReply(r: MultiBulk): MultiBulk = r 17 | } 18 | 19 | case class Watch(keys: Set[String]) extends RedisCommandStatusBoolean { 20 | val isMasterOnly = true 21 | val encodedRequest: ByteString = encode("WATCH", keys.map(ByteString.apply).toSeq) 22 | } -------------------------------------------------------------------------------- /src/main/scala/redis/commands/Connection.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis.{ByteStringDeserializer, ByteStringSerializer, Request} 4 | import scala.concurrent.Future 5 | import redis.protocol.Status 6 | import redis.api.connection._ 7 | 8 | trait Connection extends Request { 9 | def auth[V: ByteStringSerializer](value: V): Future[Status] = 10 | send(Auth(value)) 11 | 12 | def echo[V: ByteStringSerializer, R: ByteStringDeserializer](value: V): Future[Option[R]] = 13 | send(Echo(value)) 14 | 15 | def ping(): Future[String] = 16 | send(Ping) 17 | 18 | // commands sent after will fail with [[redis.protocol.NoConnectionException]] 19 | def quit(): Future[Boolean] = 20 | send(Quit) 21 | 22 | def select(index: Int): Future[Boolean] = 23 | send(Select(index)) 24 | } 25 | 26 | -------------------------------------------------------------------------------- /benchmark/src/main/scala/redis/commands/Ping.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import java.util.concurrent.TimeUnit 4 | 5 | import org.openjdk.jmh.annotations._ 6 | import redis.{Redis, RedisStateHelper} 7 | 8 | import scala.concurrent.{Future, Await} 9 | 10 | @OutputTimeUnit(TimeUnit.SECONDS) 11 | @State(Scope.Benchmark) 12 | class Ping extends RedisStateHelper { 13 | 14 | @Param(Array("10000", "100000", "300000", "500000")) 15 | var iteration: Int = _ 16 | 17 | @Benchmark 18 | @BenchmarkMode(Array(Mode.SingleShotTime)) 19 | def measurePing(): Unit = { 20 | import scala.concurrent.duration._ 21 | implicit def exec = rs.akkaSystem.dispatchers.lookup(Redis.dispatcher.name) 22 | 23 | val r = for (i <- (0 to iteration).toVector) yield { 24 | rs.redis.ping() 25 | } 26 | 27 | val a = Await.ready(Future.sequence(r), 10 seconds) 28 | () 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/main/scala/redis/commands/BLists.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis.{ByteStringDeserializer, Request} 4 | import scala.concurrent.Future 5 | import scala.concurrent.duration._ 6 | import redis.api.blists._ 7 | 8 | /** 9 | * Blocking commands on the Lists 10 | */ 11 | trait BLists extends Request { 12 | 13 | // TODO Future[Option[(KK, ByteString)]] 14 | def blpop[R: ByteStringDeserializer](keys: Seq[String], timeout: 
FiniteDuration = Duration.Zero): Future[Option[(String, R)]] = 15 | send(Blpop(keys, timeout)) 16 | 17 | def brpop[R: ByteStringDeserializer](keys: Seq[String], timeout: FiniteDuration = Duration.Zero): Future[Option[(String, R)]] = 18 | send(Brpop(keys, timeout)) 19 | 20 | def brpoplpush[R: ByteStringDeserializer](source: String, destination: String, timeout: FiniteDuration = Duration.Zero): Future[Option[R]] = 21 | send(Brpoplpush(source, destination, timeout)) 22 | } -------------------------------------------------------------------------------- /benchmark/src/main/scala/redis/RedisState.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import org.openjdk.jmh.annotations.{Setup, Level, TearDown} 4 | 5 | import scala.concurrent.Await 6 | import scala.concurrent.duration.Duration 7 | 8 | case class RedisState(initF: () => Unit = () => ()) { 9 | val akkaSystem = akka.actor.ActorSystem() 10 | val redis = RedisClient()(akkaSystem) 11 | 12 | implicit val exec = akkaSystem.dispatchers.lookup(Redis.dispatcher.name) 13 | 14 | import scala.concurrent.duration._ 15 | 16 | Await.result(redis.ping(), 2 seconds) 17 | 18 | @TearDown(Level.Trial) 19 | def down: Unit = { 20 | redis.stop() 21 | akkaSystem.terminate 22 | Await.result(akkaSystem.whenTerminated, Duration.Inf) 23 | } 24 | } 25 | 26 | trait RedisStateHelper { 27 | var rs: RedisState = _ 28 | 29 | @Setup(Level.Trial) 30 | def up() = { 31 | rs = RedisState() 32 | initRedisState() 33 | } 34 | 35 | @TearDown(Level.Trial) 36 | def down() = { 37 | rs.down 38 | } 39 | 40 | def initRedisState(): Unit = {} 41 | } 42 | -------------------------------------------------------------------------------- /src/main/scala/redis/api/HyperLogLog.scala: -------------------------------------------------------------------------------- 1 | package redis.api.hyperloglog 2 | 3 | import akka.util.ByteString 4 | import redis.{RedisCommandIntegerLong, RedisCommandStatusBoolean, ByteStringSerializer} 5 | 6 | case class Pfadd[K, V](key: K, values: Seq[V])(implicit redisKey: ByteStringSerializer[K], convert: ByteStringSerializer[V]) extends RedisCommandIntegerLong { 7 | val isMasterOnly = true 8 | val encodedRequest: ByteString = encode("PFADD", redisKey.serialize(key) +: values.map(convert.serialize)) 9 | } 10 | 11 | case class Pfcount[K](keys: Seq[K])(implicit redisKey: ByteStringSerializer[K]) extends RedisCommandIntegerLong { 12 | val isMasterOnly = false 13 | val encodedRequest: ByteString = encode("PFCOUNT", keys.map(redisKey.serialize)) 14 | } 15 | 16 | case class Pfmerge[K](destKey: K, sourceKeys: Seq[K])(implicit redisKey: ByteStringSerializer[K]) extends RedisCommandStatusBoolean { 17 | val isMasterOnly = true 18 | val encodedRequest: ByteString = encode("PFMERGE", redisKey.serialize(destKey) +: sourceKeys.map(redisKey.serialize)) 19 | } -------------------------------------------------------------------------------- /src/test/scala/redis/commands/ConnectionSpec.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis._ 4 | import akka.util.ByteString 5 | import redis.actors.ReplyErrorException 6 | 7 | class ConnectionSpec extends RedisStandaloneServer { 8 | 9 | "Connection commands" should { 10 | "SELECT" in { 11 | redis.select(0).futureValue shouldBe true 12 | redis.select(-1).failed.futureValue shouldBe a[ReplyErrorException] 13 | } 14 | "AUTH" in { 15 | redis.auth("no password").failed.futureValue shouldBe 
a[ReplyErrorException] 16 | } 17 | "ECHO" in { 18 | val hello = "Hello World!" 19 | redis.echo(hello).futureValue shouldBe Some(ByteString(hello)) 20 | } 21 | "PING" in { 22 | redis.ping().futureValue shouldBe "PONG" 23 | } 24 | "QUIT" in { 25 | // todo test that the TCP connection is reset. 26 | // Now all future commands are failed with a timeout 27 | redis.quit().futureValue shouldBe true 28 | // redis.echo("should fail").futureValue shouldBe "" 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/main/scala/redis/api/pubsub/pubsub.scala: -------------------------------------------------------------------------------- 1 | package redis.api.pubsub 2 | 3 | import akka.util.ByteString 4 | import redis.protocol.RedisProtocolRequest 5 | 6 | case class Message(channel: String, data: ByteString) 7 | 8 | case class PMessage(patternMatched: String, channel: String, data: ByteString) 9 | 10 | sealed trait SubscribeMessage { 11 | def cmd: String 12 | 13 | def params: Seq[String] 14 | 15 | def toByteString: ByteString = RedisProtocolRequest.multiBulk(cmd, params.map(ByteString.apply)) 16 | } 17 | 18 | case class PSUBSCRIBE(pattern: String*) extends SubscribeMessage { 19 | def cmd = "PSUBSCRIBE" 20 | 21 | def params = pattern 22 | } 23 | 24 | case class PUNSUBSCRIBE(pattern: String*) extends SubscribeMessage { 25 | def cmd = "PUNSUBSCRIBE" 26 | 27 | def params = pattern 28 | } 29 | 30 | case class SUBSCRIBE(channel: String*) extends SubscribeMessage { 31 | def cmd = "SUBSCRIBE" 32 | 33 | def params = channel 34 | } 35 | 36 | case class UNSUBSCRIBE(channel: String*) extends SubscribeMessage { 37 | def cmd = "UNSUBSCRIBE" 38 | 39 | def params = channel 40 | } 41 | -------------------------------------------------------------------------------- /src/main/scala/redis/Operation.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import scala.concurrent.Promise 4 | import redis.protocol.{DecodeResult, RedisReply} 5 | import akka.util.ByteString 6 | 7 | import scala.util.Try 8 | 9 | case class Operation[RedisReplyT <: RedisReply, T](redisCommand: RedisCommand[RedisReplyT, T], promise: Promise[T]) { 10 | def decodeRedisReplyThenComplete(bs: ByteString): DecodeResult[Unit] = { 11 | val r = redisCommand.decodeRedisReply.apply(bs) 12 | r.foreach { reply => 13 | completeSuccess(reply) 14 | } 15 | } 16 | 17 | def completeSuccess(redisReply: RedisReplyT): Promise[T] = { 18 | val v = Try(redisCommand.decodeReply(redisReply)) 19 | promise.complete(v) 20 | } 21 | 22 | def tryCompleteSuccess(redisReply: RedisReply) = { 23 | val v = Try(redisCommand.decodeReply(redisReply.asInstanceOf[RedisReplyT])) 24 | promise.tryComplete(v) 25 | } 26 | 27 | def completeSuccessValue(value: T) = promise.success(value) 28 | 29 | def completeFailed(t: Throwable) = promise.failure(t) 30 | } 31 | 32 | case class Transaction(commands: Seq[Operation[_, _]]) 33 | -------------------------------------------------------------------------------- /benchmark/src/main/scala/redis/commands/Get.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import java.util.concurrent.TimeUnit 4 | 5 | import org.openjdk.jmh.annotations._ 6 | import redis.{Redis, RedisStateHelper} 7 | 8 | import scala.concurrent.{Await, Future} 9 | 10 | @OutputTimeUnit(TimeUnit.SECONDS) 11 | @State(Scope.Benchmark) 12 | class Get extends RedisStateHelper { 13 | 14 | @Param(Array("10000", 
"100000", "300000", "500000")) 15 | var iteration: Int = _ 16 | 17 | var getKey = "getKey" 18 | 19 | override def initRedisState(): Unit = { 20 | import scala.concurrent.duration._ 21 | implicit val exec = rs.akkaSystem.dispatchers.lookup(Redis.dispatcher.name) 22 | 23 | Await.result(rs.redis.set(getKey, "value"), 20 seconds) 24 | } 25 | 26 | @Benchmark 27 | @BenchmarkMode(Array(Mode.SingleShotTime)) 28 | def measurePing(): Unit = { 29 | import scala.concurrent.duration._ 30 | implicit def exec = rs.akkaSystem.dispatchers.lookup(Redis.dispatcher.name) 31 | 32 | val r = for (i <- (0 to iteration).toVector) yield { 33 | rs.redis.get(getKey) 34 | } 35 | 36 | val a = Await.ready(Future.sequence(r), 10 seconds) 37 | () 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/test/scala/redis/commands/HyperLogLogSpec.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis.RedisStandaloneServer 4 | 5 | 6 | 7 | class HyperLogLogSpec extends RedisStandaloneServer { 8 | 9 | 10 | "HyperLogLog commands" should { 11 | "PFADD" in { 12 | val r = redis.pfadd("hll", "a", "b", "c", "d", "e", "f", "g").flatMap(_ => { 13 | redis.pfcount("hll").flatMap(count => { 14 | count shouldBe 7 15 | redis.pfadd("hll", "h", "i").flatMap(_ => { 16 | redis.pfcount("hll") 17 | }) 18 | }) 19 | }) 20 | r.futureValue shouldBe 9 21 | } 22 | 23 | "PFCOUNT" in { 24 | val r = redis.pfadd("hll2", "a", "b", "c", "d", "e", "f", "g").flatMap(_ => { 25 | redis.pfcount("hll2") 26 | }) 27 | r.futureValue shouldBe 7 28 | } 29 | 30 | "PFMERGE" in { 31 | val r = redis.pfadd("hll3", "a", "b").flatMap(_ => { 32 | redis.pfadd("hll4", "c", "d").flatMap(_ => { 33 | redis.pfmerge("hll5", "hll4", "hll3").flatMap(merged => { 34 | merged shouldBe true 35 | redis.pfcount("hll5") 36 | }) 37 | }) 38 | }) 39 | 40 | r.futureValue shouldBe 4 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/main/scala/redis/commands/Sentinel.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis.Request 4 | import redis.actors.ReplyErrorException 5 | import redis.api._ 6 | 7 | import scala.concurrent.Future 8 | 9 | trait Sentinel extends Request { 10 | 11 | def masters(): Future[Seq[Map[String, String]]] = 12 | send(SenMasters()) 13 | 14 | def slaves(master: String): Future[Seq[Map[String, String]]] = 15 | send(SenSlaves(master)) 16 | 17 | def isMasterDown(master: String): Future[Option[Boolean]] = { 18 | send(SenMasterInfo(master)) map { response => 19 | Some(!(response("name") == master && response("flags") == "master")) 20 | } recoverWith { 21 | case ReplyErrorException(message) if message.startsWith("ERR No such master with that name") => Future.successful(None) 22 | } 23 | } 24 | 25 | def getMasterAddr(master: String): Future[Option[(String, Int)]] = 26 | send(SenGetMasterAddr(master)) map { 27 | case Some(Seq(ip, port)) => Some((ip, port.toInt)) 28 | case _ => None 29 | } 30 | 31 | def resetMaster(pattern: String): Future[Boolean] = 32 | send(SenResetMaster(pattern)) 33 | 34 | def failover(master: String): Future[Boolean] = 35 | send(SenMasterFailover(master)) 36 | 37 | } 38 | -------------------------------------------------------------------------------- /src/main/scala/redis/api/Connection.scala: -------------------------------------------------------------------------------- 1 | package 
redis.api.connection 2 | 3 | import redis._ 4 | import akka.util.ByteString 5 | import redis.protocol.Status 6 | 7 | case class Auth[V](value: V)(implicit convert: ByteStringSerializer[V]) extends RedisCommandStatus[Status] { 8 | val isMasterOnly = true 9 | val encodedRequest: ByteString = encode("AUTH", Seq(convert.serialize(value))) 10 | 11 | def decodeReply(s: Status) = s 12 | } 13 | 14 | case class Echo[V, R](value: V)(implicit convert: ByteStringSerializer[V], deserializerR : ByteStringDeserializer[R]) extends RedisCommandBulkOptionByteString[R] { 15 | val isMasterOnly = true 16 | val encodedRequest: ByteString = encode("ECHO", Seq(convert.serialize(value))) 17 | val deserializer: ByteStringDeserializer[R] = deserializerR 18 | } 19 | 20 | case object Ping extends RedisCommandStatusString { 21 | val isMasterOnly = true 22 | val encodedRequest: ByteString = encode("PING") 23 | } 24 | 25 | case object Quit extends RedisCommandStatusBoolean { 26 | val isMasterOnly = true 27 | val encodedRequest: ByteString = encode("QUIT") 28 | } 29 | 30 | case class Select(index: Int) extends RedisCommandStatusBoolean { 31 | val isMasterOnly = true 32 | val encodedRequest: ByteString = encode("SELECT", Seq(ByteString(index.toString))) 33 | } -------------------------------------------------------------------------------- /src/main/scala/redis/commands/Geo.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis.Request 4 | import redis.api.geo.DistUnits.{Kilometer, Measurement, Meter} 5 | import redis.api.geo.GeoOptions.{WithDist, WithOption} 6 | import redis.api.geo._ 7 | 8 | import scala.concurrent.Future 9 | 10 | trait Geo extends Request { 11 | 12 | def geoAdd[K](key: String, lat: Double, lng: Double, loc: String): Future[Long] = 13 | send(GeoAdd(key, lat, lng, loc)) 14 | 15 | def geoRadius[K](key: String, lat: Double, lng: Double, radius: Double, dim: Measurement = Kilometer): Future[Seq[String]] = 16 | send(GeoRadius(key, lat, lng, radius, dim)) 17 | 18 | def geoRadiusByMember[K](key: String, member: String, dist:Int, dim: Measurement = Meter): Future[Seq[String]] = 19 | send(GeoRadiusByMember(key, member, dist, dim)) 20 | 21 | def geoRadiusByMemberWithOpt[K](key: String, member: String, dist:Int, dim: Measurement = Meter, opt: WithOption = WithDist, 22 | count: Int = Int.MaxValue)= 23 | send(GeoRadiusByMemberWithOpt(key, member, dist, dim, opt,count)) 24 | 25 | def geoDist[K] (key: String ,member1: String, member2: String, unit: Measurement = Meter ): Future[Double] = 26 | send(GeoDist(key, member1, member2, unit)) 27 | 28 | def geoHash[K] (key: String ,members: String *): Future[Seq[String]] = send(GeoHash(key, members)) 29 | 30 | def geoPos[K] (key: String ,members: String *): Future[Seq[String]] = send(GeoPos(key, members)) 31 | 32 | } 33 | -------------------------------------------------------------------------------- /src/test/scala/redis/TestBase.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import java.nio.file.{Files, Path} 4 | 5 | import org.apache.logging.log4j.scala.Logger 6 | import org.scalatest.concurrent.{Eventually, ScalaFutures} 7 | import org.scalatest.matchers.{MatchResult, Matcher} 8 | import org.scalatest.matchers.should.Matchers 9 | import org.scalatest.wordspec.{AnyWordSpecLike} 10 | 11 | trait TestBase extends AnyWordSpecLike with Matchers with ScalaFutures with Eventually { 12 | import org.scalatest.time.{Millis, Seconds, Span} 
13 | implicit protected val defaultPatience = 14 | PatienceConfig(timeout = scaled(Span(1, Seconds)), interval = Span(100, Millis)) 15 | 16 | protected val log = Logger(getClass) 17 | 18 | protected def createTempDirectory(): Path = { 19 | val dir = Files.createTempDirectory("rediscala-test-dir") 20 | dir.toFile.deleteOnExit() 21 | dir 22 | } 23 | 24 | protected def beBetween[T: Ordering](from: T, to: T): Matcher[T] = { 25 | val range = s"[$from, $to]" 26 | new Matcher[T] { 27 | def apply(left: T): MatchResult = { 28 | val ordering = implicitly[Ordering[T]] 29 | val isBetween = ordering.lteq(from, left) && ordering.gteq(to, left) 30 | MatchResult( 31 | isBetween, 32 | s"$left wasn't in range $range", 33 | s"$left was in range $range", 34 | Vector(left, from, to) 35 | ) 36 | } 37 | 38 | override def toString: String = "be between " + range 39 | } 40 | } 41 | 42 | } 43 | -------------------------------------------------------------------------------- /src/main/scala/redis/commands/Scripting.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis._ 4 | import redis.actors.ReplyErrorException 5 | import redis.api.scripting._ 6 | 7 | import scala.concurrent.Future 8 | 9 | trait Scripting extends Request { 10 | /** 11 | * Try EVALSHA, if NOSCRIPT returned, fallback to EVAL 12 | */ 13 | def evalshaOrEval[R: RedisReplyDeserializer](redisScript: RedisScript, keys: Seq[String] = Seq.empty[String], args: Seq[String] = Seq.empty[String]): Future[R] = { 14 | evalsha(redisScript.sha1, keys, args).recoverWith({ 15 | case ReplyErrorException(message) if message.startsWith("NOSCRIPT") => eval(redisScript.script, keys, args) 16 | }) 17 | } 18 | 19 | def eval[R: RedisReplyDeserializer](script: String, keys: Seq[String] = Seq.empty[String], args: Seq[String] = Seq.empty[String]): Future[R] = { 20 | send(Eval(script, keys, args)) 21 | } 22 | 23 | def evalsha[R: RedisReplyDeserializer](sha1: String, keys: Seq[String] = Seq.empty[String], args: Seq[String] = Seq.empty[String]): Future[R] = { 24 | send(Evalsha(sha1, keys, args)) 25 | } 26 | 27 | def scriptFlush(): Future[Boolean] = { 28 | send(ScriptFlush) 29 | } 30 | 31 | def scriptKill(): Future[Boolean] = { 32 | send(ScriptKill) 33 | } 34 | 35 | def scriptLoad(script: String): Future[String] = { 36 | send(ScriptLoad(script)) 37 | } 38 | 39 | def scriptExists(sha1: String*): Future[Seq[Boolean]] = { 40 | send(ScriptExists(sha1)) 41 | } 42 | } 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /src/test/scala/redis/SentinelMutablePoolSpec.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import redis.RedisServerHelper.redisHost 4 | 5 | class SentinelMutablePoolSpec extends RedisSentinelClients("SentinelMutablePoolSpec") { 6 | 7 | private val redisPool = new RedisClientMutablePool(Seq(RedisServer(redisHost, slavePort1)), masterName) 8 | 9 | "mutable pool" should { 10 | "add remove" in { 11 | log.debug("checking initial pool size") 12 | eventually { 13 | redisPool.redisConnectionPool.size shouldBe 1 14 | } 15 | log.debug("adding new redis server") 16 | redisPool.addServer(RedisServer(redisHost, slavePort2)) 17 | log.debug("checking new pool size") 18 | eventually { 19 | redisPool.redisConnectionPool.size shouldBe 2 20 | } 21 | redisPool.addServer(RedisServer(redisHost, slavePort2)) 22 | eventually { 23 | redisPool.redisConnectionPool.size shouldBe 2 24 | } 25 | 26 | val key 
= "keyPoolDb0" 27 | redisClient.set(key, "hello").futureValue 28 | 29 | redisPool.get[String](key).futureValue shouldBe Some("hello") 30 | redisPool.get[String](key).futureValue shouldBe Some("hello") 31 | 32 | redisPool.removeServer(RedisServer(redisHost, slavePort2)) 33 | 34 | eventually { 35 | redisPool.redisConnectionPool.size shouldBe 1 36 | } 37 | 38 | redisPool.get[String](key).futureValue shouldBe Some("hello") 39 | redisPool.get[String](key).futureValue shouldBe Some("hello") 40 | } 41 | } 42 | 43 | } 44 | -------------------------------------------------------------------------------- /src/test/scala/redis/RedisTest.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import akka.ConfigurationException 4 | 5 | import akka.util.ByteString 6 | 7 | class RedisTest extends RedisStandaloneServer { 8 | 9 | "basic test" should { 10 | "ping" in { 11 | redis.ping.futureValue shouldBe "PONG" 12 | } 13 | "set" in { 14 | redis.set("key", "value").futureValue shouldBe true 15 | } 16 | "get" in { 17 | redis.get("key").futureValue shouldBe Some(ByteString("value")) 18 | } 19 | "del" in { 20 | redis.del("key").futureValue shouldBe 1 21 | } 22 | "get not found" in { 23 | redis.get("key").futureValue shouldBe None 24 | } 25 | } 26 | 27 | "init connection test" should { 28 | "ok" in { 29 | withRedisServer(port => { 30 | val redis = RedisClient(port = port) 31 | // TODO set password (CONFIG SET requiredpass password) 32 | val r = for { 33 | _ <- redis.select(2) 34 | _ <- redis.set("keyDbSelect", "2") 35 | } yield { 36 | val redis = RedisClient(port = port, password = Some("password"), db = Some(2)) 37 | redis.get[String]("keyDbSelect").futureValue shouldBe Some("2") 38 | } 39 | r.futureValue 40 | }) 41 | } 42 | "use custom dispatcher" in { 43 | a[ConfigurationException] shouldBe thrownBy { 44 | withRedisServer(port => { 45 | implicit val redisDispatcher = RedisDispatcher("no-this-dispatcher") 46 | RedisClient(port = port) 47 | }) 48 | } 49 | } 50 | } 51 | 52 | } 53 | -------------------------------------------------------------------------------- /benchmark/src/main/scala/redis/protocol/ParseNumberBench.scala: -------------------------------------------------------------------------------- 1 | package redis.protocol 2 | 3 | import akka.util.ByteString 4 | import org.openjdk.jmh.annotations.{Scope, State, Benchmark} 5 | 6 | @State(Scope.Benchmark) 7 | class ParseNumberBench { 8 | val bs = ByteString("123") 9 | 10 | @Benchmark 11 | def parseLongWithToLong(): Long = { 12 | bs.utf8String.toLong 13 | } 14 | 15 | @Benchmark 16 | def parseLongPositiveOnly(): Long = { 17 | PositiveLongParser.parse(bs) 18 | } 19 | 20 | @Benchmark 21 | def parseLongWithParseNumber(): Long = { 22 | ParseNumber.parseLong(bs) 23 | } 24 | 25 | @Benchmark 26 | def parseIntWithToInt(): Int = { 27 | bs.utf8String.toInt 28 | } 29 | 30 | @Benchmark 31 | def parseIntWithParseNumber(): Int = { 32 | ParseNumber.parseInt(bs) 33 | } 34 | } 35 | 36 | object PositiveLongParser { 37 | 38 | /** 39 | * @see https://github.com/undertow-io/undertow/commit/94cd8882a32351f85cdb40df06e939b093751c2e 40 | * @param byteString 41 | * @return 42 | */ 43 | def parse(byteString: ByteString) : Long = { 44 | var value: Long = 0L 45 | val length = byteString.length 46 | 47 | if (length == 0) { 48 | throw new NumberFormatException(byteString.utf8String) 49 | } 50 | 51 | var multiplier = 1 52 | var i = length - 1 53 | 54 | while(i >= 0) { 55 | val c = byteString(i) 56 | 57 | if (c < '0' || c > '9') { 
58 | throw new NumberFormatException(byteString.utf8String) 59 | } 60 | val digit = c - '0' 61 | value += digit * multiplier 62 | multiplier *= 10 63 | 64 | i -= 1 65 | } 66 | value 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /src/main/scala/redis/api/Sentinel.scala: -------------------------------------------------------------------------------- 1 | package redis.api 2 | 3 | import redis._ 4 | import akka.util.ByteString 5 | import redis.protocol.MultiBulk 6 | 7 | case class SenMasters() extends RedisCommandMultiBulk[Seq[Map[String,String]]] { 8 | val isMasterOnly = true 9 | val encodedRequest: ByteString = encode("SENTINEL MASTERS") 10 | 11 | def decodeReply(mb: MultiBulk) = MultiBulkConverter.toSeqMapString(mb) 12 | } 13 | 14 | case class SenSlaves(master: String) extends RedisCommandMultiBulk[Seq[Map[String,String]]] { 15 | val isMasterOnly = true 16 | val encodedRequest: ByteString = encode(s"SENTINEL SLAVES $master") 17 | 18 | def decodeReply(mb: MultiBulk) = MultiBulkConverter.toSeqMapString(mb) 19 | } 20 | 21 | case class SenMasterInfo(master: String) extends RedisCommandMultiBulk[Map[String, String]] { 22 | val isMasterOnly = true 23 | val encodedRequest: ByteString = encode(s"SENTINEL master $master") 24 | 25 | def decodeReply(mb: MultiBulk) = MultiBulkConverter.toMapString(mb) 26 | } 27 | 28 | case class SenGetMasterAddr(master: String) extends RedisCommandMultiBulk[Option[Seq[String]]] { 29 | val isMasterOnly = true 30 | val encodedRequest: ByteString = encode(s"SENTINEL get-master-addr-by-name $master") 31 | 32 | def decodeReply(mb: MultiBulk) = mb.responses.map(_.map(_.toString)) 33 | } 34 | 35 | case class SenResetMaster(pattern: String) extends RedisCommandIntegerBoolean { 36 | val isMasterOnly = true 37 | val encodedRequest: ByteString = encode(s"SENTINEL RESET $pattern") 38 | } 39 | 40 | case class SenMasterFailover(master: String) extends RedisCommandStatusBoolean { 41 | val isMasterOnly = true 42 | val encodedRequest: ByteString = encode(s"SENTINEL FAILOVER $master") 43 | } 44 | -------------------------------------------------------------------------------- /src/test/scala/redis/SentinelMonitoredRedisClientMasterSlavesSpec.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import redis.RedisServerHelper.redisHost 4 | 5 | class SentinelMonitoredRedisClientMasterSlavesSpec 6 | extends RedisSentinelClients("SentinelMonitoredRedisClientMasterSlavesSpec") { 7 | 8 | lazy val redisMasterSlavesPool = 9 | SentinelMonitoredRedisClientMasterSlaves(master = masterName, sentinels = sentinelPorts.map((redisHost, _))) 10 | 11 | "sentinel slave pool" should { 12 | "add and remove" in { 13 | eventually { 14 | redisMasterSlavesPool.set("test", "value").futureValue 15 | redisMasterSlavesPool.slavesClients.redisConnectionPool.size shouldBe 2 16 | } 17 | 18 | val newSlave = newSlaveProcess() 19 | 20 | eventually { 21 | redisMasterSlavesPool.slavesClients.redisConnectionPool.size shouldBe 3 22 | } 23 | newSlave.stop() 24 | 25 | eventually { 26 | redisMasterSlavesPool.get[String]("test").futureValue shouldBe Some("value") 27 | } 28 | slave1.stop() 29 | slave2.stop() 30 | 31 | eventually { 32 | redisMasterSlavesPool.slavesClients.redisConnectionPool.size shouldBe 0 33 | } 34 | 35 | redisMasterSlavesPool.get[String]("test").futureValue shouldBe Some("value") 36 | newSlaveProcess() 37 | 38 | eventually { 39 | redisMasterSlavesPool.slavesClients.redisConnectionPool.size shouldBe 
1 40 | } 41 | } 42 | /* 43 | "changemaster" in { 44 | Try(redisMasterSlavesPool.masterClient.shutdown(), timeOut)) 45 | awaitAssert( redisMasterSlavesPool.slavesClients.redisConnectionPool.size shouldBe 0, 20 second ) 46 | redisMasterSlavesPool.get[String]("test"), timeOut) shouldBe Some("value") 47 | }*/ 48 | 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/main/scala/redis/api/BLists.scala: -------------------------------------------------------------------------------- 1 | package redis.api.blists 2 | 3 | import scala.concurrent.duration.{Duration, FiniteDuration} 4 | import redis._ 5 | import akka.util.ByteString 6 | import redis.protocol.{RedisReply, MultiBulk, Bulk} 7 | 8 | case class Blpop[KK: ByteStringSerializer, R: ByteStringDeserializer](keys: Seq[KK], timeout: FiniteDuration = Duration.Zero) 9 | extends BXpop[KK, R]("BLPOP") 10 | 11 | 12 | case class Brpop[KK: ByteStringSerializer, R: ByteStringDeserializer](keys: Seq[KK], timeout: FiniteDuration = Duration.Zero) 13 | extends BXpop[KK, R]("BRPOP") 14 | 15 | 16 | private[redis] abstract class BXpop[KK, R](command: String)(implicit redisKeys: ByteStringSerializer[KK], deserializerR: ByteStringDeserializer[R]) 17 | extends RedisCommandMultiBulk[Option[(String, R)]] { 18 | val isMasterOnly = true 19 | val keys: Seq[KK] 20 | val timeout: FiniteDuration 21 | 22 | val encodedRequest: ByteString = encode(command, keys.map(redisKeys.serialize) ++ Seq(ByteString(timeout.toSeconds.toString))) 23 | 24 | def decodeReply(mb: MultiBulk) = MultiBulkConverter.toOptionStringByteString(mb) 25 | } 26 | 27 | case class Brpoplpush[KS, KD, R](source: KS, destination: KD, timeout: FiniteDuration = Duration.Zero) 28 | (implicit bsSource: ByteStringSerializer[KS], bsDest: ByteStringSerializer[KD], deserializerR: ByteStringDeserializer[R]) 29 | extends RedisCommandRedisReply[Option[R]] { 30 | val isMasterOnly = true 31 | val encodedRequest: ByteString = encode("BRPOPLPUSH", 32 | Seq(bsSource.serialize(source), bsDest.serialize(destination), ByteString(timeout.toSeconds.toString))) 33 | 34 | def decodeReply(redisReply: RedisReply): Option[R] = redisReply match { 35 | case b: Bulk => b.asOptByteString.map(deserializerR.deserialize) 36 | case _ => None 37 | } 38 | } -------------------------------------------------------------------------------- /src/main/scala/redis/api/api.scala: -------------------------------------------------------------------------------- 1 | package redis.api 2 | 3 | import akka.util.ByteString 4 | 5 | 6 | trait Aggregate 7 | 8 | case object SUM extends Aggregate 9 | 10 | case object MIN extends Aggregate 11 | 12 | case object MAX extends Aggregate 13 | 14 | case class Limit(value: Double, inclusive: Boolean = true) { 15 | def toByteString: ByteString = ByteString(if (inclusive) value.toString else "(" + value.toString) 16 | } 17 | 18 | trait Order 19 | 20 | case object ASC extends Order 21 | 22 | case object DESC extends Order 23 | 24 | case class LimitOffsetCount(offset: Long, count: Long) { 25 | def toByteString: Seq[ByteString] = Seq(ByteString("LIMIT"), ByteString(offset.toString), ByteString(count.toString)) 26 | } 27 | 28 | 29 | sealed trait BitOperator 30 | 31 | case object AND extends BitOperator 32 | 33 | case object OR extends BitOperator 34 | 35 | case object XOR extends BitOperator 36 | 37 | case object NOT extends BitOperator 38 | 39 | 40 | sealed trait ListPivot 41 | 42 | case object AFTER extends ListPivot 43 | 44 | case object BEFORE extends ListPivot 45 | 46 | 
47 | sealed trait ShutdownModifier 48 | 49 | case object SAVE extends ShutdownModifier 50 | 51 | case object NOSAVE extends ShutdownModifier 52 | 53 | 54 | sealed trait ZaddOption { 55 | def serialize: ByteString 56 | } 57 | 58 | object ZaddOption { 59 | 60 | case object XX extends ZaddOption { 61 | override def serialize: ByteString = ByteString("XX") 62 | } 63 | 64 | case object NX extends ZaddOption { 65 | override def serialize: ByteString = ByteString("NX") 66 | } 67 | 68 | case object CH extends ZaddOption { 69 | override def serialize: ByteString = ByteString("CH") 70 | } 71 | 72 | case object INCR extends ZaddOption { 73 | override def serialize: ByteString = ByteString("INCR") 74 | } 75 | 76 | } 77 | -------------------------------------------------------------------------------- /benchmark/src/main/scala/redis/commands/Hgetall.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import java.util.concurrent.TimeUnit 4 | 5 | import org.openjdk.jmh.annotations._ 6 | import redis.{Redis, RedisStateHelper} 7 | 8 | import scala.concurrent.{Future, Await} 9 | 10 | @OutputTimeUnit(TimeUnit.SECONDS) 11 | @State(Scope.Benchmark) 12 | class Hgetall extends RedisStateHelper { 13 | 14 | @Param(Array("1000", "5000", "10000")) 15 | var hashSize: Int = _ 16 | 17 | val hsetKey = "hsetKey" 18 | 19 | var scalaRedis : com.redis.RedisClient = _ 20 | 21 | @Setup(Level.Trial) 22 | def upScalaRedis(): Unit = { 23 | scalaRedis = new com.redis.RedisClient("localhost", 6379) 24 | scalaRedis.ping 25 | } 26 | 27 | @TearDown(Level.Trial) 28 | def downScalaRedis(): Unit = { 29 | scalaRedis.disconnect 30 | } 31 | 32 | override def initRedisState(): Unit = { 33 | import scala.concurrent.duration._ 34 | implicit val exec = rs.akkaSystem.dispatchers.lookup(Redis.dispatcher.name) 35 | 36 | Await.result(rs.redis.flushall(), 20 seconds) 37 | val r = for (i <- 0 to hashSize) yield { 38 | rs.redis.hset(hsetKey, i.toString, i) 39 | } 40 | 41 | Await.result(Future.sequence(r), 20 seconds) 42 | } 43 | 44 | @Benchmark 45 | @BenchmarkMode(Array(Mode.SingleShotTime)) 46 | def measureHgetall(): Seq[Map[String, String]] = { 47 | import scala.concurrent.duration._ 48 | implicit def exec = rs.akkaSystem.dispatchers.lookup(Redis.dispatcher.name) 49 | 50 | val r = for (i <- 0 to 100) yield { 51 | rs.redis.hgetall[String](hsetKey) 52 | } 53 | 54 | Await.result(Future.sequence(r), 30 seconds) 55 | } 56 | 57 | @Benchmark 58 | @BenchmarkMode(Array(Mode.SingleShotTime)) 59 | def measureHgetallScalaRedis(): Seq[Option[Map[String, String]]] = { 60 | val r = for (i <- 0 to 100) yield { 61 | scalaRedis.hgetall(hsetKey) 62 | } 63 | r 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/main/scala/redis/commands/Hashes.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis.api.hashes._ 4 | import redis.{ByteStringDeserializer, ByteStringSerializer, Cursor, Request} 5 | 6 | import scala.concurrent.Future 7 | 8 | trait Hashes extends Request { 9 | 10 | def hdel(key: String, fields: String*): Future[Long] = 11 | send(Hdel(key, fields)) 12 | 13 | def hexists(key: String, field: String): Future[Boolean] = 14 | send(Hexists(key, field)) 15 | 16 | def hget[R: ByteStringDeserializer](key: String, field: String): Future[Option[R]] = 17 | send(Hget(key, field)) 18 | 19 | def hgetall[R: ByteStringDeserializer](key: String): Future[Map[String, R]] = 20 | 
send(Hgetall(key)) 21 | 22 | def hincrby(key: String, fields: String, increment: Long): Future[Long] = 23 | send(Hincrby(key, fields, increment)) 24 | 25 | def hincrbyfloat(key: String, fields: String, increment: Double): Future[Double] = 26 | send(Hincrbyfloat(key, fields, increment)) 27 | 28 | def hkeys(key: String): Future[Seq[String]] = 29 | send(Hkeys(key)) 30 | 31 | def hlen(key: String): Future[Long] = 32 | send(Hlen(key)) 33 | 34 | def hmget[R: ByteStringDeserializer](key: String, fields: String*): Future[Seq[Option[R]]] = 35 | send(Hmget(key, fields)) 36 | 37 | def hmset[V: ByteStringSerializer](key: String, keysValues: Map[String, V]): Future[Boolean] = 38 | send(Hmset(key, keysValues)) 39 | 40 | def hset[V: ByteStringSerializer](key: String, field: String, value: V): Future[Boolean] = 41 | send(Hset(key, field, value)) 42 | 43 | def hsetnx[V: ByteStringSerializer](key: String, field: String, value: V): Future[Boolean] = 44 | send(Hsetnx(key, field, value)) 45 | 46 | def hvals[R: ByteStringDeserializer](key: String): Future[Seq[R]] = 47 | send(Hvals(key)) 48 | 49 | def hscan[R: ByteStringDeserializer](key: String, cursor: Int = 0, count: Option[Int] = None, matchGlob: Option[String] = None): Future[Cursor[Map[String, R]]] = 50 | send(HScan(key, cursor, count, matchGlob)) 51 | 52 | } 53 | -------------------------------------------------------------------------------- /src/main/scala/redis/commands/Server.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis.Request 4 | import redis.api.servers._ 5 | import scala.concurrent.Future 6 | import redis.api.ShutdownModifier 7 | 8 | trait Server extends Request { 9 | def bgrewriteaof(): Future[String] = send(Bgrewriteaof) 10 | 11 | def bgsave(): Future[String] = send(Bgsave) 12 | 13 | def clientKill(ip: String, port: Int): Future[Boolean] = 14 | send(ClientKill(ip, port)) 15 | 16 | def clientList(): Future[Seq[Map[String, String]]] = 17 | send(ClientList) 18 | 19 | def clientGetname(): Future[Option[String]] = 20 | send(ClientGetname) 21 | 22 | def clientSetname(connectionName: String): Future[Boolean] = 23 | send(ClientSetname(connectionName)) 24 | 25 | def configGet(parameter: String): Future[Map[String, String]] = 26 | send(ConfigGet(parameter)) 27 | 28 | def configSet(parameter: String, value: String): Future[Boolean] = 29 | send(ConfigSet(parameter, value)) 30 | 31 | def configResetstat(): Future[Boolean] = 32 | send(ConfigResetstat) 33 | 34 | def dbsize(): Future[Long] = 35 | send(Dbsize) 36 | 37 | def debugObject(key: String): Future[String] = 38 | send(DebugObject(key)) 39 | 40 | def debugSegfault(): Future[String] = 41 | send(DebugSegfault) 42 | 43 | def flushall(): Future[Boolean] = 44 | send(Flushall) 45 | 46 | def flushdb(): Future[Boolean] = 47 | send(Flushdb) 48 | 49 | def info(): Future[String] = 50 | send(Info()) 51 | 52 | def info(section: String): Future[String] = 53 | send(Info(Some(section))) 54 | 55 | def lastsave(): Future[Long] = 56 | send(Lastsave) 57 | 58 | def save(): Future[Boolean] = 59 | send(Save) 60 | 61 | def shutdown(): Future[Boolean] = 62 | send(Shutdown()) 63 | 64 | def shutdown(modifier: ShutdownModifier): Future[Boolean] = 65 | send(Shutdown(Some(modifier))) 66 | 67 | def slaveof(host: String, port: Int): Future[Boolean] = 68 | send(Slaveof(host, port)) 69 | 70 | def slaveofNoOne(): Future[Boolean] = send(SlaveofNoOne) 71 | 72 | def time(): Future[(Long, Long)] = 73 | send(Time) 74 | } 
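The Hashes and Server traits above follow the library's common pattern: each method encodes a single Redis command and returns a Future decoded to a Scala type. A brief usage sketch, again editorial rather than a repository file, assuming RedisClient aggregates these traits and an implicit ActorSystem is in scope:

import akka.actor.ActorSystem
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import redis.RedisClient

object HashesExample extends App {
  implicit val system: ActorSystem = ActorSystem("hashes-example")
  val client = RedisClient() // assumed to mix in the Hashes and Server command traits

  val result = for {
    _      <- client.hmset("user:1", Map("name" -> "alice", "age" -> "30")) // HMSET (Hashes trait)
    name   <- client.hget[String]("user:1", "name")                         // HGET -> Some("alice")
    fields <- client.hgetall[String]("user:1")                              // HGETALL -> Map(name -> alice, age -> 30)
    size   <- client.dbsize()                                               // DBSIZE (Server trait)
  } yield (name, fields, size)

  println(Await.result(result, 5.seconds))
  client.stop()
  system.terminate()
}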
-------------------------------------------------------------------------------- /src/test/scala/redis/protocol/ParseNumberSpec.scala: -------------------------------------------------------------------------------- 1 | package redis.protocol 2 | 3 | import akka.util.ByteString 4 | import redis.TestBase 5 | 6 | class ParseNumberSpec extends TestBase { 7 | 8 | 9 | "ParseNumber.parseInt" should { 10 | "ok" in { 11 | ParseNumber.parseInt(ByteString("0")) shouldBe 0 12 | ParseNumber.parseInt(ByteString("10")) shouldBe 10 13 | ParseNumber.parseInt(ByteString("-10")) shouldBe -10 14 | ParseNumber.parseInt(ByteString("-123456")) shouldBe -123456 15 | ParseNumber.parseInt(ByteString("1234567890")) shouldBe 1234567890 16 | ParseNumber.parseInt(ByteString("-1234567890")) shouldBe -1234567890 17 | } 18 | 19 | "null" in { 20 | a[NumberFormatException] should be thrownBy ParseNumber.parseInt(null) 21 | } 22 | 23 | "lone \"+\" or \"-\"" in { 24 | a[NumberFormatException] should be thrownBy ParseNumber.parseInt(ByteString("+")) 25 | a[NumberFormatException] should be thrownBy ParseNumber.parseInt(ByteString("-")) 26 | } 27 | 28 | "invalid first char" in { 29 | a[NumberFormatException] should be thrownBy ParseNumber.parseInt(ByteString("$")) 30 | } 31 | 32 | "empty" in { 33 | a[NumberFormatException] should be thrownBy ParseNumber.parseInt(ByteString.empty) 34 | } 35 | 36 | "invalid char" in { 37 | a[NumberFormatException] should be thrownBy ParseNumber.parseInt(ByteString("?")) 38 | } 39 | 40 | "limit min" in { 41 | val l1 : Long = java.lang.Integer.MIN_VALUE 42 | val l = l1 - 1 43 | a[NumberFormatException] should be thrownBy ParseNumber.parseInt(ByteString(l.toString)) 44 | } 45 | 46 | "limit max" in { 47 | val l1 : Long = java.lang.Integer.MAX_VALUE 48 | val l = l1 + 1 49 | a[NumberFormatException] should be thrownBy ParseNumber.parseInt(ByteString(l.toString)) 50 | } 51 | 52 | "not a number" in { 53 | a[NumberFormatException] should be thrownBy ParseNumber.parseInt(ByteString("not a number")) 54 | } 55 | 56 | "launch exception before integer overflow" in { 57 | a[NumberFormatException] should be thrownBy ParseNumber.parseInt(ByteString("-2147483650")) 58 | } 59 | 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/main/scala/redis/Request.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import redis.protocol.RedisReply 4 | import scala.concurrent.{ExecutionContext, Promise, Future} 5 | import scala.collection.immutable.Queue 6 | import akka.actor.ActorRef 7 | import java.util.concurrent.atomic.AtomicInteger 8 | 9 | 10 | trait Request { 11 | implicit val executionContext: ExecutionContext 12 | 13 | def send[T](redisCommand: RedisCommand[_ <: RedisReply, T]): Future[T] 14 | } 15 | 16 | trait ActorRequest { 17 | implicit val executionContext: ExecutionContext 18 | 19 | def redisConnection: ActorRef 20 | 21 | def send[T](redisCommand: RedisCommand[_ <: RedisReply, T]): Future[T] = { 22 | val promise = Promise[T]() 23 | redisConnection ! 
Operation(redisCommand, promise) 24 | promise.future 25 | } 26 | } 27 | 28 | trait BufferedRequest { 29 | implicit val executionContext: ExecutionContext 30 | 31 | val operations = Queue.newBuilder[Operation[_, _]] 32 | 33 | def send[T](redisCommand: RedisCommand[_ <: RedisReply, T]): Future[T] = { 34 | val promise = Promise[T]() 35 | operations += Operation(redisCommand, promise) 36 | promise.future 37 | } 38 | } 39 | 40 | 41 | trait RoundRobinPoolRequest { 42 | implicit val executionContext: ExecutionContext 43 | 44 | def redisConnectionPool: Seq[ActorRef] 45 | 46 | val next = new AtomicInteger(0) 47 | 48 | def getNextConnection: Option[ActorRef] = { 49 | val size = redisConnectionPool.size 50 | if (size == 0) { 51 | None 52 | } else { 53 | val index = next.getAndIncrement % size 54 | Some(redisConnectionPool(if (index < 0) size + index - 1 else index)) 55 | } 56 | } 57 | 58 | protected def send[T](redisConnection: ActorRef, redisCommand: RedisCommand[_ <: RedisReply, T]): Future[T] = { 59 | val promise = Promise[T]() 60 | redisConnection ! Operation(redisCommand, promise) 61 | promise.future 62 | } 63 | 64 | def send[T](redisCommand: RedisCommand[_ <: RedisReply, T]): Future[T] = { 65 | getNextConnection.fold( 66 | Future.failed[T](new RuntimeException("redis pool is empty")) 67 | ) { redisConnection => 68 | send(redisConnection, redisCommand) 69 | } 70 | } 71 | 72 | } 73 | -------------------------------------------------------------------------------- /src/test/scala/redis/RedisPoolSpec.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import redis.api.connection.Select 4 | import scala.concurrent._ 5 | 6 | class RedisPoolSpec extends RedisStandaloneServer { 7 | 8 | "basic pool test" should { 9 | "ok" in { 10 | val redisPool = new RedisClientPool( 11 | Seq( 12 | RedisServer(port = port, db = Some(0)), 13 | RedisServer(port = port, db = Some(1)), 14 | RedisServer(port = port, db = Some(3)))) 15 | val key = "keyPoolDb0" 16 | redisPool.set(key, 0) 17 | val r = for { 18 | getDb1 <- redisPool.get(key) 19 | getDb2 <- redisPool.get(key) 20 | getDb0 <- redisPool.get[String](key) 21 | select <- Future.sequence(redisPool.broadcast(Select(0))) 22 | getKey1 <- redisPool.get[String](key) 23 | getKey2 <- redisPool.get[String](key) 24 | getKey0 <- redisPool.get[String](key) 25 | } yield { 26 | getDb1 shouldBe empty 27 | getDb2 shouldBe empty 28 | getDb0 shouldBe Some("0") 29 | select shouldBe Seq(true, true, true) 30 | getKey1 shouldBe Some("0") 31 | getKey2 shouldBe Some("0") 32 | getKey0 shouldBe Some("0") 33 | } 34 | r.futureValue 35 | } 36 | 37 | "check status" in { 38 | val redisPool = new RedisClientPool( 39 | Seq( 40 | RedisServer(port = port, db = Some(0)), 41 | RedisServer(port = port, db = Some(1)), 42 | RedisServer(port = 3333, db = Some(3)))) 43 | val key = "keyPoolDb0" 44 | 45 | eventually { 46 | redisPool.redisConnectionPool.size shouldBe 2 47 | } 48 | redisPool.set(key, 0) 49 | val r = for { 50 | getDb1 <- redisPool.get(key) 51 | getDb0 <- redisPool.get[String](key) 52 | select <- Future.sequence(redisPool.broadcast(Select(0))) 53 | getKey1 <- redisPool.get[String](key) 54 | getKey0 <- redisPool.get[String](key) 55 | } yield { 56 | getDb1 shouldBe empty 57 | getDb0 shouldBe Some("0") 58 | select shouldBe Seq(true, true) 59 | getKey1 shouldBe Some("0") 60 | getKey0 shouldBe Some("0") 61 | } 62 | r.futureValue 63 | 64 | } 65 | } 66 | } 67 | -------------------------------------------------------------------------------- 
/src/test/scala/redis/commands/TransactionsSpec.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis._ 4 | 5 | import akka.util.ByteString 6 | import redis.actors.ReplyErrorException 7 | import redis.protocol.{Bulk, Status, MultiBulk} 8 | 9 | class TransactionsSpec extends RedisStandaloneServer { 10 | 11 | "Transactions commands" should { 12 | "basic" in { 13 | val redisTransaction = redis.transaction() 14 | redisTransaction.exec() 15 | redisTransaction.watch("a") 16 | val set = redisTransaction.set("a", "abc") 17 | val decr = redisTransaction.decr("a") 18 | val get = redisTransaction.get("a") 19 | redisTransaction.exec() 20 | val r = for { 21 | s <- set 22 | g <- get 23 | } yield { 24 | s shouldBe true 25 | g shouldBe Some(ByteString("abc")) 26 | } 27 | decr.failed.futureValue shouldBe a[ReplyErrorException] 28 | r.futureValue 29 | } 30 | 31 | "function api" in { 32 | withClue("empty") { 33 | val empty = redis.multi().exec() 34 | empty.futureValue shouldBe MultiBulk(Some(Vector())) 35 | } 36 | val redisTransaction = redis.multi(redis => { 37 | redis.set("a", "abc") 38 | redis.get("a") 39 | }) 40 | withClue("non empty") { 41 | val exec = redisTransaction.exec() 42 | exec.futureValue shouldBe MultiBulk(Some(Vector(Status(ByteString("OK")), Bulk(Some(ByteString("abc")))))) 43 | } 44 | withClue("reused") { 45 | redisTransaction.get("transactionUndefinedKey") 46 | val exec = redisTransaction.exec() 47 | exec.futureValue shouldBe MultiBulk(Some(Vector(Status(ByteString("OK")), Bulk(Some(ByteString("abc"))), Bulk(None)))) 48 | } 49 | withClue("watch") { 50 | val transaction = redis.watch("transactionWatchKey") 51 | transaction.watcher.result() shouldBe Set("transactionWatchKey") 52 | transaction.unwatch() 53 | transaction.watcher.result() shouldBe empty 54 | val set = transaction.set("transactionWatch", "value") 55 | transaction.exec() 56 | val r = for { 57 | s <- set 58 | } yield { 59 | s shouldBe true 60 | } 61 | r.futureValue 62 | } 63 | } 64 | 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/main/scala/redis/commands/Sets.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis.{Cursor, ByteStringDeserializer, ByteStringSerializer, Request} 4 | import scala.concurrent.Future 5 | import redis.api.sets._ 6 | 7 | trait Sets extends Request { 8 | 9 | def sadd[V: ByteStringSerializer](key: String, members: V*): Future[Long] = 10 | send(Sadd(key, members)) 11 | 12 | def scard(key: String): Future[Long] = 13 | send(Scard(key)) 14 | 15 | def sdiff[R: ByteStringDeserializer](key: String, keys: String*): Future[Seq[R]] = 16 | send(Sdiff(key, keys)) 17 | 18 | def sdiffstore(destination: String, key: String, keys: String*): Future[Long] = 19 | send(Sdiffstore(destination, key, keys)) 20 | 21 | def sinter[R: ByteStringDeserializer](key: String, keys: String*): Future[Seq[R]] = 22 | send(Sinter(key, keys)) 23 | 24 | def sinterstore(destination: String, key: String, keys: String*): Future[Long] = 25 | send(Sinterstore(destination, key, keys)) 26 | 27 | def sismember[V: ByteStringSerializer](key: String, member: V): Future[Boolean] = 28 | send(Sismember(key, member)) 29 | 30 | def smembers[R: ByteStringDeserializer](key: String): Future[Seq[R]] = 31 | send(Smembers(key)) 32 | 33 | def smove[V: ByteStringSerializer](source: String, destination: String, member: V): Future[Boolean] = 34 | 
send(Smove(source, destination, member)) 35 | 36 | def spop[R: ByteStringDeserializer](key: String): Future[Option[R]] = 37 | send(Spop(key)) 38 | 39 | def srandmember[R: ByteStringDeserializer](key: String): Future[Option[R]] = 40 | send(Srandmember(key)) 41 | 42 | def srandmember[R: ByteStringDeserializer](key: String, count: Long): Future[Seq[R]] = 43 | send(Srandmembers(key, count)) 44 | 45 | def srem[V: ByteStringSerializer](key: String, members: V*): Future[Long] = 46 | send(Srem(key, members)) 47 | 48 | def sunion[R: ByteStringDeserializer](key: String, keys: String*): Future[Seq[R]] = 49 | send(Sunion(key, keys)) 50 | 51 | def sunionstore(destination: String, key: String, keys: String*): Future[Long] = 52 | send(Sunionstore(destination, key, keys)) 53 | 54 | def sscan[R: ByteStringDeserializer](key: String, cursor: Int = 0, count: Option[Int] = None, matchGlob: Option[String] = None): Future[Cursor[Seq[R]]] = 55 | send(Sscan(key, cursor, count, matchGlob)) 56 | } 57 | -------------------------------------------------------------------------------- /src/bench/src/test/scala/rediscala/benchmark/ByteStringBench.scala: -------------------------------------------------------------------------------- 1 | package rediscala.benchmark 2 | 3 | import org.scalameter.api._ 4 | import akka.util.ByteString 5 | import redis.protocol.ParseNumber 6 | 7 | object ByteStringBench extends Bench.ForkedTime { 8 | 9 | val sizes = Gen.range("size")(200000, 800000, 100000) 10 | 11 | val ranges = for { 12 | size <- sizes 13 | } yield 0 until size 14 | 15 | performance of "String to Int" in { 16 | measure method "parseInt" in { 17 | using(ranges) in { 18 | i => 19 | for { 20 | ii <- i 21 | } yield { 22 | val a = Integer.parseInt(StringToIntData.data(ii % StringToIntData.data.size).utf8String) 23 | a 24 | } 25 | } 26 | } 27 | 28 | measure method "valueOf" in { 29 | using(ranges) in { 30 | i => 31 | for { 32 | ii <- i 33 | } yield { 34 | Integer.valueOf(StringToIntData.data(ii % StringToIntData.data.size).utf8String) 35 | } 36 | } 37 | } 38 | 39 | measure method "faster" in { 40 | using(ranges) in { 41 | i => 42 | for { 43 | ii <- i 44 | } yield { 45 | ParseNumber.parseInt(StringToIntData.data(ii % StringToIntData.data.size)) 46 | } 47 | } 48 | } 49 | } 50 | 51 | performance of "ByteString() == 'OK'" in { 52 | measure method "utf8String" in { 53 | using(ranges) in { 54 | i => 55 | for { 56 | ii <- i 57 | } yield { 58 | DataEquality.data(ii % DataEquality.data.size).utf8String == "OK" 59 | } 60 | } 61 | } 62 | 63 | measure method "ByteString == ByteString(\"OK\")" in { 64 | using(ranges) in { 65 | i => 66 | for { 67 | ii <- i 68 | } yield { 69 | DataEquality.data(ii % DataEquality.data.size) == DataEquality.ok 70 | } 71 | } 72 | } 73 | } 74 | } 75 | 76 | object DataEquality { 77 | val data = Seq(ByteString("OK"), ByteString("Not ok"), ByteString("")) 78 | val ok = ByteString("OK") 79 | } 80 | 81 | object StringToIntData { 82 | //val data: Seq[String] = (0 to 1000).toSeq.map(i => i.toString) 83 | val data: Seq[ByteString] = (0 to 1000).toSeq.map(i => ByteString(i.toString)) 84 | } -------------------------------------------------------------------------------- /src/main/scala/redis/commands/Lists.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis.{ByteStringDeserializer, ByteStringSerializer, Request} 4 | import scala.concurrent.Future 5 | import redis.api.lists._ 6 | import redis.api.{AFTER, BEFORE, ListPivot} 7 | 8 | trait 
Lists extends Request { 9 | 10 | def lindex[R: ByteStringDeserializer](key: String, index: Long): Future[Option[R]] = 11 | send(Lindex(key, index)) 12 | 13 | def linsertAfter[V: ByteStringSerializer](key: String, pivot: String, value: V): Future[Long] = 14 | linsert(key, AFTER, pivot, value) 15 | 16 | def linsertBefore[V: ByteStringSerializer](key: String, pivot: String, value: V): Future[Long] = 17 | linsert(key, BEFORE, pivot, value) 18 | 19 | def linsert[V: ByteStringSerializer](key: String, beforeAfter: ListPivot, pivot: String, value: V): Future[Long] = 20 | send(Linsert(key, beforeAfter, pivot, value)) 21 | 22 | def llen(key: String): Future[Long] = 23 | send(Llen(key)) 24 | 25 | def lpop[R: ByteStringDeserializer](key: String): Future[Option[R]] = 26 | send(Lpop(key)) 27 | 28 | def lpush[V: ByteStringSerializer](key: String, values: V*): Future[Long] = 29 | send(Lpush(key, values)) 30 | 31 | def lpushx[V: ByteStringSerializer](key: String, value: V): Future[Long] = 32 | send(Lpushx(key, value)) 33 | 34 | def lrange[R: ByteStringDeserializer](key: String, start: Long, stop: Long): Future[Seq[R]] = 35 | send(Lrange(key, start, stop)) 36 | 37 | def lrem[V: ByteStringSerializer](key: String, count: Long, value: V): Future[Long] = 38 | send(Lrem(key, count, value)) 39 | 40 | def lset[V: ByteStringSerializer](key: String, index: Long, value: V): Future[Boolean] = 41 | send(Lset(key, index, value)) 42 | 43 | def ltrim(key: String, start: Long, stop: Long): Future[Boolean] = 44 | send(Ltrim(key, start, stop)) 45 | 46 | def rpop[R: ByteStringDeserializer](key: String): Future[Option[R]] = 47 | send(Rpop(key)) 48 | 49 | def rpoplpush[R: ByteStringDeserializer](source: String, destination: String): Future[Option[R]] = 50 | send(Rpoplpush(source, destination)) 51 | 52 | def rpush[V: ByteStringSerializer](key: String, values: V*): Future[Long] = 53 | send(Rpush(key, values)) 54 | 55 | def rpushx[V: ByteStringSerializer](key: String, value: V): Future[Long] = 56 | send(Rpushx(key, value)) 57 | 58 | } -------------------------------------------------------------------------------- /src/main/scala/redis/protocol/RedisProtocolRequest.scala: -------------------------------------------------------------------------------- 1 | package redis.protocol 2 | 3 | import java.lang.System.arraycopy 4 | import java.nio.charset.Charset 5 | 6 | import akka.util.ByteString 7 | 8 | import scala.compat.Platform._ 9 | 10 | object RedisProtocolRequest { 11 | val UTF8_CHARSET = Charset.forName("UTF-8") 12 | val LS_STRING = "\r\n" 13 | val LS = LS_STRING.getBytes(UTF8_CHARSET) 14 | 15 | def multiBulk(command: String, args: Seq[ByteString]): ByteString = { 16 | val argsSizeString = (args.size + 1).toString 17 | var length: Int = 1 + argsSizeString.length + LS.length 18 | 19 | val cmdLenghtString = command.length.toString 20 | 21 | length += 1 + cmdLenghtString.length + LS.length + command.length + LS.length 22 | 23 | args.foreach(arg => { 24 | val argLengthString = arg.length.toString 25 | length += 1 + argLengthString.length + LS.length + arg.length + LS.length 26 | }) 27 | 28 | val bytes: Array[Byte] = new Array(length) 29 | var i: Int = 0 30 | bytes.update(i, '*') 31 | i += 1 32 | arraycopy(argsSizeString.getBytes(UTF8_CHARSET), 0, bytes, i, argsSizeString.length) 33 | i += argsSizeString.length 34 | arraycopy(LS, 0, bytes, i, LS.length) 35 | i += LS.length 36 | 37 | bytes.update(i, '$') 38 | i += 1 39 | arraycopy(cmdLenghtString.getBytes(UTF8_CHARSET), 0, bytes, i, cmdLenghtString.length) 40 | i += 
cmdLenghtString.length 41 | arraycopy(LS, 0, bytes, i, LS.length) 42 | i += LS.length 43 | arraycopy(command.getBytes(UTF8_CHARSET), 0, bytes, i, command.length) 44 | i += command.length 45 | arraycopy(LS, 0, bytes, i, LS.length) 46 | i += LS.length 47 | 48 | args.foreach(arg => { 49 | bytes.update(i, '$') 50 | i += 1 51 | 52 | val argLengthString = arg.length.toString 53 | arraycopy(argLengthString.getBytes(UTF8_CHARSET), 0, bytes, i, argLengthString.length) 54 | i += argLengthString.length 55 | arraycopy(LS, 0, bytes, i, LS.length) 56 | i += LS.length 57 | 58 | val argArray = arg.toArray 59 | arraycopy(argArray, 0, bytes, i, argArray.length) 60 | i += argArray.length 61 | 62 | arraycopy(LS, 0, bytes, i, LS.length) 63 | i += LS.length 64 | }) 65 | ByteString(bytes) 66 | } 67 | 68 | def inline(command: String): ByteString = ByteString(command + LS_STRING) 69 | } 70 | -------------------------------------------------------------------------------- /src/test/scala/redis/ConverterSpec.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import akka.util.ByteString 4 | import redis.protocol.{Bulk, RedisReply} 5 | 6 | case class DumbClass(s1: String, s2: String) 7 | 8 | object DumbClass { 9 | implicit val byteStringFormatter = new ByteStringFormatter[DumbClass] { 10 | def serialize(data: DumbClass): ByteString = { 11 | ByteString(data.s1 + "|" + data.s2) 12 | } 13 | 14 | def deserialize(bs: ByteString): DumbClass = { 15 | val r = bs.utf8String.split('|').toList 16 | DumbClass(r(0), r(1)) 17 | } 18 | } 19 | 20 | implicit val redisReplyDeserializer = new RedisReplyDeserializer[DumbClass] { 21 | override def deserialize: PartialFunction[RedisReply, DumbClass] = { 22 | case Bulk(Some(bs)) => byteStringFormatter.deserialize(bs) 23 | } 24 | } 25 | } 26 | 27 | class ConverterSpec extends TestBase { 28 | 29 | import redis.ByteStringSerializer._ 30 | 31 | "ByteStringSerializer" should { 32 | "String" in { 33 | String.serialize("super string !") shouldBe ByteString("super string !") 34 | } 35 | 36 | "Short" in { 37 | ShortConverter.serialize(123) shouldBe ByteString("123") 38 | } 39 | 40 | "Int" in { 41 | IntConverter.serialize(123) shouldBe ByteString("123") 42 | } 43 | 44 | "Long" in { 45 | LongConverter.serialize(123) shouldBe ByteString("123") 46 | } 47 | 48 | "Float" in { 49 | FloatConverter.serialize(123.123f) shouldBe ByteString("123.123") 50 | } 51 | 52 | "Double" in { 53 | DoubleConverter.serialize(123.123456) shouldBe ByteString("123.123456") 54 | } 55 | 56 | "Char" in { 57 | CharConverter.serialize('a') shouldBe ByteString('a') 58 | } 59 | 60 | "Byte" in { 61 | ByteConverter.serialize(123) shouldBe ByteString(123) 62 | } 63 | 64 | "ArrayByte" in { 65 | ArrayByteConverter.serialize(Array[Byte](1, 2, 3)) shouldBe ByteString(Array[Byte](1, 2, 3)) 66 | } 67 | 68 | "ByteString" in { 69 | ByteStringConverter.serialize(ByteString("stupid")) shouldBe ByteString("stupid") 70 | } 71 | } 72 | 73 | "ByteStringFormatter" should { 74 | "DumbClass" in { 75 | val dumb = DumbClass("aa", "bb") 76 | 77 | val formatter = implicitly[ByteStringFormatter[DumbClass]] 78 | 79 | formatter.serialize(dumb) shouldBe ByteString("aa|bb") 80 | formatter.deserialize(ByteString("aa|bb")) shouldBe dumb 81 | } 82 | } 83 | 84 | } 85 | -------------------------------------------------------------------------------- /src/test/scala/redis/commands/GeoSpec.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 
3 | import redis._ 4 | import redis.api.geo.DistUnits._ 5 | 6 | 7 | class GeoSpec extends RedisStandaloneServer { 8 | 9 | val testKey = "Sicily" 10 | 11 | def addPlaces() = { 12 | redis.geoAdd(testKey, 13.361389, 38.115556, "Palermo").futureValue 13 | redis.geoAdd(testKey, 15.087269, 37.502669, "Catania").futureValue 14 | redis.geoAdd(testKey, 13.583333, 37.316667, "Agrigento").futureValue 15 | 16 | } 17 | 18 | "Geo commands " should { 19 | 20 | "GEOADD add key member" in { 21 | val res = redis.geoAdd(testKey, 23.361389, 48.115556, "SomePlace").futureValue 22 | res shouldEqual 1 23 | } 24 | 25 | "GEORADIUS" in { 26 | addPlaces() 27 | redis.geoRadius(testKey, 15, 37, 200, Kilometer).futureValue shouldEqual Vector("Agrigento", "Catania") 28 | } 29 | 30 | "GEORADIUS By Member" in { 31 | addPlaces() 32 | redis.geoRadiusByMember(testKey, "Catania", 500, Kilometer).futureValue shouldEqual Vector( 33 | "Agrigento", 34 | "Palermo", 35 | "Catania") 36 | } 37 | 38 | "GEORADIUS By Member with opt" in { 39 | addPlaces() 40 | val res = redis.geoRadiusByMemberWithOpt(testKey, "Agrigento", 100, Kilometer).futureValue 41 | res shouldEqual Vector("Agrigento", "0.0000", "Palermo", "89.8694") 42 | } 43 | 44 | "GEODIST with default unit" in { 45 | addPlaces() 46 | val res = redis.geoDist(testKey, "Palermo", "Catania").futureValue 47 | res shouldEqual 203017.1901 48 | } 49 | 50 | "GEODIST in km" in { 51 | addPlaces() 52 | val res = redis.geoDist(testKey, "Palermo", "Catania", Kilometer).futureValue 53 | res shouldEqual 203.0172 54 | } 55 | 56 | "GEODIST in mile" in { 57 | addPlaces() 58 | val res = redis.geoDist(testKey, "Palermo", "Catania", Mile).futureValue 59 | res shouldEqual 126.1493 60 | } 61 | 62 | "GEODIST in feet" in { 63 | addPlaces() 64 | val res = redis.geoDist(testKey, "Palermo", "Catania", Feet).futureValue 65 | res shouldEqual 666066.8965 66 | } 67 | 68 | "GEOHASH " in { 69 | addPlaces() 70 | val res = redis.geoHash(testKey, "Palermo", "Catania").futureValue 71 | res shouldEqual Vector("sfdtv6s9ew0", "sf7h526gsz0") 72 | } 73 | 74 | "GEOPOS " in { 75 | addPlaces() 76 | val res = redis.geoPos(testKey, "Palermo", "Catania").futureValue 77 | res should not be empty 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/bench/src/test/scala/rediscala/benchmark/RedisBenchProtocol.scala: -------------------------------------------------------------------------------- 1 | package rediscala.benchmark 2 | 3 | import redis.protocol.RedisProtocolRequest 4 | import akka.util.ByteString 5 | import org.scalameter.api._ 6 | 7 | object RedisBenchProtocol extends Bench.ForkedTime { 8 | 9 | override def reporter = Reporter.Composite( 10 | new RegressionReporter( 11 | RegressionReporter.Tester.Accepter(), 12 | RegressionReporter.Historian.Complete()), 13 | HtmlReporter(embedDsv = true) 14 | ) 15 | 16 | override def persistor = new SerializationPersistor() 17 | 18 | val sizes = Gen.range("size")(20000, 80000, 10000) 19 | 20 | val ranges = for { 21 | size <- sizes 22 | } yield 0 until size 23 | 24 | 25 | performance of "Protocol request encode" in { 26 | val argsBulk = Seq(ByteString("i"), ByteString("abc"), ByteString("iksjdlkgdfgjfdgjdfkgjjqsdqlksdqklsjdqljsdqkjsd")) 27 | 28 | measure method "multiBulk (slow)" in { 29 | using(ranges) in { 30 | i => 31 | for { 32 | ii <- i 33 | } yield { 34 | RedisProtocolRequestSlow.multiBulkSlow("INCR", argsBulk) 35 | } 36 | } 37 | } 38 | 39 | measure method "multiBulk2" in { 40 | using(ranges) in { 41 | i => 42 | for { 43 
| ii <- i 44 | } yield { 45 | RedisProtocolRequest.multiBulk("INCR", argsBulk) 46 | } 47 | } 48 | } 49 | 50 | measure method "inline" in { 51 | using(ranges) in { 52 | i => 53 | for { 54 | ii <- i 55 | } yield { 56 | RedisProtocolRequest.inline("PING") 57 | } 58 | } 59 | } 60 | //*/ 61 | } 62 | } 63 | 64 | object RedisProtocolRequestSlow { 65 | 66 | import RedisProtocolRequest._ 67 | 68 | /** 69 | * 25% slower 70 | * @param command 71 | * @param args 72 | * @return 73 | */ 74 | def multiBulkSlow(command: String, args: Seq[ByteString]): ByteString = { 75 | val requestBuilder = ByteString.newBuilder 76 | requestBuilder.putByte('*') 77 | requestBuilder.putBytes((args.size + 1).toString.getBytes(UTF8_CHARSET)) 78 | requestBuilder.putBytes(LS) 79 | 80 | requestBuilder.putByte('$') 81 | requestBuilder.putBytes(command.length.toString.getBytes(UTF8_CHARSET)) 82 | requestBuilder.putBytes(LS) 83 | requestBuilder.putBytes(command.getBytes(UTF8_CHARSET)) 84 | requestBuilder.putBytes(LS) 85 | 86 | args.foreach(arg => { 87 | requestBuilder.putByte('$') 88 | requestBuilder.putBytes(arg.length.toString.getBytes(UTF8_CHARSET)) 89 | requestBuilder.putBytes(LS) 90 | requestBuilder ++= arg 91 | requestBuilder.putBytes(LS) 92 | }) 93 | 94 | requestBuilder.result() 95 | } 96 | } -------------------------------------------------------------------------------- /src/test/scala/redis/SentinelSpec.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | class SentinelSpec extends RedisSentinelClients("SentinelSpec") { 4 | 5 | "sentinel monitored test" should { 6 | 7 | "master auto failover" in { 8 | val port = sentinelMonitoredRedisClient.redisClient.port 9 | 10 | sentinelMonitoredRedisClient.ping().futureValue shouldBe "PONG" 11 | sentinelClient.failover(masterName).futureValue shouldBe true 12 | 13 | sentinelMonitoredRedisClient.ping().futureValue shouldBe "PONG" 14 | Seq(slavePort1, slavePort2, port) should contain(sentinelMonitoredRedisClient.redisClient.port) 15 | 16 | sentinelMonitoredRedisClient.ping().futureValue shouldBe "PONG" 17 | Seq(slavePort1, slavePort2, masterPort, port) should contain(sentinelMonitoredRedisClient.redisClient.port) 18 | } 19 | 20 | "ping" in { 21 | sentinelMonitoredRedisClient.ping().futureValue shouldBe "PONG" 22 | redisClient.ping().futureValue shouldBe "PONG" 23 | } 24 | 25 | "sentinel nodes auto discovery" in { 26 | val sentinelCount = sentinelMonitoredRedisClient.sentinelClients.size 27 | val sentinel = newSentinelProcess() 28 | 29 | eventually { 30 | sentinelMonitoredRedisClient.sentinelClients.size shouldBe sentinelCount + 1 31 | } 32 | sentinel.stop() 33 | eventually { 34 | sentinelMonitoredRedisClient.sentinelClients.size shouldBe sentinelCount 35 | } 36 | } 37 | } 38 | 39 | "sentinel test" should { 40 | "masters" in { 41 | val r = sentinelClient.masters().futureValue 42 | r(0)("name") shouldBe masterName 43 | r(0)("flags").startsWith("master") shouldBe true 44 | } 45 | "no such master" in { 46 | val opt = sentinelClient.getMasterAddr("no-such-master").futureValue 47 | withClue(s"unexpected: master with name '$masterName' was not supposed to be found") { 48 | opt shouldBe empty 49 | } 50 | } 51 | "unknown master state" in { 52 | val opt = sentinelClient.isMasterDown("no-such-master").futureValue 53 | withClue("unexpected: master state should be unknown") { opt shouldBe empty } 54 | } 55 | "master ok" in { 56 | withClue(s"unexpected: master with name '$masterName' was not found") { 57 | 
sentinelClient.isMasterDown(masterName).futureValue shouldBe Some(false) 58 | } 59 | } 60 | "slaves" in { 61 | val r = sentinelClient.slaves(masterName).futureValue 62 | r should not be empty 63 | r(0)("flags").startsWith("slave") shouldBe true 64 | } 65 | "reset bogus master" in { 66 | sentinelClient.resetMaster("no-such-master").futureValue shouldBe false 67 | } 68 | "reset master" in { 69 | sentinelClient.resetMaster(masterName).futureValue shouldBe true 70 | } 71 | } 72 | 73 | } 74 | -------------------------------------------------------------------------------- /src/main/scala/redis/util/CRC16.java: -------------------------------------------------------------------------------- 1 | package redis.util; 2 | 3 | /** 4 | * Created by npeters on 23/05/16. 5 | */ 6 | public class CRC16 { 7 | 8 | 9 | static int crc16tab[] = { 10 | 0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7, 11 | 0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef, 12 | 0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6, 13 | 0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de, 14 | 0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485, 15 | 0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d, 16 | 0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4, 17 | 0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc, 18 | 0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823, 19 | 0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b, 20 | 0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12, 21 | 0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a, 22 | 0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41, 23 | 0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49, 24 | 0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70, 25 | 0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78, 26 | 0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f, 27 | 0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067, 28 | 0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e, 29 | 0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256, 30 | 0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d, 31 | 0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405, 32 | 0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c, 33 | 0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634, 34 | 0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab, 35 | 0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3, 36 | 0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a, 37 | 0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92, 38 | 0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9, 39 | 0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1, 40 | 0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8, 41 | 0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0 42 | }; 43 | 44 | public static int crc16(String buf) { 45 | int counter; 46 | int crc = 0; 47 | for (counter = 0; counter < buf.length(); counter++) { 48 | int crcUnsign = (crc << 8) & 0xFFFF; 49 | crc = crcUnsign ^ crc16tab[((crc >> 8) ^ (int) buf.charAt(counter)) & 0x00FF]; 50 | } 51 | return crc; 52 | } 53 | 54 | } 55 | -------------------------------------------------------------------------------- /src/test/scala/redis/RedisProcess.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import java.net.Socket 4 | 5 | import org.apache.logging.log4j.scala.Logger 6 | import redis.RedisServerHelper.{redisHost, redisServerCmd, redisServerLogLevel} 7 | 8 | import 
scala.reflect.io.File 9 | import scala.sys.process.{Process, ProcessLogger} 10 | import scala.util.control.NonFatal 11 | 12 | import RedisProcess._ 13 | 14 | object RedisProcess { 15 | //scala 2.11 doesn't have Process.isAlive 16 | implicit class ProcessExt(val self: Process) extends AnyVal { 17 | def isAlive(): Boolean = { true } 18 | } 19 | } 20 | 21 | class RedisProcess(val port: Int) { 22 | protected var maybeServer: Option[Process] = None 23 | protected val cmd = s"$redisServerCmd --port $port $redisServerLogLevel" 24 | protected val log = Logger(getClass) 25 | protected val processLogger = ProcessLogger(line => log.debug(line), line => log.error(line)) 26 | 27 | def start(): Unit = synchronized { 28 | log.debug(s"starting $this") 29 | maybeServer match { 30 | case None => maybeServer = Some(Process(cmd).run(processLogger)) 31 | case Some(_) => log.warn(s"$this already started") 32 | } 33 | } 34 | 35 | def stop(): Unit = synchronized { 36 | log.debug(s"stopping $this") 37 | maybeServer match { 38 | case Some(s) => 39 | if (s.isAlive()) { 40 | try { 41 | val out = new Socket(redisHost, port).getOutputStream 42 | out.write("SHUTDOWN NOSAVE\n".getBytes) 43 | out.flush() 44 | out.close() 45 | } catch { 46 | case NonFatal(e) => log.warn(s"couldn't stop $this", e) 47 | } finally { 48 | s.destroy() 49 | maybeServer = None 50 | } 51 | } else { 52 | log.info("Process was stopped externally") 53 | } 54 | case None => 55 | log.debug(s"$this already stopped") 56 | } 57 | } 58 | 59 | override def toString: String = s"RedisProcess($port)" 60 | } 61 | 62 | class SentinelProcess(masterName: String, masterPort: Int, port: Int) extends RedisProcess(port) { 63 | val sentinelConfPath = { 64 | val sentinelConf = 65 | s""" 66 | |sentinel monitor $masterName $redisHost $masterPort 2 67 | |sentinel down-after-milliseconds $masterName 5000 68 | |sentinel parallel-syncs $masterName 1 69 | |sentinel failover-timeout $masterName 10000 70 | """.stripMargin 71 | 72 | val sentinelConfFile = File.makeTemp("rediscala-sentinel", ".conf") 73 | sentinelConfFile.writeAll(sentinelConf) 74 | sentinelConfFile.path 75 | } 76 | 77 | override protected val cmd = s"$redisServerCmd $sentinelConfPath --port $port --sentinel $redisServerLogLevel" 78 | } 79 | 80 | class SlaveProcess(masterPort: Int, port: Int) extends RedisProcess(port) { 81 | override protected val cmd = s"$redisServerCmd --port $port --slaveof $redisHost $masterPort $redisServerLogLevel" 82 | } 83 | -------------------------------------------------------------------------------- /src/test/scala/redis/protocol/ParseParse.scala: -------------------------------------------------------------------------------- 1 | package redis.protocol 2 | 3 | import akka.util.ByteString 4 | import redis.TestBase 5 | 6 | class ParseParse extends TestBase { 7 | "parse" should { 8 | "integer" in { 9 | val int = ByteString("654\r\n") 10 | RedisProtocolReply.decodeInteger(int) shouldBe FullyDecoded(Integer(ByteString("654")), ByteString()) 11 | 12 | val (intStart, intEnd) = int.splitAt(int.length - 1) 13 | 14 | var result = RedisProtocolReply.decodeInteger(ByteString(intStart.head)) 15 | for { 16 | b <- intStart.tail 17 | } yield { 18 | result = result.run(ByteString(b)) 19 | result.isFullyDecoded shouldBe false 20 | } 21 | 22 | val decodeResult = result.run(intEnd) 23 | decodeResult.isFullyDecoded shouldBe true 24 | decodeResult shouldBe FullyDecoded(Integer(ByteString("654")), ByteString()) 25 | } 26 | 27 | "decodeBulk" in { 28 | val bulk = ByteString("6\r\nfoobar\r\n") 29 | 
RedisProtocolReply.decodeBulk(bulk) shouldBe FullyDecoded(Bulk(Some(ByteString("foobar"))), ByteString()) 30 | 31 | val (bulkStart, bulkEnd) = bulk.splitAt(bulk.length - 1) 32 | 33 | var result = RedisProtocolReply.decodeBulk(ByteString(bulkStart.head)) 34 | for { 35 | b <- bulkStart.tail 36 | } yield { 37 | result = result.run(ByteString(b)) 38 | result.isFullyDecoded shouldBe false 39 | } 40 | 41 | val decodeResult = result.run(bulkEnd) 42 | decodeResult.isFullyDecoded shouldBe true 43 | decodeResult shouldBe FullyDecoded(Bulk(Some(ByteString("foobar"))), ByteString()) 44 | } 45 | 46 | "multibulk" in { 47 | val multibulkString = ByteString("*4\r\n$3\r\nfoo\r\n$3\r\nbar\r\n$5\r\nHello\r\n$5\r\nWorld\r\n") 48 | 49 | val (multibulkStringStart, multibulkStringEnd) = multibulkString.splitAt(multibulkString.length - 1) 50 | 51 | var r3 = RedisProtocolReply.decodeReplyMultiBulk(ByteString(multibulkStringStart.head)) 52 | 53 | for { 54 | b <- multibulkStringStart.tail 55 | } yield { 56 | r3 = r3.run(ByteString(b)) 57 | r3.isFullyDecoded shouldBe false 58 | } 59 | 60 | val nextBS = ByteString("*2\r\n$3\r\none\r\n$3\r\ntwo\r\n") 61 | val result = r3.run(multibulkStringEnd ++ nextBS) 62 | result.isFullyDecoded shouldBe true 63 | 64 | val multibulk = Some(Vector(Bulk(Some(ByteString("foo"))), Bulk(Some(ByteString("bar"))), Bulk(Some(ByteString("Hello"))), Bulk(Some(ByteString("World"))))) 65 | result shouldEqual FullyDecoded(MultiBulk(multibulk), nextBS) 66 | 67 | 68 | val bs = ByteString("*4\r\n$3\r\none\r\n$1\r\n2\r\n$3\r\ntwo\r\n$1\r\n4\r\n*2\r\n$3\r\ntwo\r\n$5\r\nthree") 69 | val nextBS2 = ByteString("*2\r\n$3\r\ntwo\r\n$5\r\nthree") 70 | 71 | val r10 = RedisProtocolReply.decodeReplyMultiBulk(bs) 72 | r10 shouldEqual FullyDecoded(MultiBulk(Some( 73 | Vector(Bulk(Some(ByteString("one"))), Bulk(Some(ByteString("2"))), 74 | Bulk(Some(ByteString("two"))), Bulk(Some(ByteString("4")))))), nextBS2) 75 | } 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/test/scala/redis/actors/RedisSubscriberActorSpec.scala: -------------------------------------------------------------------------------- 1 | package redis.actors 2 | 3 | import akka.testkit._ 4 | import akka.actor._ 5 | import java.net.InetSocketAddress 6 | 7 | import akka.util.ByteString 8 | import redis.protocol.RedisProtocolRequest 9 | import redis.{Redis, TestBase} 10 | import akka.io.Tcp._ 11 | import redis.api.pubsub.Message 12 | import redis.api.pubsub.PMessage 13 | 14 | class RedisSubscriberActorSpec extends TestKit(ActorSystem()) with TestBase with ImplicitSender { 15 | 16 | import scala.concurrent.duration._ 17 | 18 | "RedisClientActor" should { 19 | 20 | "connection closed -> reconnect" in { 21 | val probeMock = TestProbe() 22 | val channels = Seq("channel") 23 | val patterns = Seq("pattern.*") 24 | 25 | val subscriberActor = TestActorRef[SubscriberActor](Props(classOf[SubscriberActor], 26 | new InetSocketAddress("localhost", 6379), channels, patterns, probeMock.ref) 27 | .withDispatcher(Redis.dispatcher.name)) 28 | 29 | val connectMsg = probeMock.expectMsgType[Connect] 30 | connectMsg shouldBe Connect(subscriberActor.underlyingActor.address, options = SO.KeepAlive(on = true) :: Nil) 31 | val probeTcpWorker = TestProbe() 32 | probeTcpWorker.send(subscriberActor, Connected(connectMsg.remoteAddress, connectMsg.remoteAddress)) 33 | probeTcpWorker.expectMsgType[Register] shouldBe Register(subscriberActor) 34 | val bs = RedisProtocolRequest.multiBulk("SUBSCRIBE", channels.map(ByteString(_))) 
++ RedisProtocolRequest.multiBulk("PSUBSCRIBE", patterns.map(ByteString(_))) 35 | probeTcpWorker.expectMsgType[Write] shouldBe Write(bs, WriteAck) 36 | probeTcpWorker.reply(WriteAck) 37 | 38 | val newChannels = channels :+ "channel2" 39 | subscriberActor.underlyingActor.subscribe("channel2") 40 | awaitAssert({ 41 | subscriberActor.underlyingActor.channelsSubscribed should contain theSameElementsAs(newChannels) 42 | }, 5.seconds dilated) 43 | probeTcpWorker.expectMsgType[Write] shouldBe Write(RedisProtocolRequest.multiBulk("SUBSCRIBE", Seq(ByteString("channel2"))), WriteAck) 44 | probeTcpWorker.reply(WriteAck) 45 | 46 | // ConnectionClosed 47 | probeTcpWorker.send(subscriberActor, ErrorClosed("test")) 48 | 49 | // Reconnect 50 | val connectMsg2 = probeMock.expectMsgType[Connect] 51 | connectMsg2 shouldBe Connect(subscriberActor.underlyingActor.address, options = SO.KeepAlive(on = true) :: Nil) 52 | val probeTcpWorker2 = TestProbe() 53 | probeTcpWorker2.send(subscriberActor, Connected(connectMsg2.remoteAddress, connectMsg2.remoteAddress)) 54 | probeTcpWorker2.expectMsgType[Register] shouldBe Register(subscriberActor) 55 | 56 | // check the new Channel is there 57 | val bs2 = RedisProtocolRequest.multiBulk("SUBSCRIBE", newChannels.map(ByteString(_))) ++ RedisProtocolRequest.multiBulk("PSUBSCRIBE", patterns.map(ByteString(_))) 58 | val m = probeTcpWorker2.expectMsgType[Write] 59 | m shouldBe Write(bs2, WriteAck) 60 | } 61 | } 62 | } 63 | 64 | class SubscriberActor(address: InetSocketAddress, 65 | channels: Seq[String], 66 | patterns: Seq[String], 67 | probeMock: ActorRef 68 | ) extends RedisSubscriberActor(address, channels, patterns, None, (status:Boolean) => {()} ) { 69 | 70 | override val tcp = probeMock 71 | 72 | override def onMessage(m: Message) = { 73 | probeMock ! m 74 | } 75 | 76 | def onPMessage(pm: PMessage): Unit = { 77 | probeMock ! 
pm 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /src/main/scala/redis/api/Scripting.scala: -------------------------------------------------------------------------------- 1 | package redis.api.scripting 2 | 3 | import java.io.File 4 | import java.security.MessageDigest 5 | import redis.protocol.{MultiBulk, Bulk} 6 | import redis._ 7 | import akka.util.ByteString 8 | 9 | object RedisScript { 10 | def fromFile(file: File): RedisScript = { 11 | val source = scala.io.Source.fromFile(file) 12 | val lines = try source.mkString.stripMargin.replaceAll("[\n\r]","") finally source.close() 13 | RedisScript(lines) 14 | } 15 | 16 | def fromResource(path: String): RedisScript = { 17 | val source = scala.io.Source.fromURL(getClass.getResource(path)) 18 | val lines = try source.mkString.stripMargin.replaceAll("[\n\r]","") finally source.close() 19 | RedisScript(lines) 20 | } 21 | } 22 | 23 | case class RedisScript(script: String) { 24 | lazy val sha1 = { 25 | val messageDigestSha1 = MessageDigest.getInstance("SHA-1") 26 | messageDigestSha1.digest(script.getBytes("UTF-8")).map("%02x".format(_)).mkString 27 | } 28 | } 29 | 30 | trait EvaledScript extends { 31 | val isMasterOnly = true 32 | def encodeRequest[KK, KA]( 33 | encoder: ((String, Seq[ByteString]) => ByteString), 34 | command: String, 35 | param: String, 36 | keys: Seq[KK], 37 | args: Seq[KA], 38 | keySerializer: ByteStringSerializer[KK], 39 | argSerializer: ByteStringSerializer[KA]): ByteString = { 40 | encoder(command, 41 | (ByteString(param) 42 | +: ByteString(keys.length.toString) 43 | +: keys.map(keySerializer.serialize)) ++ args.map(argSerializer.serialize)) 44 | } 45 | } 46 | 47 | case class Eval[R, KK, KA](script: String, keys: Seq[KK] = Seq(), args: Seq[KA] = Seq())(implicit redisKeys: ByteStringSerializer[KK], redisArgs: ByteStringSerializer[KA], deserializerR: RedisReplyDeserializer[R]) 48 | extends RedisCommandRedisReplyRedisReply[R] 49 | with EvaledScript { 50 | val encodedRequest: ByteString = encodeRequest(encode, "EVAL", script, keys, args, redisKeys, redisArgs) 51 | val deserializer: RedisReplyDeserializer[R] = deserializerR 52 | } 53 | 54 | case class Evalsha[R, KK, KA](sha1: String, keys: Seq[KK] = Seq(), args: Seq[KA] = Seq())(implicit redisKeys: ByteStringSerializer[KK], redisArgs: ByteStringSerializer[KA], deserializerR: RedisReplyDeserializer[R]) 55 | extends RedisCommandRedisReplyRedisReply[R] 56 | with EvaledScript { 57 | val encodedRequest: ByteString = encodeRequest(encode, "EVALSHA", sha1, keys, args, redisKeys, redisArgs) 58 | val deserializer: RedisReplyDeserializer[R] = deserializerR 59 | } 60 | 61 | case object ScriptFlush extends RedisCommandStatusBoolean { 62 | val isMasterOnly = true 63 | val encodedRequest: ByteString = encode("SCRIPT", Seq(ByteString("FLUSH"))) 64 | } 65 | 66 | case object ScriptKill extends RedisCommandStatusBoolean { 67 | val isMasterOnly = true 68 | val encodedRequest: ByteString = encode("SCRIPT", Seq(ByteString("KILL"))) 69 | } 70 | 71 | case class ScriptLoad(script: String) extends RedisCommandBulk[String] { 72 | val isMasterOnly = true 73 | val encodedRequest: ByteString = encode("SCRIPT", Seq(ByteString("LOAD"), ByteString(script))) 74 | 75 | def decodeReply(bulk: Bulk) = bulk.toString 76 | } 77 | 78 | case class ScriptExists(sha1: Seq[String]) extends RedisCommandMultiBulk[Seq[Boolean]] { 79 | val isMasterOnly = true 80 | val encodedRequest: ByteString = encode("SCRIPT", ByteString("EXISTS") +: sha1.map(ByteString(_))) 81 | 82 | 
def decodeReply(mb: MultiBulk) = MultiBulkConverter.toSeqBoolean(mb) 83 | } 84 | -------------------------------------------------------------------------------- /src/test/scala/redis/commands/ServerSpec.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis._ 4 | 5 | import redis.actors.{InvalidRedisReply, ReplyErrorException} 6 | import redis.api.NOSAVE 7 | 8 | class ServerSpec extends RedisStandaloneServer { 9 | 10 | "Server commands" should { 11 | 12 | "BGSAVE" in { 13 | redis.bgsave().futureValue shouldBe "Background saving started" 14 | } 15 | 16 | "CLIENT KILL" in { 17 | redis.clientKill("8.8.8.8", 53).failed.futureValue shouldBe a[ReplyErrorException] 18 | } 19 | 20 | "CLIENT LIST" in { 21 | redis.clientList().futureValue should not be empty 22 | } 23 | 24 | "CLIENT GETNAME" in { 25 | redis.clientGetname().futureValue shouldBe None 26 | } 27 | 28 | "CLIENT SETNAME" in { 29 | redis.clientSetname("rediscala").futureValue shouldBe true 30 | } 31 | 32 | "CONFIG GET" in { 33 | val map = redis.configGet("*").futureValue 34 | map should not be empty 35 | } 36 | "CONFIG SET" in { 37 | val r = for { 38 | set <- redis.configSet("loglevel", "warning") 39 | loglevel <- redis.configGet("loglevel") 40 | } yield { 41 | set shouldBe true 42 | loglevel.get("loglevel") shouldBe Some("warning") 43 | } 44 | r.futureValue 45 | } 46 | 47 | "CONFIG RESETSTAT" in { 48 | redis.configResetstat().futureValue shouldBe true 49 | } 50 | 51 | "DBSIZE" in { 52 | redis.dbsize().futureValue should be >= 0L 53 | } 54 | 55 | "DEBUG OBJECT" in { 56 | redis.debugObject("serverDebugObj").failed.futureValue shouldBe a[ReplyErrorException] 57 | } 58 | 59 | "DEBUG SEGFAULT" ignore {} 60 | 61 | "FLUSHALL" in { 62 | redis.flushall().futureValue shouldBe true 63 | } 64 | 65 | "FLUSHDB" in { 66 | redis.flushdb().futureValue shouldBe true 67 | } 68 | 69 | "INFO" in { 70 | val r = for { 71 | info <- redis.info() 72 | infoCpu <- redis.info("cpu") 73 | } yield { 74 | info shouldBe a[String] 75 | infoCpu shouldBe a[String] 76 | } 77 | r.futureValue 78 | } 79 | 80 | "LASTSAVE" in { 81 | redis.lastsave().futureValue should be >= 0L 82 | } 83 | 84 | "SAVE" in { 85 | val result = redis.save().recover { 86 | case ReplyErrorException("ERR Background save already in progress") => 87 | true 88 | }.futureValue 89 | result shouldBe true 90 | } 91 | 92 | "SLAVE OF" in { 93 | redis.slaveof("server", 12345).futureValue shouldBe true 94 | } 95 | 96 | "SLAVE OF NO ONE" in { 97 | redis.slaveofNoOne().futureValue shouldBe true 98 | } 99 | 100 | "TIME" in { 101 | redis.time().futureValue 102 | } 103 | 104 | "BGREWRITEAOF" in { 105 | // depending on the redis version, this string could vary, redis 2.8.21 says 'scheduled' 106 | // but redis 2.8.18 says 'started' 107 | val r = redis.bgrewriteaof().futureValue 108 | r should (be("Background append only file rewriting started") or 109 | be("Background append only file rewriting scheduled")) 110 | } 111 | 112 | "SHUTDOWN" in { 113 | redis.shutdown().failed.futureValue shouldBe InvalidRedisReply 114 | } 115 | 116 | "SHUTDOWN (with modifier)" in { 117 | withRedisServer(port => { 118 | val redis = RedisClient(port = port) 119 | redis.shutdown(NOSAVE).failed.futureValue shouldBe InvalidRedisReply 120 | }) 121 | } 122 | 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /src/main/scala/redis/actors/RedisReplyDecoder.scala: 
-------------------------------------------------------------------------------- 1 | package redis.actors 2 | 3 | import akka.actor.Actor 4 | import scala.collection.mutable 5 | import redis.protocol.{FullyDecoded, DecodeResult, RedisProtocolReply, RedisReply} 6 | import akka.util.ByteString 7 | import akka.event.Logging 8 | import scala.annotation.tailrec 9 | import redis.Operation 10 | 11 | class RedisReplyDecoder() extends Actor { 12 | 13 | 14 | val queuePromises = mutable.Queue[Operation[_,_]]() 15 | 16 | val log = Logging(context.system, this) 17 | 18 | override def postStop(): Unit = { 19 | queuePromises.foreach(op => { 20 | op.completeFailed(InvalidRedisReply) 21 | }) 22 | } 23 | 24 | def receive = { 25 | case promises: QueuePromises => { 26 | queuePromises ++= promises.queue 27 | } 28 | case byteStringInput: ByteString => decodeReplies(byteStringInput) 29 | } 30 | 31 | var partiallyDecoded: DecodeResult[Unit] = DecodeResult.unit 32 | 33 | def decodeReplies(dataByteString: ByteString): Unit = { 34 | partiallyDecoded = if (partiallyDecoded.isFullyDecoded) { 35 | decodeRepliesRecur(partiallyDecoded.rest ++ dataByteString) 36 | } else { 37 | val r = partiallyDecoded.run(dataByteString) 38 | if (r.isFullyDecoded) { 39 | decodeRepliesRecur(r.rest) 40 | } else { 41 | r 42 | } 43 | } 44 | } 45 | 46 | @tailrec 47 | private def decodeRepliesRecur(bs: ByteString): DecodeResult[Unit] = { 48 | if (queuePromises.nonEmpty && bs.nonEmpty) { 49 | val op = queuePromises.dequeue() 50 | val result = decodeRedisReply(op, bs) 51 | 52 | if (result.isFullyDecoded) { 53 | decodeRepliesRecur(result.rest) 54 | } else { 55 | result 56 | } 57 | } else { 58 | FullyDecoded((), bs) 59 | } 60 | } 61 | 62 | def decodeRedisReply(operation: Operation[_, _], bs: ByteString): DecodeResult[Unit] = { 63 | if (operation.redisCommand.decodeRedisReply.isDefinedAt(bs)) { 64 | operation.decodeRedisReplyThenComplete(bs) 65 | } else if (RedisProtocolReply.decodeReplyError.isDefinedAt(bs)) { 66 | RedisProtocolReply.decodeReplyError.apply(bs) 67 | .foreach { error => 68 | operation.completeFailed(ReplyErrorException(error.toString)) 69 | } 70 | } else { 71 | operation.completeFailed(InvalidRedisReply) 72 | throw new Exception(s"Redis Protocol error: Got ${bs.head} as initial reply byte for Operation: $operation") 73 | } 74 | } 75 | } 76 | 77 | case class ReplyErrorException(message: String) extends Exception(message) 78 | 79 | object InvalidRedisReply extends RuntimeException("Could not decode the redis reply (Connection closed)") 80 | 81 | trait DecodeReplies { 82 | var partiallyDecoded: DecodeResult[Unit] = DecodeResult.unit 83 | 84 | def decodeReplies(dataByteString: ByteString): Unit = { 85 | partiallyDecoded = if (partiallyDecoded.isFullyDecoded) { 86 | decodeRepliesRecur(dataByteString) 87 | } else { 88 | val r = partiallyDecoded.run(dataByteString) 89 | if (r.isFullyDecoded) { 90 | decodeRepliesRecur(r.rest) 91 | } else { 92 | r 93 | } 94 | } 95 | } 96 | 97 | @tailrec 98 | private def decodeRepliesRecur(bs: ByteString): DecodeResult[Unit] = { 99 | val r = RedisProtocolReply.decodeReply(bs).map(onDecodedReply) 100 | if (r.isFullyDecoded) { 101 | decodeRepliesRecur(r.rest) 102 | } else { 103 | r 104 | } 105 | } 106 | 107 | def onDecodedReply(reply: RedisReply): Unit 108 | } 109 | 110 | case class QueuePromises(queue: mutable.Queue[Operation[_, _]]) -------------------------------------------------------------------------------- /src/main/scala/redis/commands/Strings.scala: 
-------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis._ 4 | import scala.concurrent.Future 5 | import redis.api.strings._ 6 | import redis.api._ 7 | 8 | trait Strings extends Request { 9 | 10 | def append[V: ByteStringSerializer](key: String, value: V): Future[Long] = 11 | send(Append(key, value)) 12 | 13 | def bitcount(key: String): Future[Long] = 14 | send(Bitcount(key)) 15 | 16 | def bitcount(key: String, start: Long, end: Long): Future[Long] = 17 | send(BitcountRange(key, start, end)) 18 | 19 | def bitopAND(destkey: String, keys: String*): Future[Long] = 20 | bitop(AND, destkey, keys: _*) 21 | 22 | def bitopOR(destkey: String, keys: String*): Future[Long] = 23 | bitop(OR, destkey, keys: _*) 24 | 25 | def bitopXOR(destkey: String, keys: String*): Future[Long] = 26 | bitop(XOR, destkey, keys: _*) 27 | 28 | def bitopNOT(destkey: String, key: String): Future[Long] = 29 | bitop(NOT, destkey, key) 30 | 31 | def bitop(operation: BitOperator, destkey: String, keys: String*): Future[Long] = 32 | send(Bitop(operation, destkey, keys)) 33 | 34 | def bitpos(key: String, bit: Long, start: Long = 0, end: Long = -1): Future[Long] = 35 | send(Bitpos(key, bit, start, end)) 36 | 37 | def decr(key: String): Future[Long] = 38 | send(Decr(key)) 39 | 40 | def decrby(key: String, decrement: Long): Future[Long] = 41 | send(Decrby(key, decrement)) 42 | 43 | def get[R: ByteStringDeserializer](key: String): Future[Option[R]] = 44 | send(Get(key)) 45 | 46 | def getbit(key: String, offset: Long): Future[Boolean] = 47 | send(Getbit(key, offset)) 48 | 49 | def getrange[R: ByteStringDeserializer](key: String, start: Long, end: Long): Future[Option[R]] = 50 | send(Getrange(key, start, end)) 51 | 52 | def getset[V: ByteStringSerializer, R: ByteStringDeserializer](key: String, value: V): Future[Option[R]] = 53 | send(Getset(key, value)) 54 | 55 | def incr(key: String): Future[Long] = 56 | send(Incr(key)) 57 | 58 | def incrby(key: String, increment: Long): Future[Long] = 59 | send(Incrby(key, increment)) 60 | 61 | def incrbyfloat(key: String, increment: Double): Future[Option[Double]] = 62 | send(Incrbyfloat(key, increment)) 63 | 64 | def mget[R: ByteStringDeserializer](keys: String*): Future[Seq[Option[R]]] = 65 | send(Mget(keys)) 66 | 67 | def mset[V: ByteStringSerializer](keysValues: Map[String, V]): Future[Boolean] = 68 | send(Mset(keysValues)) 69 | 70 | def msetnx[V: ByteStringSerializer](keysValues: Map[String, V]): Future[Boolean] = 71 | send(Msetnx(keysValues)) 72 | 73 | def psetex[V: ByteStringSerializer](key: String, milliseconds: Long, value: V): Future[Boolean] = 74 | send(Psetex(key, milliseconds, value)) 75 | 76 | def set[V: ByteStringSerializer](key: String, value: V, 77 | exSeconds: Option[Long] = None, 78 | pxMilliseconds: Option[Long] = None, 79 | NX: Boolean = false, 80 | XX: Boolean = false): Future[Boolean] = { 81 | send(Set(key, value, exSeconds, pxMilliseconds, NX, XX)) 82 | } 83 | 84 | def setbit(key: String, offset: Long, value: Boolean): Future[Boolean] = 85 | send(Setbit(key, offset, value)) 86 | 87 | def setex[V: ByteStringSerializer](key: String, seconds: Long, value: V): Future[Boolean] = 88 | send(Setex(key, seconds, value)) 89 | 90 | def setnx[V: ByteStringSerializer](key: String, value: V): Future[Boolean] = 91 | send(Setnx(key, value)) 92 | 93 | def setrange[V: ByteStringSerializer](key: String, offset: Long, value: V): Future[Long] = 94 | send(Setrange(key, offset, value)) 95 | 96 | def strlen(key: String): 
Future[Long] = 97 | send(Strlen(key)) 98 | 99 | } 100 | 101 | 102 | 103 | -------------------------------------------------------------------------------- /src/main/scala/redis/protocol/ParseNumber.scala: -------------------------------------------------------------------------------- 1 | package redis.protocol 2 | 3 | import akka.util.ByteString 4 | 5 | object ParseNumber { 6 | 7 | /** 8 | * Fast decoder from ByteString to Int 9 | * Code from openjdk java.lang.Integer parseInt() 10 | * @see http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/7-b147/java/lang/Integer.java#Integer.parseInt%28java.lang.String%2Cint%29 11 | * 3 times faster than java.lang.Integer.parseInt(ByteString().utf8String)) 12 | * @param byteString 13 | * @return 14 | */ 15 | def parseInt(byteString: ByteString): Int = { 16 | if (byteString == null) { 17 | throw new NumberFormatException("null") 18 | } 19 | 20 | var result = 0 21 | var negative = false 22 | var i = 0 23 | val len = byteString.length 24 | var limit = -java.lang.Integer.MAX_VALUE 25 | 26 | if (len > 0) { 27 | val firstChar = byteString(0) 28 | if (firstChar < '0') { 29 | // Possible leading "+" or "-" 30 | if (firstChar == '-') { 31 | negative = true 32 | limit = java.lang.Integer.MIN_VALUE 33 | } else if (firstChar != '+') 34 | throw new NumberFormatException(byteString.toString()) 35 | 36 | if (len == 1) // Cannot have lone "+" or "-" 37 | throw new NumberFormatException(byteString.toString()) 38 | i += 1 39 | } 40 | val multmin = limit / 10 41 | while (i < len) { 42 | // Accumulating negatively avoids surprises near MAX_VALUE 43 | val digit = byteString(i) - '0' 44 | i += 1 45 | if (digit < 0 || digit > 9) { 46 | throw new NumberFormatException(byteString.toString()) 47 | } 48 | if (result < multmin) { 49 | throw new NumberFormatException(byteString.toString()) 50 | } 51 | result *= 10 52 | if (result < limit + digit) { 53 | throw new NumberFormatException(byteString.toString()) 54 | } 55 | result -= digit 56 | } 57 | } else { 58 | throw new NumberFormatException(byteString.toString()) 59 | } 60 | if (negative) 61 | result 62 | else 63 | -result 64 | } 65 | 66 | def parseLong(byteString: ByteString): Long = { 67 | if (byteString == null) { 68 | throw new NumberFormatException("null") 69 | } 70 | 71 | var result = 0L 72 | var negative = false 73 | var i = 0 74 | val len = byteString.length 75 | var limit = -java.lang.Long.MAX_VALUE 76 | 77 | if (len > 0) { 78 | val firstChar = byteString(0) 79 | if (firstChar < '0') { 80 | // Possible leading "+" or "-" 81 | if (firstChar == '-') { 82 | negative = true 83 | limit = java.lang.Long.MIN_VALUE 84 | } else if (firstChar != '+') 85 | throw new NumberFormatException(byteString.toString()) 86 | 87 | if (len == 1) // Cannot have lone "+" or "-" 88 | throw new NumberFormatException(byteString.toString()) 89 | i += 1 90 | } 91 | val multmin = limit / 10 92 | while (i < len) { 93 | // Accumulating negatively avoids surprises near MAX_VALUE 94 | val digit = byteString(i) - '0' 95 | i += 1 96 | if (digit < 0 || digit > 9) { 97 | throw new NumberFormatException(byteString.toString()) 98 | } 99 | if (result < multmin) { 100 | throw new NumberFormatException(byteString.toString()) 101 | } 102 | result *= 10 103 | if (result < limit + digit) { 104 | throw new NumberFormatException(byteString.toString()) 105 | } 106 | result -= digit 107 | } 108 | } else { 109 | throw new NumberFormatException(byteString.toString()) 110 | } 111 | if (negative) 112 | result 113 | else 114 | -result 115 | } 116 | } 117 | 
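A short illustration of the overflow handling in ParseNumber above (a usage sketch with assumed inputs, not a file from this repository): accumulating the result as a negative number lets the parser accept Int.MinValue, whose magnitude has no positive Int counterpart, and the range checks reject anything past Int.MaxValue before the accumulation can silently overflow.

// Usage sketch only; the inputs are illustrative.
import akka.util.ByteString
import redis.protocol.ParseNumber

ParseNumber.parseInt(ByteString("2147483647"))    // Int.MaxValue, parses fine
ParseNumber.parseInt(ByteString("-2147483648"))   // Int.MinValue, representable thanks to negative accumulation
// ParseNumber.parseInt(ByteString("2147483648")) // throws NumberFormatException: one past Int.MaxValue
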
-------------------------------------------------------------------------------- /src/main/scala/redis/api/Clusters.scala: -------------------------------------------------------------------------------- 1 | package redis.api.clusters 2 | 3 | import akka.util.ByteString 4 | import redis.RedisCommand 5 | import redis.protocol._ 6 | 7 | 8 | 9 | case class ClusterNode(host:String, port:Int, id:String) 10 | case class ClusterSlot(begin:Int, end:Int, master:ClusterNode, slaves:Seq[ClusterNode]) extends Comparable[ClusterSlot] { 11 | override def compareTo(x: ClusterSlot): Int = { 12 | this.begin.compare(x.begin) 13 | } 14 | } 15 | 16 | 17 | case class ClusterSlots() extends RedisCommand[MultiBulk,Seq[ClusterSlot]] { 18 | val isMasterOnly = false 19 | val encodedRequest: ByteString = encode("CLUSTER SLOTS") 20 | 21 | def buildClusterNode(vect:Seq[RedisReply]): ClusterNode = { 22 | ClusterNode(vect(0).toByteString.utf8String,vect(1).toByteString.utf8String.toInt,vect(2).toByteString.utf8String) 23 | } 24 | 25 | def decodeReply(mb: MultiBulk): Seq[ClusterSlot] = { 26 | val clusterSlots: Option[Seq[ClusterSlot]] = mb.responses.map{ vector => 27 | vector.collect { 28 | case MultiBulk(Some(groupSlot)) => 29 | // 30 | // redis response: 31 | // MultiBulk(begin,end,MultiBulk(masterId,masterPort,masterId),MultiBulk(slave1Id,slave1Port,slave1Id),MultiBulk(slave2Id,slave2Port,slave2Id))..., 32 | // MultiBulk(begin,end,MultiBulk(masterId,masterPort,masterId),MultiBulk(slave1Id,slave1Port,slave1Id),MultiBulk(slave2Id,slave2Port,slave2Id)) 33 | // 34 | val begin = groupSlot(0).toByteString.utf8String.toInt 35 | val end = groupSlot(1).toByteString.utf8String.toInt 36 | val masterMB = groupSlot(2) 37 | 38 | val masterNode = masterMB match { 39 | case MultiBulk(Some(vect)) => buildClusterNode(vect) 40 | case _ => throw new RuntimeException("no master found") 41 | } 42 | 43 | val slavesNode: Seq[ClusterNode] = groupSlot.lift(3).map { 44 | case MultiBulk(Some(responses)) => 45 | responses.grouped(3).map { vect => 46 | buildClusterNode(vect) 47 | }.toList 48 | case _ => Seq.empty 49 | }.getOrElse(Seq.empty) 50 | ClusterSlot(begin,end,masterNode,slavesNode) 51 | 52 | } 53 | } 54 | clusterSlots.getOrElse(Seq.empty) 55 | } 56 | 57 | override val decodeRedisReply: PartialFunction[ByteString, DecodeResult[MultiBulk]] = { 58 | case bs if bs.head == RedisProtocolReply.MULTIBULK => { 59 | val multibulk = RedisProtocolReply.decodeReplyMultiBulk(bs) 60 | multibulk 61 | } 62 | case bs if bs.head == RedisProtocolReply.INTEGER => { 63 | RedisProtocolReply.decodeReplyMultiBulk(bs) 64 | } 65 | case bs => { 66 | RedisProtocolReply.decodeReplyMultiBulk(bs) 67 | } 68 | 69 | } 70 | } 71 | 72 | case class ClusterInfo() extends RedisCommand[Bulk, Map[String, String]] { 73 | val isMasterOnly = false 74 | val encodedRequest: ByteString = encode("CLUSTER INFO") 75 | def decodeReply(b: Bulk): Map[String, String] = { 76 | b.response.map(_.utf8String.split("\r\n").map(_.split(":")).map(s => (s(0),s(1))).toMap).getOrElse(Map.empty) 77 | } 78 | override val decodeRedisReply: PartialFunction[ByteString, DecodeResult[Bulk]] = { 79 | case s => RedisProtocolReply.decodeReplyBulk(s) 80 | } 81 | } 82 | case class ClusterNodeInfo(id:String, ip_port:String, flags:String, master:String, ping_sent:Long, pong_recv:Long, config_epoch:Long, link_state:String, slots:Array[String]) 83 | case class ClusterNodes() extends RedisCommand[Bulk, Array[ClusterNodeInfo]] { 84 | val isMasterOnly = false 85 | val encodedRequest: ByteString = encode("CLUSTER NODES") 86 
| def decodeReply(b: Bulk): Array[ClusterNodeInfo] = { 87 | b.response.map(_.utf8String.split("\n").map(_.split(" ")).map(s => ClusterNodeInfo(s(0), s(1), s(2), s(3), s(4).toLong, s(5).toLong, s(6).toLong, s(7), s.drop(8)))).getOrElse(Array.empty) 88 | } 89 | override val decodeRedisReply: PartialFunction[ByteString, DecodeResult[Bulk]] = { 90 | case s => RedisProtocolReply.decodeReplyBulk(s) 91 | } 92 | } -------------------------------------------------------------------------------- /src/main/scala/redis/api/geo/Geo.scala: -------------------------------------------------------------------------------- 1 | package redis.api.geo 2 | 3 | import akka.util.ByteString 4 | import redis._ 5 | import redis.api.geo.DistUnits.Measurement 6 | import redis.api.geo.GeoOptions.WithOption 7 | import redis.protocol._ 8 | 9 | case class GeoAdd[K](key: K, lat: Double, lng: Double, loc: String)(implicit redisKey: ByteStringSerializer[K]) 10 | extends SimpleClusterKey[K] with RedisCommandIntegerLong { 11 | val isMasterOnly = false 12 | val encodedRequest: ByteString = encode("GEOADD", Seq(redisKey.serialize(key), ByteString(lng.toString), 13 | ByteString(lat.toString), ByteString(loc))) 14 | } 15 | 16 | case class GeoRadius[K](key: K, lat: Double, lng: Double, radius: Double, unit: Measurement) 17 | (implicit redisKey: ByteStringSerializer[K]) 18 | extends SimpleClusterKey[K] with RedisCommandMultiBulk[Seq[String]] { 19 | val isMasterOnly = false 20 | val encodedRequest: ByteString = encode("GEORADIUS", Seq(redisKey.serialize(key), ByteString(lng.toString), 21 | ByteString(lat.toString), ByteString(radius.toString), ByteString(unit.value))) 22 | def decodeReply(mb: MultiBulk): Seq[String] = MultiBulkConverter.toStringsSeq(mb) 23 | } 24 | 25 | case class GeoRadiusByMember[K](key: K, member:String, dist:Int, unit: Measurement) 26 | (implicit redisKey: ByteStringSerializer[K]) 27 | extends SimpleClusterKey[K] with RedisCommandMultiBulk[Seq[String]] { 28 | val isMasterOnly = false 29 | val encodedRequest: ByteString = encode("GEORADIUSBYMEMBER", Seq(redisKey.serialize(key), ByteString(member), 30 | ByteString(dist.toString), ByteString(unit.value))) 31 | def decodeReply(mb: MultiBulk): Seq[String] = MultiBulkConverter.toStringsSeq(mb) 32 | } 33 | 34 | case class GeoRadiusByMemberWithOpt[K](key: K, member:String, dist:Int, unit: Measurement, opt:WithOption, count: Int) 35 | (implicit redisKey: ByteStringSerializer[K]) 36 | extends SimpleClusterKey[K] with RedisCommandMultiBulk[Seq[String]] { 37 | val isMasterOnly = false 38 | val encodedRequest: ByteString = encode("GEORADIUSBYMEMBER", Seq(redisKey.serialize(key), ByteString(member), 39 | ByteString(dist.toString), ByteString(unit.value), ByteString(opt.value),ByteString("COUNT"), ByteString(count.toString))) 40 | def decodeReply(mb: MultiBulk): Seq[String] = MultiBulkConverter.toStringsSeq(mb) 41 | 42 | } 43 | 44 | case class GeoDist[K](key: K, member1 :String, member2: String, unit: Measurement) 45 | (implicit redisKey: ByteStringSerializer[K]) 46 | extends SimpleClusterKey[K] with RedisCommandBulkDouble { 47 | val isMasterOnly = false 48 | val encodedRequest: ByteString = encode("GEODIST", Seq(redisKey.serialize(key), ByteString(member1), 49 | ByteString(member2), ByteString(unit.value))) 50 | 51 | def decodeReply(mb: MultiBulk): Seq[String] = MultiBulkConverter.toStringsSeq(mb) 52 | } 53 | 54 | case class GeoHash[K](key: K, member: Seq[String] )(implicit redisKey: ByteStringSerializer[K]) 55 | extends SimpleClusterKey[K] with 
RedisCommandMultiBulk[Seq[String]]{ 56 | val isMasterOnly = false 57 | val members: Seq[ByteString] = member.foldLeft(Seq.empty[ByteString]){ case (acc, e) => ByteString(e.toString) +: acc } 58 | val keySec: Seq[ByteString] = Seq(redisKey.serialize(key)) 59 | val encodedRequest: ByteString = encode("GEOHASH", keySec ++ members ) 60 | def decodeReply(mb: MultiBulk): Seq[String] = MultiBulkConverter.toStringsSeq(mb) 61 | } 62 | 63 | case class GeoPos[K](key: K, member: Seq[String] )(implicit redisKey: ByteStringSerializer[K]) 64 | extends SimpleClusterKey[K] with RedisCommandMultiBulk[Seq[String]]{ 65 | val isMasterOnly = false 66 | val members: Seq[ByteString] = member.foldLeft(Seq.empty[ByteString]){ case (acc, e) => ByteString(e.toString) +: acc } 67 | val keySec: Seq[ByteString] = Seq(redisKey.serialize(key)) 68 | val encodedRequest: ByteString = encode("GEOPOS", keySec ++ members ) 69 | def decodeReply(mb: MultiBulk): Seq[String] = MultiBulkConverter.toStringsSeq(mb) 70 | } -------------------------------------------------------------------------------- /src/main/scala/redis/commands/Keys.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis._ 4 | import scala.concurrent.Future 5 | import scala.concurrent.duration._ 6 | import redis.api.{Order, LimitOffsetCount} 7 | import redis.api.keys._ 8 | 9 | trait Keys extends Request { 10 | 11 | def del(keys: String*): Future[Long] = 12 | send(Del(keys)) 13 | 14 | def dump[R: ByteStringDeserializer](key: String): Future[Option[R]] = 15 | send(Dump(key)) 16 | 17 | def exists(key: String): Future[Boolean] = 18 | send(Exists(key)) 19 | 20 | def existsMany(keys: String*): Future[Long] = 21 | send(ExistsMany(keys)) 22 | 23 | def expire(key: String, seconds: Long): Future[Boolean] = 24 | send(Expire(key, seconds)) 25 | 26 | def expireat(key: String, seconds: Long): Future[Boolean] = 27 | send(Expireat(key, seconds)) 28 | 29 | def keys(pattern: String): Future[Seq[String]] = 30 | send(Keys(pattern)) 31 | 32 | def migrate(host: String, port: Int, key: String, destinationDB: Int, timeout: FiniteDuration): Future[Boolean] = { 33 | send(Migrate(host, port, key, destinationDB, timeout)) 34 | } 35 | 36 | def move(key: String, db: Int): Future[Boolean] = 37 | send(Move(key, db)) 38 | 39 | def objectRefcount(key: String): Future[Option[Long]] = 40 | send(ObjectRefcount(key)) 41 | 42 | def objectIdletime(key: String): Future[Option[Long]] = 43 | send(ObjectIdletime(key)) 44 | 45 | def objectEncoding(key: String): Future[Option[String]] = 46 | send(ObjectEncoding(key)) 47 | 48 | def persist(key: String): Future[Boolean] = 49 | send(Persist(key)) 50 | 51 | def pexpire(key: String, milliseconds: Long): Future[Boolean] = 52 | send(Pexpire(key, milliseconds)) 53 | 54 | def pexpireat(key: String, millisecondsTimestamp: Long): Future[Boolean] = 55 | send(Pexpireat(key, millisecondsTimestamp)) 56 | 57 | def pttl(key: String): Future[Long] = 58 | send(Pttl(key)) 59 | 60 | def randomkey[R: ByteStringDeserializer](): Future[Option[R]] = 61 | send(Randomkey()) 62 | 63 | def rename(key: String, newkey: String): Future[Boolean] = 64 | send(Rename(key, newkey)) 65 | 66 | def renamenx(key: String, newkey: String): Future[Boolean] = 67 | send(Renamex(key, newkey)) 68 | 69 | def restore[V: ByteStringSerializer](key: String, ttl: Long = 0, serializedValue: V): Future[Boolean] = 70 | send(Restore(key, ttl, serializedValue)) 71 | 72 | def sort[R: ByteStringDeserializer](key: String, 73 | byPattern: 
Option[String] = None, 74 | limit: Option[LimitOffsetCount] = None, 75 | getPatterns: Seq[String] = Seq(), 76 | order: Option[Order] = None, 77 | alpha: Boolean = false): Future[Seq[R]] = { 78 | send(Sort(key, byPattern, limit, getPatterns, order, alpha)) 79 | } 80 | 81 | def sortStore(key: String, 82 | byPattern: Option[String] = None, 83 | limit: Option[LimitOffsetCount] = None, 84 | getPatterns: Seq[String] = Seq(), 85 | order: Option[Order] = None, 86 | alpha: Boolean = false, 87 | store: String): Future[Long] = { 88 | send(SortStore(key, byPattern, limit, getPatterns, order, alpha, store)) 89 | } 90 | 91 | def ttl(key: String): Future[Long] = 92 | send(Ttl(key)) 93 | 94 | def `type`(key: String): Future[String] = 95 | send(Type(key)) 96 | 97 | def scan(cursor: Int = 0, count: Option[Int] = None, matchGlob: Option[String] = None): Future[Cursor[Seq[String]]] = 98 | send(Scan(cursor, count, matchGlob)) 99 | 100 | } 101 | -------------------------------------------------------------------------------- /src/test/scala/redis/protocol/RedisProtocolReplySpec.scala: -------------------------------------------------------------------------------- 1 | package redis.protocol 2 | 3 | import akka.util.ByteString 4 | import redis.TestBase 5 | 6 | class RedisProtocolReplySpec extends TestBase { 7 | 8 | "Decode reply" should { 9 | "fail" in { 10 | val bs = ByteString("!!") 11 | an[Exception] shouldBe thrownBy { 12 | RedisProtocolReply.decodeReply(bs) 13 | } 14 | } 15 | } 16 | 17 | "Decode String" should { 18 | "decode simple string" in { 19 | val ok = ByteString("OK\r\n") 20 | RedisProtocolReply.decodeString(ok) shouldBe FullyDecoded(ok.dropRight(2), ByteString()) 21 | } 22 | "wait for more content" in { 23 | val waitForMore = ByteString("waiting for more") 24 | val r = RedisProtocolReply.decodeString(waitForMore) 25 | r.isFullyDecoded shouldBe false 26 | r.rest shouldBe waitForMore 27 | } 28 | "decode and keep the tail" in { 29 | val decode = ByteString("decode\r\n") 30 | val keepTail = ByteString("keep the tail") 31 | RedisProtocolReply.decodeString(decode ++ keepTail) shouldBe FullyDecoded(decode.dropRight(2), keepTail) 32 | } 33 | } 34 | 35 | "Decode integer" should { 36 | "decode positive integer" in { 37 | val int = ByteString("6\r\n") 38 | RedisProtocolReply.decodeInteger(int) shouldBe FullyDecoded(Integer(ByteString("6")), ByteString()) 39 | } 40 | "decode negative integer" in { 41 | val int = ByteString("-6\r\n") 42 | val decoded = RedisProtocolReply.decodeInteger(int) 43 | decoded shouldBe FullyDecoded(Integer(ByteString("-6")), ByteString()) 44 | } 45 | } 46 | 47 | "Decode bulk" should { 48 | "decode simple bulk" in { 49 | val bulk = ByteString("6\r\nfoobar\r\n") 50 | RedisProtocolReply.decodeBulk(bulk) shouldBe FullyDecoded(Bulk(Some(ByteString("foobar"))), ByteString()) 51 | } 52 | "decode Null Bulk Reply" in { 53 | val bulk = ByteString("-1\r\n") 54 | RedisProtocolReply.decodeBulk(bulk) shouldBe FullyDecoded(Bulk(None), ByteString()) 55 | } 56 | } 57 | 58 | "Decode multi bulk" should { 59 | "decode simple" in { 60 | val multibulkString = ByteString("4\r\n$3\r\nfoo\r\n$3\r\nbar\r\n$5\r\nHello\r\n$5\r\nWorld\r\n") 61 | val multibulk = Some(Vector(Bulk(Some(ByteString("foo"))), Bulk(Some(ByteString("bar"))), Bulk(Some(ByteString("Hello"))), Bulk(Some(ByteString("World"))))) 62 | RedisProtocolReply.decodeMultiBulk(multibulkString) shouldBe FullyDecoded(MultiBulk(multibulk), ByteString()) 63 | } 64 | "decode waiting" in { 65 | val multibulkString = 
ByteString("4\r\n$3\r\nfoo\r\n$3\r\nbar\r\n$5\r\nHello\r\n$50\r\nWaiting ...") 66 | val r = RedisProtocolReply.decodeMultiBulk(multibulkString) 67 | r.isFullyDecoded shouldBe false 68 | r.rest shouldBe ByteString() 69 | } 70 | "decode Empty Multi Bulk" in { 71 | val emptyMultiBulk = ByteString("0\r\n") 72 | RedisProtocolReply.decodeMultiBulk(emptyMultiBulk) shouldBe FullyDecoded(MultiBulk(Some(Vector())), ByteString()) 73 | } 74 | "decode Null Multi Bulk" in { 75 | val nullMultiBulk = ByteString("-1\r\n") 76 | RedisProtocolReply.decodeMultiBulk(nullMultiBulk) shouldBe FullyDecoded(MultiBulk(None), ByteString()) 77 | } 78 | "decode Null element in Multi Bulk" in { 79 | val nullElementInMultiBulk = ByteString("3\r\n$3\r\nfoo\r\n$-1\r\n$3\r\nbar\r\n") 80 | val multibulk = Some(Vector(Bulk(Some(ByteString("foo"))), Bulk(None), Bulk(Some(ByteString("bar"))))) 81 | RedisProtocolReply.decodeMultiBulk(nullElementInMultiBulk) shouldBe FullyDecoded(MultiBulk(multibulk), ByteString()) 82 | } 83 | "decode different reply type" in { 84 | val diff = ByteString("5\r\n:1\r\n:2\r\n:3\r\n:4\r\n$6\r\nfoobar\r\n") 85 | val multibulk = Some(Vector(Integer(ByteString("1")), Integer(ByteString("2")), Integer(ByteString("3")), Integer(ByteString("4")), Bulk(Some(ByteString("foobar"))))) 86 | RedisProtocolReply.decodeMultiBulk(diff) shouldBe FullyDecoded(MultiBulk(multibulk), ByteString()) 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/test/scala/redis/commands/ScriptingSpec.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import java.io.File 4 | 5 | import redis._ 6 | 7 | import akka.util.ByteString 8 | import redis.protocol.{Bulk, MultiBulk} 9 | import redis.actors.ReplyErrorException 10 | import redis.api.scripting.RedisScript 11 | 12 | class ScriptingSpec extends RedisStandaloneServer { 13 | 14 | "Scripting commands" should { 15 | val redisScript = RedisScript("return 'rediscala'") 16 | val redisScriptKeysArgs = RedisScript("return {KEYS[1],ARGV[1]}") 17 | val redisScriptConversionObject = RedisScript("return redis.call('get', 'dumbKey')") 18 | 19 | "evalshaOrEval (RedisScript)" in { 20 | redis.scriptFlush().futureValue shouldBe true 21 | val r = redis.evalshaOrEval(redisScriptKeysArgs, Seq("key"), Seq("arg")).futureValue 22 | r shouldBe MultiBulk(Some(Vector(Bulk(Some(ByteString("key"))), Bulk(Some(ByteString("arg")))))) 23 | } 24 | 25 | "EVAL" in { 26 | redis.eval(redisScript.script).futureValue shouldBe Bulk(Some(ByteString("rediscala"))) 27 | } 28 | 29 | "EVAL with type conversion" in { 30 | val dumbObject = new DumbClass("foo", "bar") 31 | val r = redis 32 | .set("dumbKey", dumbObject) 33 | .flatMap(_ => { 34 | redis.eval[DumbClass](redisScriptConversionObject.script) 35 | }) 36 | 37 | r.futureValue shouldBe dumbObject 38 | } 39 | 40 | "EVALSHA" in { 41 | redis.evalsha(redisScript.sha1).futureValue shouldBe Bulk(Some(ByteString("rediscala"))) 42 | } 43 | 44 | "EVALSHA with type conversion" in { 45 | val dumbObject = new DumbClass("foo2", "bar2") 46 | val r = redis 47 | .set("dumbKey", dumbObject) 48 | .flatMap(_ => { 49 | redis.evalsha[DumbClass](redisScriptConversionObject.sha1) 50 | }) 51 | 52 | r.futureValue shouldBe dumbObject 53 | } 54 | 55 | "evalshaOrEvalForTypeOf (RedisScript)" in { 56 | redis.scriptFlush().futureValue shouldBe true 57 | val dumbObject = new DumbClass("foo3", "bar3") 58 | 59 | val r = redis 60 | .set("dumbKey", dumbObject) 61 | .flatMap(_ => { 
62 | redis.evalshaOrEval[DumbClass](redisScriptConversionObject) 63 | }) 64 | 65 | r.futureValue shouldBe dumbObject 66 | } 67 | 68 | "SCRIPT FLUSH" in { 69 | redis.scriptFlush().futureValue shouldBe true 70 | } 71 | 72 | "SCRIPT KILL" in { 73 | 74 | withRedisServer(serverPort => { 75 | val redisKiller = RedisClient(port = serverPort) 76 | val redisScriptLauncher = RedisClient(port = serverPort) 77 | redisKiller.scriptKill().failed.futureValue shouldBe a[ReplyErrorException] 78 | 79 | val infiniteScript = redisScriptLauncher.eval(""" 80 | |local i = 1 81 | |while(i > 0) do 82 | |end 83 | |return 0 84 | """.stripMargin) 85 | Thread.sleep(500) 86 | eventually { 87 | redisKiller.scriptKill().futureValue shouldBe true 88 | } 89 | infiniteScript.failed.futureValue 90 | }) 91 | } 92 | 93 | "SCRIPT LOAD" in { 94 | redis.scriptLoad("return 'rediscala'").futureValue shouldBe "d4cf7650161a37eb55a7e9325f3534cec6fc3241" 95 | } 96 | 97 | "SCRIPT EXISTS" in { 98 | val redisScriptNotFound = RedisScript("return 'SCRIPT EXISTS not found'") 99 | val redisScriptFound = RedisScript("return 'SCRIPT EXISTS found'") 100 | val scriptsLoaded = redis 101 | .scriptLoad(redisScriptFound.script) 102 | .flatMap(_ => redis.scriptExists(redisScriptFound.sha1, redisScriptNotFound.sha1)) 103 | scriptsLoaded.futureValue shouldBe Seq(true, false) 104 | 105 | } 106 | 107 | "fromFile" in { 108 | val testScriptFile = new File(getClass.getResource("/lua/test.lua").getPath) 109 | RedisScript.fromFile(testScriptFile) shouldBe RedisScript("""return "test"""") 110 | } 111 | 112 | "fromResource" in { 113 | val testScriptPath = "/lua/test.lua" 114 | RedisScript.fromResource(testScriptPath) shouldBe RedisScript("""return "test"""") 115 | } 116 | 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /src/main/scala/redis/actors/RedisClientActor.scala: -------------------------------------------------------------------------------- 1 | package redis.actors 2 | 3 | import java.net.InetSocketAddress 4 | 5 | import akka.actor.SupervisorStrategy.Stop 6 | import akka.actor._ 7 | import akka.util.{ByteString, ByteStringBuilder} 8 | import redis.{Operation, Transaction} 9 | 10 | import scala.collection.mutable 11 | import scala.concurrent.duration.FiniteDuration 12 | 13 | object RedisClientActor { 14 | 15 | def props( address: InetSocketAddress, getConnectOperations: () => Seq[Operation[_, _]], 16 | onConnectStatus: Boolean => Unit, 17 | dispatcherName: String, 18 | connectTimeout: Option[FiniteDuration] = None) = 19 | Props(new RedisClientActor(address, getConnectOperations, onConnectStatus, dispatcherName, connectTimeout)) 20 | } 21 | 22 | class RedisClientActor(override val address: InetSocketAddress, getConnectOperations: () => 23 | Seq[Operation[_, _]], onConnectStatus: Boolean => Unit, dispatcherName: String, connectTimeout: Option[FiniteDuration] = None) extends RedisWorkerIO(address, onConnectStatus, connectTimeout) { 24 | 25 | 26 | import context._ 27 | 28 | var repliesDecoder = initRepliesDecoder() 29 | 30 | // connection closed on the sending direction 31 | var oldRepliesDecoder: Option[ActorRef] = None 32 | 33 | def initRepliesDecoder() = 34 | context.actorOf(Props(classOf[RedisReplyDecoder]).withDispatcher(dispatcherName)) 35 | 36 | var queuePromises = mutable.Queue[Operation[_, _]]() 37 | 38 | def writing: Receive = { 39 | case op: Operation[_, _] => 40 | queuePromises enqueue op 41 | write(op.redisCommand.encodedRequest) 42 | case Transaction(commands) => { 43 | val buffer = new 
ByteStringBuilder 44 | commands.foreach(operation => { 45 | buffer.append(operation.redisCommand.encodedRequest) 46 | queuePromises enqueue operation 47 | }) 48 | write(buffer.result()) 49 | } 50 | case Terminated(actorRef) => 51 | log.warning(s"Terminated($actorRef)") 52 | case KillOldRepliesDecoder => killOldRepliesDecoder() 53 | } 54 | 55 | def onDataReceived(dataByteString: ByteString): Unit = { 56 | repliesDecoder ! dataByteString 57 | } 58 | 59 | def onDataReceivedOnClosingConnection(dataByteString: ByteString): Unit = { 60 | oldRepliesDecoder.foreach(oldRepliesDecoder => oldRepliesDecoder ! dataByteString) 61 | } 62 | 63 | def onWriteSent(): Unit = { 64 | repliesDecoder ! QueuePromises(queuePromises) 65 | queuePromises = mutable.Queue[Operation[_, _]]() 66 | } 67 | 68 | def onConnectionClosed(): Unit = { 69 | queuePromises.foreach(op => { 70 | op.completeFailed(NoConnectionException) 71 | }) 72 | queuePromises.clear() 73 | killOldRepliesDecoder() 74 | oldRepliesDecoder = Some(repliesDecoder) 75 | // TODO send delayed message to oldRepliesDecoder to kill himself after X seconds 76 | this.context.system.scheduler.scheduleOnce(reconnectDuration * 10, self, KillOldRepliesDecoder) 77 | repliesDecoder = initRepliesDecoder() 78 | } 79 | 80 | def onClosingConnectionClosed(): Unit = killOldRepliesDecoder() 81 | 82 | def killOldRepliesDecoder() = { 83 | oldRepliesDecoder.foreach(oldRepliesDecoder => oldRepliesDecoder ! PoisonPill) 84 | oldRepliesDecoder = None 85 | } 86 | 87 | override val supervisorStrategy = 88 | OneForOneStrategy() { 89 | case _: Exception => { 90 | // Start a new decoder 91 | repliesDecoder = initRepliesDecoder() 92 | restartConnection() 93 | // stop the old one => clean the mailbox 94 | Stop 95 | } 96 | } 97 | 98 | def onConnectWrite(): ByteString = { 99 | val ops = getConnectOperations() 100 | val buffer = new ByteStringBuilder 101 | 102 | val queuePromisesConnect = mutable.Queue[Operation[_, _]]() 103 | ops.foreach(operation => { 104 | buffer.append(operation.redisCommand.encodedRequest) 105 | queuePromisesConnect enqueue operation 106 | }) 107 | queuePromises = queuePromisesConnect ++ queuePromises 108 | buffer.result() 109 | } 110 | 111 | } 112 | 113 | case object NoConnectionException extends RuntimeException("No Connection established") 114 | 115 | case object KillOldRepliesDecoder 116 | -------------------------------------------------------------------------------- /src/bench/src/test/scala/rediscala/benchmark/RedisBench.scala: -------------------------------------------------------------------------------- 1 | package rediscala.benchmark 2 | 3 | import scala.concurrent._ 4 | import scala.concurrent.duration._ 5 | 6 | import akka.actor.ActorSystem 7 | import scala.collection.Iterator 8 | 9 | import org.scalameter._ 10 | import org.scalameter.api.{Executor,Aggregator,Gen,Reporter,RegressionReporter,HtmlReporter,SerializationPersistor} 11 | import redis.RedisClient 12 | import org.scalameter.execution 13 | 14 | import org.scalameter.picklers.noPickler._ 15 | 16 | object RedisBench extends Bench[Double] { 17 | 18 | override def reporter: Reporter[Double] = Reporter.Composite( 19 | new RegressionReporter[Double]( 20 | RegressionReporter.Tester.Accepter(), 21 | RegressionReporter.Historian.Complete()), 22 | HtmlReporter(embedDsv = true) 23 | ) 24 | 25 | import Executor.Measurer 26 | 27 | def aggregator = Aggregator.average 28 | 29 | def measurer: Measurer[Double] = new Measurer.IgnoringGC with Measurer.PeriodicReinstantiation[Double] with 
Measurer.OutlierElimination[Double] with Measurer.RelativeNoise { 30 | def numeric: Numeric[Double] = implicitly[Numeric[Double]] 31 | } 32 | 33 | def executor: Executor[Double] = new execution.SeparateJvmsExecutor(warmer, aggregator, measurer) 34 | 35 | def persistor = new SerializationPersistor() 36 | 37 | def exponential(axisName: String)(from: Int, until: Int, factor: Int): Gen[(Int, RedisBenchContext)] = new Gen[(Int, RedisBenchContext)] { 38 | def warmupset = { 39 | Iterator.single(((until - from) / 2, new RedisBenchContext())) 40 | } 41 | 42 | def dataset = Iterator.iterate(from)(_ * factor).takeWhile(_ <= until).map(x => Parameters(new Parameter[String](axisName) -> x)) 43 | 44 | def generate(params: Parameters) = { 45 | (params[Int](axisName), new RedisBenchContext()) 46 | } 47 | } 48 | 49 | val sizes = exponential("size")(20000, 400000, 2) 50 | 51 | performance of "RedisBench" in { 52 | 53 | measure method "ping" in { 54 | 55 | using(sizes).setUp(redisSetUp()) 56 | .tearDown(redisTearDown) 57 | .in { 58 | case (i: Int, redisBench: RedisBenchContext) => 59 | val redis = redisBench.redis 60 | implicit val ec = redis.executionContext 61 | 62 | val r = for { 63 | ii <- 0 until i 64 | } yield { 65 | redis.ping() 66 | } 67 | Await.result(Future.sequence(r), 30 seconds) 68 | } 69 | } 70 | 71 | measure method "set" in { 72 | 73 | using(sizes).setUp(redisSetUp()) 74 | .tearDown(redisTearDown) 75 | .in { 76 | case (i: Int, redisBench: RedisBenchContext) => 77 | val redis = redisBench.redis 78 | implicit val ec = redis.executionContext 79 | 80 | val r = for { 81 | ii <- 0 until i 82 | } yield { 83 | redis.set("a", ii) 84 | } 85 | Await.result(Future.sequence(r), 30 seconds) 86 | } 87 | } 88 | 89 | measure method "get" in { 90 | 91 | using(sizes).setUp(redisSetUp(_.set("a", "abc"))) 92 | .tearDown(redisTearDown) 93 | .in { 94 | case (i: Int, redisBench: RedisBenchContext) => 95 | val redis = redisBench.redis 96 | implicit val ec = redis.executionContext 97 | 98 | val r = for { 99 | ii <- 0 until i 100 | } yield { 101 | redis.get("i") 102 | } 103 | Await.result(Future.sequence(r), 30 seconds) 104 | } 105 | } 106 | 107 | } 108 | 109 | def redisSetUp(init: RedisClient => Unit = _ => {})(data: (Int, RedisBenchContext)) = data match { 110 | case (i: Int, redisBench: RedisBenchContext) => { 111 | redisBench.akkaSystem = akka.actor.ActorSystem() 112 | redisBench.redis = RedisClient()(redisBench.akkaSystem) 113 | Await.result(redisBench.redis.ping(), 2 seconds) 114 | } 115 | } 116 | 117 | def redisTearDown(data: (Int, RedisBenchContext)) = data match { 118 | case (i: Int, redisBench: RedisBenchContext) => 119 | redisBench.redis.stop() 120 | redisBench.akkaSystem.terminate() 121 | Await.result(redisBench.akkaSystem.whenTerminated, Duration.Inf) 122 | } 123 | } 124 | 125 | class RedisBenchContext(var redis: RedisClient = null, var akkaSystem: ActorSystem = null) 126 | -------------------------------------------------------------------------------- /src/main/scala/redis/actors/RedisSubscriberActor.scala: -------------------------------------------------------------------------------- 1 | package redis.actors 2 | 3 | import akka.util.ByteString 4 | import redis.protocol.{Error, MultiBulk, RedisReply} 5 | import redis.api.pubsub._ 6 | import java.net.InetSocketAddress 7 | import redis.api.connection.Auth 8 | 9 | class RedisSubscriberActorWithCallback( 10 | address: InetSocketAddress, 11 | channels: Seq[String], 12 | patterns: Seq[String], 13 | messageCallback: Message => Unit, 14 | pmessageCallback: 
PMessage => Unit, 15 | authPassword: Option[String] = None, 16 | onConnectStatus: Boolean => Unit 17 | ) extends RedisSubscriberActor(address, channels, patterns, authPassword,onConnectStatus) { 18 | def onMessage(m: Message) = messageCallback(m) 19 | 20 | def onPMessage(pm: PMessage) = pmessageCallback(pm) 21 | } 22 | 23 | abstract class RedisSubscriberActor( 24 | address: InetSocketAddress, 25 | channels: Seq[String], 26 | patterns: Seq[String], 27 | authPassword: Option[String] = None, 28 | onConnectStatus: Boolean => Unit 29 | ) extends RedisWorkerIO(address,onConnectStatus) with DecodeReplies { 30 | def onConnectWrite(): ByteString = { 31 | authPassword.map(Auth(_).encodedRequest).getOrElse(ByteString.empty) 32 | } 33 | 34 | def onMessage(m: Message): Unit 35 | 36 | def onPMessage(pm: PMessage): Unit 37 | 38 | /** 39 | * Keep states of channels and actor in case of connection reset 40 | */ 41 | var channelsSubscribed = channels.toSet 42 | var patternsSubscribed = patterns.toSet 43 | 44 | override def preStart(): Unit = { 45 | super.preStart() 46 | if(channelsSubscribed.nonEmpty){ 47 | write(SUBSCRIBE(channelsSubscribed.toSeq: _*).toByteString) 48 | } 49 | if(patternsSubscribed.nonEmpty){ 50 | write(PSUBSCRIBE(patternsSubscribed.toSeq: _*).toByteString) 51 | } 52 | } 53 | 54 | def writing: Receive = { 55 | case message: SubscribeMessage => { 56 | if(message.params.nonEmpty){ 57 | write(message.toByteString) 58 | message match { 59 | case s: SUBSCRIBE => channelsSubscribed ++= s.channel 60 | case u: UNSUBSCRIBE => channelsSubscribed --= u.channel 61 | case ps: PSUBSCRIBE => patternsSubscribed ++= ps.pattern 62 | case pu: PUNSUBSCRIBE => patternsSubscribed --= pu.pattern 63 | } 64 | } 65 | } 66 | } 67 | 68 | def subscribe(channels: String*): Unit = { 69 | self ! SUBSCRIBE(channels: _*) 70 | } 71 | 72 | def unsubscribe(channels: String*): Unit = { 73 | self ! UNSUBSCRIBE(channels: _*) 74 | } 75 | 76 | def psubscribe(patterns: String*): Unit = { 77 | self ! PSUBSCRIBE(patterns: _*) 78 | } 79 | 80 | def punsubscribe(patterns: String*): Unit = { 81 | self ! 
PUNSUBSCRIBE(patterns: _*) 82 | } 83 | 84 | def onConnectionClosed(): Unit = {} 85 | 86 | def onWriteSent(): Unit = {} 87 | 88 | def onDataReceived(dataByteString: ByteString): Unit = { 89 | decodeReplies(dataByteString) 90 | } 91 | 92 | def onDecodedReply(reply: RedisReply): Unit = { 93 | reply match { 94 | case MultiBulk(Some(list)) if list.length == 3 && list.head.toByteString.utf8String == "message" => { 95 | onMessage(Message(list(1).toByteString.utf8String, list(2).toByteString)) 96 | } 97 | case MultiBulk(Some(list)) if list.length == 4 && list.head.toByteString.utf8String == "pmessage" => { 98 | onPMessage(PMessage(list(1).toByteString.utf8String, list(2).toByteString.utf8String, list(3).toByteString)) 99 | } 100 | case error @ Error(_) => 101 | onErrorReply(error) 102 | case _ => // subscribe or psubscribe 103 | } 104 | } 105 | 106 | def onDataReceivedOnClosingConnection(dataByteString: ByteString): Unit = decodeReplies(dataByteString) 107 | 108 | def onClosingConnectionClosed(): Unit = {} 109 | 110 | def onErrorReply(error: Error): Unit = {} 111 | } 112 | -------------------------------------------------------------------------------- /src/bench/src/test/scala/rediscala/benchmark/RedisBenchPool.scala: -------------------------------------------------------------------------------- 1 | package rediscala.benchmark 2 | 3 | 4 | import scala.concurrent._ 5 | import scala.concurrent.duration._ 6 | 7 | import akka.actor.ActorSystem 8 | import scala.collection.Iterator 9 | 10 | import org.scalameter._ 11 | import redis.{RedisServer, RedisClientPool, RedisClient} 12 | import org.scalameter.execution 13 | 14 | import org.scalameter.api.{Executor,Aggregator,Gen,Reporter,RegressionReporter,HtmlReporter,SerializationPersistor} 15 | 16 | import org.scalameter.picklers.noPickler._ 17 | 18 | object RedisBenchPool extends Bench[Double] { 19 | 20 | override def reporter: Reporter[Double] = Reporter.Composite( 21 | new RegressionReporter[Double]( 22 | RegressionReporter.Tester.Accepter(), 23 | RegressionReporter.Historian.Complete()), 24 | HtmlReporter(embedDsv = true) 25 | ) 26 | 27 | import Executor.Measurer 28 | 29 | def aggregator = Aggregator.average 30 | 31 | def measurer: Measurer[Double] = new Measurer.IgnoringGC with Measurer.PeriodicReinstantiation[Double] with Measurer.OutlierElimination[Double] with Measurer.RelativeNoise { 32 | def numeric: Numeric[Double] = implicitly[Numeric[Double]] 33 | } 34 | 35 | //def measurer: Measurer = new Executor.Measurer.MemoryFootprint 36 | 37 | def executor: Executor[Double] = new execution.SeparateJvmsExecutor(warmer, aggregator, measurer) 38 | 39 | 40 | def persistor = new SerializationPersistor() 41 | 42 | def exponential(axisName: String)(from: Int, until: Int, factor: Int): Gen[(Int, RedisBenchContextPool)] = new Gen[(Int, RedisBenchContextPool)] { 43 | def warmupset = { 44 | Iterator.single(((until - from) / 2, new RedisBenchContextPool())) 45 | } 46 | 47 | def dataset = Iterator.iterate(from)(_ * factor).takeWhile(_ <= until).map(x => Parameters(new Parameter[String](axisName) -> x)) 48 | 49 | def generate(params: Parameters) = { 50 | (params[Int](axisName), new RedisBenchContextPool()) 51 | } 52 | } 53 | 54 | val sizes = exponential("size")(20000, 400000, 2) 55 | 56 | performance of "RedisBench" in { 57 | 58 | measure method "ping" in { 59 | 60 | using(sizes).setUp(redisSetUp()) 61 | .tearDown(redisTearDown) 62 | .in { 63 | case (i: Int, redisBench: RedisBenchContextPool) => 64 | val redis = redisBench.redis 65 | implicit val ec = 
redis.executionContext 66 | 67 | val r = for { 68 | ii <- 0 until i 69 | } yield { 70 | redis.ping() 71 | } 72 | Await.result(Future.sequence(r), 30 seconds) 73 | } 74 | } 75 | 76 | measure method "set" in { 77 | 78 | using(sizes).setUp(redisSetUp()) 79 | .tearDown(redisTearDown) 80 | .in { 81 | case (i: Int, redisBench: RedisBenchContextPool) => 82 | val redis = redisBench.redis 83 | implicit val ec = redis.executionContext 84 | 85 | val r = for { 86 | ii <- 0 until i 87 | } yield { 88 | redis.set("a", ii) 89 | } 90 | Await.result(Future.sequence(r), 30 seconds) 91 | } 92 | } 93 | 94 | measure method "get" in { 95 | 96 | using(sizes).setUp(redisSetUp(_.set("a", "abc"))) 97 | .tearDown(redisTearDown) 98 | .in { 99 | case (i: Int, redisBench: RedisBenchContextPool) => 100 | val redis = redisBench.redis 101 | implicit val ec = redis.executionContext 102 | 103 | val r = for { 104 | ii <- 0 until i 105 | } yield { 106 | redis.get("a") 107 | } 108 | Await.result(Future.sequence(r), 30 seconds) 109 | } 110 | } 111 | 112 | } 113 | 114 | def redisSetUp(init: RedisClient => Unit = _ => {})(data: (Int, RedisBenchContextPool)) = data match { 115 | case (i: Int, redisBench: RedisBenchContextPool) => 116 | redisBench.akkaSystem = akka.actor.ActorSystem() 117 | redisBench.redis = new RedisClientPool(Seq(RedisServer(), RedisServer(), RedisServer()))(redisBench.akkaSystem) 118 | Await.result(redisBench.redis.ping(), 2 seconds) 119 | } 120 | 121 | def redisTearDown(data: (Int, RedisBenchContextPool)) = data match { 122 | case (i: Int, redisBench: RedisBenchContextPool) => 123 | redisBench.redis.stop() 124 | redisBench.akkaSystem.terminate() 125 | Await.result(redisBench.akkaSystem.whenTerminated, Duration.Inf) 126 | } 127 | } 128 | 129 | class RedisBenchContextPool(var redis: RedisClientPool = null, var akkaSystem: ActorSystem = null) -------------------------------------------------------------------------------- /src/test/scala/redis/RedisPubSubSpec.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import redis.api.pubsub._ 4 | import redis.actors.RedisSubscriberActor 5 | import java.net.InetSocketAddress 6 | import java.util.concurrent.atomic.AtomicInteger 7 | 8 | import akka.actor.{ActorRef, Props} 9 | import akka.testkit.{TestActorRef, TestProbe} 10 | import akka.util.ByteString 11 | 12 | class RedisPubSubSpec extends RedisStandaloneServer { 13 | 14 | 15 | "PubSub test" should { 16 | "ok (client + callback)" in { 17 | val receivedMessages = new AtomicInteger() 18 | val channel1 = "ch1" 19 | val channel2 = "ch2" 20 | RedisPubSub( 21 | port = port, 22 | channels = Seq(channel1, channel2), 23 | patterns = Nil, 24 | onMessage = (m: Message) => { 25 | log.debug(s"received $m") 26 | if(m.channel != channel2) { 27 | receivedMessages.incrementAndGet() 28 | } 29 | } 30 | ) 31 | 32 | //wait for subscription 33 | eventually { 34 | redis.publish(channel2, "1").futureValue shouldBe 1 35 | } 36 | 37 | eventually { 38 | redis.publish(channel1, "2").futureValue shouldBe 1 39 | redis.publish("otherChannel", "message").futureValue shouldBe 0 40 | receivedMessages.get() shouldBe 1 41 | } 42 | } 43 | 44 | "ok (actor)" in { 45 | val probeMock = TestProbe() 46 | val channels = Seq("channel") 47 | val patterns = Seq("pattern.*") 48 | 49 | val subscriberActor = TestActorRef[SubscriberActor]( 50 | Props(classOf[SubscriberActor], new InetSocketAddress("localhost", port), 51 | channels, patterns, probeMock.ref) 52 | .withDispatcher(Redis.dispatcher.name), 53 | 
"SubscriberActor" 54 | ) 55 | import scala.concurrent.duration._ 56 | 57 | system.scheduler.scheduleOnce(2 seconds)(redis.publish("channel", "value")) 58 | 59 | probeMock.expectMsgType[Message](5 seconds) shouldBe Message("channel", ByteString("value")) 60 | 61 | redis.publish("pattern.1", "value") 62 | 63 | probeMock.expectMsgType[PMessage] shouldBe PMessage("pattern.*", "pattern.1", ByteString("value")) 64 | 65 | subscriberActor.underlyingActor.subscribe("channel2") 66 | subscriberActor.underlyingActor.unsubscribe("channel") 67 | 68 | system.scheduler.scheduleOnce(2 seconds)({ 69 | redis.publish("channel", "value") 70 | redis.publish("channel2", "value") 71 | }) 72 | probeMock.expectMsgType[Message](5 seconds) shouldBe Message("channel2", ByteString("value")) 73 | 74 | subscriberActor.underlyingActor.unsubscribe("channel2") 75 | system.scheduler.scheduleOnce(1 second)({ 76 | redis.publish("channel2", ByteString("value")) 77 | }) 78 | probeMock.expectNoMessage(3 seconds) 79 | 80 | subscriberActor.underlyingActor.subscribe("channel2") 81 | system.scheduler.scheduleOnce(1 second)({ 82 | redis.publish("channel2", ByteString("value")) 83 | }) 84 | probeMock.expectMsgType[Message](5 seconds) shouldBe Message("channel2", ByteString("value")) 85 | 86 | subscriberActor.underlyingActor.psubscribe("pattern2.*") 87 | subscriberActor.underlyingActor.punsubscribe("pattern.*") 88 | 89 | system.scheduler.scheduleOnce(2 seconds)({ 90 | redis.publish("pattern2.match", ByteString("value")) 91 | redis.publish("pattern.*", ByteString("value")) 92 | }) 93 | probeMock.expectMsgType[PMessage](5 seconds) shouldBe PMessage("pattern2.*", "pattern2.match", ByteString("value")) 94 | 95 | subscriberActor.underlyingActor.punsubscribe("pattern2.*") 96 | system.scheduler.scheduleOnce(2 seconds)({ 97 | redis.publish("pattern2.match", ByteString("value")) 98 | }) 99 | probeMock.expectNoMessage(3 seconds) 100 | 101 | subscriberActor.underlyingActor.psubscribe("pattern.*") 102 | system.scheduler.scheduleOnce(2 seconds)({ 103 | redis.publish("pattern.*", ByteString("value")) 104 | }) 105 | probeMock.expectMsgType[PMessage](5 seconds) shouldBe PMessage("pattern.*", "pattern.*", ByteString("value")) 106 | } 107 | } 108 | 109 | } 110 | 111 | class SubscriberActor(address: InetSocketAddress, 112 | channels: Seq[String], 113 | patterns: Seq[String], 114 | probeMock: ActorRef 115 | ) extends RedisSubscriberActor(address, channels, patterns, None, (b:Boolean) => () ) { 116 | 117 | override def onMessage(m: Message) = { 118 | probeMock ! m 119 | } 120 | 121 | def onPMessage(pm: PMessage): Unit = { 122 | probeMock ! 
pm 123 | } 124 | } 125 | 126 | 127 | -------------------------------------------------------------------------------- /src/main/scala/redis/commands/Transactions.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis._ 4 | import scala.concurrent.{Promise, Future, ExecutionContext} 5 | import akka.actor._ 6 | import scala.collection.immutable.Queue 7 | import redis.actors.ReplyErrorException 8 | import redis.protocol._ 9 | import redis.protocol.MultiBulk 10 | import scala.Some 11 | import scala.util.{Failure, Success} 12 | import redis.api.transactions.{Watch, Exec, Multi} 13 | import akka.util.ByteString 14 | 15 | trait Transactions extends ActorRequest { 16 | 17 | def multi(): TransactionBuilder = transaction() 18 | 19 | def multi(operations: (TransactionBuilder) => Unit): TransactionBuilder = { 20 | val builder = transaction() 21 | operations(builder) 22 | builder 23 | } 24 | 25 | def transaction(): TransactionBuilder = TransactionBuilder(redisConnection) 26 | 27 | def watch(watchKeys: String*): TransactionBuilder = { 28 | val builder = transaction() 29 | builder.watch(watchKeys: _*) 30 | builder 31 | } 32 | 33 | } 34 | 35 | case class TransactionBuilder(redisConnection: ActorRef)(implicit val executionContext: ExecutionContext) extends BufferedRequest with RedisCommands { 36 | 37 | //val operations = Queue.newBuilder[Operation[_, _]] 38 | val watcher = Set.newBuilder[String] 39 | 40 | def unwatch(): Unit = { 41 | watcher.clear() 42 | } 43 | 44 | def watch(keys: String*): Unit = { 45 | watcher ++= keys 46 | } 47 | 48 | def discard(): Unit = { 49 | operations.result().map(operation => { 50 | operation.completeFailed(TransactionDiscardedException) 51 | }) 52 | operations.clear() 53 | unwatch() 54 | } 55 | 56 | // todo maybe return a Future for the general state of the transaction ? (Success or Failure) 57 | def exec(): Future[MultiBulk] = { 58 | val t = Transaction(watcher.result(), operations.result(), redisConnection) 59 | val p = Promise[MultiBulk]() 60 | t.process(p) 61 | p.future 62 | } 63 | } 64 | 65 | case class Transaction(watcher: Set[String], operations: Queue[Operation[_, _]], redisConnection: ActorRef)(implicit val executionContext: ExecutionContext) { 66 | 67 | def process(promise: Promise[MultiBulk]): Unit = { 68 | val multiOp = Operation(Multi, Promise[Boolean]()) 69 | val execOp = Operation(Exec, execPromise(promise)) 70 | 71 | val commands = Seq.newBuilder[Operation[_, _]] 72 | 73 | val watchOp = watchOperation(watcher) 74 | watchOp.map(commands.+=(_)) 75 | commands += multiOp 76 | commands ++= operations.map(op => operationToQueuedOperation(op)) 77 | commands += execOp 78 | 79 | redisConnection ! 
redis.Transaction(commands.result()) 80 | } 81 | 82 | def operationToQueuedOperation(op: Operation[_, _]) = { 83 | val cmd = new RedisCommandStatusString { 84 | val isMasterOnly = true 85 | val encodedRequest: ByteString = op.redisCommand.encodedRequest 86 | } 87 | Operation(cmd, Promise[String]()) 88 | } 89 | 90 | def ignoredPromise() = Promise[Any]() 91 | 92 | def execPromise(promise: Promise[MultiBulk]): Promise[MultiBulk] = { 93 | val p = Promise[MultiBulk]() 94 | p.future.onComplete(reply => { 95 | reply match { 96 | case Success(m: MultiBulk) => { 97 | promise.success(m) 98 | dispatchExecReply(m) 99 | } 100 | case Success(r) => { 101 | promise.failure(TransactionExecException(r)) 102 | operations.foreach(_.completeFailed(TransactionExecException(r))) 103 | } 104 | case Failure(f) => { 105 | promise.failure(f) 106 | operations.foreach(_.completeFailed(f)) 107 | } 108 | } 109 | }) 110 | p 111 | } 112 | 113 | def dispatchExecReply(multiBulk: MultiBulk) = { 114 | multiBulk.responses.map(replies => { 115 | replies.zip(operations).map { 116 | case (e: Error, operation) => operation.completeFailed(ReplyErrorException(e.toString())) 117 | case (reply, operation) => operation.tryCompleteSuccess(reply) 118 | } 119 | }).getOrElse({ 120 | operations.foreach(_.completeFailed(TransactionWatchException())) 121 | }) 122 | } 123 | 124 | 125 | def watchOperation(keys: Set[String]): Option[Operation[_, Boolean]] = { 126 | if (keys.nonEmpty) { 127 | Some(Operation(Watch(keys), Promise[Boolean]())) 128 | } else { 129 | None 130 | } 131 | } 132 | } 133 | 134 | case class TransactionExecException(reply: RedisReply) extends Exception(s"Expected MultiBulk response, got : $reply") 135 | 136 | case object TransactionDiscardedException extends Exception 137 | 138 | case class TransactionWatchException(message: String = "One watched key has been modified, transaction has failed") extends Exception(message) 139 | 140 | 141 | 142 | 143 | 144 | -------------------------------------------------------------------------------- /src/test/scala/redis/commands/BListsSpec.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis._ 4 | 5 | import akka.util.ByteString 6 | import scala.concurrent.duration._ 7 | 8 | class BListsSpec extends RedisStandaloneServer { 9 | 10 | "Blocking Lists commands" should { 11 | 12 | "BLPOP already containing elements" in { 13 | val redisB = RedisBlockingClient(port = port) 14 | val r = for { 15 | _ <- redis.del("blpop1", "blpop2") 16 | p <- redis.rpush("blpop1", "a", "b", "c") 17 | b <- redisB.blpop(Seq("blpop1", "blpop2")) 18 | } yield { 19 | b shouldBe Some("blpop1" -> ByteString("a")) 20 | } 21 | val rr = r.futureValue 22 | redisB.stop() 23 | rr 24 | } 25 | 26 | "BLPOP blocking" in { 27 | val redisB = RedisBlockingClient(port = port) 28 | val blockTime = 1.second 29 | val rr = within(blockTime, blockTime * 2) { 30 | val r = redis 31 | .del("blpopBlock") 32 | .flatMap(_ => { 33 | val blpop = redisB.blpop(Seq("blpopBlock")) 34 | Thread.sleep(blockTime.toMillis) 35 | redis.rpush("blpopBlock", "a", "b", "c") 36 | blpop 37 | }) 38 | r.futureValue shouldBe Some("blpopBlock" -> ByteString("a")) 39 | } 40 | redisB.stop() 41 | rr 42 | } 43 | 44 | "BLPOP blocking timeout" in { 45 | val redisB = RedisBlockingClient(port = port) 46 | val rr = within(1.seconds, 10.seconds) { 47 | val r = redis 48 | .del("blpopBlockTimeout") 49 | .flatMap(_ => { 50 | redisB.brpop(Seq("blpopBlockTimeout"), 1.seconds) 51 | }) 52 | r.futureValue 
shouldBe empty 53 | } 54 | redisB.stop() 55 | rr 56 | } 57 | 58 | "BRPOP already containing elements" in { 59 | val redisB = RedisBlockingClient(port = port) 60 | val r = for { 61 | _ <- redis.del("brpop1", "brpop2") 62 | p <- redis.rpush("brpop1", "a", "b", "c") 63 | b <- redisB.brpop(Seq("brpop1", "brpop2")) 64 | } yield { 65 | redisB.stop() 66 | b shouldBe Some("brpop1" -> ByteString("c")) 67 | } 68 | r.futureValue 69 | } 70 | 71 | "BRPOP blocking" in { 72 | val redisB = RedisBlockingClient(port = port) 73 | val rr = within(1.seconds, 10.seconds) { 74 | val r = redis 75 | .del("brpopBlock") 76 | .flatMap(_ => { 77 | val brpop = redisB.brpop(Seq("brpopBlock")) 78 | Thread.sleep(1000) 79 | redis.rpush("brpopBlock", "a", "b", "c") 80 | brpop 81 | }) 82 | r.futureValue shouldBe Some("brpopBlock" -> ByteString("c")) 83 | } 84 | redisB.stop() 85 | rr 86 | } 87 | 88 | "BRPOP blocking timeout" in { 89 | val redisB = RedisBlockingClient(port = port) 90 | val rr = within(1.seconds, 10.seconds) { 91 | val r = redis 92 | .del("brpopBlockTimeout") 93 | .flatMap(_ => { 94 | redisB.brpop(Seq("brpopBlockTimeout"), 1.seconds) 95 | }) 96 | r.futureValue shouldBe empty 97 | } 98 | redisB.stop() 99 | rr 100 | } 101 | 102 | "BRPOPLPUSH already containing elements" in { 103 | val redisB = RedisBlockingClient(port = port) 104 | val r = for { 105 | _ <- redis.del("brpopplush1", "brpopplush2") 106 | p <- redis.rpush("brpopplush1", "a", "b", "c") 107 | b <- redisB.brpoplpush("brpopplush1", "brpopplush2") 108 | } yield { 109 | b shouldBe Some(ByteString("c")) 110 | } 111 | val rr = r.futureValue 112 | redisB.stop() 113 | rr 114 | } 115 | 116 | "BRPOPLPUSH blocking" in { 117 | val redisB = RedisBlockingClient(port = port) 118 | val rr = within(1.seconds, 10.seconds) { 119 | val r = redis 120 | .del("brpopplushBlock1", "brpopplushBlock2") 121 | .flatMap(_ => { 122 | val brpopplush = redisB.brpoplpush("brpopplushBlock1", "brpopplushBlock2") 123 | Thread.sleep(1000) 124 | redis.rpush("brpopplushBlock1", "a", "b", "c") 125 | brpopplush 126 | }) 127 | r.futureValue shouldBe Some(ByteString("c")) 128 | } 129 | redisB.stop() 130 | rr 131 | } 132 | 133 | "BRPOPLPUSH blocking timeout" in { 134 | val redisB = RedisBlockingClient(port = port) 135 | val rr = within(1.seconds, 10.seconds) { 136 | val r = redis 137 | .del("brpopplushBlockTimeout1", "brpopplushBlockTimeout2") 138 | .flatMap(_ => { 139 | redisB.brpoplpush("brpopplushBlockTimeout1", "brpopplushBlockTimeout2", 1.seconds) 140 | }) 141 | r.futureValue shouldBe empty 142 | } 143 | redisB.stop() 144 | rr 145 | } 146 | 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /src/main/scala/redis/RedisCommand.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import akka.util.ByteString 4 | import redis.protocol._ 5 | 6 | 7 | trait RedisCommand[RedisReplyT <: RedisReply, +T] { 8 | val isMasterOnly: Boolean 9 | val encodedRequest: ByteString 10 | 11 | def decodeReply(r: RedisReplyT): T 12 | 13 | val decodeRedisReply: PartialFunction[ByteString, DecodeResult[RedisReplyT]] 14 | 15 | def encode(command: String) = RedisProtocolRequest.inline(command) 16 | 17 | def encode(command: String, args: Seq[ByteString]) = RedisProtocolRequest.multiBulk(command, args) 18 | } 19 | 20 | 21 | trait RedisCommandStatus[T] extends RedisCommand[Status, T] { 22 | val decodeRedisReply: PartialFunction[ByteString, DecodeResult[Status]] = RedisProtocolReply.decodeReplyStatus 23 | } 24 | 25 
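// Added sketch (illustrative, not part of the original source): a concrete command is typically
// defined by mixing one of the reply-specific traits in this file with an encoded request.
// For example, a hypothetical STRLEN wrapper decoding an Integer reply into a Long:
//   case class StrlenExample[K](key: K)(implicit redisKey: ByteStringSerializer[K])
//     extends RedisCommandIntegerLong {
//     val isMasterOnly = false
//     val encodedRequest: ByteString = encode("STRLEN", Seq(redisKey.serialize(key)))
//   }
// decodeReply is inherited from RedisCommandIntegerLong and decodeRedisReply from RedisCommandInteger.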
| trait RedisCommandInteger[T] extends RedisCommand[Integer, T] { 26 | val decodeRedisReply: PartialFunction[ByteString, DecodeResult[Integer]] = RedisProtocolReply.decodeReplyInteger 27 | } 28 | 29 | trait RedisCommandBulk[T] extends RedisCommand[Bulk, T] { 30 | val decodeRedisReply: PartialFunction[ByteString, DecodeResult[Bulk]] = RedisProtocolReply.decodeReplyBulk 31 | } 32 | 33 | trait RedisCommandMultiBulk[T] extends RedisCommand[MultiBulk, T] { 34 | val decodeRedisReply: PartialFunction[ByteString, DecodeResult[MultiBulk]] = RedisProtocolReply.decodeReplyMultiBulk 35 | } 36 | 37 | trait RedisCommandRedisReply[T] extends RedisCommand[RedisReply, T] { 38 | val decodeRedisReply: PartialFunction[ByteString, DecodeResult[RedisReply]] = RedisProtocolReply.decodeReplyPF 39 | } 40 | 41 | trait RedisCommandStatusString extends RedisCommandStatus[String] { 42 | def decodeReply(s: Status) = s.toString 43 | } 44 | 45 | trait RedisCommandStatusBoolean extends RedisCommandStatus[Boolean] { 46 | def decodeReply(s: Status): Boolean = s.toBoolean 47 | } 48 | 49 | trait RedisCommandIntegerBoolean extends RedisCommandInteger[Boolean] { 50 | def decodeReply(i: Integer): Boolean = i.toBoolean 51 | } 52 | 53 | trait RedisCommandIntegerLong extends RedisCommandInteger[Long] { 54 | def decodeReply(i: Integer) = i.toLong 55 | } 56 | 57 | trait RedisCommandBulkOptionByteString[R] extends RedisCommandBulk[Option[R]] { 58 | val deserializer: ByteStringDeserializer[R] 59 | 60 | def decodeReply(bulk: Bulk) = bulk.response.map(deserializer.deserialize) 61 | } 62 | 63 | trait RedisCommandBulkDouble extends RedisCommandBulk[Double] { 64 | def decodeReply(bulk: Bulk) = bulk.response.map(ByteStringDeserializer.RedisDouble.deserialize).get 65 | } 66 | 67 | trait RedisCommandBulkOptionDouble extends RedisCommandBulk[Option[Double]] { 68 | def decodeReply(bulk: Bulk) = bulk.response.map(ByteStringDeserializer.RedisDouble.deserialize) 69 | } 70 | 71 | trait RedisCommandMultiBulkSeqByteString[R] extends RedisCommandMultiBulk[Seq[R]] { 72 | val deserializer: ByteStringDeserializer[R] 73 | 74 | def decodeReply(mb: MultiBulk) = MultiBulkConverter.toSeqByteString(mb)(deserializer) 75 | } 76 | 77 | trait RedisCommandMultiBulkSeqByteStringDouble[R] extends RedisCommandMultiBulk[Seq[(R, Double)]] { 78 | val deserializer: ByteStringDeserializer[R] 79 | 80 | def decodeReply(mb: MultiBulk) = MultiBulkConverter.toSeqTuple2ByteStringDouble(mb)(deserializer) 81 | } 82 | 83 | case class Cursor[T](index: Int, data: T) 84 | 85 | trait RedisCommandMultiBulkCursor[R] extends RedisCommandMultiBulk[Cursor[R]] { 86 | def decodeReply(mb: MultiBulk) = { 87 | mb.responses.map { responses => 88 | val cursor = ParseNumber.parseInt(responses.head.toByteString) 89 | val remainder = responses(1).asInstanceOf[MultiBulk] 90 | 91 | Cursor(cursor, remainder.responses.map(decodeResponses).getOrElse(empty)) 92 | }.getOrElse(Cursor(0, empty)) 93 | } 94 | 95 | def decodeResponses(responses: Seq[RedisReply]): R 96 | 97 | val empty: R 98 | val count: Option[Int] 99 | val matchGlob: Option[String] 100 | 101 | def withOptionalParams(params: Seq[ByteString]): Seq[ByteString] = { 102 | val withCount = count.fold(params)(c => params ++ Seq(ByteString("COUNT"), ByteString(c.toString))) 103 | matchGlob.fold(withCount)(m => withCount ++ Seq(ByteString("MATCH"), ByteString(m))) 104 | } 105 | } 106 | 107 | trait RedisCommandRedisReplyOptionLong extends RedisCommandRedisReply[Option[Long]] { 108 | def decodeReply(redisReply: RedisReply): Option[Long] = redisReply match { 
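// Added note (not in the original source): an Integer reply yields Some(value); any other
// reply type (e.g. a nil Bulk) falls through to None below.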
109 | case i: Integer => Some(i.toLong) 110 | case _ => None 111 | } 112 | } 113 | 114 | trait RedisCommandRedisReplyRedisReply[R] extends RedisCommandRedisReply[R] { 115 | val deserializer: RedisReplyDeserializer[R] 116 | 117 | def decodeReply(redisReply: RedisReply): R = { 118 | if (deserializer.deserialize.isDefinedAt(redisReply)) 119 | deserializer.deserialize.apply(redisReply) 120 | else 121 | throw new RuntimeException("Could not deserialize") // todo make own type 122 | } 123 | } -------------------------------------------------------------------------------- /src/main/scala/redis/commands/SortedSets.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis.api._ 4 | import redis.api.sortedsets._ 5 | import redis.{ByteStringDeserializer, ByteStringSerializer, Cursor, Request} 6 | 7 | import scala.concurrent.Future 8 | 9 | trait SortedSets extends Request { 10 | 11 | def zadd[V: ByteStringSerializer](key: String, scoreMembers: (Double, V)*): Future[Long] = 12 | send(Zadd(key, Seq.empty, scoreMembers)) 13 | 14 | def zaddWithOptions[V: ByteStringSerializer](key: String, options: Seq[ZaddOption], scoreMembers: (Double, V)*): Future[Long] = 15 | send(Zadd(key, options, scoreMembers)) 16 | 17 | def zcard(key: String): Future[Long] = 18 | send(Zcard(key)) 19 | 20 | def zcount(key: String, min: Limit = Limit(Double.NegativeInfinity), max: Limit = Limit(Double.PositiveInfinity)): Future[Long] = 21 | send(Zcount(key, min, max)) 22 | 23 | def zincrby[V: ByteStringSerializer](key: String, increment: Double, member: V): Future[Double] = 24 | send(Zincrby(key, increment, member)) 25 | 26 | def zinterstore 27 | (destination: String, key: String, keys: Seq[String], aggregate: Aggregate = SUM): Future[Long] = 28 | send(Zinterstore(destination, key, keys, aggregate)) 29 | 30 | def zinterstoreWeighted(destination: String, keys: Map[String, Double], aggregate: Aggregate = SUM): Future[Long] = 31 | send(ZinterstoreWeighted(destination, keys, aggregate)) 32 | 33 | def zpopmin[R: ByteStringDeserializer](key: String, count: Long = 1): Future[Seq[R]] = 34 | send(Zpopmin(key, count)) 35 | 36 | def zpopmax[R: ByteStringDeserializer](key: String, count: Long = 1): Future[Seq[R]] = 37 | send(Zpopmax(key, count)) 38 | 39 | def zrange[R: ByteStringDeserializer](key: String, start: Long, stop: Long): Future[Seq[R]] = 40 | send(Zrange(key, start, stop)) 41 | 42 | def zrangeWithscores[R: ByteStringDeserializer](key: String, start: Long, stop: Long): Future[Seq[(R, Double)]] = 43 | send(ZrangeWithscores(key, start, stop)) 44 | 45 | def zrangebyscore[R: ByteStringDeserializer](key: String, min: Limit, max: Limit, limit: Option[(Long, Long)] = None): Future[Seq[R]] = 46 | send(Zrangebyscore(key, min, max, limit)) 47 | 48 | def zrangebyscoreWithscores[R: ByteStringDeserializer](key: String, min: Limit, max: Limit, limit: Option[(Long, Long)] = None): Future[Seq[(R, Double)]] = 49 | send(ZrangebyscoreWithscores(key, min, max, limit)) 50 | 51 | def zrank[V: ByteStringSerializer](key: String, member: V): Future[Option[Long]] = 52 | send(Zrank(key, member)) 53 | 54 | def zrem[V: ByteStringSerializer](key: String, members: V*): Future[Long] = 55 | send(Zrem(key, members)) 56 | 57 | def zremrangebylex(key: String, min: String, max: String): Future[Long] = 58 | send(Zremrangebylex(key, min, max)) 59 | 60 | def zremrangebyrank(key: String, start: Long, stop: Long): Future[Long] = 61 | send(Zremrangebyrank(key, start, stop)) 62 | 63 | def 
zremrangebyscore(key: String, min: Limit, max: Limit): Future[Long] = 64 | send(Zremrangebyscore(key, min, max)) 65 | 66 | def zrevrange[R: ByteStringDeserializer](key: String, start: Long, stop: Long): Future[Seq[R]] = 67 | send(Zrevrange(key, start, stop)) 68 | 69 | def zrevrangeWithscores[R: ByteStringDeserializer](key: String, start: Long, stop: Long): Future[Seq[(R, Double)]] = 70 | send(ZrevrangeWithscores(key, start, stop)) 71 | 72 | def zrevrangebyscore[R: ByteStringDeserializer](key: String, min: Limit, max: Limit, limit: Option[(Long, Long)] = None): Future[Seq[R]] = 73 | send(Zrevrangebyscore(key, min, max, limit)) 74 | 75 | def zrevrangebyscoreWithscores[R: ByteStringDeserializer](key: String, min: Limit, max: Limit, limit: Option[(Long, Long)] = None): Future[Seq[(R, Double)]] = 76 | send(ZrevrangebyscoreWithscores(key, min, max, limit)) 77 | 78 | def zrevrank[V: ByteStringSerializer](key: String, member: V): Future[Option[Long]] = 79 | send(Zrevrank(key, member)) 80 | 81 | def zscore[V: ByteStringSerializer](key: String, member: V): Future[Option[Double]] = 82 | send(Zscore(key, member)) 83 | 84 | def zunionstore 85 | (destination: String, key: String, keys: Seq[String], aggregate: Aggregate = SUM): Future[Long] = 86 | send(Zunionstore(destination, key, keys, aggregate)) 87 | 88 | def zunionstoreWeighted 89 | (destination: String, keys: Map[String, Double], aggregate: Aggregate = SUM): Future[Long] = 90 | send(ZunionstoreWeighted(destination, keys, aggregate)) 91 | 92 | def zrangebylex[R: ByteStringDeserializer](key: String, min: Option[String], max: Option[String], limit: Option[(Long, Long)] = None): Future[Seq[R]] = 93 | send(Zrangebylex(key, min.getOrElse("-"), max.getOrElse("+"), limit)) 94 | 95 | def zrevrangebylex[R: ByteStringDeserializer](key: String, max: Option[String], min: Option[String], limit: Option[(Long, Long)] = None): Future[Seq[R]] = 96 | send(Zrevrangebylex(key, max.getOrElse("+"), min.getOrElse("-"), limit)) 97 | 98 | def zscan[R: ByteStringDeserializer](key: String, cursor: Int = 0, count: Option[Int] = None, matchGlob: Option[String] = None): Future[Cursor[Seq[(Double, R)]]] = 99 | send(Zscan(key, cursor, count, matchGlob)) 100 | 101 | } 102 | 103 | -------------------------------------------------------------------------------- /src/main/scala/redis/api/Lists.scala: -------------------------------------------------------------------------------- 1 | package redis.api.lists 2 | 3 | import redis._ 4 | import akka.util.ByteString 5 | import redis.api.ListPivot 6 | import redis.protocol.MultiBulk 7 | 8 | case class Lindex[K, R](key: K, index: Long)(implicit redisKey: ByteStringSerializer[K], deserializerR: ByteStringDeserializer[R]) extends SimpleClusterKey[K] with RedisCommandBulkOptionByteString[R] { 9 | val isMasterOnly = false 10 | val encodedRequest: ByteString = encode("LINDEX", Seq(keyAsString, ByteString(index.toString))) 11 | val deserializer: ByteStringDeserializer[R] = deserializerR 12 | } 13 | 14 | case class Linsert[K, KP, V](key: K, beforeAfter: ListPivot, pivot: KP, value: V) 15 | (implicit redisKey: ByteStringSerializer[K], redisPivot: ByteStringSerializer[KP], convert: ByteStringSerializer[V]) 16 | extends SimpleClusterKey[K] with RedisCommandIntegerLong { 17 | val isMasterOnly = true 18 | val encodedRequest: ByteString = encode("LINSERT", Seq(keyAsString, ByteString(beforeAfter.toString), redisPivot.serialize(pivot), convert.serialize(value))) 19 | } 20 | 21 | case class Llen[K](key: K)(implicit redisKey: ByteStringSerializer[K]) 
extends SimpleClusterKey[K] with RedisCommandIntegerLong { 22 | val isMasterOnly = false 23 | val encodedRequest: ByteString = encode("LLEN", Seq(keyAsString)) 24 | } 25 | 26 | case class Lpop[K, R](key: K)(implicit redisKey: ByteStringSerializer[K], deserializerR: ByteStringDeserializer[R]) extends SimpleClusterKey[K] with RedisCommandBulkOptionByteString[R] { 27 | val isMasterOnly = true 28 | val encodedRequest: ByteString = encode("LPOP", Seq(keyAsString)) 29 | val deserializer: ByteStringDeserializer[R] = deserializerR 30 | } 31 | 32 | case class Lpush[K, V](key: K, values: Seq[V])(implicit redisKey: ByteStringSerializer[K], convert: ByteStringSerializer[V]) extends SimpleClusterKey[K] with RedisCommandIntegerLong { 33 | val isMasterOnly = true 34 | val encodedRequest: ByteString = encode("LPUSH", keyAsString +: values.map(v => convert.serialize(v))) 35 | } 36 | 37 | 38 | case class Lpushx[K, V](key: K, value: V)(implicit redisKey: ByteStringSerializer[K], convert: ByteStringSerializer[V]) extends SimpleClusterKey[K] with RedisCommandIntegerLong { 39 | val isMasterOnly = true 40 | val encodedRequest: ByteString = encode("LPUSHX", Seq(keyAsString, convert.serialize(value))) 41 | } 42 | 43 | case class Lrange[K, R](key: K, start: Long, stop: Long)(implicit redisKey: ByteStringSerializer[K], deserializerR: ByteStringDeserializer[R]) extends SimpleClusterKey[K] with RedisCommandMultiBulk[Seq[R]] { 44 | val isMasterOnly = false 45 | val encodedRequest: ByteString = encode("LRANGE", Seq(keyAsString, ByteString(start.toString), ByteString(stop.toString))) 46 | 47 | def decodeReply(mb: MultiBulk) = MultiBulkConverter.toSeqByteString(mb) 48 | } 49 | 50 | case class Lrem[K, V](key: K, count: Long, value: V)(implicit redisKey: ByteStringSerializer[K], convert: ByteStringSerializer[V]) 51 | extends SimpleClusterKey[K] with RedisCommandIntegerLong { 52 | val isMasterOnly = true 53 | val encodedRequest: ByteString = encode("LREM", Seq(keyAsString, ByteString(count.toString), convert.serialize(value))) 54 | } 55 | 56 | case class Lset[K, V](key: K, index: Long, value: V)(implicit redisKey: ByteStringSerializer[K], convert: ByteStringSerializer[V]) 57 | extends SimpleClusterKey[K] with RedisCommandStatusBoolean { 58 | val isMasterOnly = true 59 | val encodedRequest: ByteString = encode("LSET", Seq(keyAsString, ByteString(index.toString), convert.serialize(value))) 60 | } 61 | 62 | case class Ltrim[K](key: K, start: Long, stop: Long)(implicit redisKey: ByteStringSerializer[K]) extends SimpleClusterKey[K] with RedisCommandStatusBoolean { 63 | val isMasterOnly = true 64 | val encodedRequest: ByteString = encode("LTRIM", Seq(keyAsString, ByteString(start.toString), ByteString(stop.toString))) 65 | } 66 | 67 | case class Rpop[K, R](key: K)(implicit redisKey: ByteStringSerializer[K], deserializerR: ByteStringDeserializer[R]) extends SimpleClusterKey[K] with RedisCommandBulkOptionByteString[R] { 68 | val isMasterOnly = true 69 | val encodedRequest: ByteString = encode("RPOP", Seq(keyAsString)) 70 | val deserializer: ByteStringDeserializer[R] = deserializerR 71 | 72 | } 73 | 74 | case class Rpoplpush[KS, KD, R](source: KS, destination: KD)(implicit sourceSer: ByteStringSerializer[KS], destSer: ByteStringSerializer[KD], deserializerR: ByteStringDeserializer[R]) extends RedisCommandBulkOptionByteString[R] { 75 | val isMasterOnly = true 76 | val encodedRequest: ByteString = encode("RPOPLPUSH", Seq(sourceSer.serialize(source), destSer.serialize(destination))) 77 | val deserializer: ByteStringDeserializer[R] = 
deserializerR 78 | 79 | } 80 | 81 | case class Rpush[K, V](key: K, values: Seq[V])(implicit redisKey: ByteStringSerializer[K], convert: ByteStringSerializer[V]) extends SimpleClusterKey[K] with RedisCommandIntegerLong { 82 | val isMasterOnly = true 83 | val encodedRequest: ByteString = encode("RPUSH", keyAsString +: values.map(v => convert.serialize(v))) 84 | } 85 | 86 | case class Rpushx[K, V](key: K, value: V)(implicit redisKey: ByteStringSerializer[K], convert: ByteStringSerializer[V]) extends SimpleClusterKey[K] with RedisCommandIntegerLong { 87 | val isMasterOnly = true 88 | val encodedRequest: ByteString = encode("RPUSHX", Seq(keyAsString, convert.serialize(value))) 89 | } -------------------------------------------------------------------------------- /src/test/scala/redis/commands/HashesSpec.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import redis._ 4 | 5 | import akka.util.ByteString 6 | 7 | class HashesSpec extends RedisStandaloneServer { 8 | 9 | "Hashes commands" should { 10 | "HDEL" in { 11 | val r = for { 12 | _ <- redis.hset("hdelKey", "field", "value") 13 | d <- redis.hdel("hdelKey", "field", "fieldNonexisting") 14 | } yield { 15 | d shouldBe 1 16 | } 17 | r.futureValue 18 | } 19 | 20 | "HEXISTS" in { 21 | val r = for { 22 | _ <- redis.hset("hexistsKey", "field", "value") 23 | exist <- redis.hexists("hexistsKey", "field") 24 | notExist <- redis.hexists("hexistsKey", "fieldNotExisting") 25 | } yield { 26 | exist shouldBe true 27 | notExist shouldBe false 28 | } 29 | r.futureValue 30 | } 31 | 32 | "HGET" in { 33 | val r = for { 34 | _ <- redis.hset("hgetKey", "field", "value") 35 | get <- redis.hget("hgetKey", "field") 36 | get2 <- redis.hget("hgetKey", "fieldNotExisting") 37 | } yield { 38 | get shouldBe Some(ByteString("value")) 39 | get2 shouldBe None 40 | } 41 | r.futureValue 42 | } 43 | 44 | "HGETALL" in { 45 | val r = for { 46 | _ <- redis.hset("hgetallKey", "field", "value") 47 | get <- redis.hgetall("hgetallKey") 48 | get2 <- redis.hgetall("hgetallKeyNotExisting") 49 | } yield { 50 | get shouldBe Map("field" -> ByteString("value")) 51 | get2 shouldBe Map.empty 52 | } 53 | r.futureValue 54 | } 55 | 56 | "HINCRBY" in { 57 | val r = for { 58 | _ <- redis.hset("hincrbyKey", "field", "10") 59 | i <- redis.hincrby("hincrbyKey", "field", 1) 60 | ii <- redis.hincrby("hincrbyKey", "field", -1) 61 | } yield { 62 | i shouldBe 11 63 | ii shouldBe 10 64 | } 65 | r.futureValue 66 | } 67 | 68 | "HINCRBYFLOAT" in { 69 | val r = for { 70 | _ <- redis.hset("hincrbyfloatKey", "field", "10.5") 71 | i <- redis.hincrbyfloat("hincrbyfloatKey", "field", 0.1) 72 | ii <- redis.hincrbyfloat("hincrbyfloatKey", "field", -1.1) 73 | } yield { 74 | i shouldBe 10.6 75 | ii shouldBe 9.5 76 | } 77 | r.futureValue 78 | } 79 | 80 | "HKEYS" in { 81 | val r = for { 82 | _ <- redis.hset("hkeysKey", "field", "value") 83 | keys <- redis.hkeys("hkeysKey") 84 | } yield { 85 | keys shouldBe Seq("field") 86 | } 87 | r.futureValue 88 | } 89 | 90 | "HLEN" in { 91 | val r = for { 92 | _ <- redis.hset("hlenKey", "field", "value") 93 | hLength <- redis.hlen("hlenKey") 94 | } yield { 95 | hLength shouldBe 1 96 | } 97 | r.futureValue 98 | } 99 | 100 | "HMGET" in { 101 | val r = for { 102 | _ <- redis.hset("hmgetKey", "field", "value") 103 | hmget <- redis.hmget("hmgetKey", "field", "nofield") 104 | } yield { 105 | hmget shouldBe Seq(Some(ByteString("value")), None) 106 | } 107 | r.futureValue 108 | } 109 | 110 | "HMSET" in { 111 | val r = for 
{ 112 | _ <- redis.hmset("hmsetKey", Map("field" -> "value1", "field2" -> "value2")) 113 | v1 <- redis.hget("hmsetKey", "field") 114 | v2 <- redis.hget("hmsetKey", "field2") 115 | } yield { 116 | v1 shouldBe Some(ByteString("value1")) 117 | v2 shouldBe Some(ByteString("value2")) 118 | } 119 | r.futureValue 120 | } 121 | 122 | "HSET update" in { 123 | val r = for { 124 | _ <- redis.hdel("hsetKey", "field") 125 | set <- redis.hset("hsetKey", "field", "value") 126 | update <- redis.hset("hsetKey", "field", "value2") 127 | v1 <- redis.hget("hsetKey", "field") 128 | } yield { 129 | set shouldBe true 130 | update shouldBe false 131 | v1 shouldBe Some(ByteString("value2")) 132 | } 133 | r.futureValue 134 | } 135 | 136 | "HSETNX" in { 137 | val r = for { 138 | _ <- redis.hdel("hsetnxKey", "field") 139 | set <- redis.hsetnx("hsetnxKey", "field", "value") 140 | doNothing <- redis.hsetnx("hsetnxKey", "field", "value2") 141 | v1 <- redis.hget("hsetnxKey", "field") 142 | } yield { 143 | set shouldBe true 144 | doNothing shouldBe false 145 | v1 shouldBe Some(ByteString("value")) 146 | } 147 | r.futureValue 148 | } 149 | 150 | "HSCAN" in { 151 | val initialData = (1 to 20).grouped(2).map(x => x.head.toString -> x.tail.head.toString).toMap 152 | val r = for { 153 | _ <- redis.del("hscan") 154 | _ <- redis.hmset("hscan", initialData) 155 | scanResult <- redis.hscan[String]("hscan", count = Some(300)) 156 | } yield { 157 | scanResult.data.values.toList.map(_.toInt).sorted shouldBe (2 to 20 by 2) 158 | scanResult.index shouldBe 0 159 | } 160 | r.futureValue 161 | } 162 | 163 | "HVALS" in { 164 | val r = for { 165 | _ <- redis.hdel("hvalsKey", "field") 166 | emp <- redis.hvals("hvalsKey") 167 | _ <- redis.hset("hvalsKey", "field", "value") 168 | some <- redis.hvals("hvalsKey") 169 | } yield { 170 | emp shouldBe empty 171 | some shouldBe Seq(ByteString("value")) 172 | } 173 | r.futureValue 174 | } 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /src/test/scala/redis/RedisClusterTest.scala: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import akka.util.ByteString 4 | import redis.api.clusters.ClusterSlots 5 | import redis.protocol._ 6 | 7 | 8 | 9 | /** 10 | * Created by npeters on 20/05/16.
11 | */ 12 | class RedisClusterTest extends RedisClusterClients { 13 | 14 | var redisCluster: RedisCluster = null 15 | override def beforeAll(): Unit = { 16 | super.beforeAll() 17 | redisCluster = RedisCluster(nodePorts.map(p => RedisServer("127.0.0.1", p))) 18 | } 19 | 20 | "RedisComputeSlot" should { 21 | "simple" in { 22 | RedisComputeSlot.hashSlot("foo") shouldBe 12182 23 | RedisComputeSlot.hashSlot("somekey") shouldBe 11058 24 | RedisComputeSlot.hashSlot("somekey3452345325453532452345") shouldBe 15278 25 | RedisComputeSlot.hashSlot("rzarzaZERAZERfqsfsdQSFD") shouldBe 14258 26 | RedisComputeSlot.hashSlot("{foo}46546546546") shouldBe 12182 27 | RedisComputeSlot.hashSlot("foo_312312") shouldBe 5839 28 | RedisComputeSlot.hashSlot("aazaza{aa") shouldBe 11473 29 | } 30 | } 31 | 32 | "clusterSlots" should { 33 | "encoding" ignore { 34 | val clusterSlotsAsByteString = ByteString(new sun.misc.BASE64Decoder().decodeBuffer( 35 | "KjMNCio0DQo6MA0KOjU0NjANCiozDQokOQ0KMTI3LjAuMC4xDQo6NzAwMA0KJDQwDQplNDM1OTlkZmY2ZTNhN2I5ZWQ1M2IxY2EwZGI0YmQwMDlhODUwYmE1DQoqMw0KJDkNCjEyNy4wLjAuMQ0KOjcwMDMNCiQ0MA0KYzBmNmYzOWI2NDg4MTVhMTllNDlkYzQ1MzZkMmExM2IxNDdhOWY1MA0KKjQNCjoxMDkyMw0KOjE2MzgzDQoqMw0KJDkNCjEyNy4wLjAuMQ0KOjcwMDINCiQ0MA0KNDhkMzcxMjBmMjEzNTc4Y2IxZWFjMzhlNWYyYmY1ODlkY2RhNGEwYg0KKjMNCiQ5DQoxMjcuMC4wLjENCjo3MDA1DQokNDANCjE0Zjc2OWVlNmU1YWY2MmZiMTc5NjZlZDRlZWRmMTIxOWNjYjE1OTINCio0DQo6NTQ2MQ0KOjEwOTIyDQoqMw0KJDkNCjEyNy4wLjAuMQ0KOjcwMDENCiQ0MA0KYzhlYzM5MmMyMjY5NGQ1ODlhNjRhMjA5OTliNGRkNWNiNDBlNDIwMQ0KKjMNCiQ5DQoxMjcuMC4wLjENCjo3MDA0DQokNDANCmVmYThmZDc0MDQxYTNhOGQ3YWYyNWY3MDkwM2I5ZTFmNGMwNjRhMjENCg==")) 36 | val clusterSlotsAsBulk: DecodeResult[RedisReply] = RedisProtocolReply.decodeReply(clusterSlotsAsByteString) 37 | val dr: DecodeResult[String] = clusterSlotsAsBulk.map({ 38 | case a: MultiBulk => 39 | ClusterSlots().decodeReply(a).toString() 40 | case _ => "fail" 41 | }) 42 | 43 | val r = dr match { 44 | case FullyDecoded(decodeValue, _) => 45 | decodeValue shouldBe "Vector(ClusterSlot(0,5460,ClusterNode(127.0.0.1,7000,e43599dff6e3a7b9ed53b1ca0db4bd009a850ba5),Stream(ClusterNode(127.0.0.1,7003,c0f6f39b648815a19e49dc4536d2a13b147a9f50), ?)), " + 46 | "ClusterSlot(10923,16383,ClusterNode(127.0.0.1,7002,48d37120f213578cb1eac38e5f2bf589dcda4a0b),Stream(ClusterNode(127.0.0.1,7005,14f769ee6e5af62fb17966ed4eedf1219ccb1592), ?)), " + 47 | "ClusterSlot(5461,10922,ClusterNode(127.0.0.1,7001,c8ec392c22694d589a64a20999b4dd5cb40e4201),Stream(ClusterNode(127.0.0.1,7004,efa8fd74041a3a8d7af25f70903b9e1f4c064a21), ?)))" 48 | 49 | case _ => fail() 50 | } 51 | 52 | r 53 | } 54 | 55 | } 56 | 57 | "Strings" should { 58 | "set-get" in { 59 | log.debug("set") 60 | redisCluster.set[String]("foo", "FOO").futureValue 61 | log.debug("exists") 62 | redisCluster.exists("foo").futureValue shouldBe (true) 63 | 64 | log.debug("get") 65 | redisCluster.get[String]("foo").futureValue shouldBe Some("FOO") 66 | 67 | log.debug("del") 68 | redisCluster.del("foo", "foo").futureValue 69 | 70 | log.debug("exists") 71 | redisCluster.exists("foo").futureValue shouldBe (false) 72 | 73 | } 74 | 75 | "mset-mget" in { 76 | log.debug("mset") 77 | redisCluster.mset[String](Map("{foo}1" -> "FOO1", "{foo}2" -> "FOO2")).futureValue 78 | log.debug("exists") 79 | redisCluster.exists("{foo}1").futureValue shouldBe (true) 80 | redisCluster.exists("{foo}2").futureValue shouldBe (true) 81 | 82 | log.debug("mget") 83 | redisCluster.mget[String]("{foo}1", "{foo}2").futureValue shouldBe Seq(Some("FOO1"), Some("FOO2")) 84 | 85 | log.debug("del") 86 | 
redisCluster.del("{foo}1", "{foo}2").futureValue 87 | 88 | log.debug("exists") 89 | redisCluster.exists("{foo}1").futureValue shouldBe (false) 90 | 91 | } 92 | } 93 | 94 | "tools" should { 95 | "groupby" in { 96 | redisCluster 97 | .groupByCluserServer(Seq("{foo1}1", "{foo2}1", "{foo1}2", "{foo2}2")) 98 | .sortBy(_.head) 99 | .toList shouldBe Seq(Seq("{foo2}1", "{foo2}2"), Seq("{foo1}1", "{foo1}2")).sortBy(_.head) 100 | } 101 | } 102 | 103 | "long run" should { 104 | "wait" in { 105 | log.debug("set " + redisCluster.getClusterAndConnection(RedisComputeSlot.hashSlot("foo1")).get._1.master.toString) 106 | redisCluster.set[String]("foo1", "FOO").futureValue 107 | redisCluster.get[String]("foo1").futureValue 108 | log.debug("wait...") 109 | log.debug("get") 110 | redisCluster.get[String]("foo1").futureValue shouldBe Some("FOO") 111 | } 112 | } 113 | 114 | "clusterInfo" should { 115 | "just work" in { 116 | val res = redisCluster.clusterInfo().futureValue 117 | res should not be empty 118 | for (v <- res) { 119 | log.debug(s"Key ${v._1} value ${v._2}") 120 | } 121 | res("cluster_state") shouldBe "ok" 122 | } 123 | } 124 | 125 | "clusterNodes" should { 126 | "just work" in { 127 | val res = redisCluster.clusterNodes().futureValue 128 | res should not be empty 129 | for (m <- res) { 130 | log.debug(m.toString) 131 | } 132 | res.size shouldBe 6 133 | res.count(_.link_state == "connected") shouldBe 6 134 | } 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /src/main/scala/redis/api/Servers.scala: -------------------------------------------------------------------------------- 1 | package redis.api.servers 2 | 3 | import redis._ 4 | import akka.util.ByteString 5 | import redis.protocol.{MultiBulk, Bulk} 6 | import redis.api.ShutdownModifier 7 | 8 | case object Bgrewriteaof extends RedisCommandStatusString { 9 | val isMasterOnly = true 10 | val encodedRequest: ByteString = encode("BGREWRITEAOF") 11 | } 12 | 13 | case object Bgsave extends RedisCommandStatusString { 14 | val isMasterOnly = true 15 | val encodedRequest: ByteString = encode("BGSAVE") 16 | } 17 | 18 | case class ClientKill(ip: String, port: Int) extends RedisCommandStatusBoolean { 19 | val isMasterOnly: Boolean = true 20 | val encodedRequest: ByteString = encode("CLIENT", Seq(ByteString("KILL"), ByteString(ip + ":" + port))) 21 | } 22 | 23 | case object ClientList extends RedisCommandBulk[Seq[Map[String, String]]] { 24 | val isMasterOnly: Boolean = true 25 | val encodedRequest: ByteString = encode("CLIENT", Seq(ByteString("LIST"))) 26 | 27 | def decodeReply(r: Bulk): Seq[Map[String, String]] = r.asOptByteString.map(bs => { 28 | val s = bs.utf8String 29 | val r = s.split('\n').map(line => { 30 | line.split(' ').map(kv => { 31 | val keyValue = kv.split('=') 32 | val value = if (keyValue.length > 1) keyValue(1) else "" 33 | (keyValue(0), value) 34 | }).toMap 35 | }).toSeq 36 | r 37 | }).getOrElse(Seq.empty) 38 | } 39 | 40 | case object ClientGetname extends RedisCommandBulkOptionByteString[String] { 41 | val isMasterOnly: Boolean = true 42 | val encodedRequest: ByteString = encode("CLIENT", Seq(ByteString("GETNAME"))) 43 | val deserializer: ByteStringDeserializer[String] = ByteStringDeserializer.String 44 | } 45 | 46 | 47 | case class ClientSetname(connectionName: String) extends RedisCommandStatusBoolean { 48 | val isMasterOnly: Boolean = true 49 | val encodedRequest: ByteString = encode("CLIENT", Seq(ByteString("SETNAME"), ByteString(connectionName))) 50 | } 51 | 52 | case class 
ConfigGet(parameter: String) extends RedisCommandMultiBulk[Map[String, String]] { 53 | val isMasterOnly: Boolean = true 54 | val encodedRequest: ByteString = encode("CONFIG", Seq(ByteString("GET"), ByteString(parameter))) 55 | 56 | def decodeReply(r: MultiBulk): Map[String, String] = MultiBulkConverter.toMapString(r) 57 | } 58 | 59 | case class ConfigSet(parameter: String, value: String) extends RedisCommandStatusBoolean { 60 | val isMasterOnly: Boolean = true 61 | val encodedRequest: ByteString = encode("CONFIG", Seq(ByteString("SET"), ByteString(parameter), ByteString(value))) 62 | } 63 | 64 | case object ConfigResetstat extends RedisCommandStatusBoolean { 65 | val isMasterOnly: Boolean = true 66 | val encodedRequest: ByteString = encode("CONFIG", Seq(ByteString("RESETSTAT"))) 67 | } 68 | 69 | case object Dbsize extends RedisCommandIntegerLong { 70 | val isMasterOnly: Boolean = true 71 | val encodedRequest: ByteString = encode("DBSIZE") 72 | } 73 | 74 | case class DebugObject[K](key: K)(implicit redisKey: ByteStringSerializer[K]) extends RedisCommandStatusString { 75 | val isMasterOnly: Boolean = true 76 | val encodedRequest: ByteString = encode("DEBUG", Seq(ByteString("OBJECT"), redisKey.serialize(key))) 77 | } 78 | 79 | case object DebugSegfault extends RedisCommandStatusString { 80 | val isMasterOnly: Boolean = true 81 | val encodedRequest: ByteString = encode("DEBUG SEGFAULT") 82 | } 83 | 84 | case object Flushall extends RedisCommandStatusBoolean { 85 | val isMasterOnly: Boolean = true 86 | val encodedRequest: ByteString = encode("FLUSHALL") 87 | } 88 | 89 | case object Flushdb extends RedisCommandStatusBoolean { 90 | val isMasterOnly: Boolean = true 91 | val encodedRequest: ByteString = encode("FLUSHDB") 92 | } 93 | 94 | case class Info(section: Option[String] = None) extends RedisCommandBulk[String] { 95 | val isMasterOnly: Boolean = true 96 | val encodedRequest: ByteString = encode("INFO", section.map(s => Seq(ByteString(s))).getOrElse(Seq())) 97 | 98 | def decodeReply(r: Bulk): String = r.toOptString.get 99 | } 100 | 101 | case object Lastsave extends RedisCommandIntegerLong { 102 | val isMasterOnly: Boolean = true 103 | val encodedRequest: ByteString = encode("LASTSAVE") 104 | } 105 | 106 | case object Save extends RedisCommandStatusBoolean { 107 | val isMasterOnly: Boolean = true 108 | val encodedRequest: ByteString = encode("SAVE") 109 | } 110 | 111 | case class Slaveof(ip: String, port: Int) extends RedisCommandStatusBoolean { 112 | val isMasterOnly: Boolean = true 113 | val encodedRequest: ByteString = encode("SLAVEOF", Seq(ByteString(ip), ByteString(port.toString))) 114 | } 115 | 116 | case class Shutdown(modifier: Option[ShutdownModifier] = None) extends RedisCommandStatusBoolean { 117 | val isMasterOnly: Boolean = true 118 | val encodedRequest: ByteString = encode("SHUTDOWN", modifier.map(m => Seq(ByteString(m.toString))).getOrElse(Seq.empty)) 119 | } 120 | 121 | case object SlaveofNoOne extends RedisCommandStatusBoolean { 122 | val isMasterOnly: Boolean = true 123 | val encodedRequest: ByteString = encode("SLAVEOF NO ONE") 124 | } 125 | 126 | case object Time extends RedisCommandMultiBulk[(Long, Long)] { 127 | val isMasterOnly: Boolean = true 128 | val encodedRequest: ByteString = encode("TIME") 129 | 130 | def decodeReply(mb: MultiBulk): (Long, Long) = { 131 | mb.responses.map(r => { 132 | (r.head.toByteString.utf8String.toLong, r.tail.head.toByteString.utf8String.toLong) 133 | }).get 134 | } 135 | } 136 | 137 | //case class Log(id: Long, timestamp: Long, duration: 
Long, command: Seq[ByteString]) 138 | 139 | //case class Slowlog(subcommand: String, argurment: String) -------------------------------------------------------------------------------- /src/main/scala/redis/actors/RedisWorkerIO.scala: -------------------------------------------------------------------------------- 1 | package redis.actors 2 | 3 | import akka.actor.{ActorLogging, ActorRef, Actor} 4 | import akka.io.Tcp 5 | import akka.util.{ByteStringBuilder, ByteString} 6 | import java.net.InetSocketAddress 7 | import akka.io.Tcp._ 8 | import akka.io.Tcp.Connected 9 | import akka.io.Tcp.Register 10 | import akka.io.Tcp.Connect 11 | import akka.io.Tcp.CommandFailed 12 | import akka.io.Tcp.Received 13 | import scala.concurrent.duration.FiniteDuration 14 | 15 | abstract class RedisWorkerIO(val address: InetSocketAddress, onConnectStatus: Boolean => Unit, connectTimeout: Option[FiniteDuration] = None) extends Actor with ActorLogging { 16 | 17 | private var currAddress = address 18 | 19 | import context._ 20 | 21 | val tcp = akka.io.IO(Tcp)(context.system) 22 | 23 | // todo watch tcpWorker 24 | var tcpWorker: ActorRef = null 25 | 26 | val bufferWrite: ByteStringBuilder = new ByteStringBuilder 27 | 28 | var readyToWrite = false 29 | 30 | override def preStart(): Unit = { 31 | if (tcpWorker != null) { 32 | tcpWorker ! Close 33 | } 34 | log.info(s"Connect to $currAddress") 35 | // Create a new InetSocketAddress to clear the cached IP address. 36 | currAddress = new InetSocketAddress(currAddress.getHostName, currAddress.getPort) 37 | tcp ! Connect(remoteAddress = currAddress, options = SO.KeepAlive(on = true) :: Nil, timeout = connectTimeout) 38 | } 39 | 40 | def reconnect() = { 41 | become(receive) 42 | preStart() 43 | } 44 | 45 | override def postStop(): Unit = { 46 | log.info("RedisWorkerIO stop") 47 | } 48 | 49 | def initConnectedBuffer(): Unit = { 50 | readyToWrite = true 51 | } 52 | 53 | def receive = connecting orElse writing 54 | 55 | def connecting: Receive = { 56 | case a: InetSocketAddress => onAddressChanged(a) 57 | case c: Connected => onConnected(c) 58 | case Reconnect => reconnect() 59 | case c: CommandFailed => onConnectingCommandFailed(c) 60 | case c: ConnectionClosed => onClosingConnectionClosed() // not the current opening connection 61 | } 62 | 63 | def onConnected(cmd: Connected) = { 64 | sender ! Register(self) 65 | tcpWorker = sender 66 | initConnectedBuffer() 67 | tryInitialWrite() // TODO write something in head buffer 68 | become(connected) 69 | log.info("Connected to " + cmd.remoteAddress) 70 | onConnectStatus(true) 71 | } 72 | 73 | def onConnectingCommandFailed(cmdFailed: CommandFailed) = { 74 | log.error(cmdFailed.toString) 75 | scheduleReconnect() 76 | } 77 | 78 | def connected: Receive = writing orElse reading 79 | 80 | private def reading: Receive = { 81 | case WriteAck => tryWrite() 82 | case Received(dataByteString) => { 83 | if(sender == tcpWorker) 84 | onDataReceived(dataByteString) 85 | else 86 | onDataReceivedOnClosingConnection(dataByteString) 87 | } 88 | case a: InetSocketAddress => onAddressChanged(a) 89 | case c: ConnectionClosed => { 90 | if(sender == tcpWorker) 91 | onConnectionClosed(c) 92 | else { 93 | onConnectStatus(false) 94 | onClosingConnectionClosed() 95 | } 96 | } 97 | case c: CommandFailed => onConnectedCommandFailed(c) 98 | } 99 | 100 | def onAddressChanged(addr: InetSocketAddress): Unit = { 101 | log.info(s"Address change [old=$address, new=$addr]") 102 | tcpWorker ! 
ConfirmedClose // close the sending direction of the connection (TCP FIN) 103 | currAddress = addr 104 | scheduleReconnect() 105 | } 106 | 107 | def onConnectionClosed(c: ConnectionClosed) = { 108 | log.warning(s"ConnectionClosed $c") 109 | scheduleReconnect() 110 | } 111 | 112 | /** O/S buffer was full 113 | * Maybe too much data in the Command? 114 | */ 115 | def onConnectedCommandFailed(commandFailed: CommandFailed) = { 116 | log.error(commandFailed.toString) // O/S buffer was full 117 | tcpWorker ! commandFailed.cmd 118 | } 119 | 120 | def scheduleReconnect(): Unit = { 121 | cleanState() 122 | log.info(s"Trying to reconnect in $reconnectDuration") 123 | this.context.system.scheduler.scheduleOnce(reconnectDuration, self, Reconnect) 124 | become(receive) 125 | } 126 | 127 | def cleanState(): Unit = { 128 | onConnectStatus(false) 129 | onConnectionClosed() 130 | readyToWrite = false 131 | bufferWrite.clear() 132 | } 133 | 134 | def writing: Receive 135 | 136 | def onConnectionClosed(): Unit 137 | 138 | def onDataReceived(dataByteString: ByteString): Unit 139 | 140 | def onDataReceivedOnClosingConnection(dataByteString: ByteString): Unit 141 | 142 | def onClosingConnectionClosed(): Unit 143 | 144 | def onWriteSent(): Unit 145 | 146 | def restartConnection() = reconnect() 147 | 148 | def onConnectWrite(): ByteString 149 | 150 | def tryInitialWrite(): Unit = { 151 | val data = onConnectWrite() 152 | 153 | if (data.nonEmpty) { 154 | writeWorker(data ++ bufferWrite.result()) 155 | bufferWrite.clear() 156 | } else { 157 | tryWrite() 158 | } 159 | } 160 | 161 | def tryWrite(): Unit = { 162 | if (bufferWrite.length == 0) { 163 | readyToWrite = true 164 | } else { 165 | writeWorker(bufferWrite.result()) 166 | bufferWrite.clear() 167 | } 168 | } 169 | 170 | def write(byteString: ByteString): Unit = { 171 | if (readyToWrite) { 172 | writeWorker(byteString) 173 | } else { 174 | bufferWrite.append(byteString) 175 | } 176 | } 177 | 178 | import scala.concurrent.duration.{DurationInt, FiniteDuration} 179 | 180 | def reconnectDuration: FiniteDuration = 2 seconds 181 | 182 | private def writeWorker(byteString: ByteString): Unit = { 183 | onWriteSent() 184 | tcpWorker !
Write(byteString, WriteAck) 185 | readyToWrite = false 186 | } 187 | 188 | } 189 | 190 | 191 | object WriteAck extends Event 192 | 193 | object Reconnect -------------------------------------------------------------------------------- /src/main/scala/redis/api/Hashes.scala: -------------------------------------------------------------------------------- 1 | package redis.api.hashes 2 | 3 | import redis._ 4 | import akka.util.ByteString 5 | import scala.collection.mutable 6 | import scala.annotation.tailrec 7 | import redis.protocol.{RedisReply, MultiBulk} 8 | 9 | case class Hdel[K, KK](key: K, fields: Seq[KK])(implicit redisKey: ByteStringSerializer[K], redisFields: ByteStringSerializer[KK]) extends SimpleClusterKey[K] with RedisCommandIntegerLong { 10 | val isMasterOnly = true 11 | val encodedRequest: ByteString = encode("HDEL", keyAsString +: fields.map(redisFields.serialize)) 12 | } 13 | 14 | case class Hexists[K, KK](key: K, field: KK)(implicit redisKey: ByteStringSerializer[K], redisFields: ByteStringSerializer[KK]) extends SimpleClusterKey[K] with RedisCommandIntegerBoolean { 15 | val isMasterOnly = false 16 | val encodedRequest: ByteString = encode("HEXISTS", Seq(keyAsString, redisFields.serialize(field))) 17 | } 18 | 19 | case class Hget[K, KK, R](key: K, field: KK)(implicit redisKey: ByteStringSerializer[K], redisFields: ByteStringSerializer[KK], deserializerR: ByteStringDeserializer[R]) 20 | extends SimpleClusterKey[K] with RedisCommandBulkOptionByteString[R] { 21 | val isMasterOnly = false 22 | val encodedRequest: ByteString = encode("HGET", Seq(keyAsString, redisFields.serialize(field))) 23 | val deserializer: ByteStringDeserializer[R] = deserializerR 24 | } 25 | 26 | case class Hgetall[K, R](key: K)(implicit redisKey: ByteStringSerializer[K], deserializerR: ByteStringDeserializer[R]) extends SimpleClusterKey[K] with RedisCommandMultiBulk[Map[String, R]] { 27 | val isMasterOnly = false 28 | val encodedRequest: ByteString = encode("HGETALL", Seq(keyAsString)) 29 | 30 | def decodeReply(mb: MultiBulk) = mb.responses.map(r => { 31 | val builder = Map.newBuilder[String, R] 32 | builder.sizeHint(r.length / 2) 33 | seqToMap(r, builder) 34 | builder.result() 35 | }).get 36 | 37 | @tailrec 38 | private def seqToMap(seq: Vector[RedisReply], builder: mutable.Builder[(String, R), Map[String, R]]): Unit = { 39 | if (seq.nonEmpty) { 40 | val head = seq.head.toByteString 41 | val tail = seq.tail 42 | builder += (head.utf8String -> deserializerR.deserialize(tail.head.toByteString)) 43 | seqToMap(tail.tail, builder) 44 | } 45 | } 46 | } 47 | 48 | case class Hincrby[K, KK](key: K, fields: KK, increment: Long)(implicit redisKey: ByteStringSerializer[K], redisFields: ByteStringSerializer[KK]) 49 | extends SimpleClusterKey[K] with RedisCommandIntegerLong { 50 | val isMasterOnly = true 51 | val encodedRequest: ByteString = encode("HINCRBY", Seq(keyAsString, redisFields.serialize(fields), ByteString(increment.toString))) 52 | } 53 | 54 | case class Hincrbyfloat[K, KK](key: K, fields: KK, increment: Double)(implicit redisKey: ByteStringSerializer[K], redisFields: ByteStringSerializer[KK]) 55 | extends SimpleClusterKey[K] with RedisCommandBulkDouble { 56 | val isMasterOnly = true 57 | val encodedRequest: ByteString = encode("HINCRBYFLOAT", Seq(keyAsString, redisFields.serialize(fields), ByteString(increment.toString))) 58 | } 59 | 60 | case class Hkeys[K](key: K)(implicit redisKey: ByteStringSerializer[K]) extends SimpleClusterKey[K] with RedisCommandMultiBulk[Seq[String]] { 61 | val isMasterOnly = 
false 62 | val encodedRequest: ByteString = encode("HKEYS", Seq(keyAsString)) 63 | 64 | def decodeReply(mb: MultiBulk) = MultiBulkConverter.toSeqString(mb) 65 | } 66 | 67 | case class Hlen[K](key: K)(implicit redisKey: ByteStringSerializer[K]) extends SimpleClusterKey[K] with RedisCommandIntegerLong { 68 | val isMasterOnly = false 69 | val encodedRequest: ByteString = encode("HLEN", Seq(keyAsString)) 70 | } 71 | 72 | case class Hmget[K, KK, R](key: K, fields: Seq[KK])(implicit redisKey: ByteStringSerializer[K], redisFields: ByteStringSerializer[KK], deserializerR: ByteStringDeserializer[R]) 73 | extends SimpleClusterKey[K] with RedisCommandMultiBulk[Seq[Option[R]]] { 74 | val isMasterOnly = false 75 | val encodedRequest: ByteString = encode("HMGET", keyAsString +: fields.map(redisFields.serialize)) 76 | 77 | def decodeReply(mb: MultiBulk) = MultiBulkConverter.toSeqOptionByteString(mb) 78 | } 79 | 80 | case class Hmset[K, KK, V](key: K, keysValues: Map[KK, V])(implicit redisKey: ByteStringSerializer[K], redisFields: ByteStringSerializer[KK], convert: ByteStringSerializer[V]) 81 | extends SimpleClusterKey[K] with RedisCommandStatusBoolean { 82 | val isMasterOnly = true 83 | val encodedRequest: ByteString = encode("HMSET", keyAsString +: keysValues.foldLeft(Seq.empty[ByteString])({ 84 | case (acc, e) => redisFields.serialize(e._1) +: convert.serialize(e._2) +: acc 85 | })) 86 | } 87 | 88 | case class Hset[K, KK, V](key: K, field: KK, value: V)(implicit redisKey: ByteStringSerializer[K], redisFields: ByteStringSerializer[KK], convert: ByteStringSerializer[V]) 89 | extends SimpleClusterKey[K] with RedisCommandIntegerBoolean { 90 | val isMasterOnly = true 91 | val encodedRequest: ByteString = encode("HSET", Seq(keyAsString, redisFields.serialize(field), convert.serialize(value))) 92 | } 93 | 94 | case class Hsetnx[K, KK, V](key: K, field: KK, value: V)(implicit redisKey: ByteStringSerializer[K], redisFields: ByteStringSerializer[KK], convert: ByteStringSerializer[V]) 95 | extends SimpleClusterKey[K] with RedisCommandIntegerBoolean { 96 | val isMasterOnly = true 97 | val encodedRequest: ByteString = encode("HSETNX", Seq(keyAsString, redisFields.serialize(field), convert.serialize(value))) 98 | } 99 | 100 | case class Hvals[K, R](key: K)(implicit redisKey: ByteStringSerializer[K], deserializerR: ByteStringDeserializer[R]) extends SimpleClusterKey[K] with RedisCommandMultiBulkSeqByteString[R] { 101 | val isMasterOnly = false 102 | val encodedRequest: ByteString = encode("HVALS", Seq(keyAsString)) 103 | val deserializer: ByteStringDeserializer[R] = deserializerR 104 | } 105 | 106 | case class HScan[K, C, R](key: K, cursor: C, count: Option[Int], matchGlob: Option[String])(implicit redisKey: ByteStringSerializer[K], deserializer: ByteStringDeserializer[R], cursorConverter: ByteStringSerializer[C]) extends SimpleClusterKey[K] with RedisCommandMultiBulkCursor[Map[String, R]] { 107 | val isMasterOnly: Boolean = false 108 | 109 | val encodedRequest: ByteString = encode("HSCAN", withOptionalParams(Seq(keyAsString, cursorConverter.serialize(cursor)))) 110 | 111 | def decodeResponses(responses: Seq[RedisReply]) = 112 | responses.grouped(2).map { xs => 113 | val k = xs.head 114 | val v = xs(1) 115 | 116 | k.toByteString.utf8String -> deserializer.deserialize(v.toByteString) 117 | }.toMap 118 | 119 | val empty: Map[String, R] = Map.empty 120 | } -------------------------------------------------------------------------------- /src/main/scala/redis/api/Sets.scala: 
-------------------------------------------------------------------------------- 1 | package redis.api.sets 2 | 3 | import redis._ 4 | import akka.util.ByteString 5 | import redis.protocol.RedisReply 6 | 7 | case class Sadd[K, V](key: K, members: Seq[V])(implicit redisKey: ByteStringSerializer[K], convert: ByteStringSerializer[V]) extends SimpleClusterKey[K] with RedisCommandIntegerLong { 8 | val isMasterOnly = true 9 | val encodedRequest: ByteString = encode("SADD", keyAsString +: members.map(v => convert.serialize(v))) 10 | } 11 | 12 | case class Scard[K](key: K)(implicit redisKey: ByteStringSerializer[K]) extends SimpleClusterKey[K] with RedisCommandIntegerLong { 13 | val isMasterOnly = false 14 | val encodedRequest: ByteString = encode("SCARD", Seq(keyAsString)) 15 | } 16 | 17 | case class Sdiff[K, KK, R](key: K, keys: Seq[KK])(implicit redisKey: ByteStringSerializer[K], redisKeys: ByteStringSerializer[KK], deserializerR: ByteStringDeserializer[R]) 18 | extends RedisCommandMultiBulkSeqByteString[R] { 19 | val isMasterOnly = false 20 | val encodedRequest: ByteString = encode("SDIFF", redisKey.serialize(key) +: keys.map(redisKeys.serialize)) 21 | val deserializer: ByteStringDeserializer[R] = deserializerR 22 | } 23 | 24 | case class Sdiffstore[KD, K, KK](destination: KD, key: K, keys: Seq[KK]) 25 | (implicit redisDest: ByteStringSerializer[KD], redisKey: ByteStringSerializer[K], redisKeys: ByteStringSerializer[KK]) 26 | extends RedisCommandIntegerLong { 27 | val isMasterOnly = true 28 | val encodedRequest: ByteString = encode("SDIFFSTORE", redisDest.serialize(destination) +: redisKey.serialize(key) +: keys.map(redisKeys.serialize)) 29 | } 30 | 31 | case class Sinter[K, KK, R](key: K, keys: Seq[KK])(implicit redisKey: ByteStringSerializer[K], redisKeys: ByteStringSerializer[KK], deserializerR: ByteStringDeserializer[R]) 32 | extends RedisCommandMultiBulkSeqByteString[R] { 33 | val isMasterOnly = false 34 | val encodedRequest: ByteString = encode("SINTER", redisKey.serialize(key) +: keys.map(redisKeys.serialize)) 35 | val deserializer: ByteStringDeserializer[R] = deserializerR 36 | } 37 | 38 | case class Sinterstore[KD, K, KK](destination: KD, key: K, keys: Seq[KK]) 39 | (implicit redisDest: ByteStringSerializer[KD], redisKey: ByteStringSerializer[K], redisKeys: ByteStringSerializer[KK]) 40 | extends RedisCommandIntegerLong { 41 | val isMasterOnly = true 42 | val encodedRequest: ByteString = encode("SINTERSTORE", redisDest.serialize(destination) +: redisKey.serialize(key) +: keys.map(redisKeys.serialize)) 43 | } 44 | 45 | case class Sismember[K, V](key: K, member: V)(implicit redisKey: ByteStringSerializer[K], convert: ByteStringSerializer[V]) extends SimpleClusterKey[K] with RedisCommandIntegerBoolean { 46 | val isMasterOnly = false 47 | val encodedRequest: ByteString = encode("SISMEMBER", Seq(keyAsString, convert.serialize(member))) 48 | } 49 | 50 | case class Smembers[K, R](key: K)(implicit redisKey: ByteStringSerializer[K], deserializerR: ByteStringDeserializer[R]) extends SimpleClusterKey[K] with RedisCommandMultiBulkSeqByteString[R] { 51 | val isMasterOnly = true 52 | val encodedRequest: ByteString = encode("SMEMBERS", Seq(keyAsString)) 53 | val deserializer: ByteStringDeserializer[R] = deserializerR 54 | } 55 | 56 | case class Smove[KS, KD, V](source: KS, destination: KD, member: V)(implicit redisSource: ByteStringSerializer[KS], redisDest: ByteStringSerializer[KD], convert: ByteStringSerializer[V]) 57 | extends RedisCommandIntegerBoolean { 58 | val isMasterOnly = true 59 | val 
encodedRequest: ByteString = encode("SMOVE", Seq(redisSource.serialize(source), redisDest.serialize(destination), convert.serialize(member))) 60 | } 61 | 62 | case class Spop[K, R](key: K)(implicit redisKey: ByteStringSerializer[K], deserializerR: ByteStringDeserializer[R]) extends SimpleClusterKey[K] with RedisCommandBulkOptionByteString[R] { 63 | val isMasterOnly = true 64 | val encodedRequest: ByteString = encode("SPOP", Seq(keyAsString)) 65 | val deserializer: ByteStringDeserializer[R] = deserializerR 66 | } 67 | 68 | case class Srandmember[K, R](key: K)(implicit redisKey: ByteStringSerializer[K], deserializerR: ByteStringDeserializer[R]) extends SimpleClusterKey[K] with RedisCommandBulkOptionByteString[R] { 69 | val isMasterOnly = false 70 | val encodedRequest: ByteString = encode("SRANDMEMBER", Seq(keyAsString)) 71 | val deserializer: ByteStringDeserializer[R] = deserializerR 72 | } 73 | 74 | case class Srandmembers[K, R](key: K, count: Long)(implicit redisKey: ByteStringSerializer[K], deserializerR: ByteStringDeserializer[R]) extends SimpleClusterKey[K] with RedisCommandMultiBulkSeqByteString[R] { 75 | val isMasterOnly = false 76 | val encodedRequest: ByteString = encode("SRANDMEMBER", Seq(keyAsString, ByteString(count.toString))) 77 | val deserializer: ByteStringDeserializer[R] = deserializerR 78 | } 79 | 80 | case class Srem[K, V](key: K, members: Seq[V])(implicit redisKey: ByteStringSerializer[K], convert: ByteStringSerializer[V]) extends SimpleClusterKey[K] with RedisCommandIntegerLong { 81 | val isMasterOnly = true 82 | val encodedRequest: ByteString = encode("SREM", keyAsString +: members.map(v => convert.serialize(v))) 83 | } 84 | 85 | case class Sunion[K, KK, R](key: K, keys: Seq[KK])(implicit redisKey: ByteStringSerializer[K], redisKeys: ByteStringSerializer[KK], deserializerR: ByteStringDeserializer[R]) 86 | extends RedisCommandMultiBulkSeqByteString[R] { 87 | val isMasterOnly = false 88 | val encodedRequest: ByteString = encode("SUNION", redisKey.serialize(key) +: keys.map(redisKeys.serialize)) 89 | val deserializer: ByteStringDeserializer[R] = deserializerR 90 | } 91 | 92 | 93 | case class Sunionstore[KD, K, KK](destination: KD, key: K, keys: Seq[KK]) 94 | (implicit redisDest: ByteStringSerializer[KD], redisKey: ByteStringSerializer[K], redisKeys: ByteStringSerializer[KK]) 95 | extends RedisCommandIntegerLong { 96 | val isMasterOnly = true 97 | val encodedRequest: ByteString = encode("SUNIONSTORE", redisDest.serialize(destination) +: redisKey.serialize(key) +: keys.map(redisKeys.serialize)) 98 | } 99 | 100 | case class Sscan[K, C, R](key: K, cursor: C, count: Option[Int], matchGlob: Option[String])(implicit redisKey: ByteStringSerializer[K], redisCursor: ByteStringSerializer[C], deserializerR: ByteStringDeserializer[R]) extends SimpleClusterKey[K] with RedisCommandMultiBulkCursor[Seq[R]] { 101 | val isMasterOnly = false 102 | val encodedRequest = encode("SSCAN", withOptionalParams(Seq(keyAsString, redisCursor.serialize(cursor)))) 103 | 104 | val empty = Seq.empty 105 | 106 | def decodeResponses(responses: Seq[RedisReply]) = 107 | responses.map(response => deserializerR.deserialize(response.toByteString)) 108 | } -------------------------------------------------------------------------------- /src/test/scala/redis/actors/RedisClientActorSpec.scala: -------------------------------------------------------------------------------- 1 | package redis.actors 2 | 3 | import java.net.InetSocketAddress 4 | 5 | import akka.actor._ 6 | import akka.testkit._ 7 | import 
akka.util.ByteString 8 | import redis.api.connection.Ping 9 | import redis.api.strings.Get 10 | import redis.{Operation, Redis, TestBase} 11 | 12 | import scala.collection.mutable 13 | import scala.concurrent.{Await, Promise} 14 | 15 | class RedisClientActorSpec extends TestKit(ActorSystem()) with TestBase with ImplicitSender { 16 | 17 | import scala.concurrent.duration._ 18 | 19 | val getConnectOperations: () => Seq[Operation[_, _]] = () => { 20 | Seq() 21 | } 22 | 23 | val timeout = 120.seconds dilated 24 | 25 | val onConnectStatus: (Boolean) => Unit = (status: Boolean) => {} 26 | 27 | "RedisClientActor" should { 28 | 29 | "ok" in within(timeout) { 30 | val probeReplyDecoder = TestProbe() 31 | val probeMock = TestProbe() 32 | 33 | val promiseConnect1 = Promise[String]() 34 | val opConnectPing = Operation(Ping, promiseConnect1) 35 | val promiseConnect2 = Promise[Option[ByteString]]() 36 | val getCmd = Get("key") 37 | val opConnectGet = Operation(getCmd, promiseConnect2) 38 | 39 | val getConnectOperations: () => Seq[Operation[_, _]] = () => { 40 | Seq(opConnectPing, opConnectGet) 41 | } 42 | 43 | val redisClientActor = TestActorRef[RedisClientActorMock]( 44 | Props( 45 | classOf[RedisClientActorMock], 46 | probeReplyDecoder.ref, 47 | probeMock.ref, 48 | getConnectOperations, 49 | onConnectStatus) 50 | .withDispatcher(Redis.dispatcher.name)) 51 | 52 | val promise = Promise[String]() 53 | val op1 = Operation(Ping, promise) 54 | redisClientActor ! op1 55 | val promise2 = Promise[String]() 56 | val op2 = Operation(Ping, promise2) 57 | redisClientActor ! op2 58 | 59 | probeMock.expectMsg(WriteMock) shouldBe WriteMock 60 | awaitAssert(redisClientActor.underlyingActor.queuePromises.length shouldBe 2) 61 | 62 | //onConnectWrite 63 | redisClientActor.underlyingActor.onConnectWrite() 64 | awaitAssert(redisClientActor.underlyingActor.queuePromises.toSeq shouldBe Seq(opConnectPing, opConnectGet, op1, op2)) 65 | awaitAssert(redisClientActor.underlyingActor.queuePromises.length shouldBe 4) 66 | 67 | //onWriteSent 68 | redisClientActor.underlyingActor.onWriteSent() 69 | probeReplyDecoder.expectMsgType[QueuePromises] shouldBe QueuePromises( 70 | mutable.Queue(opConnectPing, opConnectGet, op1, op2)) 71 | awaitAssert(redisClientActor.underlyingActor.queuePromises shouldBe empty) 72 | 73 | //onDataReceived 74 | awaitAssert(redisClientActor.underlyingActor.onDataReceived(ByteString.empty)) 75 | probeReplyDecoder.expectMsgType[ByteString] shouldBe ByteString.empty 76 | 77 | awaitAssert(redisClientActor.underlyingActor.onDataReceived(ByteString("bytestring"))) 78 | probeReplyDecoder.expectMsgType[ByteString] shouldBe ByteString("bytestring") 79 | 80 | //onConnectionClosed 81 | val deathWatcher = TestProbe() 82 | deathWatcher.watch(probeReplyDecoder.ref) 83 | redisClientActor.underlyingActor.onConnectionClosed() 84 | deathWatcher.expectTerminated(probeReplyDecoder.ref, 30 seconds) shouldBe a[Terminated] 85 | } 86 | 87 | "onConnectionClosed with promises queued" in { 88 | val probeReplyDecoder = TestProbe() 89 | val probeMock = TestProbe() 90 | 91 | val redisClientActor = TestActorRef[RedisClientActorMock]( 92 | Props( 93 | classOf[RedisClientActorMock], 94 | probeReplyDecoder.ref, 95 | probeMock.ref, 96 | getConnectOperations, 97 | onConnectStatus) 98 | .withDispatcher(Redis.dispatcher.name)).underlyingActor 99 | 100 | val promise3 = Promise[String]() 101 | redisClientActor.receive(Operation(Ping, promise3)) 102 | redisClientActor.queuePromises.length shouldBe 1 103 | 104 | val deathWatcher = TestProbe() 105 | 
deathWatcher.watch(probeReplyDecoder.ref) 106 | 107 | redisClientActor.onConnectionClosed() 108 | deathWatcher.expectTerminated(probeReplyDecoder.ref, 30 seconds) shouldBe a[Terminated] 109 | a[NoConnectionException.type] should be thrownBy Await.result(promise3.future, 10 seconds) 110 | } 111 | 112 | "replyDecoder died -> reset connection" in { 113 | val probeReplyDecoder = TestProbe() 114 | val probeMock = TestProbe() 115 | 116 | val redisClientActorRef = TestActorRef[RedisClientActorMock]( 117 | Props( 118 | classOf[RedisClientActorMock], 119 | probeReplyDecoder.ref, 120 | probeMock.ref, 121 | getConnectOperations, 122 | onConnectStatus) 123 | .withDispatcher(Redis.dispatcher.name)) 124 | val redisClientActor = redisClientActorRef.underlyingActor 125 | 126 | val promiseSent = Promise[String]() 127 | val promiseNotSent = Promise[String]() 128 | val operation = Operation(Ping, promiseSent) 129 | redisClientActor.receive(operation) 130 | redisClientActor.queuePromises.length shouldBe 1 131 | 132 | redisClientActor.onWriteSent() 133 | redisClientActor.queuePromises shouldBe empty 134 | probeReplyDecoder.expectMsgType[QueuePromises] shouldBe QueuePromises(mutable.Queue(operation)) 135 | 136 | redisClientActor.receive(Operation(Ping, promiseNotSent)) 137 | redisClientActor.queuePromises.length shouldBe 1 138 | 139 | val deathWatcher = TestProbe() 140 | deathWatcher.watch(probeReplyDecoder.ref) 141 | deathWatcher.watch(redisClientActorRef) 142 | 143 | probeReplyDecoder.ref ! Kill 144 | deathWatcher.expectTerminated(probeReplyDecoder.ref) shouldBe a[Terminated] 145 | redisClientActor.queuePromises.length shouldBe 1 146 | } 147 | } 148 | } 149 | 150 | class RedisClientActorMock(probeReplyDecoder: ActorRef, 151 | probeMock: ActorRef, 152 | getConnectOperations: () => Seq[Operation[_, _]], 153 | onConnectStatus: Boolean => Unit) 154 | extends RedisClientActor( 155 | new InetSocketAddress("localhost", 6379), 156 | getConnectOperations, 157 | onConnectStatus, 158 | Redis.dispatcher.name) { 159 | override def initRepliesDecoder() = probeReplyDecoder 160 | 161 | override def preStart(): Unit = { 162 | // disable preStart of RedisWorkerIO 163 | } 164 | 165 | override def write(byteString: ByteString): Unit = { 166 | probeMock ! 
WriteMock 167 | } 168 | } 169 | 170 | object WriteMock 171 | -------------------------------------------------------------------------------- /src/test/scala/redis/commands/ListsSpec.scala: -------------------------------------------------------------------------------- 1 | package redis.commands 2 | 3 | import akka.util.ByteString 4 | import redis._ 5 | 6 | 7 | 8 | class ListsSpec extends RedisStandaloneServer { 9 | 10 | "Lists commands" should { 11 | 12 | "LINDEX" in { 13 | val r = for { 14 | _ <- redis.del("lindexKey") 15 | _ <- redis.lpush("lindexKey", "World", "Hello") 16 | hello <- redis.lindex("lindexKey", 0) 17 | world <- redis.lindex("lindexKey", 1) 18 | none <- redis.lindex("lindexKey", 2) 19 | } yield { 20 | hello shouldBe Some(ByteString("Hello")) 21 | world shouldBe Some(ByteString("World")) 22 | none shouldBe None 23 | } 24 | r.futureValue 25 | } 26 | 27 | "LINSERT" in { 28 | val r = for { 29 | _ <- redis.del("linsertKey") 30 | _ <- redis.lpush("linsertKey", "World", "Hello") 31 | length <- redis.linsertBefore("linsertKey", "World", "There") 32 | list <- redis.lrange("linsertKey", 0, -1) 33 | length4 <- redis.linsertAfter("linsertKey", "World", "!!!") 34 | list4 <- redis.lrange("linsertKey", 0, -1) 35 | } yield { 36 | length shouldBe 3 37 | list shouldBe Seq(ByteString("Hello"), ByteString("There"), ByteString("World")) 38 | length4 shouldBe 4 39 | list4 shouldBe Seq(ByteString("Hello"), ByteString("There"), ByteString("World"), ByteString("!!!")) 40 | } 41 | r.futureValue 42 | } 43 | 44 | "LLEN" in { 45 | val r = for { 46 | _ <- redis.del("llenKey") 47 | _ <- redis.lpush("llenKey", "World", "Hello") 48 | length <- redis.llen("llenKey") 49 | } yield { 50 | length shouldBe 2 51 | } 52 | r.futureValue 53 | } 54 | 55 | "LPOP" in { 56 | val r = for { 57 | _ <- redis.del("lpopKey") 58 | _ <- redis.rpush("lpopKey", "one", "two", "three") 59 | e <- redis.lpop("lpopKey") 60 | } yield { 61 | e shouldBe Some(ByteString("one")) 62 | } 63 | r.futureValue 64 | } 65 | 66 | "LPUSH" in { 67 | val r = for { 68 | _ <- redis.del("lpushKey") 69 | _ <- redis.lpush("lpushKey", "World", "Hello") 70 | list <- redis.lrange("lpushKey", 0, -1) 71 | } yield { 72 | list shouldBe Seq(ByteString("Hello"), ByteString("World")) 73 | } 74 | r.futureValue 75 | } 76 | 77 | "LPUSHX" in { 78 | val r = for { 79 | _ <- redis.del("lpushxKey") 80 | _ <- redis.del("lpushxKeyOther") 81 | i <- redis.rpush("lpushxKey", "world") 82 | ii <- redis.lpushx("lpushxKey", "hello") 83 | zero <- redis.lpushx("lpushxKeyOther", "hello") 84 | list <- redis.lrange("lpushxKey", 0, -1) 85 | listOther <- redis.lrange("lpushxKeyOther", 0, -1) 86 | } yield { 87 | i shouldBe 1 88 | ii shouldBe 2 89 | zero shouldBe 0 90 | list shouldBe Seq(ByteString("hello"), ByteString("world")) 91 | listOther shouldBe empty 92 | } 93 | r.futureValue 94 | } 95 | 96 | "LRANGE" in { 97 | val r = for { 98 | _ <- redis.del("lrangeKey") 99 | _ <- redis.rpush("lrangeKey", "one", "two", "three") 100 | list <- redis.lrange("lrangeKey", 0, 0) 101 | list2 <- redis.lrange("lrangeKey", -3, 2) 102 | list3 <- redis.lrange("lrangeKey", 5, 10) 103 | nonExisting <- redis.lrange("lrangeKeyNonexisting", 5, 10) 104 | } yield { 105 | list shouldBe Seq(ByteString("one")) 106 | list2 shouldBe Seq(ByteString("one"), ByteString("two"), ByteString("three")) 107 | list3 shouldBe empty 108 | nonExisting shouldBe empty 109 | } 110 | r.futureValue 111 | } 112 | 113 | "LREM" in { 114 | val r = for { 115 | _ <- redis.del("lremKey") 116 | _ <- redis.rpush("lremKey", "hello", "hello", 
"foo", "hello") 117 | lrem <- redis.lrem("lremKey", -2, "hello") 118 | list2 <- redis.lrange("lremKey", 0, -1) 119 | } yield { 120 | lrem shouldBe 2 121 | list2 shouldBe Seq(ByteString("hello"), ByteString("foo")) 122 | } 123 | r.futureValue 124 | } 125 | 126 | "LSET" in { 127 | val r = for { 128 | _ <- redis.del("lsetKey") 129 | _ <- redis.rpush("lsetKey", "one", "two", "three") 130 | lset1 <- redis.lset("lsetKey", 0, "four") 131 | lset2 <- redis.lset("lsetKey", -2, "five") 132 | list <- redis.lrange("lsetKey", 0, -1) 133 | } yield { 134 | lset1 shouldBe true 135 | lset2 shouldBe true 136 | list shouldBe Seq(ByteString("four"), ByteString("five"), ByteString("three")) 137 | } 138 | r.futureValue 139 | } 140 | 141 | "LTRIM" in { 142 | val r = for { 143 | _ <- redis.del("ltrimKey") 144 | _ <- redis.rpush("ltrimKey", "one", "two", "three") 145 | ltrim <- redis.ltrim("ltrimKey", 1, -1) 146 | list <- redis.lrange("ltrimKey", 0, -1) 147 | } yield { 148 | ltrim shouldBe true 149 | list shouldBe Seq(ByteString("two"), ByteString("three")) 150 | } 151 | r.futureValue 152 | } 153 | 154 | "RPOP" in { 155 | val r = for { 156 | _ <- redis.del("rpopKey") 157 | _ <- redis.rpush("rpopKey", "one", "two", "three") 158 | rpop <- redis.rpop("rpopKey") 159 | list <- redis.lrange("rpopKey", 0, -1) 160 | } yield { 161 | rpop shouldBe Some(ByteString("three")) 162 | list shouldBe Seq(ByteString("one"), ByteString("two")) 163 | } 164 | r.futureValue 165 | } 166 | 167 | "RPOPLPUSH" in { 168 | val r = for { 169 | _ <- redis.del("rpoplpushKey") 170 | _ <- redis.del("rpoplpushKeyOther") 171 | _ <- redis.rpush("rpoplpushKey", "one", "two", "three") 172 | rpoplpush <- redis.rpoplpush("rpoplpushKey", "rpoplpushKeyOther") 173 | list <- redis.lrange("rpoplpushKey", 0, -1) 174 | listOther <- redis.lrange("rpoplpushKeyOther", 0, -1) 175 | } yield { 176 | rpoplpush shouldBe Some(ByteString("three")) 177 | list shouldBe Seq(ByteString("one"), ByteString("two")) 178 | listOther shouldBe Seq(ByteString("three")) 179 | } 180 | r.futureValue 181 | } 182 | 183 | "RPUSH" in { 184 | val r = for { 185 | _ <- redis.del("rpushKey") 186 | i <- redis.rpush("rpushKey", "hello") 187 | ii <- redis.rpush("rpushKey", "world") 188 | list <- redis.lrange("rpushKey", 0, -1) 189 | } yield { 190 | i shouldBe 1 191 | ii shouldBe 2 192 | list shouldBe Seq(ByteString("hello"), ByteString("world")) 193 | } 194 | r.futureValue 195 | } 196 | 197 | "RPUSHX" in { 198 | val r = for { 199 | _ <- redis.del("rpushxKey") 200 | _ <- redis.del("rpushxKeyOther") 201 | i <- redis.rpush("rpushxKey", "hello") 202 | ii <- redis.rpushx("rpushxKey", "world") 203 | zero <- redis.rpushx("rpushxKeyOther", "world") 204 | list <- redis.lrange("rpushxKey", 0, -1) 205 | listOther <- redis.lrange("rpushxKeyOther", 0, -1) 206 | } yield { 207 | i shouldBe 1 208 | ii shouldBe 2 209 | zero shouldBe 0 210 | list shouldBe Seq(ByteString("hello"), ByteString("world")) 211 | listOther shouldBe empty 212 | } 213 | r.futureValue 214 | } 215 | } 216 | } 217 | --------------------------------------------------------------------------------