├── .gitignore
├── .travis.yml
├── LICENSE
├── README.md
├── build.sbt
├── project
│   └── plugins.sbt
├── scalariform.sbt
├── src
│   ├── main
│   │   ├── resources
│   │   │   └── reference.conf
│   │   └── scala
│   │       ├── Connection.scala
│   │       ├── Redis.scala
│   │       ├── RedisConnectionSupervisor.scala
│   │       ├── RedisSentinel.scala
│   │       ├── ReplyParser.scala
│   │       ├── Request.scala
│   │       ├── Response.scala
│   │       ├── Sentinel.scala
│   │       ├── ShardManager.scala
│   │       └── StashingRedis.scala
│   └── test
│       ├── resources
│       │   ├── application.conf
│       │   └── crime_and_punishment.txt
│       └── scala
│           ├── RedisClientSentinelTest.scala
│           ├── RedisClientTest.scala
│           ├── ReplyParserTest.scala
│           ├── RequestTest.scala
│           ├── ResponseTest.scala
│           ├── SentinelTest.scala
│           ├── ShardManagerTest.scala
│           └── StashingRedisClientTest.scala
└── test-config
    ├── redis-slave.conf
    ├── redis.conf
    └── sentinel.conf

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | target/
3 | project/boot/
4 | project/build/target/
5 | project/plugins/target/
6 | project/plugins/lib_managed/
7 | project/plugins/src_managed/
8 | *.log
9 | .cache
10 | .classpath
11 | .project
12 | .settings
13 | *.rdb
14 | .idea/
15 | .idea_modules/

--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: scala
2 | scala:
3 |   - "2.10.4"
4 |   - "2.11.0"
5 | cache:
6 |   directories:
7 |     - $HOME/.ivy2
8 | before_script:
9 |   - sudo redis-server `pwd`/test-config/sentinel.conf --sentinel &
10 |   - sudo redis-server `pwd`/test-config/redis.conf --loglevel verbose
11 |   - sudo mkdir /var/lib/redis-slave
12 |   - sudo redis-server `pwd`/test-config/redis-slave.conf --loglevel verbose
13 |   - cat /var/log/redis/redis-slave-server.log
14 |   - cat /var/log/redis/redis-server.log
15 |   - sleep 5

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2013 Chris Dinn
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Brando
2 | ======
3 | 
4 | [![Build Status](https://travis-ci.org/chrisdinn/brando.svg?branch=master)](https://travis-ci.org/chrisdinn/brando)
5 | 
6 | A lightweight Redis client for use with [Akka](http://akka.io).
7 | 
8 | ## Using
9 | 
10 | In your build.sbt:
11 | 
12 |     resolvers += "chrisdinn" at "http://chrisdinn.github.io/releases/"
13 | 
14 |     libraryDependencies += "com.digital-achiever" %% "brando" % "3.0.3"
15 | 
16 | ### Getting started
17 | 
18 | Brando is a lightweight wrapper around the [Redis protocol](http://redis.io/topics/protocol).
19 | 
20 | Create a Redis actor with your server host and port.
21 | 
22 |     import brando._
23 | 
24 |     val redis = system.actorOf(Redis("localhost", 6379))
25 | 
26 | You should specify a database and password if you intend to use them.
27 | 
28 |     val redis = system.actorOf(Redis("localhost", 6379, database = 5, auth = "password"))
29 | 
30 | This is important; if your Redis actor restarts, you want to be sure it reconnects successfully and to the same database.
31 | 
32 | Next, send it a command and get your response as a reply.
33 | 
34 |     redis ! Request("PING")
35 | 
36 |     // Response: Some(Pong)
37 | 
38 | The Redis protocol supports 5 standard types of reply: Status, Error, Integer, Bulk and Multi Bulk, as well as a special NULL Bulk/Multi Bulk reply.
39 | 
40 | Status replies are returned as case objects, such as `Pong` and `Ok`.
41 | 
42 |     redis ! Request("SET", "some-key", "this-value")
43 | 
44 |     // Response: Some(Ok)
45 | 
46 | Error replies are returned as `akka.actor.Status.Failure` objects containing an exception with the server's response as its message.
47 | 
48 |     redis ! Request("EXPIRE", "1", "key")
49 | 
50 |     // Response: Failure(brando.RedisException: ERR value is not an integer or out of range)
51 | 
52 | Integer replies are returned as `Option[Long]`.
53 | 
54 |     redis ! Request("SADD", "some-set", "one", "two")
55 | 
56 |     // Response: Some(2)
57 | 
58 | Bulk replies are returned as `Option[akka.util.ByteString]`.
59 | 
60 |     redis ! Request("GET", "some-key")
61 | 
62 |     // Response: Some(ByteString("this-value"))
63 | 
64 | Multi Bulk replies are returned as `Option[List[Option[ByteString]]]`.
65 | 
66 |     redis ! Request("SMEMBERS", "some-set")
67 | 
68 |     // Response: Some(List(Some(ByteString("one")), Some(ByteString("two"))))
69 | 
70 | NULL replies are returned as `None` and may appear either on their own or nested inside a Multi Bulk reply.
71 | 
72 |     redis ! Request("GET", "non-existent-key")
73 | 
74 |     // Response: None
75 | 
76 | If you're not sure what to expect in response to a request, please refer to the Redis command documentation at [http://redis.io/commands](http://redis.io/commands), where the reply type for each command is clearly stated.
77 | 
78 | To ensure that a list of requests is executed back to back, the Redis actor can receive the following message:
79 | 
80 |     redis ! Batch(Request("MULTI"), Request("SET", "mykey", "somevalue"), Request("GET", "mykey"), Request("EXEC"))
81 | 
82 |     // Response: List(Some(Ok), Some(Queued), Some(Queued), Some("somevalue"))
83 | 
84 | This is very useful here, since it makes sure no other requests are executed between the MULTI and EXEC commands.
85 | Responses are also grouped in a single list of the same size as the list of requests in the `Batch`.
86 | 
87 | ### Response extractors
88 | 
89 | Use the provided response extractors to map your Redis reply to a more appropriate Scala type.
90 | 
91 |     for{ Response.AsString(value) ← redis ? Request("GET", "key") } yield value
92 | 
93 |     //value: String
94 | 
95 |     for{ Response.AsStrings(values) ← redis ? Request("KEYS", "*") } yield values
96 | 
97 |     //values: Seq[String]
98 | 
99 |     for{ Response.AsByteSeqs(value) ← redis ? Request("GET", "key") } yield value
100 | 
101 |     //value: Seq[Byte]
102 | 
103 |     for{ Response.AsStringsHash(fields) ← redis ? Request("HGETALL", "hash-key") } yield fields
104 | 
105 |     //fields: Map[String,String]
106 | 
107 | ### Monitoring Connection State Changes
108 | 
109 | If a set of listeners is provided to the Redis actor when it is created, it will inform those listeners about state changes to the underlying Redis connection. For example (from inside an actor):
110 | 
111 |     val redis = context.actorOf(Redis("localhost", 6379, listeners = Set(self)))
112 | 
113 | Currently, the possible messages sent to each listener include the following:
114 | 
115 | * `Connecting`: When creating a TCP connection.
116 | * `Connected`: When a TCP connection has been created, and authentication (if applicable) has succeeded.
117 | * `Disconnected`: The connection has been lost. Brando transparently handles disconnects and will automatically reconnect, so typically no user action is needed here. While disconnected, requests fail with a `RedisDisconnectedException`; use `StashingRedis` (see *Requests stashing when connection is not established* below) if you want them queued and processed once the connection is reestablished.
118 | * `AuthenticationFailed`: The TCP connection was made, but Redis auth failed.
119 | * `ConnectionFailed`: A connection could not be established after the number of attempts defined at creation via `connectionRetryAttempts`. Brando will not attempt to recover from this state; the user should take action.
120 | 
121 | All these messages inherit from the `Connection.StateChange` trait.
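A listener can be any actor. A minimal sketch of one reacting to these messages (the actor and its logging choices are illustrative, not part of Brando):

    import akka.actor._
    import brando._

    // Sketch: reacts to Brando's connection state-change messages.
    class ConnectionWatcher extends Actor with ActorLogging {
      val redis = context.actorOf(Redis("localhost", 6379, listeners = Set(self)))

      def receive = {
        case Connection.Connected(host, port) ⇒
          log.info("Connected to Redis at {}:{}", host, port)
        case Connection.ConnectionFailed(host, port) ⇒
          log.error("Could not connect to Redis at {}:{}", host, port)
        case state: Connection.StateChange ⇒
          log.debug("Redis connection state changed: {}", state)
      }
    }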
122 | 
123 | ### Requests stashing when connection is not established
124 | 
125 | The current Brando implementation throws an exception when the connection is not established and
126 | it receives a request. However, sometimes you do not need the answer right away and can wait
127 | a bit until the connection is established. The `StashingRedis` actor provides this functionality;
128 | it is designed to be used as a proxy. When it receives requests while the connection is not established,
129 | it stashes them. Once the connection is established, it unstashes them and passes them to the actual
130 | `Redis` actor. While the connection is established, all incoming requests are passed right through.
131 | 
132 | This was the built-in behavior of Brando 2.x.x.
133 | 
134 | 
135 |     // create the Redis actor
136 |     val brando = system.actorOf(Redis(listeners = Set(self)))
137 |     // create the stashing proxy
138 |     val proxy = system.actorOf(StashingRedis(brando))
139 |     // query the proxy
140 |     proxy ? Request("PING")
141 | 
142 | **Note:** `StashingRedis` cannot be used with a sharded connection.
143 | 
144 | 
145 | ### Sentinel
146 | 
147 | #### Sentinel Client
148 | 
149 | Sentinel provides support for `monitoring`, `notification` and `automatic failover` using [sentinel](http://redis.io/topics/sentinel). It is implemented based on the following [guidelines](http://redis.io/topics/sentinel-clients) and requires Redis 2.8.12 or later.
150 | 
151 | A sentinel client can be created like this. Here, we are using two servers, and we provide a listener to receive `Connection.StateChange` events.
152 | 
153 |     val sentinel = system.actorOf(Sentinel(Seq(
154 |       Server("localhost", 26380),
155 |       Server("localhost", 26379)), Set(self)))
156 | 
157 | You can listen for events using the following:
158 | 
159 |     sentinel ! Request("SENTINEL", "SUBSCRIBE", "failover-end")
160 | 
161 | You can also send commands, such as:
162 | 
163 |     sentinel ! Request("SENTINEL", "MASTERS")
164 | 
165 | 
166 | #### Redis with Sentinel
167 | 
168 | Redis can be used with Sentinel to provide automatic failover and discovery. To do so, you need to create a `Sentinel` and a `RedisSentinel` actor. In this example we are connecting to the master `mymaster`.
169 | 
170 |     val sentinel = system.actorOf(Sentinel(Seq(
171 |       Server("localhost", 26380),
172 |       Server("localhost", 26379))))
173 | 
174 |     val redis = system.actorOf(RedisSentinel("mymaster", sentinel))
175 | 
176 |     redis ! Request("PING")
177 | 
178 | For reliability, we encourage you to pass `connectionHeartbeatDelay` when using `RedisSentinel`; this will generate heartbeats to Redis and improve failure detection in the case of network partitions.
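For example (a sketch; the two-second delay is an arbitrary illustration, not a recommended value):

    import scala.concurrent.duration._

    val redis = system.actorOf(RedisSentinel("mymaster", sentinel,
      connectionHeartbeatDelay = Some(2.seconds))) // example value only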
Request("PING") 177 | 178 | For reliability we encourage to pass `connectionHeartbeatDelay` when using RedisSentinel, this will generate a heartbeat to Redis and will improve failures detections in the case of network partitions. 179 | 180 | ### Sharding 181 | 182 | Brando provides support for sharding, as outlined [in the Redis documentation](http://redis.io/topics/partitioning) and in [this blog post from antirez](http://oldblog.antirez.com/post/redis-presharding.html). 183 | 184 | To use it, simply create an instance of `ShardManager`, passing it a list of Redis shards you'd like it to connect to. From there, we create a pool of `Redis` instances - one for each shard. 185 | 186 | val shards = Seq(RedisShard("redis1", "10.0.0.1", 6379), 187 | RedisShard("redis2", "10.0.0.2", 6379), 188 | RedisShard("redis3", "10.0.0.3", 6379)) 189 | 190 | val shardManager = context.actorOf(ShardManager(shards)) 191 | 192 | Once an instance of `ShardManager` has been created, it can be sent several types of messages: 193 | 194 | * `Request` objects for inferring the shard key from the params 195 | * `Tuple2[String, Request]` objects for specifying the shard key explicitly 196 | * `ShardBroadcast` objects for broadcasting requests to all shards 197 | 198 | Here are some examples, 199 | 200 | shardManager ! Request("SET", "mykey", "some_value") 201 | shardManager ! ("myshardkey", Request("SET", "mykey", "some_value")) 202 | shardManager ! BroadcastRequest("LPOP", "mylistkey") // don't use the ask pattern 203 | 204 | Note that the `ShardManager` explicitly requires a key for all operations except for the `BroadcastRequest`. This is because the key is used to determined which shard each request should be forwarded to. In this context, operations which operate on multiple keys (e.g. `MSET`, `MGET`) or no keys at all (e.g. `SELECT`, `FLUSHDB`) should be avoided, as they break the Redis sharding model. Also note that the `BroadcastRequest` must not be used with the `ask` pattern in Akka or responses will be lost! 205 | 206 | Individual shards can have their configuration updated on the fly. To do this, send a `Shard` message to `ShardManager`. 207 | 208 | shardManager ! RedisShard("redis1", "10.0.0.4", 6379) 209 | 210 | 211 | val shardManager = context.actorOf(ShardManager(shards, listeners = Set(self))) 212 | 213 | The `ShardManager` will forward all `Connection.StateChange` messages when a shard changes state. 
215 | 
216 | #### Sharding with sentinel
217 | 
218 | It's possible to use sharding with Sentinel. To do so, use `SentinelShard` instead of `RedisShard`:
219 | 
220 |     val shards = Seq(
221 |       SentinelShard("mymaster1"),
222 |       SentinelShard("mymaster2"))
223 | 
224 |     val sentinel = system.actorOf(Sentinel()) // default host and port are localhost:26379
225 | 
226 |     val shardManager = context.actorOf(ShardManager(shards, sentinelClient = Some(sentinel)))
227 | 
228 | ## Run the tests
229 | 
230 | * Start sentinel
231 | 
232 |       sudo redis-sentinel test-config/sentinel.conf --sentinel
233 | 
234 | * Start a Redis master and slave
235 | 
236 |       sudo redis-server test-config/redis.conf --loglevel verbose
237 |       sudo mkdir /var/lib/redis-slave
238 |       sudo redis-server test-config/redis-slave.conf --loglevel verbose
239 | 
240 | * Run the tests
241 | 
242 |       sbt test
243 | 
244 | 
245 | ## Documentation
246 | 
247 | Read the API documentation here: [http://chrisdinn.github.io/api/brando-3.0.3/](http://chrisdinn.github.io/api/brando-3.0.3/)
248 | 
249 | ## Mailing list
250 | 
251 | Send questions, comments or discussion topics to the mailing list brando@librelist.com.
252 | 
253 | ## License
254 | 
255 | This project is released under the Apache License v2. For more details, see the 'LICENSE' file.
256 | 
257 | ## Contributing
258 | 
259 | Fork the project, add tests if possible and send a pull request.
260 | 
261 | ## Contributors
262 | 
263 | Chris Dinn, Jason Goodwin, Tyson Hamilton, Gaetan Hervouet, Damien Levin, Matt MacAulay, Arron Norwell
264 | 
265 | ## Changelog
266 | 
267 | ### v3.x.x
268 | 
269 | Brando no longer implements `akka.actor.Stash`. As a consequence, all incoming requests fail with
270 | a `RedisDisconnectedException` if the `Connection` is not established. This version delegates that
271 | responsibility to the sender; it is no longer handled by Brando itself. In older versions, when
272 | the connection was not established, requests were stashed, then unstashed and processed once the
273 | connection was reestablished. For smoother migration, `StashingRedis` provides
274 | the same behavior. For more details see *Requests stashing when connection is not established*.
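With the `ask` pattern, this failure surfaces as a failed `Future`. A sketch of handling it, given a `redis` actor created as above (the fallback value is illustrative):

    import akka.pattern.ask
    import akka.util.Timeout
    import scala.concurrent.ExecutionContext.Implicits.global
    import scala.concurrent.duration._

    implicit val timeout = Timeout(2.seconds)

    (redis ? Request("GET", "mykey")) recover {
      case _: RedisDisconnectedException ⇒ None // e.g. fall back, or retry after reconnection
    }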
275 | 
--------------------------------------------------------------------------------
/build.sbt:
--------------------------------------------------------------------------------
1 | name := "brando"
2 | 
3 | organization := "com.digital-achiever"
4 | 
5 | version := "3.0.3"
6 | 
7 | scalaVersion := "2.11.4"
8 | 
9 | crossScalaVersions := Seq("2.10.4", "2.11.4")
10 | 
11 | scalacOptions ++= Seq("-unchecked", "-deprecation", "-feature")
12 | 
13 | libraryDependencies ++= Seq(
14 |   "com.typesafe.akka" %% "akka-actor" % "2.3.9",
15 |   "org.scalatest" %% "scalatest" % "2.1.3" % "test",
16 |   "com.typesafe.akka" %% "akka-testkit" % "2.3.9" % "test"
17 | )
18 | 
19 | parallelExecution in Test := false
20 | 
21 | resolvers += "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/"
22 | 
23 | publishTo <<= version { (v: String) =>
24 |   if (v.trim.endsWith("-SNAPSHOT"))
25 |     Some(Resolver.file("Snapshots", file("../chrisdinn.github.com/snapshots/")))
26 |   else
27 |     Some(Resolver.file("Releases", file("../chrisdinn.github.com/releases/")))
28 | }
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | addSbtPlugin("com.typesafe.sbt" % "sbt-scalariform" % "1.3.0")
--------------------------------------------------------------------------------
/scalariform.sbt:
--------------------------------------------------------------------------------
1 | import scalariform.formatter.preferences._
2 | 
3 | scalariformSettings
4 | 
5 | ScalariformKeys.preferences := (FormattingPreferences().
6 |   setPreference(RewriteArrowSymbols, true).
7 |   setPreference(DoubleIndentClassDeclaration, true).
8 | setPreference(AlignSingleLineCaseStatements, true)) -------------------------------------------------------------------------------- /src/main/resources/reference.conf: -------------------------------------------------------------------------------- 1 | brando{ 2 | connection{ 3 | timeout = 2s 4 | 5 | #Delay before trying to reconnect 6 | retry.delay = 1 s 7 | 8 | #Number of connect attempts before failure 9 | retry.attempts = 3 10 | } 11 | 12 | #Stashing buffer capacity to limit memory footprint 13 | stashing.capacity = 50 14 | } 15 | -------------------------------------------------------------------------------- /src/main/scala/Connection.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import akka.actor._ 4 | import akka.actor.ActorDSL._ 5 | import akka.pattern._ 6 | import akka.io._ 7 | import akka.util._ 8 | 9 | import scala.collection.mutable 10 | import scala.concurrent._ 11 | import scala.concurrent.duration._ 12 | 13 | import java.net.InetSocketAddress 14 | 15 | object Connection { 16 | trait StateChange 17 | case class Connecting(host: String, port: Int) extends StateChange 18 | case class Connected(host: String, port: Int) extends StateChange 19 | case class Disconnected(host: String, port: Int) extends StateChange 20 | case class ConnectionFailed(host: String, port: Int) extends StateChange 21 | 22 | private[brando] case object Connect 23 | private[brando] case class CommandAck(sender: ActorRef) extends Tcp.Event 24 | private[brando] case class Heartbeat(delay: FiniteDuration) 25 | } 26 | 27 | private[brando] class Connection( 28 | listener: ActorRef, 29 | host: String, 30 | port: Int, 31 | connectionTimeout: FiniteDuration, 32 | heartbeatDelay: Option[FiniteDuration]) extends Actor with ReplyParser { 33 | 34 | import Connection._ 35 | import context.dispatcher 36 | 37 | var socket: ActorRef = _ 38 | 39 | var lastDataReceived = now 40 | 41 | val requesterQueue = mutable.Queue.empty[ActorRef] 42 | var subscribers: Map[ByteString, Seq[ActorRef]] = Map.empty 43 | 44 | def getSubscribers(channel: ByteString): Seq[ActorRef] = 45 | subscribers.get(channel).getOrElse(Seq.empty[ActorRef]) 46 | 47 | override def preStart(): Unit = self ! Connect 48 | 49 | def receive = { 50 | case subRequest: Request if (subRequest.command.utf8String.toLowerCase == "subscribe") ⇒ 51 | subRequest.params map { x ⇒ 52 | subscribers = subscribers + ((x, getSubscribers(x).+:(sender))) 53 | } 54 | socket ! Tcp.Write(subRequest.toByteString, CommandAck(sender)) 55 | 56 | case request: Request ⇒ 57 | socket ! Tcp.Write(request.toByteString, CommandAck(sender)) 58 | 59 | case batch: Batch ⇒ 60 | val requester = sender 61 | val batcher = actor(new Act { 62 | var responses = List[Any]() 63 | become { 64 | case response if (responses.size + 1) < batch.requests.size ⇒ 65 | responses = responses :+ response 66 | case response ⇒ 67 | requester ! (responses :+ response) 68 | self ! PoisonPill 69 | } 70 | }) 71 | batch.requests.foreach(self.tell(_, batcher)) 72 | 73 | case CommandAck(sender) ⇒ 74 | requesterQueue.enqueue(sender) 75 | 76 | case Tcp.Received(data) ⇒ 77 | lastDataReceived = now 78 | parseReply(data) { reply ⇒ 79 | reply match { 80 | case Some(List( 81 | Some(x: ByteString), 82 | Some(channel: ByteString), 83 | Some(message: ByteString))) if (x.utf8String == "message") ⇒ 84 | 85 | val pubSubMessage = PubSubMessage(channel.utf8String, message.utf8String) 86 | getSubscribers(channel).map { x ⇒ 87 | x ! 
pubSubMessage 88 | } 89 | 90 | case _ ⇒ 91 | requesterQueue.dequeue ! (reply match { 92 | case Some(failure: Status.Failure) ⇒ 93 | failure 94 | case success ⇒ 95 | success 96 | }) 97 | } 98 | } 99 | 100 | case Tcp.CommandFailed(writeMessage: Tcp.Write) ⇒ 101 | socket ! writeMessage //just retry immediately 102 | case Tcp.CommandFailed(_: Tcp.Connect) ⇒ 103 | listener ! ConnectionFailed(host, port) 104 | 105 | case x: Tcp.ConnectionClosed ⇒ 106 | requesterQueue.clear 107 | listener ! Disconnected(host, port) 108 | 109 | case Connect ⇒ 110 | val address = new InetSocketAddress(host, port) 111 | listener ! Connecting(host, port) 112 | IO(Tcp)(context.system) ! Tcp.Connect(address, timeout = Some(connectionTimeout)) 113 | 114 | case x: Tcp.Connected ⇒ 115 | socket = sender 116 | socket ! Tcp.Register(self, useResumeWriting = false) 117 | (self ? Request("PING"))(connectionTimeout) map { 118 | case _ ⇒ 119 | listener ! Connected(host, port) 120 | heartbeatDelay map (d ⇒ 121 | context.system.scheduler.schedule(0.seconds, 1.seconds, self, Heartbeat(d))) 122 | } recover { 123 | case _ ⇒ 124 | listener ! ConnectionFailed(host, port) 125 | } 126 | 127 | case Heartbeat(delay) ⇒ 128 | val idle = now - lastDataReceived 129 | ((idle > delay.toMillis * 2), (idle > (delay.toMillis))) match { 130 | case (true, true) ⇒ 131 | socket ! Tcp.Close 132 | case (false, true) ⇒ 133 | self ! Request("PING") 134 | case _ ⇒ 135 | } 136 | 137 | case _ ⇒ 138 | } 139 | 140 | def now = System.currentTimeMillis 141 | } 142 | 143 | -------------------------------------------------------------------------------- /src/main/scala/Redis.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import akka.actor._ 4 | 5 | import scala.concurrent.duration._ 6 | 7 | import com.typesafe.config.ConfigFactory 8 | import java.util.concurrent.TimeUnit 9 | 10 | object Redis { 11 | def apply( 12 | host: String = "localhost", 13 | port: Int = 6379, 14 | database: Int = 0, 15 | auth: Option[String] = None, 16 | listeners: Set[ActorRef] = Set(), 17 | connectionTimeout: Option[FiniteDuration] = None, 18 | connectionRetryDelay: Option[FiniteDuration] = None, 19 | connectionRetryAttempts: Option[Int] = None, 20 | connectionHeartbeatDelay: Option[FiniteDuration] = None): Props = { 21 | 22 | val config = ConfigFactory.load() 23 | Props(classOf[Redis], 24 | host, 25 | port, 26 | database, 27 | auth, 28 | listeners, 29 | connectionTimeout.getOrElse( 30 | config.getDuration("brando.connection.timeout", TimeUnit.MILLISECONDS).millis), 31 | Some(connectionRetryDelay.getOrElse( 32 | config.getDuration("brando.connection.retry.delay", TimeUnit.MILLISECONDS).millis)), 33 | connectionRetryAttempts, 34 | connectionHeartbeatDelay) 35 | } 36 | 37 | case class AuthenticationFailed(host: String, port: Int) extends Connection.StateChange 38 | } 39 | 40 | class Redis( 41 | host: String, 42 | port: Int, 43 | database: Int, 44 | auth: Option[String], 45 | listeners: Set[ActorRef], 46 | connectionTimeout: FiniteDuration, 47 | connectionRetryDelay: Option[FiniteDuration], 48 | connectionRetryAttempts: Option[Int], 49 | connectionHeartbeatDelay: Option[FiniteDuration]) extends RedisConnectionSupervisor( 50 | database, auth, listeners, connectionTimeout, connectionHeartbeatDelay) { 51 | 52 | import ConnectionSupervisor.{ Connect, Reconnect } 53 | import context.dispatcher 54 | 55 | var retries = 0 56 | 57 | override def preStart: Unit = { 58 | listeners.map(context.watch(_)) 59 | self ! 
Connect(host, port) 60 | } 61 | 62 | override def disconnected: Receive = 63 | disconnectedWithRetry orElse super.disconnected 64 | 65 | def disconnectedWithRetry: Receive = { 66 | case _@ (_: Request | _: Batch) ⇒ 67 | sender ! Status.Failure(new RedisDisconnectedException(s"Disconnected from $host:$port")) 68 | 69 | case ("auth_ok", x: Connection.Connected) ⇒ 70 | retries = 0 71 | notifyStateChange(x) 72 | context.become(connected) 73 | 74 | case Reconnect ⇒ 75 | (connectionRetryDelay, connectionRetryAttempts) match { 76 | case (Some(delay), Some(maxAttempts)) if (maxAttempts > retries) ⇒ 77 | retries += 1 78 | context.system.scheduler.scheduleOnce(delay, connection, Connection.Connect) 79 | case (Some(delay), None) ⇒ 80 | context.system.scheduler.scheduleOnce(delay, connection, Connection.Connect) 81 | case _ ⇒ 82 | } 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/main/scala/RedisConnectionSupervisor.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import akka.actor._ 4 | import akka.pattern._ 5 | import akka.util._ 6 | 7 | import scala.concurrent.duration._ 8 | import scala.concurrent.Future 9 | 10 | object ConnectionSupervisor { 11 | private[brando] case class Connect(host: String, port: Int) 12 | private[brando] case object Reconnect 13 | } 14 | 15 | private[brando] abstract class RedisConnectionSupervisor( 16 | database: Int, 17 | auth: Option[String], 18 | var listeners: Set[ActorRef], 19 | connectionTimeout: FiniteDuration, 20 | connectionHeartbeatDelay: Option[FiniteDuration]) extends Actor { 21 | 22 | import ConnectionSupervisor.{ Connect, Reconnect } 23 | import context.dispatcher 24 | 25 | implicit val timeout = Timeout(connectionTimeout) 26 | 27 | var connection = context.system.deadLetters 28 | 29 | protected var status: Connection.StateChange = Connection.Disconnected("unknown", 0) 30 | 31 | def receive = disconnected 32 | 33 | def connected: Receive = handleListeners orElse { 34 | case m @ (_: Request | _: Batch) ⇒ 35 | connection forward m 36 | 37 | case x: Connection.Disconnected ⇒ 38 | notifyStateChange(x) 39 | context.become(disconnected) 40 | self ! Reconnect 41 | } 42 | 43 | def disconnected: Receive = handleListeners orElse { 44 | case Connect(host, port) ⇒ 45 | connection ! PoisonPill 46 | connection = context.actorOf(Props(classOf[Connection], 47 | self, host, port, connectionTimeout, connectionHeartbeatDelay)) 48 | 49 | case x: Connection.Connecting ⇒ 50 | notifyStateChange(x) 51 | 52 | case x: Connection.Connected ⇒ 53 | authenticate(x) 54 | 55 | case ("auth_ok", x: Connection.Connected) ⇒ 56 | notifyStateChange(x) 57 | context.become(connected) 58 | 59 | case x: Connection.ConnectionFailed ⇒ 60 | notifyStateChange(x) 61 | self ! Reconnect 62 | } 63 | 64 | def handleListeners: Receive = { 65 | case s: ActorRef ⇒ 66 | listeners = listeners + s 67 | s ! status // notify the new listener about current status 68 | 69 | case Terminated(l) ⇒ 70 | listeners = listeners - l 71 | } 72 | 73 | def notifyStateChange(newState: Connection.StateChange) { 74 | status = newState 75 | listeners foreach { _ ! newState } 76 | } 77 | 78 | def authenticate(x: Connection.Connected) { 79 | (for { 80 | auth ← if (auth.isDefined) 81 | connection ? Request(ByteString("AUTH"), ByteString(auth.get)) else Future.successful(Unit) 82 | database ← if (database != 0) 83 | connection ? 
Request(ByteString("SELECT"), ByteString(database.toString)) else Future.successful(Unit) 84 | } yield ("auth_ok", x)) map { 85 | self ! _ 86 | } onFailure { 87 | case e: Exception ⇒ 88 | notifyStateChange(Redis.AuthenticationFailed(x.host, x.port)) 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /src/main/scala/RedisSentinel.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import akka.actor._ 4 | import akka.pattern._ 5 | import akka.util._ 6 | 7 | import scala.concurrent._ 8 | import scala.concurrent.duration._ 9 | 10 | import com.typesafe.config.ConfigFactory 11 | import java.util.concurrent.TimeUnit 12 | 13 | object RedisSentinel { 14 | def apply( 15 | master: String, 16 | sentinelClient: ActorRef, 17 | database: Int = 0, 18 | auth: Option[String] = None, 19 | listeners: Set[ActorRef] = Set(), 20 | connectionTimeout: Option[FiniteDuration] = None, 21 | connectionRetryDelay: Option[FiniteDuration] = None, 22 | connectionHeartbeatDelay: Option[FiniteDuration] = None): Props = { 23 | 24 | val config = ConfigFactory.load() 25 | Props(classOf[RedisSentinel], 26 | master, 27 | sentinelClient, 28 | database, 29 | auth, 30 | listeners, 31 | connectionTimeout.getOrElse( 32 | config.getDuration("brando.connection.timeout", TimeUnit.MILLISECONDS).millis), 33 | connectionRetryDelay.getOrElse( 34 | config.getDuration("brando.connection.retry.delay", TimeUnit.MILLISECONDS).millis), 35 | connectionHeartbeatDelay) 36 | } 37 | 38 | private[brando] case object SentinelConnect 39 | } 40 | 41 | class RedisSentinel( 42 | master: String, 43 | sentinelClient: ActorRef, 44 | database: Int, 45 | auth: Option[String], 46 | listeners: Set[ActorRef], 47 | connectionTimeout: FiniteDuration, 48 | connectionRetryDelay: FiniteDuration, 49 | connectionHeartbeatDelay: Option[FiniteDuration]) extends RedisConnectionSupervisor(database, auth, 50 | listeners, connectionTimeout, connectionHeartbeatDelay) { 51 | 52 | import RedisSentinel._ 53 | import ConnectionSupervisor.{ Connect, Reconnect } 54 | import context.dispatcher 55 | 56 | override def preStart: Unit = { 57 | listeners.map(context.watch(_)) 58 | self ! SentinelConnect 59 | } 60 | 61 | override def disconnected: Receive = 62 | disconnectedWithSentinel orElse super.disconnected 63 | 64 | def disconnectedWithSentinel: Receive = { 65 | case _@ (_: Request | _: Batch) ⇒ 66 | sender ! Status.Failure(new RedisDisconnectedException(s"Disconnected from $master")) 67 | 68 | case Reconnect ⇒ 69 | context.system.scheduler.scheduleOnce(connectionRetryDelay, self, SentinelConnect) 70 | 71 | case SentinelConnect ⇒ 72 | (sentinelClient ? Request("SENTINEL", "MASTER", master)) map { 73 | case Response.AsStrings(res) ⇒ 74 | val (ip, port) = extractIpPort(res) 75 | self ! Connect(ip, port) 76 | } recover { case _ ⇒ self ! Reconnect } 77 | 78 | case x: Connection.Connected ⇒ 79 | isValidMaster map { 80 | case true ⇒ 81 | authenticate(x) 82 | case false ⇒ 83 | self ! Reconnect 84 | } recover { case _ ⇒ self ! Reconnect } 85 | } 86 | 87 | def extractIpPort(config: Seq[String]): (String, Int) = { 88 | var i, port = 0 89 | var ip: String = "" 90 | while ((i < config.size) && (ip == "" || port == 0)) { 91 | if (config(i) == "port") port = config(i + 1).toInt 92 | if (config(i) == "ip") ip = config(i + 1) 93 | i = i + 1 94 | } 95 | (ip, port) 96 | } 97 | 98 | def isValidMaster: Future[Boolean] = { 99 | (connection ? 
Request("INFO")) map { 100 | case Response.AsString(res) ⇒ 101 | res.contains("role:master") 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /src/main/scala/ReplyParser.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import annotation.tailrec 4 | import akka.actor.Status 5 | import akka.util.ByteString 6 | 7 | sealed abstract class StatusReply(val status: String) { 8 | val bytes = ByteString(status) 9 | } 10 | 11 | object ValueType { 12 | case object String extends StatusReply("string") 13 | case object List extends StatusReply("list") 14 | case object Set extends StatusReply("set") 15 | case object ZSet extends StatusReply("set") 16 | case object Hash extends StatusReply("hash") 17 | } 18 | 19 | case object Ok extends StatusReply("OK") 20 | case object Pong extends StatusReply("PONG") 21 | case object Queued extends StatusReply("QUEUED") 22 | 23 | private[brando] object StatusReply { 24 | import ValueType._ 25 | 26 | def fromString(status: String) = { 27 | status match { 28 | case Ok.status ⇒ Some(Ok) 29 | case Pong.status ⇒ Some(Pong) 30 | case Queued.status ⇒ Some(Queued) 31 | 32 | case String.status ⇒ Some(String) 33 | case List.status ⇒ Some(List) 34 | case Set.status ⇒ Some(Set) 35 | case ZSet.status ⇒ Some(ZSet) 36 | case Hash.status ⇒ Some(Hash) 37 | } 38 | } 39 | } 40 | 41 | private[brando] trait ReplyParser { 42 | 43 | var remainingBuffer = ByteString.empty 44 | 45 | trait Result { 46 | val reply: Option[Any] 47 | val next: ByteString 48 | } 49 | case class Success(reply: Option[Any], next: ByteString = ByteString.empty) 50 | extends Result 51 | case class Failure(next: ByteString) 52 | extends Result { 53 | val reply = None 54 | } 55 | 56 | def splitLine(buffer: ByteString): Option[(String, ByteString)] = { 57 | val start = buffer.takeWhile(_ != '\r') 58 | val rest = buffer.drop(start.size) 59 | if (rest.take(2) == ByteString("\r\n")) { 60 | Some((start.drop(1).utf8String, rest.drop(2))) 61 | } else { 62 | None 63 | } 64 | } 65 | 66 | def readErrorReply(buffer: ByteString) = splitLine(buffer) match { 67 | case Some((error, rest)) ⇒ 68 | Success(Some(Status.Failure(new RedisException(error))), rest) 69 | case _ ⇒ Failure(buffer) 70 | } 71 | 72 | def readSimpleStringReply(buffer: ByteString) = splitLine(buffer) match { 73 | case Some((status, rest)) ⇒ 74 | Success(StatusReply.fromString(status), rest) 75 | case _ ⇒ Failure(buffer) 76 | } 77 | 78 | def readIntegerReply(buffer: ByteString) = splitLine(buffer) match { 79 | case Some((int, rest)) ⇒ Success(Some(int.toLong), rest) 80 | case x ⇒ Failure(buffer) 81 | } 82 | 83 | def readBulkStringReply(buffer: ByteString): Result = splitLine(buffer) match { 84 | case Some((length, rest)) ⇒ 85 | val dataLength = length.toInt 86 | 87 | if (dataLength == -1) Success(None, rest) //null response 88 | else if (rest.length >= dataLength + 2) { //rest = data + "\r\n" 89 | val data = rest.take(dataLength) 90 | val remainder = rest.drop(dataLength + 2) 91 | Success(Some(data), remainder) 92 | } else Failure(buffer) 93 | 94 | case _ ⇒ Failure(buffer) 95 | } 96 | 97 | def readArrayReply(buffer: ByteString): Result = splitLine(buffer) match { 98 | case Some(("-1", rest)) ⇒ 99 | Success(None, rest) 100 | 101 | case Some((count, rest)) ⇒ 102 | val itemCount = count.toInt 103 | 104 | @tailrec def readElements(remaining: Int, result: Result): Result = remaining match { 105 | case 0 ⇒ result 106 | case _ if result.next.isEmpty 
⇒ Failure(buffer) 107 | case _ ⇒ 108 | (parse(result.next), result.reply) match { 109 | case (failure: Failure, _) ⇒ 110 | Failure(buffer) 111 | case (Success(element, next), Some(elements: List[_])) ⇒ 112 | if (remaining == 1) //Add last element to the array reply and reorder 113 | readElements(0, Success(Some((element +: elements).reverse), next)) 114 | else 115 | readElements(remaining - 1, Success(Some(element +: elements), next)) 116 | } 117 | } 118 | 119 | readElements(itemCount, Success(Some(List.empty[Option[Any]]), rest)) 120 | 121 | case _ ⇒ Failure(buffer) 122 | } 123 | 124 | def readPubSubMessage(buffer: ByteString) = splitLine(buffer) match { 125 | case Some((int, rest)) ⇒ Success(Some(int.toLong), rest) 126 | case x ⇒ Failure(buffer) 127 | } 128 | 129 | def parse(reply: ByteString) = reply(0) match { 130 | case '+' ⇒ readSimpleStringReply(reply) 131 | case '-' ⇒ readErrorReply(reply) 132 | case ':' ⇒ readIntegerReply(reply) 133 | case '$' ⇒ readBulkStringReply(reply) 134 | case '*' ⇒ readArrayReply(reply) 135 | } 136 | 137 | @tailrec final def parseReply(bytes: ByteString)(withReply: Any ⇒ Unit) { 138 | if (bytes.size > 0) { 139 | parse(remainingBuffer ++ bytes) match { 140 | case Failure(leftoverBytes) ⇒ 141 | remainingBuffer = leftoverBytes 142 | 143 | case Success(reply, leftoverBytes) ⇒ 144 | remainingBuffer = ByteString.empty 145 | withReply(reply) 146 | 147 | if (leftoverBytes.size > 0) { 148 | parseReply(leftoverBytes)(withReply) 149 | } 150 | } 151 | } else Failure(bytes) 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /src/main/scala/Request.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import akka.util.ByteString 4 | 5 | object Request { 6 | def apply(command: String, params: String*) = 7 | new Request(ByteString(command), params map (ByteString(_)): _*) 8 | } 9 | 10 | //Helps creating a request like HMSET key k1 v1 k2 v2... 
11 | object HashRequest { 12 | def apply(cmd: String, key: String, map: Map[String, String]) = { 13 | val args = Seq(key) ++ map.map(e ⇒ Seq(e._1, e._2)).flatten 14 | Request(cmd, args: _*) 15 | } 16 | } 17 | 18 | case class Request(command: ByteString, params: ByteString*) { 19 | val CRLF = ByteString("\r\n") 20 | 21 | def args = command :: params.toList 22 | 23 | def toByteString = args.map(argLine(_)).foldLeft(header)(_ ++ _) 24 | 25 | private def header = ByteString("*" + args.length) ++ CRLF 26 | 27 | private def argLine(bytes: ByteString) = 28 | ByteString("$" + bytes.length) ++ CRLF ++ bytes ++ CRLF 29 | } 30 | 31 | object BroadcastRequest { 32 | def apply(cmd: String, params: String*) = 33 | new BroadcastRequest(ByteString(cmd), params map (ByteString(_)): _*) 34 | } 35 | 36 | case class BroadcastRequest(command: ByteString, params: ByteString*) 37 | 38 | case class Batch(requests: Request*) 39 | -------------------------------------------------------------------------------- /src/main/scala/Response.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import akka.util.ByteString 4 | 5 | case class PubSubMessage(channel: String, message: String) 6 | 7 | case class RedisException(message: String) extends Exception(message) { 8 | override lazy val toString = "%s: %s\n".format(getClass.getName, message) 9 | } 10 | case class RedisDisconnectedException(message: String) extends Exception(message) { 11 | override lazy val toString = "%s: %s\n".format(getClass.getName, message) 12 | } 13 | 14 | object Response { 15 | 16 | def collectItems[T](value: Any, mapper: PartialFunction[Any, T]): Option[Seq[T]] = { 17 | value match { 18 | case Some(v: List[_]) ⇒ v.foldLeft(Option(Seq[T]())) { 19 | case (Some(acc), e @ (Some(_: ByteString) | None)) if (mapper.isDefinedAt(e)) ⇒ 20 | Some(acc :+ mapper(e)) 21 | case (Some(acc), e @ (Some(_: ByteString) | None)) ⇒ Some(acc) 22 | case _ ⇒ None 23 | } 24 | case _ ⇒ None 25 | } 26 | } 27 | 28 | object AsStrings { 29 | def unapply(value: Any) = { 30 | collectItems(value, { case Some(v: ByteString) ⇒ v.utf8String }) 31 | } 32 | } 33 | 34 | object AsStringOptions { 35 | def unapply(value: Any) = { 36 | collectItems(value, { case Some(v: ByteString) ⇒ Some(v.utf8String); case _ ⇒ None }) 37 | } 38 | } 39 | 40 | object AsByteSeqs { 41 | def unapply(value: Any) = { 42 | collectItems(value, { case Some(v: ByteString) ⇒ v.toArray.toList }) 43 | } 44 | } 45 | 46 | object AsStringsHash { 47 | def unapply(value: Any) = { 48 | value match { 49 | case AsStrings(result) if (result.size % 2 == 0) ⇒ 50 | val map = result.grouped(2).map { 51 | subseq ⇒ subseq(0) -> subseq(1) 52 | }.toMap 53 | Some(map) 54 | case _ ⇒ None 55 | } 56 | } 57 | } 58 | 59 | object AsString { 60 | def unapply(value: Any) = { 61 | value match { 62 | case Some(str: ByteString) ⇒ Some(str.utf8String) 63 | case _ ⇒ None 64 | } 65 | } 66 | } 67 | 68 | object AsStringOption { 69 | def unapply(value: Any) = { 70 | value match { 71 | case Some(str: ByteString) ⇒ Some(Some(str.utf8String)) 72 | case _ ⇒ Some(None) 73 | } 74 | } 75 | } 76 | } 77 | 78 | -------------------------------------------------------------------------------- /src/main/scala/Sentinel.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import akka.actor._ 4 | import akka.pattern._ 5 | import akka.util._ 6 | 7 | import scala.concurrent._ 8 | import scala.concurrent.duration._ 9 | 10 | import 
com.typesafe.config.ConfigFactory 11 | import java.util.concurrent.TimeUnit 12 | 13 | object Sentinel { 14 | def apply( 15 | sentinels: Seq[Server] = Seq(Server("localhost", 26379)), 16 | listeners: Set[ActorRef] = Set(), 17 | connectionTimeout: Option[FiniteDuration] = None, 18 | connectionHeartbeatDelay: Option[FiniteDuration] = None): Props = { 19 | 20 | val config = ConfigFactory.load() 21 | Props(classOf[Sentinel], sentinels, listeners, 22 | connectionTimeout.getOrElse( 23 | config.getDuration("brando.connection.timeout", TimeUnit.MILLISECONDS).millis), 24 | connectionHeartbeatDelay) 25 | } 26 | 27 | case class Server(host: String, port: Int) 28 | private[brando] case class Connect(sentinels: Seq[Server]) 29 | case class ConnectionFailed(sentinels: Seq[Server]) extends Connection.StateChange 30 | } 31 | 32 | class Sentinel( 33 | var sentinels: Seq[Sentinel.Server], 34 | var listeners: Set[ActorRef], 35 | connectionTimeout: FiniteDuration, 36 | connectionHeartbeatDelay: Option[FiniteDuration]) extends Actor { 37 | 38 | import Sentinel._ 39 | import context.dispatcher 40 | 41 | implicit val timeout = Timeout(connectionTimeout) 42 | 43 | var connection = context.system.deadLetters 44 | var retries = 0 45 | 46 | override def preStart: Unit = { 47 | listeners.map(context.watch(_)) 48 | self ! Connect(sentinels) 49 | } 50 | 51 | def receive: Receive = disconnected 52 | 53 | def connected: Receive = handleListeners orElse { 54 | case x: Connection.Disconnected ⇒ 55 | connection ! PoisonPill 56 | context.become(disconnected) 57 | notifyStateChange(x) 58 | self ! Connect(sentinels) 59 | 60 | case request: Request ⇒ 61 | connection forward request 62 | 63 | case _ ⇒ 64 | } 65 | 66 | def disconnected: Receive = handleListeners orElse { 67 | case Connect(Server(host, port) :: tail) ⇒ 68 | notifyStateChange(Connection.Connecting(host, port)) 69 | retries += 1 70 | connection = context.actorOf(Props(classOf[Connection], 71 | self, host, port, connectionTimeout, connectionHeartbeatDelay)) 72 | 73 | case x: Connection.Connected ⇒ 74 | context.become(connected) 75 | retries = 0 76 | val Server(host, port) = sentinels.head 77 | notifyStateChange(Connection.Connected(host, port)) 78 | 79 | case x: Connection.ConnectionFailed ⇒ 80 | context.become(disconnected) 81 | (retries < sentinels.size) match { 82 | case true ⇒ 83 | sentinels = sentinels.tail :+ sentinels.head 84 | self ! Connect(sentinels) 85 | case false ⇒ 86 | notifyStateChange(ConnectionFailed(sentinels)) 87 | } 88 | 89 | case request: Request ⇒ 90 | sender ! Status.Failure(new RedisException("Disconnected from the sentinel cluster")) 91 | 92 | case _ ⇒ 93 | } 94 | 95 | def handleListeners: Receive = { 96 | case s: ActorRef ⇒ 97 | listeners = listeners + s 98 | 99 | case Terminated(l) ⇒ 100 | listeners = listeners - l 101 | } 102 | 103 | def notifyStateChange(newState: Connection.StateChange) { 104 | listeners foreach { _ ! 
newState } 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/main/scala/ShardManager.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import akka.actor._ 4 | import akka.util._ 5 | import collection.mutable 6 | import java.util.zip.CRC32 7 | 8 | import scala.util.Failure 9 | import scala.concurrent._ 10 | import scala.concurrent.duration._ 11 | 12 | import com.typesafe.config.ConfigFactory 13 | import java.util.concurrent.TimeUnit 14 | 15 | object ShardManager { 16 | def apply( 17 | shards: Seq[Shard], 18 | listeners: Set[ActorRef] = Set(), 19 | sentinelClient: Option[ActorRef] = None, 20 | hashFunction: (Array[Byte] ⇒ Long) = defaultHashFunction, 21 | connectionTimeout: Option[FiniteDuration] = None, 22 | connectionRetryDelay: Option[FiniteDuration] = None, 23 | connectionHeartbeatDelay: Option[FiniteDuration] = None): Props = { 24 | 25 | val config = ConfigFactory.load() 26 | Props(classOf[ShardManager], shards, hashFunction, listeners, sentinelClient, 27 | connectionTimeout.getOrElse( 28 | config.getDuration("brando.connection.timeout", TimeUnit.MILLISECONDS).millis), 29 | connectionRetryDelay.getOrElse( 30 | config.getDuration("brando.connection.retry.delay", TimeUnit.MILLISECONDS).millis), 31 | connectionHeartbeatDelay) 32 | } 33 | 34 | def defaultHashFunction(input: Array[Byte]): Long = { 35 | val crc32 = new CRC32 36 | crc32.update(input) 37 | crc32.getValue 38 | } 39 | 40 | private[brando] trait Shard { val id: String } 41 | case class RedisShard(id: String, host: String, 42 | port: Int, database: Int = 0, 43 | auth: Option[String] = None) extends Shard 44 | case class SentinelShard(id: String, database: Int = 0, 45 | auth: Option[String] = None) extends Shard 46 | 47 | private[brando] case class SetShard(shard: Shard) 48 | } 49 | 50 | class ShardManager( 51 | shards: Seq[ShardManager.Shard], 52 | hashFunction: (Array[Byte] ⇒ Long), 53 | var listeners: Set[ActorRef] = Set(), 54 | sentinelClient: Option[ActorRef] = None, 55 | connectionTimeout: FiniteDuration, 56 | connectionRetryDelay: FiniteDuration, 57 | connectionHeartbeatDelay: Option[FiniteDuration]) extends Actor { 58 | 59 | import ShardManager._ 60 | import context.dispatcher 61 | 62 | val pool = mutable.Map.empty[String, ActorRef] 63 | val shardLookup = mutable.Map.empty[ActorRef, Shard] 64 | var poolKeys: Seq[String] = Seq() 65 | 66 | override def preStart: Unit = { 67 | listeners.map(context.watch(_)) 68 | shards.map(self ! SetShard(_)) 69 | } 70 | 71 | def receive = { 72 | case (key: ByteString, request: Request) ⇒ 73 | forward(key, request) 74 | 75 | case (key: String, request: Request) ⇒ 76 | forward(ByteString(key), request) 77 | 78 | case request: Request ⇒ 79 | request.params.length match { 80 | case 0 ⇒ 81 | sender ! 
Failure(new IllegalArgumentException("Received empty Request params, can not shard without a key")) 82 | 83 | case s ⇒ 84 | forward(request.params.head, request) 85 | } 86 | 87 | case broadcast: BroadcastRequest ⇒ 88 | for ((_, shard) ← pool) { 89 | shard forward Request(broadcast.command, broadcast.params: _*) 90 | } 91 | 92 | case SetShard(shard) ⇒ 93 | pool.get(shard.id) map (context.stop(_)) 94 | (shard, sentinelClient) match { 95 | case (RedisShard(id, host, port, database, auth), _) ⇒ 96 | val brando = 97 | context.actorOf(Redis(host, port, database, auth, listeners, 98 | Some(connectionTimeout), Some(connectionRetryDelay), None, 99 | connectionHeartbeatDelay)) 100 | add(shard, brando) 101 | case (SentinelShard(id, database, auth), Some(sClient)) ⇒ 102 | val brando = 103 | context.actorOf(RedisSentinel(id, sClient, database, auth, 104 | listeners, Some(connectionTimeout), Some(connectionRetryDelay), 105 | connectionHeartbeatDelay)) 106 | add(shard, brando) 107 | case _ ⇒ 108 | } 109 | 110 | case Terminated(l) ⇒ 111 | listeners = listeners - l 112 | 113 | case _ ⇒ 114 | } 115 | 116 | def forward(key: ByteString, req: Request) = { 117 | val s = sender 118 | Future { lookup(key).tell(req, s) } 119 | } 120 | 121 | def lookup(key: ByteString) = { 122 | val hash = hashFunction(key.toArray) 123 | val mod = hash % poolKeys.size 124 | val id = poolKeys(mod.toInt) 125 | pool(id) 126 | } 127 | 128 | def add(shard: Shard, brando: ActorRef) { 129 | shardLookup(brando) = shard 130 | pool(shard.id) = brando 131 | poolKeys = pool.keys.toIndexedSeq 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /src/main/scala/StashingRedis.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import akka.actor._ 4 | 5 | /** 6 | * This actor implements Akka stashing to stash requests to Redis when the connection 7 | * is not established. Instead of direct invocation of the Brando, which throws an 8 | * exception, this actor stashes all requests and passes them to the Brando when the 9 | * connection is established. It avoids exceptions during the Brando startup or when 10 | * the connection is temporarily broken down. 11 | * 12 | * Note: This actor re-enables the behavior of the Brando in version 2.x.x 13 | * 14 | * To use it: 15 | * 16 | * 1. create Brando actor 17 | * 2. create this actor with the Brando as parameter 18 | * 3. 
use this actor to querying redis 19 | */ 20 | class StashingRedis(redis: ActorRef) extends Actor with Stash { 21 | 22 | override def receive: Receive = setDisconnected() 23 | 24 | protected def stateChanges: Receive = { 25 | case Connection.Connecting(_, _) ⇒ // do nothing 26 | case Connection.ConnectionFailed(_, _) ⇒ // do nothing 27 | case Sentinel.ConnectionFailed(_) ⇒ // do nothing 28 | case Redis.AuthenticationFailed(_, _) ⇒ // do nothing 29 | case Connection.Connected(_, _) ⇒ setConnected() 30 | case Connection.Disconnected(_, _) ⇒ setDisconnected() 31 | } 32 | 33 | /** pass everything through, connection is established */ 34 | protected def connected: Receive = { 35 | case message ⇒ redis.tell(message, sender) 36 | } 37 | 38 | /** stash everything, the connection is not established */ 39 | protected def disconnected: Receive = { 40 | case message ⇒ stash() 41 | } 42 | 43 | /** set the actor's behavior as 'connected' */ 44 | protected def setConnected() = { 45 | unstashAll() // restore all stashed messages 46 | context.become(stateChanges orElse connected) 47 | } 48 | 49 | /** set the actor's behavior as 'disconnected' */ 50 | protected def setDisconnected(): Receive = { 51 | val behavior = stateChanges orElse disconnected 52 | context.become(behavior) 53 | behavior 54 | } 55 | 56 | // register self as a listener on the redis instance to listen on state changes 57 | redis ! self 58 | } 59 | 60 | object StashingRedis { 61 | 62 | def apply(redis: ActorRef) = 63 | Props(classOf[StashingRedis], redis) 64 | 65 | } 66 | -------------------------------------------------------------------------------- /src/test/resources/application.conf: -------------------------------------------------------------------------------- 1 | akka { 2 | log-dead-letters = 0 3 | log-dead-letters-during-shutdown = off 4 | } 5 | 6 | bounded-stash-mailbox { 7 | mailbox-type = "akka.dispatch.BoundedDequeBasedMailbox" 8 | mailbox-capacity = 1000 9 | mailbox-push-timeout-time = 10s 10 | stash-capacity = 2 11 | } 12 | -------------------------------------------------------------------------------- /src/test/scala/RedisClientSentinelTest.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import akka.actor._ 4 | import akka.pattern._ 5 | import akka.testkit._ 6 | 7 | import scala.concurrent._ 8 | import scala.concurrent.duration._ 9 | 10 | import org.scalatest._ 11 | 12 | class RedisClientSentinelTest extends TestKit(ActorSystem("RedisClientSentinelTest")) with FunSpecLike 13 | with ImplicitSender { 14 | 15 | import RedisSentinel._ 16 | import Sentinel._ 17 | import Connection._ 18 | 19 | describe("RedisClientSentinel") { 20 | describe("when connecting") { 21 | it("should use sentinel to resolve the ip and port") { 22 | val sentinelProbe = TestProbe() 23 | val brando = system.actorOf( 24 | RedisSentinel("mymaster", sentinelProbe.ref, 0, None)) 25 | 26 | sentinelProbe.expectMsg(Request("SENTINEL", "MASTER", "mymaster")) 27 | } 28 | 29 | it("should connect to sentinel and redis") { 30 | val redisProbe = TestProbe() 31 | val sentinelProbe = TestProbe() 32 | 33 | val sentinel = system.actorOf(Sentinel( 34 | sentinels = Seq(Server("localhost", 26379)), 35 | listeners = Set(sentinelProbe.ref))) 36 | val brando = system.actorOf(RedisSentinel( 37 | master = "mymaster", 38 | sentinelClient = sentinel, 39 | listeners = Set(redisProbe.ref))) 40 | 41 | sentinelProbe.expectMsg( 42 | Connecting("localhost", 26379)) 43 | sentinelProbe.expectMsg( 44 | Connected("localhost", 
26379)) 45 | redisProbe.expectMsg( 46 | Connecting("127.0.0.1", 6379)) 47 | redisProbe.expectMsg( 48 | Connected("127.0.0.1", 6379)) 49 | } 50 | } 51 | 52 | describe("when disconnected") { 53 | it("should recreate a connection using sentinel") { 54 | val redisProbe = TestProbe() 55 | val sentinelProbe = TestProbe() 56 | 57 | val sentinel = system.actorOf(Sentinel( 58 | sentinels = Seq(Server("localhost", 26379)), 59 | listeners = Set(sentinelProbe.ref))) 60 | val brando = system.actorOf(RedisSentinel( 61 | master = "mymaster", 62 | sentinelClient = sentinel, 63 | listeners = Set(redisProbe.ref))) 64 | 65 | sentinelProbe.expectMsg( 66 | Connecting("localhost", 26379)) 67 | sentinelProbe.expectMsg( 68 | Connected("localhost", 26379)) 69 | redisProbe.expectMsg( 70 | Connecting("127.0.0.1", 6379)) 71 | redisProbe.expectMsg( 72 | Connected("127.0.0.1", 6379)) 73 | 74 | brando ! Disconnected("127.0.0.1", 6379) 75 | 76 | redisProbe.expectMsg( 77 | Disconnected("127.0.0.1", 6379)) 78 | redisProbe.expectMsg( 79 | Connecting("127.0.0.1", 6379)) 80 | redisProbe.expectMsg( 81 | Connected("127.0.0.1", 6379)) 82 | } 83 | 84 | it("should return a failure when disconnected") { 85 | val sentinel = system.actorOf(Sentinel( 86 | sentinels = Seq(Server("localhost", 26379)))) 87 | val brando = system.actorOf(RedisSentinel( 88 | master = "mymaster", 89 | sentinelClient = sentinel)) 90 | 91 | brando ! Request("PING") 92 | 93 | expectMsg(Status.Failure(new RedisDisconnectedException("Disconnected from mymaster"))) 94 | } 95 | } 96 | } 97 | } 98 | 99 | -------------------------------------------------------------------------------- /src/test/scala/RedisClientTest.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import org.scalatest.FunSpecLike 4 | import akka.testkit._ 5 | import akka.actor._ 6 | import akka.pattern._ 7 | import akka.io.{ IO, Tcp } 8 | import akka.util._ 9 | import scala.concurrent.Await 10 | import scala.concurrent.duration._ 11 | import java.util.UUID 12 | import java.net.ServerSocket 13 | 14 | class RedisClientTest extends TestKit(ActorSystem("RedisClientTest")) with FunSpecLike 15 | with ImplicitSender { 16 | 17 | import Connection._ 18 | 19 | describe("ping") { 20 | it("should respond with Pong") { 21 | val brando = system.actorOf(Redis(listeners = Set(self))) 22 | expectMsg(Connecting("localhost", 6379)) 23 | expectMsg(Connected("localhost", 6379)) 24 | 25 | brando ! Request("PING") 26 | 27 | expectMsg(Some(Pong)) 28 | } 29 | } 30 | 31 | describe("flushdb") { 32 | it("should respond with OK") { 33 | val brando = system.actorOf(Redis(listeners = Set(self))) 34 | expectMsg(Connecting("localhost", 6379)) 35 | expectMsg(Connected("localhost", 6379)) 36 | 37 | brando ! Request("FLUSHDB") 38 | 39 | expectMsg(Some(Ok)) 40 | } 41 | } 42 | 43 | describe("set") { 44 | it("should respond with OK") { 45 | val brando = system.actorOf(Redis(listeners = Set(self))) 46 | expectMsg(Connecting("localhost", 6379)) 47 | expectMsg(Connected("localhost", 6379)) 48 | 49 | brando ! Request("SET", "mykey", "somevalue") 50 | 51 | expectMsg(Some(Ok)) 52 | 53 | brando ! Request("FLUSHDB") 54 | expectMsg(Some(Ok)) 55 | } 56 | } 57 | 58 | describe("get") { 59 | it("should respond with value option for existing key") { 60 | val brando = system.actorOf(Redis(listeners = Set(self))) 61 | expectMsg(Connecting("localhost", 6379)) 62 | expectMsg(Connected("localhost", 6379)) 63 | 64 | brando ! 
Request("SET", "mykey", "somevalue") 65 | 66 | expectMsg(Some(Ok)) 67 | 68 | brando ! Request("GET", "mykey") 69 | 70 | expectMsg(Some(ByteString("somevalue"))) 71 | 72 | brando ! Request("FLUSHDB") 73 | expectMsg(Some(Ok)) 74 | } 75 | 76 | it("should respond with None for non-existent key") { 77 | val brando = system.actorOf(Redis(listeners = Set(self))) 78 | expectMsg(Connecting("localhost", 6379)) 79 | expectMsg(Connected("localhost", 6379)) 80 | 81 | brando ! Request("GET", "mykey") 82 | 83 | expectMsg(None) 84 | } 85 | } 86 | 87 | describe("incr") { 88 | it("should increment and return value for existing key") { 89 | val brando = system.actorOf(Redis(listeners = Set(self))) 90 | expectMsg(Connecting("localhost", 6379)) 91 | expectMsg(Connected("localhost", 6379)) 92 | 93 | brando ! Request("SET", "incr-test", "10") 94 | 95 | expectMsg(Some(Ok)) 96 | 97 | brando ! Request("INCR", "incr-test") 98 | 99 | expectMsg(Some(11)) 100 | 101 | brando ! Request("FLUSHDB") 102 | expectMsg(Some(Ok)) 103 | } 104 | 105 | it("should return 1 for non-existent key") { 106 | val brando = system.actorOf(Redis(listeners = Set(self))) 107 | expectMsg(Connecting("localhost", 6379)) 108 | expectMsg(Connected("localhost", 6379)) 109 | 110 | brando ! Request("INCR", "incr-test") 111 | 112 | expectMsg(Some(1)) 113 | 114 | brando ! Request("FLUSHDB") 115 | expectMsg(Some(Ok)) 116 | } 117 | } 118 | 119 | describe("sadd") { 120 | it("should return number of members added to set") { 121 | val brando = system.actorOf(Redis(listeners = Set(self))) 122 | expectMsg(Connecting("localhost", 6379)) 123 | expectMsg(Connected("localhost", 6379)) 124 | 125 | brando ! Request("SADD", "sadd-test", "one") 126 | 127 | expectMsg(Some(1)) 128 | 129 | brando ! Request("SADD", "sadd-test", "two", "three") 130 | 131 | expectMsg(Some(2)) 132 | 133 | brando ! Request("SADD", "sadd-test", "one", "four") 134 | 135 | expectMsg(Some(1)) 136 | 137 | brando ! Request("FLUSHDB") 138 | expectMsg(Some(Ok)) 139 | } 140 | } 141 | 142 | describe("smembers") { 143 | it("should return all members in a set") { 144 | val brando = system.actorOf(Redis(listeners = Set(self))) 145 | expectMsg(Connecting("localhost", 6379)) 146 | expectMsg(Connected("localhost", 6379)) 147 | 148 | brando ! Request("SADD", "smembers-test", "one", "two", "three", "four") 149 | 150 | expectMsg(Some(4)) 151 | 152 | brando ! Request("SMEMBERS", "smembers-test") 153 | 154 | val resp = receiveOne(500.millis).asInstanceOf[Option[List[Any]]] 155 | assert(resp.getOrElse(List()).toSet === 156 | Set(Some(ByteString("one")), Some(ByteString("two")), 157 | Some(ByteString("three")), Some(ByteString("four")))) 158 | 159 | brando ! Request("FLUSHDB") 160 | expectMsg(Some(Ok)) 161 | } 162 | 163 | } 164 | 165 | describe("pipelining") { 166 | it("should respond to a Seq of multiple requests all at once") { 167 | val brando = system.actorOf(Redis(listeners = Set(self))) 168 | expectMsg(Connecting("localhost", 6379)) 169 | expectMsg(Connected("localhost", 6379)) 170 | 171 | val ping = Request("PING") 172 | 173 | brando ! ping 174 | brando ! ping 175 | brando ! ping 176 | 177 | expectMsg(Some(Pong)) 178 | expectMsg(Some(Pong)) 179 | expectMsg(Some(Pong)) 180 | 181 | } 182 | 183 | it("should support pipelines of setex commands") { 184 | val brando = system.actorOf(Redis(listeners = Set(self))) 185 | expectMsg(Connecting("localhost", 6379)) 186 | expectMsg(Connected("localhost", 6379)) 187 | 188 | val setex = Request("SETEX", "pipeline-setex-path", "10", "Some data") 189 | 190 | brando ! 
setex 191 | brando ! setex 192 | brando ! setex 193 | 194 | expectMsg(Some(Ok)) 195 | expectMsg(Some(Ok)) 196 | expectMsg(Some(Ok)) 197 | } 198 | 199 | it("should receive responses in the right order") { 200 | val brando = system.actorOf(Redis(listeners = Set(self))) 201 | expectMsg(Connecting("localhost", 6379)) 202 | expectMsg(Connected("localhost", 6379)) 203 | 204 | val ping = Request("PING") 205 | val setex = Request("SETEX", "pipeline-setex-path", "10", "Some data") 206 | 207 | brando ! setex 208 | brando ! ping 209 | brando ! setex 210 | brando ! ping 211 | brando ! setex 212 | 213 | expectMsg(Some(Ok)) 214 | expectMsg(Some(Pong)) 215 | expectMsg(Some(Ok)) 216 | expectMsg(Some(Pong)) 217 | expectMsg(Some(Ok)) 218 | } 219 | } 220 | 221 | describe("large data sets") { 222 | it("should read and write large files") { 223 | import java.io.{ File, FileInputStream } 224 | 225 | val file = new File("src/test/resources/crime_and_punishment.txt") 226 | val in = new FileInputStream(file) 227 | val bytes = new Array[Byte](file.length.toInt) 228 | in.read(bytes) 229 | in.close() 230 | 231 | val largeText = new String(bytes, "UTF-8") 232 | 233 | val brando = system.actorOf(Redis(listeners = Set(self))) 234 | expectMsg(Connecting("localhost", 6379)) 235 | expectMsg(Connected("localhost", 6379)) 236 | 237 | brando ! Request("SET", "crime+and+punishment", largeText) 238 | 239 | expectMsg(Some(Ok)) 240 | 241 | brando ! Request("GET", "crime+and+punishment") 242 | 243 | expectMsg(Some(ByteString(largeText))) 244 | 245 | brando ! Request("FLUSHDB") 246 | expectMsg(Some(Ok)) 247 | } 248 | } 249 | 250 | describe("error reply") { 251 | it("should receive a failure with the redis error message") { 252 | val brando = system.actorOf(Redis(listeners = Set(self))) 253 | expectMsg(Connecting("localhost", 6379)) 254 | expectMsg(Connected("localhost", 6379)) 255 | 256 | brando ! Request("SET", "key") 257 | 258 | expectMsgPF(5.seconds) { 259 | case Status.Failure(e) ⇒ 260 | assert(e.isInstanceOf[RedisException]) 261 | assert(e.getMessage === "ERR wrong number of arguments for 'set' command") 262 | } 263 | 264 | brando ! Request("EXPIRE", "1", "key") 265 | 266 | expectMsgPF(5.seconds) { 267 | case Status.Failure(e) ⇒ 268 | assert(e.isInstanceOf[RedisException]) 269 | assert(e.getMessage === "ERR value is not an integer or out of range") 270 | } 271 | } 272 | } 273 | 274 | describe("select") { 275 | it("should execute commands on the selected database") { 276 | val brando = system.actorOf(Redis("localhost", 6379, 5, listeners = Set(self))) 277 | expectMsg(Connecting("localhost", 6379)) 278 | expectMsg(Connected("localhost", 6379)) 279 | 280 | brando ! Request("SET", "mykey", "somevalue") 281 | 282 | expectMsg(Some(Ok)) 283 | 284 | brando ! Request("GET", "mykey") 285 | 286 | expectMsg(Some(ByteString("somevalue"))) 287 | 288 | brando ! Request("SELECT", "0") 289 | 290 | expectMsg(Some(Ok)) 291 | 292 | brando ! Request("GET", "mykey") 293 | 294 | expectMsg(None) 295 | 296 | brando ! Request("SELECT", "5") 297 | expectMsg(Some(Ok)) 298 | 299 | brando ! Request("FLUSHDB") 300 | expectMsg(Some(Ok)) 301 | } 302 | } 303 | 304 | describe("multi/exec requests") { 305 | it("should support multi requests as an atomic transaction") { 306 | val brando = system.actorOf(Redis("localhost", 6379, 5, listeners = Set(self))) 307 | expectMsg(Connecting("localhost", 6379)) 308 | expectMsg(Connected("localhost", 6379)) 309 | 310 | brando ! 
Batch(Request("MULTI"), Request("SET", "mykey", "somevalue"), Request("GET", "mykey"), Request("EXEC")) 311 | expectMsg(List(Some(Ok), 312 | Some(Queued), 313 | Some(Queued), 314 | Some(List(Some(Ok), Some(ByteString("somevalue")))))) 315 | } 316 | 317 | it("should support multi requests with multiple results") { 318 | val brando = system.actorOf(Redis("localhost", 6379, 5, listeners = Set(self))) 319 | expectMsg(Connecting("localhost", 6379)) 320 | expectMsg(Connected("localhost", 6379)) 321 | 322 | brando ! Batch(Request("MULTI"), Request("SET", "mykey", "somevalue"), Request("GET", "mykey"), Request("GET", "mykey"), Request("EXEC")) 323 | expectMsg(List(Some(Ok), 324 | Some(Queued), 325 | Some(Queued), 326 | Some(Queued), 327 | Some(List(Some(Ok), Some(ByteString("somevalue")), Some(ByteString("somevalue")))))) 328 | } 329 | } 330 | 331 | describe("blocking requests") { 332 | describe("subscribe") { 333 | 334 | it("should be able to subscribe to a pubsub channel") { 335 | val channel = UUID.randomUUID().toString 336 | val subscriber = system.actorOf(Redis(listeners = Set(self))) 337 | expectMsg(Connecting("localhost", 6379)) 338 | expectMsg(Connected("localhost", 6379)) 339 | 340 | subscriber ! Request("SUBSCRIBE", channel) 341 | 342 | expectMsg(Some(List(Some( 343 | ByteString("subscribe")), 344 | Some(ByteString(channel)), 345 | Some(1)))) 346 | } 347 | 348 | it("should receive published messages from a pubsub channel") { 349 | val channel = UUID.randomUUID().toString 350 | val subscriber = system.actorOf(Redis(listeners = Set(self))) 351 | expectMsg(Connecting("localhost", 6379)) 352 | expectMsg(Connected("localhost", 6379)) 353 | 354 | val publisher = system.actorOf(Redis(listeners = Set(self))) 355 | expectMsg(Connecting("localhost", 6379)) 356 | expectMsg(Connected("localhost", 6379)) 357 | 358 | subscriber ! Request("SUBSCRIBE", channel) 359 | 360 | expectMsg(Some(List(Some( 361 | ByteString("subscribe")), 362 | Some(ByteString(channel)), 363 | Some(1)))) 364 | 365 | publisher ! Request("PUBLISH", channel, "test") 366 | expectMsg(Some(1)) //publisher gets back number of subscribers when publishing 367 | 368 | expectMsg(PubSubMessage(channel, "test")) 369 | } 370 | 371 | it("should be able to unsubscribe from a pubsub channel") { 372 | val channel = UUID.randomUUID().toString 373 | val subscriber = system.actorOf(Redis(listeners = Set(self))) 374 | expectMsg(Connecting("localhost", 6379)) 375 | expectMsg(Connected("localhost", 6379)) 376 | 377 | val publisher = system.actorOf(Redis(listeners = Set(self))) 378 | expectMsg(Connecting("localhost", 6379)) 379 | expectMsg(Connected("localhost", 6379)) 380 | 381 | subscriber ! Request("SUBSCRIBE", channel) 382 | 383 | expectMsg(Some(List(Some( 384 | ByteString("subscribe")), 385 | Some(ByteString(channel)), 386 | Some(1)))) 387 | 388 | subscriber ! Request("UNSUBSCRIBE", channel) 389 | 390 | expectMsg(Some(List(Some( 391 | ByteString("unsubscribe")), 392 | Some(ByteString(channel)), 393 | Some(0)))) 394 | 395 | publisher ! 
Request("PUBLISH", channel, "test") 396 | expectMsg(Some(0)) 397 | 398 | expectNoMsg 399 | } 400 | } 401 | } 402 | 403 | describe("blop") { 404 | it("should block then reply") { 405 | val brando = system.actorOf(Redis(listeners = Set(self))) 406 | expectMsg(Connecting("localhost", 6379)) 407 | expectMsg(Connected("localhost", 6379)) 408 | 409 | try { 410 | val channel = UUID.randomUUID().toString 411 | val popRedis = system.actorOf(Redis(listeners = Set(self))) 412 | expectMsg(Connecting("localhost", 6379)) 413 | expectMsg(Connected("localhost", 6379)) 414 | 415 | val probeRedis = TestProbe() 416 | val probePopRedis = TestProbe() 417 | 418 | popRedis.tell(Request("BLPOP", "blpop:list", "0"), probePopRedis.ref) 419 | 420 | probePopRedis.expectNoMsg 421 | 422 | brando.tell(Request("LPUSH", "blpop:list", "blpop-value"), probeRedis.ref) 423 | 424 | probePopRedis.expectMsg( 425 | Some(List(Some( 426 | ByteString("blpop:list")), 427 | Some(ByteString("blpop-value"))))) 428 | 429 | probeRedis.expectMsg(Some(1)) 430 | 431 | } finally { 432 | implicit val timeout = Timeout(1.seconds) 433 | Await.ready((brando ? Request("del", "blpop:list")), 1.seconds) 434 | } 435 | } 436 | 437 | it("should reply with Nil when timeout") { 438 | 439 | val popRedis = system.actorOf(Redis(listeners = Set(self))) 440 | expectMsg(Connecting("localhost", 6379)) 441 | expectMsg(Connected("localhost", 6379)) 442 | val probePopRedis = TestProbe() 443 | 444 | popRedis.tell(Request("BLPOP", "blpop:inexistant-list", "1"), probePopRedis.ref) 445 | 446 | probePopRedis.expectMsg(5.seconds, None) 447 | } 448 | } 449 | 450 | describe("notifications") { 451 | it("should send a Connected event if connecting succeeds") { 452 | val probe = TestProbe() 453 | val brando = system.actorOf(Redis("localhost", 6379, listeners = Set(probe.ref))) 454 | 455 | probe.expectMsg(Connecting("localhost", 6379)) 456 | probe.expectMsg(Connected("localhost", 6379)) 457 | } 458 | 459 | it("should send an ConnectionFailed event if connecting fails") { 460 | val probe = TestProbe() 461 | val brando = system.actorOf(Redis("localhost", 13579, listeners = Set(probe.ref))) 462 | 463 | probe.expectMsg(Connecting("localhost", 13579)) 464 | probe.expectMsg(ConnectionFailed("localhost", 13579)) 465 | } 466 | 467 | it("should send an AuthenticationFailed event if connecting succeeds but authentication fails") { 468 | val probe = TestProbe() 469 | val brando = system.actorOf(Redis("localhost", 6379, auth = Some("not-the-auth"), listeners = Set(probe.ref))) 470 | 471 | probe.expectMsg(Connecting("localhost", 6379)) 472 | probe.expectMsg(Redis.AuthenticationFailed("localhost", 6379)) 473 | } 474 | 475 | it("should send a ConnectionFailed if redis is not responsive during connection") { 476 | val serverSocket = new ServerSocket(0) 477 | val port = serverSocket.getLocalPort() 478 | 479 | val probe = TestProbe() 480 | val brando = system.actorOf(Redis("localhost", port, listeners = Set(probe.ref))) 481 | 482 | probe.expectMsg(Connecting("localhost", port)) 483 | probe.expectMsg(ConnectionFailed("localhost", port)) 484 | } 485 | 486 | it("should send a notification to later added listener") { 487 | val probe = TestProbe() 488 | val probe2 = TestProbe() 489 | val brando = system.actorOf(Redis("localhost", 13579, listeners = Set(probe2.ref))) 490 | brando ! 
probe.ref 491 | 492 | probe.expectMsg(Disconnected("unknown", 0)) 493 | probe2.expectMsg(Connecting("localhost", 13579)) 494 | probe.expectMsg(Connecting("localhost", 13579)) 495 | probe2.expectMsg(ConnectionFailed("localhost", 13579)) 496 | probe.expectMsg(ConnectionFailed("localhost", 13579)) 497 | } 498 | 499 | it("should send a notification with the current status to a later-added listener") { 500 | val probe = TestProbe() 501 | val probe2 = TestProbe() 502 | val brando = system.actorOf(Redis("localhost", 6379, listeners = Set(probe2.ref))) 503 | 504 | probe2.expectMsg(Connecting("localhost", 6379)) 505 | probe2.expectMsg(Connected("localhost", 6379)) 506 | brando ! probe.ref 507 | probe.expectMsg(Connected("localhost", 6379)) 508 | } 509 | } 510 | 511 | describe("connection") { 512 | import Connection._ 513 | it("should try to reconnect if connectionRetryDelay and connectionRetryAttempts are defined") { 514 | val listener = TestProbe() 515 | val brando = TestActorRef(new Redis( 516 | "localhost", 6379, 0, None, Set(listener.ref), 2.seconds, Some(1.seconds), Some(1), None)) 517 | 518 | listener.expectMsg(Connecting("localhost", 6379)) 519 | assert(brando.underlyingActor.retries === 0) 520 | listener.expectMsg(Connected("localhost", 6379)) 521 | assert(brando.underlyingActor.retries === 0) 522 | 523 | brando ! Disconnected("localhost", 6379) 524 | 525 | listener.expectMsg(Disconnected("localhost", 6379)) 526 | listener.expectMsg(Connecting("localhost", 6379)) 527 | } 528 | 529 | it("should not try to reconnect if connectionRetryDelay and connectionRetryAttempts are not defined") { 530 | val listener = TestProbe() 531 | val brando = TestActorRef(new Redis( 532 | "localhost", 6379, 0, None, Set(listener.ref), 2.seconds, None, None, None)) 533 | 534 | listener.expectMsg(Connecting("localhost", 6379)) 535 | listener.expectMsg(Connected("localhost", 6379)) 536 | 537 | brando ! Disconnected("localhost", 6379) 538 | 539 | listener.expectMsg(Disconnected("localhost", 6379)) 540 | listener.expectNoMsg 541 | } 542 | 543 | it("should not try to reconnect once the max retry attempts is reached") { 544 | val listener = TestProbe() 545 | val brando = TestActorRef(new Redis( 546 | "localhost", 16379, 0, None, Set(listener.ref), 2.seconds, Some(1.seconds), Some(1), None)) 547 | 548 | listener.expectMsg(Connecting("localhost", 16379)) 549 | assert(brando.underlyingActor.retries === 0) 550 | listener.expectMsg(ConnectionFailed("localhost", 16379)) 551 | 552 | listener.expectMsg(Connecting("localhost", 16379)) 553 | assert(brando.underlyingActor.retries === 1) 554 | listener.expectMsg(ConnectionFailed("localhost", 16379)) 555 | 556 | listener.expectNoMsg 557 | } 558 | } 559 | 560 | describe("eval") { 561 | it("should respond with result of operation") { 562 | val brando = system.actorOf(Redis(listeners = Set(self))) 563 | expectMsg(Connecting("localhost", 6379)) 564 | expectMsg(Connected("localhost", 6379)) 565 | 566 | brando ! 
Request("eval", 567 | "if ARGV[1] == \"hello\" then return 1 end", 568 | "0", 569 | "hello") 570 | 571 | expectMsg(Some(1)) 572 | } 573 | 574 | } 575 | 576 | } 577 | -------------------------------------------------------------------------------- /src/test/scala/ReplyParserTest.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import akka.util.ByteString 4 | import org.scalatest.{ FunSpec, BeforeAndAfterEach } 5 | 6 | class ReplyParserTest extends FunSpec with BeforeAndAfterEach { 7 | 8 | object Parser extends ReplyParser 9 | import Parser._ 10 | 11 | override def afterEach() { 12 | remainingBuffer = ByteString.empty 13 | } 14 | 15 | describe("Simple String reply") { 16 | it("should decode Ok") { 17 | val result = parse(ByteString("+OK\r\n")) 18 | 19 | assert(result === Success(Some(Ok))) 20 | } 21 | 22 | it("should decode Pong") { 23 | val result = parse(ByteString("+PONG\r\n")) 24 | 25 | assert(result === Success(Some(Pong))) 26 | } 27 | } 28 | 29 | describe("Integer reply") { 30 | it("should decode as long") { 31 | parse(ByteString(":17575\r\n")) match { 32 | case Success(Some(i: Long), next) ⇒ assert(i == 17575L) 33 | case _ ⇒ assert(false) 34 | } 35 | } 36 | } 37 | 38 | describe("Error reply") { 39 | it("should decode the error") { 40 | val result = parse(ByteString("-err\r\n")) 41 | result match { 42 | case Success(Some(akka.actor.Status.Failure(e)), _) ⇒ 43 | assert(e.getMessage === "err") 44 | case x ⇒ fail(s"Parsed unexpected message $x") 45 | } 46 | } 47 | } 48 | 49 | describe("Bulk String reply") { 50 | it("should decode as ByteString option") { 51 | val result = parse(ByteString("$6\r\nfoobar\r\n")) 52 | 53 | assert(result === Success(Some(ByteString("foobar")))) 54 | } 55 | 56 | it("should decode null as None") { 57 | val result = parse(ByteString("$-1\r\n")) 58 | 59 | assert(result === Success(None)) 60 | } 61 | } 62 | 63 | describe("Array reply") { 64 | it("should decode list of bulk string reply values") { 65 | val result = parse(ByteString("*4\r\n$3\r\nfoo\r\n$3\r\nbar\r\n$4\r\nfoob\r\n$6\r\nfoobar\r\n")) 66 | 67 | val expected = Some(List(Some(ByteString("foo")), Some(ByteString("bar")), 68 | Some(ByteString("foob")), Some(ByteString("foobar")))) 69 | 70 | assert(result === Success(expected)) 71 | } 72 | 73 | it("should decode list with nil values") { 74 | val result = parse(ByteString("*3\r\n$-1\r\n$3\r\nbar\r\n$6\r\nfoobar\r\n")) 75 | 76 | val expected = Some(List(None, Some(ByteString("bar")), 77 | Some(ByteString("foobar")))) 78 | 79 | assert(result === Success(expected)) 80 | } 81 | 82 | it("should decode list with integer values") { 83 | val result = parse(ByteString("*3\r\n$3\r\nbar\r\n:37282\r\n$6\r\nfoobar\r\n")) 84 | 85 | val expected = Some(List(Some(ByteString("bar")), Some(37282), 86 | Some(ByteString("foobar")))) 87 | 88 | assert(result === Success(expected)) 89 | } 90 | 91 | it("should decode list with nested array reply") { 92 | val result = parse(ByteString("*3\r\n$3\r\nbar\r\n*4\r\n$3\r\nfoo\r\n$3\r\nbar\r\n$4\r\nfoob\r\n$6\r\nfoobar\r\n$6\r\nfoobaz\r\n")) 93 | 94 | val expected = Some(List(Some(ByteString("bar")), 95 | Some(List(Some(ByteString("foo")), Some(ByteString("bar")), 96 | Some(ByteString("foob")), Some(ByteString("foobar")))), 97 | Some(ByteString("foobaz")))) 98 | 99 | assert(result === Success(expected)) 100 | } 101 | 102 | it("should decode null list") { 103 | assert(parse(ByteString("*-1\r\n")) === Success(None)) 104 | } 105 | } 106 | 107 | describe("parsing empty replies") 
{ 108 | it("should return a failure if remaining partial response is empty") { 109 | parseReply(ByteString()) { r ⇒ fail("nothing to parse") } 110 | } 111 | } 112 | 113 | describe("parsing incomplete replies") { 114 | 115 | var parsed = false 116 | it("should handle an array reply split into two parts") { 117 | parseReply(ByteString("*")) { _ ⇒ fail("nothing to parse yet") } 118 | 119 | parseReply(ByteString("1\r\n$3\r\nfoo\r\n")) { result ⇒ 120 | val expected = Some(List(Some(ByteString("foo")))) 121 | assert(result === expected) 122 | parsed = true 123 | } 124 | if (!parsed) fail("Did not parse anything") 125 | } 126 | 127 | it("should handle an array reply split before an entry") { 128 | parseReply(ByteString("*1\r\n")) { _ ⇒ fail("nothing to parse yet") } 129 | 130 | var parsed = false 131 | parseReply(ByteString("$3\r\nfoo\r\n")) { result ⇒ 132 | val expected = Some(List(Some(ByteString("foo")))) 133 | assert(result === expected) 134 | parsed = true 135 | } 136 | if (!parsed) fail("Did not parse anything") 137 | } 138 | 139 | it("should handle an array reply split in an entry") { 140 | parseReply(ByteString("*1\r\n$3\r\n")) { _ ⇒ fail("nothing to parse yet") } 141 | 142 | var parsed = false 143 | parseReply(ByteString("foo\r\n")) { result ⇒ 144 | val expected = Some(List(Some(ByteString("foo")))) 145 | assert(result === expected) 146 | parsed = true 147 | } 148 | if (!parsed) fail("Did not parse anything") 149 | } 150 | 151 | it("should handle an array reply split between entries") { 152 | parseReply(ByteString("*2\r\n$3\r\nfoo\r\n")) { _ ⇒ fail("nothing to parse yet") } 153 | 154 | var parsed = false 155 | parseReply(ByteString("$3\r\nbar\r\n")) { result ⇒ 156 | val expected = Some(List(Some(ByteString("foo")), Some(ByteString("bar")))) 157 | assert(result === expected) 158 | parsed = true 159 | } 160 | if (!parsed) fail("Did not parse anything") 161 | assert(remainingBuffer === ByteString.empty) 162 | } 163 | 164 | it("should handle a string reply split into two parts") { 165 | parseReply(ByteString("+")) { _ ⇒ fail("nothing to parse yet") } 166 | 167 | var parsed = false 168 | parseReply(ByteString("OK\r\n")) { result ⇒ 169 | val expected = Some(Ok) 170 | assert(result === expected) 171 | parsed = true 172 | } 173 | if (!parsed) fail("Did not parse anything") 174 | assert(remainingBuffer === ByteString.empty) 175 | } 176 | 177 | it("should handle a string reply split after the \\r") { 178 | parseReply(ByteString("+OK\r")) { _ ⇒ fail("nothing to parse yet") } 179 | 180 | var parsed = false 181 | parseReply(ByteString("\n")) { result ⇒ 182 | val expected = Some(Ok) 183 | assert(result === expected) 184 | parsed = true 185 | } 186 | if (!parsed) fail("Did not parse anything") 187 | assert(remainingBuffer === ByteString.empty) 188 | } 189 | 190 | it("should handle an integer reply split into two parts") { 191 | parseReply(ByteString(":")) { _ ⇒ fail("nothing to parse yet") } 192 | 193 | var parsed = false 194 | parseReply(ByteString("17575\r\n")) { result ⇒ 195 | val expected = Some(17575) 196 | assert(result === expected) 197 | parsed = true 198 | } 199 | if (!parsed) fail("Did not parse anything") 200 | assert(remainingBuffer === ByteString.empty) 201 | } 202 | 203 | it("should handle a bulk string reply split into two parts") { 204 | parseReply(ByteString("$")) { _ ⇒ fail("nothing to parse yet") } 205 | 206 | var parsed = false 207 | parseReply(ByteString("3\r\nfoo\r\n")) { result ⇒ 208 | val expected = Some(ByteString("foo")) 209 | assert(result === expected) 210 | parsed = true 
211 | } 212 | if (!parsed) fail("Did not parse anything") 213 | assert(remainingBuffer === ByteString.empty) 214 | } 215 | 216 | it("should handle a bulk string reply split after the number") { 217 | parseReply(ByteString("$3")) { _ ⇒ fail("nothing to parse yet") } 218 | 219 | var parsed = false 220 | parseReply(ByteString("\r\nfoo\r\n")) { result ⇒ 221 | val expected = Some(ByteString("foo")) 222 | assert(result === expected) 223 | parsed = true 224 | } 225 | if (!parsed) fail("Did not parse anything") 226 | assert(remainingBuffer === ByteString.empty) 227 | } 228 | 229 | it("should handle a bulk string reply split after the first line") { 230 | parseReply(ByteString("$3\r\n")) { _ ⇒ fail("nothing to parse yet") } 231 | 232 | var parsed = false 233 | parseReply(ByteString("foo\r\n")) { result ⇒ 234 | val expected = Some(ByteString("foo")) 235 | assert(result === expected) 236 | parsed = true 237 | } 238 | if (!parsed) fail("Did not parse anything") 239 | assert(remainingBuffer === ByteString.empty) 240 | } 241 | 242 | it("should handle an error reply split into two parts") { 243 | parseReply(ByteString("-")) { _ ⇒ fail("nothing to parse yet") } 244 | 245 | var parsed = false 246 | parseReply(ByteString("foo\r\n")) { result ⇒ 247 | result match { 248 | case Some(akka.actor.Status.Failure(e)) ⇒ 249 | assert(e.getMessage === "foo") 250 | case x ⇒ fail(s"Parsed unexpected message $x") 251 | } 252 | parsed = true 253 | } 254 | if (!parsed) fail("Did not parse anything") 255 | assert(remainingBuffer === ByteString.empty) 256 | } 257 | } 258 | } 259 | -------------------------------------------------------------------------------- /src/test/scala/RequestTest.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import org.scalatest.FunSpec 4 | import akka.util.ByteString 5 | 6 | class RequestTest extends FunSpec { 7 | describe("toByteString") { 8 | it("should encode request as Redis protocol ByteString") { 9 | val request = Request("GET", "mykey") 10 | val expected = ByteString("*2\r\n$3\r\nGET\r\n$5\r\nmykey\r\n") 11 | 12 | assert(request.toByteString === expected) 13 | } 14 | 15 | it("should encode request with 2 arguments") { 16 | val request = Request("SET", "mykey", "somevalue") 17 | val expected = ByteString("*3\r\n$3\r\nSET\r\n$5\r\nmykey\r\n$9\r\nsomevalue\r\n") 18 | 19 | assert(request.toByteString === expected) 20 | } 21 | } 22 | 23 | describe("HashRequest") { 24 | it("should create a request that contains all the arguments merged together") { 25 | val req = HashRequest("HMSET", "setkey", Map("a" -> "a", "b" -> "b")) 26 | assertResult(Request("HMSET", "setkey", "a", "a", "b", "b"))(req) 27 | } 28 | } 29 | } -------------------------------------------------------------------------------- /src/test/scala/ResponseTest.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import akka.util.ByteString 4 | import org.scalatest.FunSpec 5 | 6 | class ResponseTest extends FunSpec { 7 | describe("Utf8String") { 8 | it("should extract a UTF-8 string from a ByteString") { 9 | val Response.AsString(r) = Some(ByteString("ok")) 10 | assert(r === "ok") 11 | } 12 | 13 | describe("option") { 14 | it("should extract the UTF-8 string if it matches") { 15 | val Response.AsStringOption(r) = Some(ByteString("ok")) 16 | assert(r === Some("ok")) 17 | } 18 | it("should return None otherwise") { 19 | val Response.AsStringOption(r) = None 20 | assert(r === None) 21 | } 22 | } 23 | } 24 | 25 | 
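  // Illustrative sketch: the extractors tested in this file compose with
  // ordinary pattern matching, so a raw reply can be routed by its shape.
  // The reply literals and the `describeReply` helper are assumptions made
  // for this example only, not part of the original suite.
  describe("routing replies through extractors (sketch)") {
    it("should dispatch bulk and multi bulk replies by shape") {
      def describeReply(reply: Option[Any]): String = reply match {
        case Response.AsString(s)   ⇒ s"string: $s" // single bulk reply
        case Response.AsStrings(ss) ⇒ s"strings: ${ss.mkString(",")}" // multi bulk reply
        case _                      ⇒ "unknown"
      }
      assert(describeReply(Some(ByteString("ok"))) === "string: ok")
      assert(describeReply(Some(List(Some(ByteString("a")), Some(ByteString("b"))))) === "strings: a,b")
    }
  }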
describe("StringOptions") { 26 | it("should extract a list of option string from a option bytestring list ") { 27 | val resp = Some(List(Some(ByteString("l1")), None, Some(ByteString("l2")))) 28 | val seq = Response.AsStringOptions.unapply(resp) 29 | assertResult(Some(Seq(Some("l1"), None, Some("l2"))))(seq) 30 | } 31 | } 32 | 33 | describe("Strings") { 34 | it("should extract a list of string from a option bytestring list ") { 35 | val resp = Some(List(Some(ByteString("l1")), Some(ByteString("l2")), Some(ByteString("l3")))) 36 | val seq = Response.AsStrings.unapply(resp) 37 | assertResult(Some(Seq("l1", "l2", "l3")))(seq) 38 | } 39 | 40 | it("shouldn't extract if it is another type") { 41 | val seq = Response.AsStrings.unapply(Some(List(Some(12L)))) 42 | assertResult(None)(seq) 43 | } 44 | } 45 | 46 | describe("Bytes Sequences") { 47 | it("should extract a list of string from a option bytestring list ") { 48 | val resp = Some(List(Some(ByteString(0, 1)), Some(ByteString(2, 3)))) 49 | val seq = Response.AsByteSeqs.unapply(resp) 50 | assertResult(Some(Seq(Seq(0, 1), Seq(2, 3))))(seq) 51 | } 52 | 53 | it("shouldn't extract if it is another type") { 54 | val seq = Response.AsByteSeqs.unapply(Some(List(Some(12L)))) 55 | assertResult(None)(seq) 56 | } 57 | } 58 | 59 | describe("Strings Hashes") { 60 | it("should extract a map when the result list has an heaven size") { 61 | val resp = Some(List(Some(ByteString("k1")), Some(ByteString("v1")), Some(ByteString("k2")), Some(ByteString("v2")))) 62 | val map = Response.AsStringsHash.unapply(resp) 63 | assertResult(Some(Map("k1" -> "v1", "k2" -> "v2")))(map) 64 | } 65 | 66 | it("should extract an empty map when the result list is empty") { 67 | val map = Response.AsStringsHash.unapply(Some(List.empty)) 68 | assertResult(Some(Map.empty))(map) 69 | } 70 | 71 | it("should fails when the result list has an odd size") { 72 | val map = Response.AsStringsHash.unapply(Some(List(Some(ByteString("k1"))))) 73 | assertResult(None)(map) 74 | } 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/test/scala/SentinelTest.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import akka.actor._ 4 | import akka.util._ 5 | import akka.pattern._ 6 | import akka.testkit._ 7 | 8 | import scala.concurrent._ 9 | import scala.concurrent.duration._ 10 | 11 | import org.scalatest._ 12 | 13 | class SentinelTest extends TestKit(ActorSystem("SentinelTest")) with FunSpecLike 14 | with ImplicitSender { 15 | 16 | import Sentinel._ 17 | import Connection._ 18 | 19 | describe("Sentinel") { 20 | describe("connection to sentinel instances") { 21 | it("should connect to the first working sentinel instance") { 22 | val probe = TestProbe() 23 | val sentinel = system.actorOf(Sentinel(Seq( 24 | Server("wrong-host", 26379), 25 | Server("localhost", 26379)), Set(probe.ref))) 26 | 27 | probe.expectMsg(Connecting("wrong-host", 26379)) 28 | probe.expectMsg(Connecting("localhost", 26379)) 29 | probe.expectMsg(Connected("localhost", 26379)) 30 | } 31 | 32 | it("should send a notification to the listeners when connecting") { 33 | val probe = TestProbe() 34 | val sentinel = system.actorOf(Sentinel(Seq( 35 | Server("localhost", 26379)), 36 | Set(probe.ref))) 37 | 38 | probe.expectMsg(Connecting("localhost", 26379)) 39 | } 40 | 41 | it("should send a notification to the listeners when connected") { 42 | val probe = TestProbe() 43 | val sentinel = system.actorOf(Sentinel(Seq( 44 | 
Server("localhost", 26379)), Set(probe.ref))) 45 | 46 | probe.receiveN(1) 47 | probe.expectMsg(Connected("localhost", 26379)) 48 | } 49 | 50 | it("should send a notification to the listeners when disconnected") { 51 | val probe = TestProbe() 52 | val sentinel = system.actorOf(Sentinel(Seq( 53 | Server("localhost", 26379)), Set(probe.ref))) 54 | 55 | probe.receiveN(1) 56 | probe.expectMsg(Connected("localhost", 26379)) 57 | 58 | sentinel ! Disconnected("localhost", 26379) 59 | 60 | probe.expectMsg(Disconnected("localhost", 26379)) 61 | } 62 | 63 | it("should send a notification to the listeners for connection failure") { 64 | val probe = TestProbe() 65 | val sentinels = Seq(Server("wrong-host", 26379)) 66 | val sentinel = system.actorOf(Sentinel(sentinels, Set(probe.ref))) 67 | 68 | probe.receiveN(1) 69 | probe.expectMsg(Sentinel.ConnectionFailed(sentinels)) 70 | } 71 | 72 | it("should make sure the working instance will be tried first next reconnection") { 73 | val probe = TestProbe() 74 | val sentinel = system.actorOf(Sentinel(Seq( 75 | Server("wrong-host", 26379), 76 | Server("localhost", 26379)), Set(probe.ref))) 77 | 78 | probe.expectMsg(Connecting("wrong-host", 26379)) 79 | probe.expectMsg(Connecting("localhost", 26379)) 80 | probe.expectMsg(Connected("localhost", 26379)) 81 | 82 | sentinel ! Disconnected("localhost", 26379) 83 | 84 | probe.expectMsg(Disconnected("localhost", 26379)) 85 | 86 | probe.expectMsg(Connecting("localhost", 26379)) 87 | } 88 | 89 | it("should send a notification to the listeners if it can't connect to any instance") { 90 | val probe = TestProbe() 91 | val sentinels = Seq( 92 | Server("wrong-host-1", 26379), 93 | Server("wrong-host-2", 26379)) 94 | val sentinel = system.actorOf(Sentinel(sentinels.reverse, Set(probe.ref))) 95 | 96 | probe.receiveN(2) 97 | probe.expectMsg(Sentinel.ConnectionFailed(sentinels)) 98 | } 99 | } 100 | 101 | describe("Request") { 102 | it("should return a failure when disconnected") { 103 | val sentinel = system.actorOf(Sentinel(Seq( 104 | Server("localhost", 26379)))) 105 | 106 | sentinel ! Request("PING") 107 | 108 | expectMsg(Status.Failure(new RedisException("Disconnected from the sentinel cluster"))) 109 | 110 | } 111 | } 112 | 113 | describe("Subscriptions") { 114 | it("should receive pub/sub notifications") { 115 | val sentinel = system.actorOf(Sentinel(Seq( 116 | Server("localhost", 26379)), Set(self))) 117 | val sentinel2 = system.actorOf(Sentinel(Seq( 118 | Server("localhost", 26379)), Set(self))) 119 | 120 | expectMsg(Connecting("localhost", 26379)) 121 | expectMsg(Connecting("localhost", 26379)) 122 | expectMsg(Connected("localhost", 26379)) 123 | expectMsg(Connected("localhost", 26379)) 124 | 125 | sentinel ! Request("subscribe", "+failover-end") 126 | 127 | expectMsg(Some(List( 128 | Some(ByteString("subscribe")), 129 | Some(ByteString("+failover-end")), 130 | Some(1)))) 131 | 132 | sentinel2 ! Request("sentinel", "failover", "mymaster") 133 | expectMsg(Some(Ok)) 134 | 135 | expectMsg(PubSubMessage("+failover-end", "master mymaster 127.0.0.1 6379")) 136 | 137 | Thread.sleep(2000) 138 | 139 | sentinel2 ! 
Request("sentinel", "failover", "mymaster") 140 | expectMsg(Some(Ok)) 141 | 142 | expectMsg(PubSubMessage("+failover-end", "master mymaster 127.0.0.1 6380")) 143 | } 144 | } 145 | } 146 | } 147 | -------------------------------------------------------------------------------- /src/test/scala/ShardManagerTest.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import org.scalatest.FunSpecLike 4 | import akka.testkit._ 5 | 6 | import akka.actor._ 7 | import akka.util.ByteString 8 | import scala.concurrent.duration._ 9 | import scala.util.Failure 10 | 11 | class ShardManagerTest extends TestKit(ActorSystem("ShardManagerTest")) 12 | with FunSpecLike with ImplicitSender { 13 | 14 | import ShardManager._ 15 | import Connection._ 16 | 17 | describe("creating shards") { 18 | it("should create a pool of clients mapped to ids") { 19 | val shards = Seq( 20 | RedisShard("server1", "localhost", 6379, 0), 21 | RedisShard("server2", "localhost", 6379, 1), 22 | RedisShard("server3", "localhost", 6379, 2)) 23 | 24 | val shardManager = TestActorRef[ShardManager](ShardManager( 25 | shards)) 26 | 27 | assert(shardManager.underlyingActor.pool.keys === Set("server1", "server2", "server3")) 28 | } 29 | 30 | it("should support updating existing shards but not creating new ones") { 31 | val shards = Seq( 32 | RedisShard("server1", "localhost", 6379, 0), 33 | RedisShard("server2", "localhost", 6379, 1), 34 | RedisShard("server3", "localhost", 6379, 2)) 35 | 36 | val shardManager = TestActorRef[ShardManager](ShardManager( 37 | shards)) 38 | 39 | assert(shardManager.underlyingActor.pool.keys === Set("server1", "server2", "server3")) 40 | 41 | shardManager ! RedisShard("server1", "localhost", 6379, 6) 42 | 43 | assert(shardManager.underlyingActor.pool.keys === Set("server1", "server2", "server3")) 44 | 45 | shardManager ! RedisShard("new_server", "localhost", 6378, 3) 46 | 47 | assert(shardManager.underlyingActor.pool.keys === Set("server1", "server2", "server3")) 48 | } 49 | } 50 | 51 | describe("sending requests") { 52 | describe("using sentinel") { 53 | it("should forward each request to the appropriate client transparently") { 54 | val sentinelProbe = TestProbe() 55 | val redisProbe = TestProbe() 56 | val sentinel = system.actorOf(Sentinel(listeners = Set(sentinelProbe.ref))) 57 | val shardManager = system.actorOf(ShardManager( 58 | shards = Seq(SentinelShard("mymaster", 0)), 59 | sentinelClient = Some(sentinel), 60 | listeners = Set(redisProbe.ref))) 61 | 62 | sentinelProbe.expectMsg( 63 | Connecting("localhost", 26379)) 64 | sentinelProbe.expectMsg( 65 | Connected("localhost", 26379)) 66 | 67 | redisProbe.expectMsg( 68 | Connecting("127.0.0.1", 6379)) 69 | redisProbe.expectMsg( 70 | Connected("127.0.0.1", 6379)) 71 | 72 | shardManager ! ("key", Request("SET", "shard_manager_test", "some value")) 73 | 74 | expectMsg(Some(Ok)) 75 | 76 | shardManager ! 
("key", Request("GET", "shard_manager_test")) 77 | 78 | expectMsg(Some(ByteString("some value"))) 79 | } 80 | } 81 | 82 | it("should forward each request to the appropriate client transparently") { 83 | val shards = Seq( 84 | RedisShard("server1", "127.0.0.1", 6379, 0), 85 | RedisShard("server2", "127.0.0.1", 6379, 1), 86 | RedisShard("server3", "127.0.0.1", 6379, 2)) 87 | 88 | val sentinelProbe = TestProbe() 89 | val redisProbe = TestProbe() 90 | val sentinel = system.actorOf(Sentinel(listeners = Set(sentinelProbe.ref))) 91 | val shardManager = system.actorOf(ShardManager( 92 | shards = shards, 93 | sentinelClient = Some(sentinel), 94 | listeners = Set(redisProbe.ref))) 95 | 96 | sentinelProbe.expectMsg( 97 | Connecting("localhost", 26379)) 98 | sentinelProbe.expectMsg( 99 | Connected("localhost", 26379)) 100 | 101 | redisProbe.expectMsg( 102 | Connecting("127.0.0.1", 6379)) 103 | redisProbe.expectMsg( 104 | Connecting("127.0.0.1", 6379)) 105 | redisProbe.expectMsg( 106 | Connecting("127.0.0.1", 6379)) 107 | 108 | redisProbe.expectMsg( 109 | Connected("127.0.0.1", 6379)) 110 | redisProbe.expectMsg( 111 | Connected("127.0.0.1", 6379)) 112 | redisProbe.expectMsg( 113 | Connected("127.0.0.1", 6379)) 114 | 115 | shardManager ! ("key", Request("SET", "shard_manager_test", "some value")) 116 | 117 | expectMsg(Some(Ok)) 118 | 119 | shardManager ! ("key", Request("GET", "shard_manager_test")) 120 | 121 | expectMsg(Some(ByteString("some value"))) 122 | } 123 | 124 | it("should infer the key from the params list") { 125 | val shards = Seq( 126 | RedisShard("server1", "127.0.0.1", 6379, 0), 127 | RedisShard("server2", "127.0.0.1", 6379, 1), 128 | RedisShard("server3", "127.0.0.1", 6379, 2)) 129 | 130 | val redisProbe = TestProbe() 131 | val shardManager = TestActorRef[ShardManager](ShardManager( 132 | shards, listeners = Set(redisProbe.ref))) 133 | 134 | redisProbe.expectMsg( 135 | Connecting("127.0.0.1", 6379)) 136 | redisProbe.expectMsg( 137 | Connecting("127.0.0.1", 6379)) 138 | redisProbe.expectMsg( 139 | Connecting("127.0.0.1", 6379)) 140 | 141 | redisProbe.expectMsg( 142 | Connected("127.0.0.1", 6379)) 143 | redisProbe.expectMsg( 144 | Connected("127.0.0.1", 6379)) 145 | redisProbe.expectMsg( 146 | Connected("127.0.0.1", 6379)) 147 | 148 | shardManager ! Request("SET", "shard_manager_test", "some value") 149 | 150 | expectMsg(Some(Ok)) 151 | 152 | shardManager ! Request("GET", "shard_manager_test") 153 | 154 | expectMsg(Some(ByteString("some value"))) 155 | } 156 | 157 | it("should fail with IllegalArgumentException when params is empty") { 158 | val shards = Seq( 159 | RedisShard("server1", "127.0.0.1", 6379, 0), 160 | RedisShard("server2", "127.0.0.1", 6379, 1), 161 | RedisShard("server3", "127.0.0.1", 6379, 2)) 162 | 163 | val redisProbe = TestProbe() 164 | val shardManager = TestActorRef[ShardManager](ShardManager( 165 | shards, listeners = Set(redisProbe.ref))) 166 | 167 | redisProbe.expectMsg( 168 | Connecting("127.0.0.1", 6379)) 169 | redisProbe.expectMsg( 170 | Connecting("127.0.0.1", 6379)) 171 | redisProbe.expectMsg( 172 | Connecting("127.0.0.1", 6379)) 173 | 174 | redisProbe.expectMsg( 175 | Connected("127.0.0.1", 6379)) 176 | redisProbe.expectMsg( 177 | Connected("127.0.0.1", 6379)) 178 | redisProbe.expectMsg( 179 | Connected("127.0.0.1", 6379)) 180 | 181 | shardManager ! 
Request("SET") 182 | 183 | expectMsgClass(classOf[Failure[IllegalArgumentException]]) 184 | } 185 | 186 | it("should broadcast a Request to all shards") { 187 | val shards = Seq( 188 | RedisShard("server1", "127.0.0.1", 6379, 0), 189 | RedisShard("server2", "127.0.0.1", 6379, 1), 190 | RedisShard("server3", "127.0.0.1", 6379, 2)) 191 | 192 | val redisProbe = TestProbe() 193 | val shardManager = TestActorRef[ShardManager](ShardManager( 194 | shards, listeners = Set(redisProbe.ref))) 195 | 196 | redisProbe.expectMsg( 197 | Connecting("127.0.0.1", 6379)) 198 | redisProbe.expectMsg( 199 | Connecting("127.0.0.1", 6379)) 200 | redisProbe.expectMsg( 201 | Connecting("127.0.0.1", 6379)) 202 | 203 | redisProbe.expectMsg( 204 | Connected("127.0.0.1", 6379)) 205 | redisProbe.expectMsg( 206 | Connected("127.0.0.1", 6379)) 207 | redisProbe.expectMsg( 208 | Connected("127.0.0.1", 6379)) 209 | 210 | val listName = scala.util.Random.nextString(5) 211 | 212 | shardManager ! BroadcastRequest("LPUSH", listName, "somevalue") 213 | 214 | shards.foreach { _ ⇒ expectMsg(Some(new java.lang.Long(1))) } 215 | 216 | shardManager ! BroadcastRequest("LPOP", listName) 217 | 218 | shards.foreach { _ ⇒ expectMsg(Some(ByteString("somevalue"))) } 219 | } 220 | } 221 | 222 | describe("Listening to Shard state changes") { 223 | it("should notify listeners when a shard connect successfully") { 224 | val shards = Seq(RedisShard("server1", "localhost", 6379, 0)) 225 | 226 | val probe = TestProbe() 227 | 228 | val shardManager = TestActorRef[ShardManager](ShardManager( 229 | shards, Set(probe.ref))) 230 | 231 | probe.expectMsg(Connecting("localhost", 6379)) 232 | probe.expectMsg(Connected("localhost", 6379)) 233 | } 234 | 235 | it("should notify listeners when a shard fails to connect") { 236 | val shards = Seq( 237 | RedisShard("server2", "localhost", 13579, 1)) 238 | 239 | val probe = TestProbe() 240 | 241 | val shardManager = TestActorRef[ShardManager](ShardManager( 242 | shards, Set(probe.ref))) 243 | 244 | probe.expectMsg(Connecting("localhost", 13579)) 245 | probe.expectMsg(ConnectionFailed("localhost", 13579)) 246 | } 247 | 248 | it("should cleaned up any dead listeners") { 249 | 250 | val shards = Seq( 251 | RedisShard("server1", "localhost", 6379, 0)) 252 | 253 | val probe1 = TestProbe() 254 | val probe2 = TestProbe() 255 | 256 | val shardManager = TestActorRef[ShardManager](ShardManager( 257 | shards, Set(probe1.ref, probe2.ref))).underlyingActor 258 | assertResult(2)(shardManager.listeners.size) 259 | 260 | probe1.ref ! 
PoisonPill 261 | 262 | probe2.expectMsg(Connecting("localhost", 6379)) 263 | probe2.expectMsg(Connected("localhost", 6379)) 264 | 265 | assertResult(1)(shardManager.listeners.size) 266 | 267 | } 268 | 269 | it("should notify listeners when a shard fails to authenticate") { 270 | val shards = Seq( 271 | RedisShard("server1", "localhost", 6379, 0), 272 | RedisShard("server2", "localhost", 6379, 1, auth = Some("not-valid-auth"))) 273 | 274 | val probe = TestProbe() 275 | 276 | val shardManager = TestActorRef[ShardManager](ShardManager( 277 | shards, Set(probe.ref))) 278 | 279 | probe.expectMsg(Connecting("localhost", 6379)) 280 | probe.expectMsg(Connecting("localhost", 6379)) 281 | probe.expectMsg(Connected("localhost", 6379)) 282 | probe.expectMsg(Redis.AuthenticationFailed("localhost", 6379)) 283 | } 284 | } 285 | } 286 | -------------------------------------------------------------------------------- /src/test/scala/StashingRedisClientTest.scala: -------------------------------------------------------------------------------- 1 | package brando 2 | 3 | import akka.actor._ 4 | import akka.testkit._ 5 | import org.scalatest.FunSpecLike 6 | 7 | class StashingRedisClientTest extends TestKit(ActorSystem("StashingRedisClientTest")) with FunSpecLike with ImplicitSender { 8 | 9 | import Connection._ 10 | 11 | describe("stashing client should") { 12 | it("respond with Pong after connected") { 13 | val brando = system.actorOf(Redis(listeners = Set(self))) 14 | val stashing = system.actorOf(StashingRedis(brando)) 15 | expectMsg(Connecting("localhost", 6379)) 16 | expectMsg(Connected("localhost", 6379)) 17 | stashing ! Request("PING") 18 | expectMsg(Some(Pong)) 19 | } 20 | 21 | it("respond with Pong before connected") { 22 | val brando = system.actorOf(Redis(listeners = Set(self))) 23 | val stashing = system.actorOf(StashingRedis(brando)) 24 | stashing ! Request("PING") 25 | expectMsg(Connecting("localhost", 6379)) 26 | expectMsg(Connected("localhost", 6379)) 27 | expectMsg(Some(Pong)) 28 | } 29 | 30 | it("drop old messages when capacity is full") { 31 | val brando = system.actorOf(Redis(listeners = Set(self))) 32 | val stashing = system.actorOf(StashingRedis(brando).withMailbox("bounded-stash-mailbox")) 33 | stashing ! Request("PING") 34 | stashing ! Request("PING") 35 | stashing ! Request("PING") 36 | stashing ! Request("GET", "non-existing-key") 37 | stashing ! Request("PING") 38 | expectMsg(Connecting("localhost", 6379)) 39 | expectMsg(Connected("localhost", 6379)) 40 | expectMsg(Some(Pong)) 41 | expectMsg(Some(Pong)) 42 | expectNoMsg() 43 | } 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /test-config/redis-slave.conf: -------------------------------------------------------------------------------- 1 | # Redis configuration file example 2 | 3 | # Note on units: when memory size is needed, it is possible to specify 4 | # it in the usual form of 1k 5GB 4M and so forth: 5 | # 6 | # 1k => 1000 bytes 7 | # 1kb => 1024 bytes 8 | # 1m => 1000000 bytes 9 | # 1mb => 1024*1024 bytes 10 | # 1g => 1000000000 bytes 11 | # 1gb => 1024*1024*1024 bytes 12 | # 13 | # units are case insensitive so 1GB 1Gb 1gB are all the same. 14 | 15 | ################################## INCLUDES ################################### 16 | 17 | # Include one or more other config files here. This is useful if you 18 | # have a standard template that goes to all Redis servers but also need 19 | # to customize a few per-server settings. 
Include files can include 20 | # other files, so use this wisely. 21 | # 22 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE" 23 | # from admin or Redis Sentinel. Since Redis always uses the last processed 24 | # line as value of a configuration directive, you'd better put includes 25 | # at the beginning of this file to avoid overwriting config changes at runtime. 26 | # 27 | # If instead you are interested in using includes to override configuration 28 | # options, it is better to use include as the last line. 29 | # 30 | # include /path/to/local.conf 31 | # include /path/to/other.conf 32 | 33 | ################################ GENERAL ##################################### 34 | 35 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 36 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 37 | daemonize yes 38 | 39 | # When running daemonized, Redis writes a pid file in /var/run/redis.pid by 40 | # default. You can specify a custom pid file location here. 41 | pidfile "/var/run/redis/redis-slave-server.pid" 42 | 43 | # Accept connections on the specified port, default is 6379. 44 | # If port 0 is specified Redis will not listen on a TCP socket. 45 | port 6380 46 | 47 | # By default Redis listens for connections from all the network interfaces 48 | # available on the server. It is possible to listen to just one or multiple 49 | # interfaces using the "bind" configuration directive, followed by one or 50 | # more IP addresses. 51 | # 52 | # Examples: 53 | # 54 | # bind 192.168.1.100 10.0.0.1 55 | bind 127.0.0.1 56 | 57 | # Specify the path for the unix socket that will be used to listen for 58 | # incoming connections. There is no default, so Redis will not listen 59 | # on a unix socket when not specified. 60 | # 61 | # unixsocket /var/run/redis/redis.sock 62 | # unixsocketperm 755 63 | 64 | # Close the connection after a client is idle for N seconds (0 to disable) 65 | timeout 0 66 | 67 | # TCP keepalive. 68 | # 69 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 70 | # of communication. This is useful for two reasons: 71 | # 72 | # 1) Detect dead peers. 73 | # 2) Keep the connection alive from the point of view of network 74 | # equipment in the middle. 75 | # 76 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 77 | # Note that to close the connection twice that time is needed. 78 | # On other kernels the period depends on the kernel configuration. 79 | # 80 | # A reasonable value for this option is 60 seconds. 81 | tcp-keepalive 0 82 | 83 | # Specify the server verbosity level. 84 | # This can be one of: 85 | # debug (a lot of information, useful for development/testing) 86 | # verbose (many rarely useful info, but not a mess like the debug level) 87 | # notice (moderately verbose, what you want in production probably) 88 | # warning (only very important / critical messages are logged) 89 | loglevel verbose 90 | 91 | # Specify the log file name. Also the empty string can be used to force 92 | # Redis to log on the standard output. Note that if you use standard 93 | # output for logging but daemonize, logs will be sent to /dev/null 94 | logfile "/var/log/redis/redis-slave-server.log" 95 | 96 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 97 | # and optionally update the other syslog parameters to suit your needs. 98 | # syslog-enabled no 99 | 100 | # Specify the syslog identity. 
101 | # syslog-ident redis 102 | 103 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 104 | # syslog-facility local0 105 | 106 | # Set the number of databases. The default database is DB 0, you can select 107 | # a different one on a per-connection basis using SELECT <dbid> where 108 | # dbid is a number between 0 and 'databases'-1 109 | databases 16 110 | 111 | ################################ SNAPSHOTTING ################################ 112 | # 113 | # Save the DB on disk: 114 | # 115 | # save <seconds> <changes> 116 | # 117 | # Will save the DB if both the given number of seconds and the given 118 | # number of write operations against the DB occurred. 119 | # 120 | # In the example below the behaviour will be to save: 121 | # after 900 sec (15 min) if at least 1 key changed 122 | # after 300 sec (5 min) if at least 10 keys changed 123 | # after 60 sec if at least 10000 keys changed 124 | # 125 | # Note: you can disable saving entirely by commenting out all the "save" lines. 126 | # 127 | # It is also possible to remove all the previously configured save 128 | # points by adding a save directive with a single empty string argument 129 | # like in the following example: 130 | # 131 | # save "" 132 | 133 | save 900 1 134 | save 300 10 135 | save 60 10000 136 | 137 | # By default Redis will stop accepting writes if RDB snapshots are enabled 138 | # (at least one save point) and the latest background save failed. 139 | # This will make the user aware (in a hard way) that data is not persisting 140 | # on disk properly, otherwise chances are that no one will notice and some 141 | # disaster will happen. 142 | # 143 | # If the background saving process starts working again Redis will 144 | # automatically allow writes again. 145 | # 146 | # However if you have set up proper monitoring of the Redis server 147 | # and persistence, you may want to disable this feature so that Redis will 148 | # continue to work as usual even if there are problems with disk, 149 | # permissions, and so forth. 150 | stop-writes-on-bgsave-error yes 151 | 152 | # Compress string objects using LZF when dumping .rdb databases? 153 | # By default that's set to 'yes' as it's almost always a win. 154 | # If you want to save some CPU in the saving child set it to 'no' but 155 | # the dataset will likely be bigger if you have compressible values or keys. 156 | rdbcompression yes 157 | 158 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 159 | # This makes the format more resistant to corruption but there is a performance 160 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it 161 | # for maximum performance. 162 | # 163 | # RDB files created with checksum disabled have a checksum of zero that will 164 | # tell the loading code to skip the check. 165 | rdbchecksum yes 166 | 167 | # The filename where to dump the DB 168 | dbfilename "dump.rdb" 169 | 170 | # The working directory. 171 | # 172 | # The DB will be written inside this directory, with the filename specified 173 | # above using the 'dbfilename' configuration directive. 174 | # 175 | # The Append Only File will also be created inside this directory. 176 | # 177 | # Note that you must specify a directory here, not a file name. 178 | dir "/var/lib/redis-slave" 179 | 180 | ################################# REPLICATION ################################# 181 | 182 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of 183 | # another Redis server. 
Note that the configuration is local to the slave 184 | # so for example it is possible to configure the slave to save the DB with a 185 | # different interval, or to listen to another port, and so on. 186 | # 187 | 188 | # If the master is password protected (using the "requirepass" configuration 189 | # directive below) it is possible to tell the slave to authenticate before 190 | # starting the replication synchronization process, otherwise the master will 191 | # refuse the slave request. 192 | # 193 | # masterauth <master-password> 194 | 195 | # When a slave loses its connection with the master, or when the replication 196 | # is still in progress, the slave can act in two different ways: 197 | # 198 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will 199 | # still reply to client requests, possibly with out of date data, or the 200 | # data set may just be empty if this is the first synchronization. 201 | # 202 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with 203 | # an error "SYNC with master in progress" to all kinds of commands 204 | # except INFO and SLAVEOF. 205 | # 206 | slave-serve-stale-data yes 207 | slaveof 127.0.0.1 6379 208 | # You can configure a slave instance to accept writes or not. Writing against 209 | # a slave instance may be useful to store some ephemeral data (because data 210 | # written on a slave will be easily deleted after resync with the master) but 211 | # may also cause problems if clients are writing to it because of a 212 | # misconfiguration. 213 | # 214 | # Since Redis 2.6 by default slaves are read-only. 215 | # 216 | # Note: read only slaves are not designed to be exposed to untrusted clients 217 | # on the internet. It's just a protection layer against misuse of the instance. 218 | # Still a read only slave exports by default all the administrative commands 219 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve 220 | # security of read only slaves using 'rename-command' to shadow all the 221 | # administrative / dangerous commands. 222 | slave-read-only yes 223 | 224 | # Slaves send PINGs to the server at a predefined interval. It's possible to change 225 | # this interval with the repl_ping_slave_period option. The default value is 10 226 | # seconds. 227 | # 228 | # repl-ping-slave-period 10 229 | 230 | # The following option sets the replication timeout for: 231 | # 232 | # 1) Bulk transfer I/O during SYNC, from the point of view of the slave. 233 | # 2) Master timeout from the point of view of slaves (data, pings). 234 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). 235 | # 236 | # It is important to make sure that this value is greater than the value 237 | # specified for repl-ping-slave-period otherwise a timeout will be detected 238 | # every time there is low traffic between the master and the slave. 239 | # 240 | # repl-timeout 60 241 | 242 | # Disable TCP_NODELAY on the slave socket after SYNC? 243 | # 244 | # If you select "yes" Redis will use a smaller number of TCP packets and 245 | # less bandwidth to send data to slaves. But this can add a delay for 246 | # the data to appear on the slave side, up to 40 milliseconds with 247 | # Linux kernels using a default configuration. 248 | # 249 | # If you select "no" the delay for data to appear on the slave side will 250 | # be reduced but more bandwidth will be used for replication. 
251 | # 252 | # By default we optimize for low latency, but in very high traffic conditions 253 | # or when the master and slaves are many hops away, turning this to "yes" may 254 | # be a good idea. 255 | repl-disable-tcp-nodelay no 256 | 257 | # Set the replication backlog size. The backlog is a buffer that accumulates 258 | # slave data when slaves are disconnected for some time, so that when a slave 259 | # wants to reconnect again, often a full resync is not needed, but a partial 260 | # resync is enough, just passing the portion of data the slave missed while 261 | # disconnected. 262 | # 263 | # The bigger the replication backlog, the longer the time the slave can be 264 | # disconnected and later be able to perform a partial resynchronization. 265 | # 266 | # The backlog is only allocated once there is at least a slave connected. 267 | # 268 | # repl-backlog-size 1mb 269 | 270 | # After a master no longer has connected slaves for some time, the backlog 271 | # will be freed. The following option configures the amount of seconds that 272 | # need to elapse, starting from the time the last slave disconnected, for 273 | # the backlog buffer to be freed. 274 | # 275 | # A value of 0 means to never release the backlog. 276 | # 277 | # repl-backlog-ttl 3600 278 | 279 | # The slave priority is an integer number published by Redis in the INFO output. 280 | # It is used by Redis Sentinel in order to select a slave to promote into a 281 | # master if the master is no longer working correctly. 282 | # 283 | # A slave with a low priority number is considered better for promotion, so 284 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will 285 | # pick the one with priority 10, that is the lowest. 286 | # 287 | # However a special priority of 0 marks the slave as not able to perform the 288 | # role of master, so a slave with priority of 0 will never be selected by 289 | # Redis Sentinel for promotion. 290 | # 291 | # By default the priority is 100. 292 | slave-priority 100 293 | 294 | # It is possible for a master to stop accepting writes if there are fewer than 295 | # N slaves connected, with a lag less than or equal to M seconds. 296 | # 297 | # The N slaves need to be in "online" state. 298 | # 299 | # The lag in seconds, that must be <= the specified value, is calculated from 300 | # the last ping received from the slave, that is usually sent every second. 301 | # 302 | # This option does not GUARANTEE that N replicas will accept the write, but 303 | # will limit the window of exposure for lost writes in case not enough slaves 304 | # are available, to the specified number of seconds. 305 | # 306 | # For example to require at least 3 slaves with a lag <= 10 seconds use: 307 | # 308 | # min-slaves-to-write 3 309 | # min-slaves-max-lag 10 310 | # 311 | # Setting one or the other to 0 disables the feature. 312 | # 313 | # By default min-slaves-to-write is set to 0 (feature disabled) and 314 | # min-slaves-max-lag is set to 10. 315 | 316 | ################################## SECURITY ################################### 317 | 318 | # Require clients to issue AUTH <PASSWORD> before processing any other 319 | # commands. This might be useful in environments in which you do not trust 320 | # others with access to the host running redis-server. 321 | # 322 | # This should stay commented out for backward compatibility and because most 323 | # people do not need auth (e.g. they run their own servers). 
324 | # 325 | # Warning: since Redis is pretty fast an outside user can try up to 326 | # 150k passwords per second against a good box. This means that you should 327 | # use a very strong password otherwise it will be very easy to break. 328 | # 329 | # requirepass foobared 330 | 331 | # Command renaming. 332 | # 333 | # It is possible to change the name of dangerous commands in a shared 334 | # environment. For instance the CONFIG command may be renamed into something 335 | # hard to guess so that it will still be available for internal-use tools 336 | # but not available for general clients. 337 | # 338 | # Example: 339 | # 340 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 341 | # 342 | # It is also possible to completely kill a command by renaming it into 343 | # an empty string: 344 | # 345 | # rename-command CONFIG "" 346 | # 347 | # Please note that changing the name of commands that are logged into the 348 | # AOF file or transmitted to slaves may cause problems. 349 | 350 | ################################### LIMITS #################################### 351 | 352 | # Set the max number of connected clients at the same time. By default 353 | # this limit is set to 10000 clients, however if the Redis server is not 354 | # able to configure the process file limit to allow for the specified limit 355 | # the max number of allowed clients is set to the current file limit 356 | # minus 32 (as Redis reserves a few file descriptors for internal uses). 357 | # 358 | # Once the limit is reached Redis will close all the new connections sending 359 | # an error 'max number of clients reached'. 360 | # 361 | # maxclients 10000 362 | 363 | # Don't use more memory than the specified amount of bytes. 364 | # When the memory limit is reached Redis will try to remove keys 365 | # according to the eviction policy selected (see maxmemory-policy). 366 | # 367 | # If Redis can't remove keys according to the policy, or if the policy is 368 | # set to 'noeviction', Redis will start to reply with errors to commands 369 | # that would use more memory, like SET, LPUSH, and so on, and will continue 370 | # to reply to read-only commands like GET. 371 | # 372 | # This option is usually useful when using Redis as an LRU cache, or to set 373 | # a hard memory limit for an instance (using the 'noeviction' policy). 374 | # 375 | # WARNING: If you have slaves attached to an instance with maxmemory on, 376 | # the size of the output buffers needed to feed the slaves is subtracted 377 | # from the used memory count, so that network problems / resyncs will 378 | # not trigger a loop where keys are evicted, and in turn the output 379 | # buffer of slaves is full with DELs of keys evicted triggering the deletion 380 | # of more keys, and so forth until the database is completely emptied. 381 | # 382 | # In short... if you have slaves attached it is suggested that you set a lower 383 | # limit for maxmemory so that there is some free RAM on the system for slave 384 | # output buffers (but this is not needed if the policy is 'noeviction'). 385 | # 386 | # maxmemory <bytes> 387 | 388 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 389 | # is reached. 
You can select among the following behaviors:
390 | #
391 | # volatile-lru -> remove the key with an expire set using an LRU algorithm
392 | # allkeys-lru -> remove any key according to the LRU algorithm
393 | # volatile-random -> remove a random key with an expire set
394 | # allkeys-random -> remove a random key, any key
395 | # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
396 | # noeviction -> don't evict at all, just return an error on write operations
397 | #
398 | # Note: with any of the above policies, Redis will return an error on write
399 | # operations, when there are no suitable keys for eviction.
400 | #
401 | # At the date of writing, these commands are: set setnx setex append
402 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
403 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
404 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
405 | # getset mset msetnx exec sort
406 | #
407 | # The default is:
408 | #
409 | # maxmemory-policy volatile-lru
410 |
411 | # LRU and minimal TTL algorithms are not precise algorithms but approximated
412 | # algorithms (in order to save memory), so you can select as well the sample
413 | # size to check. For instance, by default Redis will check three keys and
414 | # pick the one that was used least recently; you can change the sample size
415 | # using the following configuration directive.
416 | #
417 | # maxmemory-samples 3
418 |
419 | ############################## APPEND ONLY MODE ###############################
420 |
421 | # By default Redis asynchronously dumps the dataset on disk. This mode is
422 | # good enough in many applications, but an issue with the Redis process or
423 | # a power outage may result in a few minutes of lost writes (depending on
424 | # the configured save points).
425 | #
426 | # The Append Only File is an alternative persistence mode that provides
427 | # much better durability. For instance, using the default data fsync policy
428 | # (see later in the config file) Redis can lose just one second of writes in a
429 | # dramatic event like a server power outage, or a single write if something
430 | # goes wrong with the Redis process itself while the operating system is
431 | # still running correctly.
432 | #
433 | # AOF and RDB persistence can be enabled at the same time without problems.
434 | # If the AOF is enabled on startup Redis will load the AOF, that is the file
435 | # with the best durability guarantees.
436 | #
437 | # Please check http://redis.io/topics/persistence for more information.
438 |
439 | appendonly no
440 |
441 | # The name of the append only file (default: "appendonly.aof")
442 |
443 | appendfilename "appendonly.aof"
444 |
445 | # The fsync() call tells the Operating System to actually write data on disk
446 | # instead of waiting for more data in the output buffer. Some OS will really flush
447 | # data on disk, some other OS will just try to do it ASAP.
448 | #
449 | # Redis supports three different modes:
450 | #
451 | # no: don't fsync, just let the OS flush the data when it wants. Faster.
452 | # always: fsync after every write to the append only log. Slow, safest.
453 | # everysec: fsync only one time every second. Compromise.
454 | #
455 | # The default is "everysec", as that's usually the right compromise between
456 | # speed and data safety.
It's up to you to understand if you can relax this to
457 | # "no", which will let the operating system flush the output buffer when
458 | # it wants, for better performance (but if you can live with the idea of
459 | # some data loss consider the default persistence mode that's snapshotting),
460 | # or on the contrary, use "always" that's very slow but a bit safer than
461 | # everysec.
462 | #
463 | # For more details please check the following article:
464 | # http://antirez.com/post/redis-persistence-demystified.html
465 | #
466 | # If unsure, use "everysec".
467 |
468 | # appendfsync always
469 | appendfsync everysec
470 | # appendfsync no
471 |
472 | # When the AOF fsync policy is set to always or everysec, and a background
473 | # saving process (a background save or AOF log background rewriting) is
474 | # performing a lot of I/O against the disk, in some Linux configurations
475 | # Redis may block too long on the fsync() call. Note that there is no fix for
476 | # this currently, as even performing fsync in a different thread will block
477 | # our synchronous write(2) call.
478 | #
479 | # In order to mitigate this problem it's possible to use the following option
480 | # that will prevent fsync() from being called in the main process while a
481 | # BGSAVE or BGREWRITEAOF is in progress.
482 | #
483 | # This means that while another child is saving, the durability of Redis is
484 | # the same as "appendfsync none". In practical terms, this means that it is
485 | # possible to lose up to 30 seconds of log in the worst scenario (with the
486 | # default Linux settings).
487 | #
488 | # If you have latency problems turn this to "yes". Otherwise leave it as
489 | # "no", which is the safest pick from the point of view of durability.
490 |
491 | no-appendfsync-on-rewrite no
492 |
493 | # Automatic rewrite of the append only file.
494 | # Redis is able to automatically rewrite the log file by implicitly calling
495 | # BGREWRITEAOF when the AOF log size grows by the specified percentage.
496 | #
497 | # This is how it works: Redis remembers the size of the AOF file after the
498 | # latest rewrite (if no rewrite has happened since the restart, the size of
499 | # the AOF at startup is used).
500 | #
501 | # This base size is compared to the current size. If the current size has
502 | # grown by more than the specified percentage, the rewrite is triggered. Also
503 | # you need to specify a minimal size for the AOF file to be rewritten, this
504 | # is useful to avoid rewriting the AOF file even if the percentage increase
505 | # is reached but it is still pretty small.
506 | #
507 | # Specify a percentage of zero in order to disable the automatic AOF
508 | # rewrite feature.
509 |
510 | auto-aof-rewrite-percentage 100
511 | auto-aof-rewrite-min-size 64mb
512 |
513 | ################################ LUA SCRIPTING ###############################
514 |
515 | # Max execution time of a Lua script in milliseconds.
516 | #
517 | # If the maximum execution time is reached Redis will log that a script is
518 | # still in execution after the maximum allowed time and will start to
519 | # reply to queries with an error.
520 | #
521 | # When a long running script exceeds the maximum execution time only the
522 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
523 | # used to stop a script that has not yet called write commands.
The second
524 | # is the only way to shut down the server in case a write command was
525 | # already issued by the script but the user doesn't want to wait for the natural
526 | # termination of the script.
527 | #
528 | # Set it to 0 or a negative value for unlimited execution without warnings.
529 | lua-time-limit 5000
530 |
531 | ################################## SLOW LOG ###################################
532 |
533 | # The Redis Slow Log is a system to log queries that exceeded a specified
534 | # execution time. The execution time does not include the I/O operations
535 | # like talking with the client, sending the reply and so forth,
536 | # but just the time needed to actually execute the command (this is the only
537 | # stage of command execution where the thread is blocked and cannot serve
538 | # other requests in the meantime).
539 | #
540 | # You can configure the slow log with two parameters: one tells Redis
541 | # the execution time, in microseconds, that a command must exceed in order
542 | # to get logged, and the other parameter is the length of the
543 | # slow log. When a new command is logged the oldest one is removed from the
544 | # queue of logged commands.
545 |
546 | # The following time is expressed in microseconds, so 1000000 is equivalent
547 | # to one second. Note that a negative number disables the slow log, while
548 | # a value of zero forces the logging of every command.
549 | slowlog-log-slower-than 10000
550 |
551 | # There is no limit to this length. Just be aware that it will consume memory.
552 | # You can reclaim memory used by the slow log with SLOWLOG RESET.
553 | slowlog-max-len 128
554 |
555 | ############################# Event notification ##############################
556 |
557 | # Redis can notify Pub/Sub clients about events happening in the key space.
558 | # This feature is documented at http://redis.io/topics/keyspace-events
559 | #
560 | # For instance if keyspace events notification is enabled, and a client
561 | # performs a DEL operation on key "foo" stored in the Database 0, two
562 | # messages will be published via Pub/Sub:
563 | #
564 | # PUBLISH __keyspace@0__:foo del
565 | # PUBLISH __keyevent@0__:del foo
566 | #
567 | # It is possible to select the events that Redis will notify among a set
568 | # of classes. Every class is identified by a single character:
569 | #
570 | # K Keyspace events, published with __keyspace@<db>__ prefix.
571 | # E Keyevent events, published with __keyevent@<db>__ prefix.
572 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
573 | # $ String commands
574 | # l List commands
575 | # s Set commands
576 | # h Hash commands
577 | # z Sorted set commands
578 | # x Expired events (events generated every time a key expires)
579 | # e Evicted events (events generated when a key is evicted for maxmemory)
580 | # A Alias for g$lshzxe, so that the "AKE" string means all the events.
581 | #
582 | # The "notify-keyspace-events" directive takes as its argument a string that
583 | # is composed of zero or more characters. The empty string means that
584 | # notifications are disabled entirely.
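#
# For instance, to enable every event class with both prefixes ("A" plus
# "K" and "E"), one could use the illustrative value below; note that this
# test config ultimately disables notifications further down:
#
# notify-keyspace-events KEA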
585 | #
586 | # Example: to enable list and generic events, from the point of view of the
587 | # event name, use:
588 | #
589 | # notify-keyspace-events Elg
590 | #
591 | # Example 2: to get the stream of the expired keys, subscribing to channel
592 | # name __keyevent@0__:expired, use:
593 | #
594 | # notify-keyspace-events Ex
595 | #
596 | # By default all notifications are disabled because most users don't need
597 | # this feature and the feature has some overhead. Note that if you don't
598 | # specify at least one of K or E, no events will be delivered.
599 | notify-keyspace-events ""
600 |
601 | ############################### ADVANCED CONFIG ###############################
602 |
603 | # Hashes are encoded using a memory efficient data structure when they have a
604 | # small number of entries, and the biggest entry does not exceed a given
605 | # threshold. These thresholds can be configured using the following directives.
606 | hash-max-ziplist-entries 512
607 | hash-max-ziplist-value 64
608 |
609 | # Similarly to hashes, small lists are also encoded in a special way in order
610 | # to save a lot of space. The special representation is only used when
611 | # you are under the following limits:
612 | list-max-ziplist-entries 512
613 | list-max-ziplist-value 64
614 |
615 | # Sets have a special encoding in just one case: when a set is composed
616 | # of just strings that happen to be integers in radix 10 in the range
617 | # of 64 bit signed integers.
618 | # The following configuration setting sets the limit on the size of the
619 | # set in order to use this special memory saving encoding.
620 | set-max-intset-entries 512
621 |
622 | # Similarly to hashes and lists, sorted sets are also specially encoded in
623 | # order to save a lot of space. This encoding is only used when the length and
624 | # elements of a sorted set are below the following limits:
625 | zset-max-ziplist-entries 128
626 | zset-max-ziplist-value 64
627 |
628 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
629 | # order to help rehashing the main Redis hash table (the one mapping top-level
630 | # keys to values). The hash table implementation Redis uses (see dict.c)
631 | # performs a lazy rehashing: the more operations you run against a hash table
632 | # that is rehashing, the more rehashing "steps" are performed, so if the
633 | # server is idle the rehashing is never complete and some more memory is used
634 | # by the hash table.
635 | #
636 | # The default is to use this millisecond 10 times every second in order to
637 | # actively rehash the main dictionaries, freeing memory when possible.
638 | #
639 | # If unsure:
640 | # use "activerehashing no" if you have hard latency requirements and it is
641 | # not a good thing in your environment that Redis can reply from time to time
642 | # to queries with a 2 millisecond delay.
643 | #
644 | # use "activerehashing yes" if you don't have such hard requirements but
645 | # want to free memory asap when possible.
646 | activerehashing yes
647 |
648 | # The client output buffer limits can be used to force disconnection of clients
649 | # that are not reading data from the server fast enough for some reason (a
650 | # common reason is that a Pub/Sub client can't consume messages as fast as the
651 | # publisher can produce them).
652 | #
653 | # The limit can be set differently for the three different classes of clients:
654 | #
655 | # normal -> normal clients
656 | # slave -> slave clients and MONITOR clients
657 | # pubsub -> clients subscribed to at least one pubsub channel or pattern
658 | #
659 | # The syntax of every client-output-buffer-limit directive is the following:
660 | #
661 | # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
662 | #
663 | # A client is immediately disconnected once the hard limit is reached, or if
664 | # the soft limit is reached and remains reached for the specified number of
665 | # seconds (continuously).
666 | # So for instance if the hard limit is 32 megabytes and the soft limit is
667 | # 16 megabytes / 10 seconds, the client will get disconnected immediately
668 | # if the size of the output buffers reaches 32 megabytes, but will also get
669 | # disconnected if the client reaches 16 megabytes and continuously exceeds
670 | # the limit for 10 seconds.
671 | #
672 | # By default normal clients are not limited because they don't receive data
673 | # without asking (in a push way), but just after a request, so only
674 | # asynchronous clients may create a scenario where data is requested faster
675 | # than they can read it.
676 | #
677 | # Instead there is a default limit for pubsub and slave clients, since
678 | # subscribers and slaves receive data in a push fashion.
679 | #
680 | # Both the hard and the soft limit can be disabled by setting them to zero.
681 | client-output-buffer-limit normal 0 0 0
682 | client-output-buffer-limit slave 256mb 64mb 60
683 | client-output-buffer-limit pubsub 32mb 8mb 60
684 |
685 | # Redis calls an internal function to perform many background tasks, like
686 | # closing connections of clients in timeout, purging expired keys that are
687 | # never requested, and so forth.
688 | #
689 | # Not all tasks are performed with the same frequency, but Redis checks for
690 | # tasks to perform according to the specified "hz" value.
691 | #
692 | # By default "hz" is set to 10. Raising the value will use more CPU when
693 | # Redis is idle, but at the same time will make Redis more responsive when
694 | # there are many keys expiring at the same time, and timeouts may be
695 | # handled with more precision.
696 | #
697 | # The range is between 1 and 500, however a value over 100 is usually not
698 | # a good idea. Most users should use the default of 10 and raise this up to
699 | # 100 only in environments where very low latency is required.
700 | hz 10
701 |
702 | # When a child rewrites the AOF file, if the following option is enabled
703 | # the file will be fsync-ed every 32 MB of data generated. This is useful
704 | # in order to commit the file to the disk more incrementally and avoid
705 | # big latency spikes.
706 | aof-rewrite-incremental-fsync yes
707 |
708 |
-------------------------------------------------------------------------------- /test-config/redis.conf: --------------------------------------------------------------------------------
1 | # Redis configuration file example
2 |
3 | # Note on units: when memory size is needed, it is possible to specify
4 | # it in the usual form of 1k 5GB 4M and so forth:
5 | #
6 | # 1k => 1000 bytes
7 | # 1kb => 1024 bytes
8 | # 1m => 1000000 bytes
9 | # 1mb => 1024*1024 bytes
10 | # 1g => 1000000000 bytes
11 | # 1gb => 1024*1024*1024 bytes
12 | #
13 | # units are case insensitive so 1GB 1Gb 1gB are all the same.
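#
# For example, per the table above, the following two illustrative
# directives would both ask for the same one-gigabyte memory cap:
#
# maxmemory 1gb
# maxmemory 1073741824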
14 |
15 | ################################## INCLUDES ###################################
16 |
17 | # Include one or more other config files here. This is useful if you
18 | # have a standard template that goes to all Redis servers but also need
19 | # to customize a few per-server settings. Include files can include
20 | # other files, so use this wisely.
21 | #
22 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE"
23 | # from admin or Redis Sentinel. Since Redis always uses the last processed
24 | # line as the value of a configuration directive, you'd better put includes
25 | # at the beginning of this file to avoid overwriting config changes at runtime.
26 | #
27 | # If instead you are interested in using includes to override configuration
28 | # options, it is better to use include as the last line.
29 | #
30 | # include /path/to/local.conf
31 | # include /path/to/other.conf
32 |
33 | ################################ GENERAL #####################################
34 |
35 | # By default Redis does not run as a daemon. Use 'yes' if you need it.
36 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
37 | daemonize yes
38 |
39 | # When running daemonized, Redis writes a pid file in /var/run/redis.pid by
40 | # default. You can specify a custom pid file location here.
41 | pidfile "/var/run/redis/redis-server.pid"
42 |
43 | # Accept connections on the specified port, default is 6379.
44 | # If port 0 is specified Redis will not listen on a TCP socket.
45 | port 6379
46 |
47 | # By default Redis listens for connections from all the network interfaces
48 | # available on the server. It is possible to listen to just one or multiple
49 | # interfaces using the "bind" configuration directive, followed by one or
50 | # more IP addresses.
51 | #
52 | # Examples:
53 | #
54 | # bind 192.168.1.100 10.0.0.1
55 | bind 127.0.0.1
56 |
57 | # Specify the path for the unix socket that will be used to listen for
58 | # incoming connections. There is no default, so Redis will not listen
59 | # on a unix socket when not specified.
60 | #
61 | # unixsocket /var/run/redis/redis.sock
62 | # unixsocketperm 755
63 |
64 | # Close the connection after a client is idle for N seconds (0 to disable)
65 | timeout 0
66 |
67 | # TCP keepalive.
68 | #
69 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
70 | # of communication. This is useful for two reasons:
71 | #
72 | # 1) Detect dead peers.
73 | # 2) Keep the connection alive from the point of view of network
74 | # equipment in the middle.
75 | #
76 | # On Linux, the specified value (in seconds) is the period used to send ACKs.
77 | # Note that to close the connection double the time is needed.
78 | # On other kernels the period depends on the kernel configuration.
79 | #
80 | # A reasonable value for this option is 60 seconds.
81 | tcp-keepalive 0
82 |
83 | # Specify the server verbosity level.
84 | # This can be one of:
85 | # debug (a lot of information, useful for development/testing)
86 | # verbose (lots of rarely useful info, but not a mess like the debug level)
87 | # notice (moderately verbose, what you want in production probably)
88 | # warning (only very important / critical messages are logged)
89 | loglevel verbose
90 |
91 | # Specify the log file name. Also the empty string can be used to force
92 | # Redis to log on the standard output.
Note that if you use standard
93 | # output for logging but daemonize, logs will be sent to /dev/null
94 | logfile "/var/log/redis/redis-server.log"
95 |
96 | # To enable logging to the system logger, just set 'syslog-enabled' to yes,
97 | # and optionally update the other syslog parameters to suit your needs.
98 | # syslog-enabled no
99 |
100 | # Specify the syslog identity.
101 | # syslog-ident redis
102 |
103 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
104 | # syslog-facility local0
105 |
106 | # Set the number of databases. The default database is DB 0, you can select
107 | # a different one on a per-connection basis using SELECT <dbid> where
108 | # dbid is a number between 0 and 'databases'-1
109 | databases 16
110 |
111 | ################################ SNAPSHOTTING ################################
112 | #
113 | # Save the DB on disk:
114 | #
115 | # save <seconds> <changes>
116 | #
117 | # Will save the DB if both the given number of seconds and the given
118 | # number of write operations against the DB occurred.
119 | #
120 | # In the example below the behaviour will be to save:
121 | # after 900 sec (15 min) if at least 1 key changed
122 | # after 300 sec (5 min) if at least 10 keys changed
123 | # after 60 sec if at least 10000 keys changed
124 | #
125 | # Note: you can disable saving entirely by commenting out all the "save" lines.
126 | #
127 | # It is also possible to remove all the previously configured save
128 | # points by adding a save directive with a single empty string argument
129 | # like in the following example:
130 | #
131 | # save ""
132 |
133 | save 900 1
134 | save 300 10
135 | save 60 10000
136 |
137 | # By default Redis will stop accepting writes if RDB snapshots are enabled
138 | # (at least one save point) and the latest background save failed.
139 | # This will make the user aware (in a hard way) that data is not persisting
140 | # on disk properly, otherwise chances are that no one will notice and some
141 | # disaster will happen.
142 | #
143 | # Once the background saving process starts working again, Redis will
144 | # automatically allow writes again.
145 | #
146 | # However if you have set up proper monitoring of the Redis server
147 | # and persistence, you may want to disable this feature so that Redis will
148 | # continue to work as usual even if there are problems with disk,
149 | # permissions, and so forth.
150 | stop-writes-on-bgsave-error yes
151 |
152 | # Compress string objects using LZF when dumping .rdb databases?
153 | # By default that's set to 'yes' as it's almost always a win.
154 | # If you want to save some CPU in the saving child set it to 'no' but
155 | # the dataset will likely be bigger if you have compressible values or keys.
156 | rdbcompression yes
157 |
158 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
159 | # This makes the format more resistant to corruption but there is a performance
160 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it
161 | # for maximum performance.
162 | #
163 | # RDB files created with checksum disabled have a checksum of zero that will
164 | # tell the loading code to skip the check.
165 | rdbchecksum yes
166 |
167 | # The filename where to dump the DB
168 | dbfilename "dump.rdb"
169 |
170 | # The working directory.
171 | #
172 | # The DB will be written inside this directory, with the filename specified
173 | # above using the 'dbfilename' configuration directive.
174 | #
175 | # The Append Only File will also be created inside this directory.
176 | #
177 | # Note that you must specify a directory here, not a file name.
178 | dir "/var/lib/redis"
179 |
180 | ################################# REPLICATION #################################
181 |
182 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of
183 | # another Redis server. Note that the configuration is local to the slave
184 | # so for example it is possible to configure the slave to save the DB with a
185 | # different interval, or to listen to another port, and so on.
186 | #
187 | # slaveof <masterip> <masterport>
188 |
189 | # If the master is password protected (using the "requirepass" configuration
190 | # directive below) it is possible to tell the slave to authenticate before
191 | # starting the replication synchronization process, otherwise the master will
192 | # refuse the slave request.
193 | #
194 | # masterauth <master-password>
195 |
196 | # When a slave loses its connection with the master, or when the replication
197 | # is still in progress, the slave can act in two different ways:
198 | #
199 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
200 | # still reply to client requests, possibly with out of date data, or the
201 | # data set may just be empty if this is the first synchronization.
202 | #
203 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with
204 | # an error "SYNC with master in progress" to all kinds of commands
205 | # except INFO and SLAVEOF.
206 | #
207 | slave-serve-stale-data yes
208 |
209 | # You can configure a slave instance to accept writes or not. Writing against
210 | # a slave instance may be useful to store some ephemeral data (because data
211 | # written on a slave will be easily deleted after resync with the master) but
212 | # may also cause problems if clients are writing to it because of a
213 | # misconfiguration.
214 | #
215 | # Since Redis 2.6 by default slaves are read-only.
216 | #
217 | # Note: read only slaves are not designed to be exposed to untrusted clients
218 | # on the internet. It's just a protection layer against misuse of the instance.
219 | # Still a read only slave exports by default all the administrative commands
220 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
221 | # security of read only slaves using 'rename-command' to shadow all the
222 | # administrative / dangerous commands.
223 | slave-read-only yes
224 |
225 | # Slaves send PINGs to the server at a predefined interval. It's possible to change
226 | # this interval with the repl_ping_slave_period option. The default value is 10
227 | # seconds.
228 | #
229 | # repl-ping-slave-period 10
230 |
231 | # The following option sets the replication timeout for:
232 | #
233 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave.
234 | # 2) Master timeout from the point of view of slaves (data, pings).
235 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
236 | #
237 | # It is important to make sure that this value is greater than the value
238 | # specified for repl-ping-slave-period otherwise a timeout will be detected
239 | # every time there is low traffic between the master and the slave.
240 | #
241 | # repl-timeout 60
242 |
243 | # Disable TCP_NODELAY on the slave socket after SYNC?
244 | #
245 | # If you select "yes" Redis will use a smaller number of TCP packets and
246 | # less bandwidth to send data to slaves.
But this can add a delay for
247 | # the data to appear on the slave side, up to 40 milliseconds with
248 | # Linux kernels using a default configuration.
249 | #
250 | # If you select "no" the delay for data to appear on the slave side will
251 | # be reduced but more bandwidth will be used for replication.
252 | #
253 | # By default we optimize for low latency, but in very high traffic conditions
254 | # or when the master and slaves are many hops away, turning this to "yes" may
255 | # be a good idea.
256 | repl-disable-tcp-nodelay no
257 |
258 | # Set the replication backlog size. The backlog is a buffer that accumulates
259 | # slave data when slaves are disconnected for some time, so that when a slave
260 | # wants to reconnect again, often a full resync is not needed, but a partial
261 | # resync is enough, just passing the portion of data the slave missed while
262 | # disconnected.
263 | #
264 | # The bigger the replication backlog, the longer the time the slave can be
265 | # disconnected and later be able to perform a partial resynchronization.
266 | #
267 | # The backlog is only allocated once there is at least a slave connected.
268 | #
269 | # repl-backlog-size 1mb
270 |
271 | # After a master no longer has connected slaves for some time, the backlog
272 | # will be freed. The following option configures the amount of seconds that
273 | # need to elapse, starting from the time the last slave disconnected, for
274 | # the backlog buffer to be freed.
275 | #
276 | # A value of 0 means to never release the backlog.
277 | #
278 | # repl-backlog-ttl 3600
279 |
280 | # The slave priority is an integer number published by Redis in the INFO output.
281 | # It is used by Redis Sentinel in order to select a slave to promote into a
282 | # master if the master is no longer working correctly.
283 | #
284 | # A slave with a low priority number is considered better for promotion, so
285 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will
286 | # pick the one with priority 10, that is the lowest.
287 | #
288 | # However a special priority of 0 marks the slave as not able to perform the
289 | # role of master, so a slave with priority of 0 will never be selected by
290 | # Redis Sentinel for promotion.
291 | #
292 | # By default the priority is 100.
293 | slave-priority 100
294 |
295 | # It is possible for a master to stop accepting writes if there are less than
296 | # N slaves connected, with a lag less than or equal to M seconds.
297 | #
298 | # The N slaves need to be in "online" state.
299 | #
300 | # The lag in seconds, which must be <= the specified value, is calculated from
301 | # the last ping received from the slave, that is usually sent every second.
302 | #
303 | # This option does not GUARANTEE that N replicas will accept the write, but
304 | # will limit the window of exposure for lost writes in case not enough slaves
305 | # are available, to the specified number of seconds.
306 | #
307 | # For example, to require at least 3 slaves with a lag <= 10 seconds use:
308 | #
309 | # min-slaves-to-write 3
310 | # min-slaves-max-lag 10
311 | #
312 | # Setting one or the other to 0 disables the feature.
313 | #
314 | # By default min-slaves-to-write is set to 0 (feature disabled) and
315 | # min-slaves-max-lag is set to 10.
316 |
317 | ################################## SECURITY ###################################
318 |
319 | # Require clients to issue AUTH before processing any other
320 | # commands.
This might be useful in environments in which you do not trust
321 | # others with access to the host running redis-server.
322 | #
323 | # This should stay commented out for backward compatibility and because most
324 | # people do not need auth (e.g. they run their own servers).
325 | #
326 | # Warning: since Redis is pretty fast an outside user can try up to
327 | # 150k passwords per second against a good box. This means that you should
328 | # use a very strong password, otherwise it will be very easy to break.
329 | #
330 | # requirepass foobared
331 |
332 | # Command renaming.
333 | #
334 | # It is possible to change the name of dangerous commands in a shared
335 | # environment. For instance the CONFIG command may be renamed into something
336 | # hard to guess so that it will still be available for internal-use tools
337 | # but not available for general clients.
338 | #
339 | # Example:
340 | #
341 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
342 | #
343 | # It is also possible to completely kill a command by renaming it into
344 | # an empty string:
345 | #
346 | # rename-command CONFIG ""
347 | #
348 | # Please note that changing the name of commands that are logged into the
349 | # AOF file or transmitted to slaves may cause problems.
350 |
351 | ################################### LIMITS ####################################
352 |
353 | # Set the max number of connected clients at the same time. By default
354 | # this limit is set to 10000 clients, however if the Redis server is not
355 | # able to configure the process file limit to allow for the specified limit
356 | # the max number of allowed clients is set to the current file limit
357 | # minus 32 (as Redis reserves a few file descriptors for internal uses).
358 | #
359 | # Once the limit is reached Redis will close all the new connections, sending
360 | # an error 'max number of clients reached'.
361 | #
362 | # maxclients 10000
363 |
364 | # Don't use more memory than the specified amount of bytes.
365 | # When the memory limit is reached Redis will try to remove keys
366 | # according to the eviction policy selected (see maxmemory-policy).
367 | #
368 | # If Redis can't remove keys according to the policy, or if the policy is
369 | # set to 'noeviction', Redis will start to reply with errors to commands
370 | # that would use more memory, like SET, LPUSH, and so on, and will continue
371 | # to reply to read-only commands like GET.
372 | #
373 | # This option is usually useful when using Redis as an LRU cache, or to set
374 | # a hard memory limit for an instance (using the 'noeviction' policy).
375 | #
376 | # WARNING: If you have slaves attached to an instance with maxmemory on,
377 | # the size of the output buffers needed to feed the slaves is subtracted
378 | # from the used memory count, so that network problems / resyncs will
379 | # not trigger a loop where keys are evicted, and in turn the output
380 | # buffer of slaves is full with DELs of keys evicted triggering the deletion
381 | # of more keys, and so forth until the database is completely emptied.
382 | #
383 | # In short... if you have slaves attached it is suggested that you set a lower
384 | # limit for maxmemory so that there is some free RAM on the system for slave
385 | # output buffers (but this is not needed if the policy is 'noeviction').
386 | #
387 | # maxmemory <bytes>
388 |
389 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
390 | # is reached.
You can select among the following behaviors:
391 | #
392 | # volatile-lru -> remove the key with an expire set using an LRU algorithm
393 | # allkeys-lru -> remove any key according to the LRU algorithm
394 | # volatile-random -> remove a random key with an expire set
395 | # allkeys-random -> remove a random key, any key
396 | # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
397 | # noeviction -> don't evict at all, just return an error on write operations
398 | #
399 | # Note: with any of the above policies, Redis will return an error on write
400 | # operations, when there are no suitable keys for eviction.
401 | #
402 | # At the date of writing, these commands are: set setnx setex append
403 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
404 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
405 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
406 | # getset mset msetnx exec sort
407 | #
408 | # The default is:
409 | #
410 | # maxmemory-policy volatile-lru
411 |
412 | # LRU and minimal TTL algorithms are not precise algorithms but approximated
413 | # algorithms (in order to save memory), so you can select as well the sample
414 | # size to check. For instance, by default Redis will check three keys and
415 | # pick the one that was used least recently; you can change the sample size
416 | # using the following configuration directive.
417 | #
418 | # maxmemory-samples 3
419 |
420 | ############################## APPEND ONLY MODE ###############################
421 |
422 | # By default Redis asynchronously dumps the dataset on disk. This mode is
423 | # good enough in many applications, but an issue with the Redis process or
424 | # a power outage may result in a few minutes of lost writes (depending on
425 | # the configured save points).
426 | #
427 | # The Append Only File is an alternative persistence mode that provides
428 | # much better durability. For instance, using the default data fsync policy
429 | # (see later in the config file) Redis can lose just one second of writes in a
430 | # dramatic event like a server power outage, or a single write if something
431 | # goes wrong with the Redis process itself while the operating system is
432 | # still running correctly.
433 | #
434 | # AOF and RDB persistence can be enabled at the same time without problems.
435 | # If the AOF is enabled on startup Redis will load the AOF, that is the file
436 | # with the best durability guarantees.
437 | #
438 | # Please check http://redis.io/topics/persistence for more information.
439 |
440 | appendonly no
441 |
442 | # The name of the append only file (default: "appendonly.aof")
443 |
444 | appendfilename "appendonly.aof"
445 |
446 | # The fsync() call tells the Operating System to actually write data on disk
447 | # instead of waiting for more data in the output buffer. Some OS will really flush
448 | # data on disk, some other OS will just try to do it ASAP.
449 | #
450 | # Redis supports three different modes:
451 | #
452 | # no: don't fsync, just let the OS flush the data when it wants. Faster.
453 | # always: fsync after every write to the append only log. Slow, safest.
454 | # everysec: fsync only one time every second. Compromise.
455 | #
456 | # The default is "everysec", as that's usually the right compromise between
457 | # speed and data safety.
It's up to you to understand if you can relax this to
458 | # "no", which will let the operating system flush the output buffer when
459 | # it wants, for better performance (but if you can live with the idea of
460 | # some data loss consider the default persistence mode that's snapshotting),
461 | # or on the contrary, use "always" that's very slow but a bit safer than
462 | # everysec.
463 | #
464 | # For more details please check the following article:
465 | # http://antirez.com/post/redis-persistence-demystified.html
466 | #
467 | # If unsure, use "everysec".
468 |
469 | # appendfsync always
470 | appendfsync everysec
471 | # appendfsync no
472 |
473 | # When the AOF fsync policy is set to always or everysec, and a background
474 | # saving process (a background save or AOF log background rewriting) is
475 | # performing a lot of I/O against the disk, in some Linux configurations
476 | # Redis may block too long on the fsync() call. Note that there is no fix for
477 | # this currently, as even performing fsync in a different thread will block
478 | # our synchronous write(2) call.
479 | #
480 | # In order to mitigate this problem it's possible to use the following option
481 | # that will prevent fsync() from being called in the main process while a
482 | # BGSAVE or BGREWRITEAOF is in progress.
483 | #
484 | # This means that while another child is saving, the durability of Redis is
485 | # the same as "appendfsync none". In practical terms, this means that it is
486 | # possible to lose up to 30 seconds of log in the worst scenario (with the
487 | # default Linux settings).
488 | #
489 | # If you have latency problems turn this to "yes". Otherwise leave it as
490 | # "no", which is the safest pick from the point of view of durability.
491 |
492 | no-appendfsync-on-rewrite no
493 |
494 | # Automatic rewrite of the append only file.
495 | # Redis is able to automatically rewrite the log file by implicitly calling
496 | # BGREWRITEAOF when the AOF log size grows by the specified percentage.
497 | #
498 | # This is how it works: Redis remembers the size of the AOF file after the
499 | # latest rewrite (if no rewrite has happened since the restart, the size of
500 | # the AOF at startup is used).
501 | #
502 | # This base size is compared to the current size. If the current size has
503 | # grown by more than the specified percentage, the rewrite is triggered. Also
504 | # you need to specify a minimal size for the AOF file to be rewritten, this
505 | # is useful to avoid rewriting the AOF file even if the percentage increase
506 | # is reached but it is still pretty small.
507 | #
508 | # Specify a percentage of zero in order to disable the automatic AOF
509 | # rewrite feature.
510 |
511 | auto-aof-rewrite-percentage 100
512 | auto-aof-rewrite-min-size 64mb
513 |
514 | ################################ LUA SCRIPTING ###############################
515 |
516 | # Max execution time of a Lua script in milliseconds.
517 | #
518 | # If the maximum execution time is reached Redis will log that a script is
519 | # still in execution after the maximum allowed time and will start to
520 | # reply to queries with an error.
521 | #
522 | # When a long running script exceeds the maximum execution time only the
523 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
524 | # used to stop a script that has not yet called write commands.
The second
525 | # is the only way to shut down the server in case a write command was
526 | # already issued by the script but the user doesn't want to wait for the natural
527 | # termination of the script.
528 | #
529 | # Set it to 0 or a negative value for unlimited execution without warnings.
530 | lua-time-limit 5000
531 |
532 | ################################## SLOW LOG ###################################
533 |
534 | # The Redis Slow Log is a system to log queries that exceeded a specified
535 | # execution time. The execution time does not include the I/O operations
536 | # like talking with the client, sending the reply and so forth,
537 | # but just the time needed to actually execute the command (this is the only
538 | # stage of command execution where the thread is blocked and cannot serve
539 | # other requests in the meantime).
540 | #
541 | # You can configure the slow log with two parameters: one tells Redis
542 | # the execution time, in microseconds, that a command must exceed in order
543 | # to get logged, and the other parameter is the length of the
544 | # slow log. When a new command is logged the oldest one is removed from the
545 | # queue of logged commands.
546 |
547 | # The following time is expressed in microseconds, so 1000000 is equivalent
548 | # to one second. Note that a negative number disables the slow log, while
549 | # a value of zero forces the logging of every command.
550 | slowlog-log-slower-than 10000
551 |
552 | # There is no limit to this length. Just be aware that it will consume memory.
553 | # You can reclaim memory used by the slow log with SLOWLOG RESET.
554 | slowlog-max-len 128
555 |
556 | ############################# Event notification ##############################
557 |
558 | # Redis can notify Pub/Sub clients about events happening in the key space.
559 | # This feature is documented at http://redis.io/topics/keyspace-events
560 | #
561 | # For instance if keyspace events notification is enabled, and a client
562 | # performs a DEL operation on key "foo" stored in the Database 0, two
563 | # messages will be published via Pub/Sub:
564 | #
565 | # PUBLISH __keyspace@0__:foo del
566 | # PUBLISH __keyevent@0__:del foo
567 | #
568 | # It is possible to select the events that Redis will notify among a set
569 | # of classes. Every class is identified by a single character:
570 | #
571 | # K Keyspace events, published with __keyspace@<db>__ prefix.
572 | # E Keyevent events, published with __keyevent@<db>__ prefix.
573 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
574 | # $ String commands
575 | # l List commands
576 | # s Set commands
577 | # h Hash commands
578 | # z Sorted set commands
579 | # x Expired events (events generated every time a key expires)
580 | # e Evicted events (events generated when a key is evicted for maxmemory)
581 | # A Alias for g$lshzxe, so that the "AKE" string means all the events.
582 | #
583 | # The "notify-keyspace-events" directive takes as its argument a string that
584 | # is composed of zero or more characters. The empty string means that
585 | # notifications are disabled entirely.
586 | #
587 | # Example: to enable list and generic events, from the point of view of the
588 | # event name, use:
589 | #
590 | # notify-keyspace-events Elg
591 | #
592 | # Example 2: to get the stream of the expired keys, subscribing to channel
593 | # name __keyevent@0__:expired, use:
594 | #
595 | # notify-keyspace-events Ex
596 | #
597 | # By default all notifications are disabled because most users don't need
598 | # this feature and the feature has some overhead. Note that if you don't
599 | # specify at least one of K or E, no events will be delivered.
600 | notify-keyspace-events ""
601 |
602 | ############################### ADVANCED CONFIG ###############################
603 |
604 | # Hashes are encoded using a memory efficient data structure when they have a
605 | # small number of entries, and the biggest entry does not exceed a given
606 | # threshold. These thresholds can be configured using the following directives.
607 | hash-max-ziplist-entries 512
608 | hash-max-ziplist-value 64
609 |
610 | # Similarly to hashes, small lists are also encoded in a special way in order
611 | # to save a lot of space. The special representation is only used when
612 | # you are under the following limits:
613 | list-max-ziplist-entries 512
614 | list-max-ziplist-value 64
615 |
616 | # Sets have a special encoding in just one case: when a set is composed
617 | # of just strings that happen to be integers in radix 10 in the range
618 | # of 64 bit signed integers.
619 | # The following configuration setting sets the limit on the size of the
620 | # set in order to use this special memory saving encoding.
621 | set-max-intset-entries 512
622 |
623 | # Similarly to hashes and lists, sorted sets are also specially encoded in
624 | # order to save a lot of space. This encoding is only used when the length and
625 | # elements of a sorted set are below the following limits:
626 | zset-max-ziplist-entries 128
627 | zset-max-ziplist-value 64
628 |
629 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
630 | # order to help rehashing the main Redis hash table (the one mapping top-level
631 | # keys to values). The hash table implementation Redis uses (see dict.c)
632 | # performs a lazy rehashing: the more operations you run against a hash table
633 | # that is rehashing, the more rehashing "steps" are performed, so if the
634 | # server is idle the rehashing is never complete and some more memory is used
635 | # by the hash table.
636 | #
637 | # The default is to use this millisecond 10 times every second in order to
638 | # actively rehash the main dictionaries, freeing memory when possible.
639 | #
640 | # If unsure:
641 | # use "activerehashing no" if you have hard latency requirements and it is
642 | # not a good thing in your environment that Redis can reply from time to time
643 | # to queries with a 2 millisecond delay.
644 | #
645 | # use "activerehashing yes" if you don't have such hard requirements but
646 | # want to free memory asap when possible.
647 | activerehashing yes
648 |
649 | # The client output buffer limits can be used to force disconnection of clients
650 | # that are not reading data from the server fast enough for some reason (a
651 | # common reason is that a Pub/Sub client can't consume messages as fast as the
652 | # publisher can produce them).
653 | #
654 | # The limit can be set differently for the three different classes of clients:
655 | #
656 | # normal -> normal clients
657 | # slave -> slave clients and MONITOR clients
658 | # pubsub -> clients subscribed to at least one pubsub channel or pattern
659 | #
660 | # The syntax of every client-output-buffer-limit directive is the following:
661 | #
662 | # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
663 | #
664 | # A client is immediately disconnected once the hard limit is reached, or if
665 | # the soft limit is reached and remains reached for the specified number of
666 | # seconds (continuously).
667 | # So for instance if the hard limit is 32 megabytes and the soft limit is
668 | # 16 megabytes / 10 seconds, the client will get disconnected immediately
669 | # if the size of the output buffers reaches 32 megabytes, but will also get
670 | # disconnected if the client reaches 16 megabytes and continuously exceeds
671 | # the limit for 10 seconds.
672 | #
673 | # By default normal clients are not limited because they don't receive data
674 | # without asking (in a push way), but just after a request, so only
675 | # asynchronous clients may create a scenario where data is requested faster
676 | # than they can read it.
677 | #
678 | # Instead there is a default limit for pubsub and slave clients, since
679 | # subscribers and slaves receive data in a push fashion.
680 | #
681 | # Both the hard and the soft limit can be disabled by setting them to zero.
682 | client-output-buffer-limit normal 0 0 0
683 | client-output-buffer-limit slave 256mb 64mb 60
684 | client-output-buffer-limit pubsub 32mb 8mb 60
685 |
686 | # Redis calls an internal function to perform many background tasks, like
687 | # closing connections of clients in timeout, purging expired keys that are
688 | # never requested, and so forth.
689 | #
690 | # Not all tasks are performed with the same frequency, but Redis checks for
691 | # tasks to perform according to the specified "hz" value.
692 | #
693 | # By default "hz" is set to 10. Raising the value will use more CPU when
694 | # Redis is idle, but at the same time will make Redis more responsive when
695 | # there are many keys expiring at the same time, and timeouts may be
696 | # handled with more precision.
697 | #
698 | # The range is between 1 and 500, however a value over 100 is usually not
699 | # a good idea. Most users should use the default of 10 and raise this up to
700 | # 100 only in environments where very low latency is required.
701 | hz 10
702 |
703 | # When a child rewrites the AOF file, if the following option is enabled
704 | # the file will be fsync-ed every 32 MB of data generated. This is useful
705 | # in order to commit the file to the disk more incrementally and avoid
706 | # big latency spikes.
707 | aof-rewrite-incremental-fsync yes
708 |
-------------------------------------------------------------------------------- /test-config/sentinel.conf: --------------------------------------------------------------------------------
1 | # Example sentinel.conf
2 |
3 | # port <sentinel-port>
4 | # The port that this sentinel instance will run on
5 | port 26379
6 |
7 | # sentinel monitor test 127.0.0.1 6379 1
8 | #
9 | # Tells Sentinel to monitor this master, and to consider it in O_DOWN
10 | # (Objectively Down) state only if at least <quorum> sentinels agree.
11 | #
12 | # Note that whatever the ODOWN quorum is, a Sentinel will need to
13 | # be elected by the majority of the known Sentinels in order to
14 | # start a failover, so no failover can be performed while in a minority.
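#
# For example, with a monitor line like the illustrative one below, at
# least 2 Sentinels would have to agree before the master is flagged
# O_DOWN (this test config, further down, uses a quorum of 1):
#
# sentinel monitor mymaster 127.0.0.1 6379 2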
15 | #
16 | # Note: master name should not include special characters or spaces.
17 | # The valid charset is A-z 0-9 and the three characters ".-_".
18 | sentinel monitor mymaster 127.0.0.1 6379 1
19 |
20 | # sentinel auth-pass <master-name> <password>
21 | #
22 | # Set the password to use to authenticate with the master and slaves.
23 | # Useful if there is a password set in the Redis instances to monitor.
24 | #
25 | # Note that the master password is also used for slaves, so it is not
26 | # possible to set a different password in masters and slaves instances
27 | # if you want to be able to monitor these instances with Sentinel.
28 | #
29 | # However you can have Redis instances without the authentication enabled
30 | # mixed with Redis instances requiring the authentication (as long as the
31 | # password set is the same for all the instances requiring the password) as
32 | # the AUTH command will have no effect in Redis instances with authentication
33 | # switched off.
34 | #
35 | # Example:
36 | #
37 | # sentinel auth-pass mymaster MySUPER--secret-0123passw0rd
38 |
39 | # sentinel down-after-milliseconds <master-name> <milliseconds>
40 | #
41 | # Number of milliseconds the master (or any attached slave or sentinel) should
42 | # be unreachable (as in, no acceptable reply to PING, continuously, for the
43 | # specified period) in order to consider it in S_DOWN state (Subjectively
44 | # Down).
45 | #
46 | # Default is 30 seconds.
47 | sentinel down-after-milliseconds mymaster 30000
48 |
49 | # sentinel parallel-syncs <master-name> <numslaves>
50 | #
51 | # How many slaves we can reconfigure to point to the new master simultaneously
52 | # during the failover. Use a low number if you use the slaves to serve queries,
53 | # to avoid having all the slaves unreachable at about the same
54 | # time while performing the synchronization with the master.
55 | sentinel parallel-syncs mymaster 1
56 |
57 | # sentinel failover-timeout <master-name> <milliseconds>
58 | #
59 | # Specifies the failover timeout in milliseconds. It is used in many ways:
60 | #
61 | # - The time needed to re-start a failover after a previous failover was
62 | # already tried against the same master by a given Sentinel, is two
63 | # times the failover timeout.
64 | #
65 | # - The time needed for a slave replicating to a wrong master according
66 | # to a Sentinel's current configuration, to be forced to replicate
67 | # with the right master, is exactly the failover timeout (counting since
68 | # the moment a Sentinel detected the misconfiguration).
69 | #
70 | # - The time needed to cancel a failover that is already in progress but
71 | # did not produce any configuration change (SLAVEOF NO ONE not yet
72 | # acknowledged by the promoted slave).
73 | #
74 | # - The maximum time a failover in progress waits for all the slaves to be
75 | # reconfigured as slaves of the new master. However even after this time
76 | # the slaves will be reconfigured by the Sentinels anyway, but not with
77 | # the exact parallel-syncs progression as specified.
78 | #
79 | # Default is 3 minutes.
80 | sentinel failover-timeout mymaster 180000
81 |
82 | # SCRIPTS EXECUTION
83 | #
84 | # sentinel notification-script and sentinel client-reconfig-script are used
85 | # in order to configure scripts that are called to notify the system
86 | # administrator or to reconfigure clients after a failover. The scripts are
87 | # executed with the following rules for error handling:
88 | #
89 | # If the script exits with "1" the execution is retried later (up to a maximum
90 | # number of times currently set to 10).
91 | #
92 | # If the script exits with "2" (or a higher value) the script execution is
93 | # not retried.
94 | #
95 | # If the script terminates because it receives a signal the behavior is the same
96 | # as exit code 1.
97 | #
98 | # A script has a maximum running time of 60 seconds. After this limit is
99 | # reached the script is terminated with a SIGKILL and the execution retried.
100 |
101 | # NOTIFICATION SCRIPT
102 | #
103 | # sentinel notification-script <master-name> <script-path>
104 | #
105 | # Call the specified notification script for any sentinel event that is
106 | # generated in the WARNING level (for instance -sdown, -odown, and so forth).
107 | # This script should notify the system administrator via email, SMS, or any
108 | # other messaging system, that there is something wrong with the monitored
109 | # Redis systems.
110 | #
111 | # The script is called with just two arguments: the first is the event type
112 | # and the second the event description.
113 | #
114 | # The script must exist and be executable in order for sentinel to start if
115 | # this option is provided.
116 | #
117 | # Example:
118 | #
119 | # sentinel notification-script mymaster /var/redis/notify.sh
120 |
121 | # CLIENTS RECONFIGURATION SCRIPT
122 | #
123 | # sentinel client-reconfig-script <master-name> <script-path>
124 | #
125 | # When the master changes because of a failover, a script can be called in
126 | # order to perform application-specific tasks to notify the clients that the
127 | # configuration has changed and the master is at a different address.
128 | #
129 | # The following arguments are passed to the script:
130 | #
131 | # <master-name> <role> <state> <from-ip> <from-port> <to-ip> <to-port>
132 | #
133 | # <state> is currently always "failover"
134 | # <role> is either "leader" or "observer"
135 | #
136 | # The arguments from-ip, from-port, to-ip, to-port are used to communicate
137 | # the old address of the master and the new address of the elected slave
138 | # (now a master).
139 | #
140 | # This script should be resistant to multiple invocations.
141 | #
142 | # Example:
143 | #
144 | # sentinel client-reconfig-script mymaster /var/redis/reconfig.sh
145 |
--------------------------------------------------------------------------------
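As a quick sanity check of the test configuration above, the client described in the README can query the running servers directly. Below is a minimal sketch, not part of the test suite (assumptions: an ActorSystem named `system` is in scope, and the master from test-config/redis.conf is running locally on port 6379; the response shown is only an illustration of the Multi Bulk reply shape):

    import brando._

    // Connect to the master started from test-config/redis.conf.
    val redis = system.actorOf(Redis("localhost", 6379))

    // CONFIG GET replies with a Multi Bulk list of parameter/value pairs.
    redis ! Request("CONFIG", "GET", "hz")

    // Response: Some(List(Some(ByteString("hz")), Some(ByteString("10"))))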