├── doc ├── intro.md ├── js │ └── page_effects.js ├── index.html ├── css │ └── default.css └── carmine-sentinel.core.html ├── .gitignore ├── project.clj ├── CHANGELOG.md ├── Makefile ├── test └── carmine_sentinel │ └── core_test.clj ├── README.md ├── raw-sentinel.conf ├── LICENSE ├── src └── carmine_sentinel │ └── core.clj └── raw-redis.conf /doc/intro.md: -------------------------------------------------------------------------------- 1 | # Introduction to carmine-sentinel 2 | 3 | TODO: write [great documentation](http://jacobian.org/writing/what-to-write/) 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /classes 3 | /checkouts 4 | pom.xml 5 | pom.xml.asc 6 | *.jar 7 | *.class 8 | /.lein-* 9 | /.nrepl-port 10 | .hgignore 11 | .hg/ 12 | -------------------------------------------------------------------------------- /project.clj: -------------------------------------------------------------------------------- 1 | (defproject net.fnil/carmine-sentinel "1.0.0" 2 | :description "A Clojure library designed to connect redis by sentinel, make carmine to support sentinel." 3 | :url "https://github.com/killme2008/carmine-sentinel" 4 | :license {:name "Eclipse Public License" 5 | :url "http://www.eclipse.org/legal/epl-v10.html"} 6 | :dependencies [[org.clojure/clojure "1.10.0"] 7 | [com.taoensso/carmine "2.19.1"]] 8 | :plugins [[codox "0.6.8"]] 9 | :warn-on-reflection true) 10 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | 3 | ## [0.3.0] - 2019-02-27 4 | ### Compatibility Breaking Change 5 | - Change defined sentinel commands format to be compatible with `carmine` version post `2.14`: will only work for `2.15`+ ! 
6 | 7 | ### Fixed 8 | - carmine server connection settings are no longer lost: password connection works! 9 | 10 | ### Changed 11 | - Expanded hard to read use of macros 12 | - Change variable names from acronyms to full words 13 | - `try-resolve-master-spec` now accepts `server-conn` argument 14 | - `get-sentinel-redis-spec` now checks arguments using `:pre` not `if ... throw` 15 | -------------------------------------------------------------------------------- /Makefile: --------------------------------------------------------------------------------
# Absolute directory containing this Makefile ($(dir ...) keeps the trailing slash).
THIS_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))

# Base sentinel config: copy the raw template and enable auth + daemon mode.
sentinel.conf:
	cp raw-sentinel.conf $@
	echo 'requirepass "foobar"' >> $@
	echo 'daemonize yes' >> $@

# Per-instance sentinel config: clone the base config and give each instance
# its own pid file so `stop` can find the process later.
sentinel.%.conf: sentinel.conf
	cp $< $@
	echo pidfile $(THIS_DIR)/sentinel.$*.pid >> $@

# Start one sentinel instance in the background on port 500<N>.
sentinel.%: sentinel.%.conf
	redis-sentinel sentinel.$*.conf --port 500$* &

# Bring up the three sentinel instances used by the test scenario.
sentinels: sentinel.conf
	$(MAKE) sentinel.0
	$(MAKE) sentinel.1
	$(MAKE) sentinel.2

# Redis master config: copy the raw template, enable auth + daemon mode,
# and record a pid file for `stop`.
redis.conf:
	cp raw-redis.conf $@
	echo 'requirepass "foobar"' >> $@
	echo 'daemonize yes' >> $@
	echo pidfile $(THIS_DIR)/redis.pid >> $@

# Start the redis master in the background.
redis: redis.conf
	redis-server redis.conf &

# Bring up the whole test topology: three sentinels + one master.
test_setup: sentinels redis

# Run the test suite against the local topology, then always tear it down.
test: test_setup
	lein test
	$(MAKE) stop
	$(MAKE) clean

# Remove generated config files. -f keeps this idempotent when the files
# were never generated or were already cleaned.
clean:
	rm -f sentinel*.conf redis*.conf

# Stop every started process. The leading `-` keeps going when a pid file
# is missing (process never started, or already stopped).
stop:
	-kill `cat sentinel.*.pid`
	-kill `cat redis.pid`

# Keep pattern-rule intermediates; make would otherwise delete the generated
# configs after building sentinel.%.
.PRECIOUS: sentinel.%.conf redis.conf

# None of these targets correspond to real files.
.PHONY: clean stop test test_setup sentinels redis
-------------------------------------------------------------------------------- /doc/js/page_effects.js: -------------------------------------------------------------------------------- 1 | function visibleInParent(element) { 2 | var position = $(element).position().top 3 | return position > -50 && position < ($(element).offsetParent().height() - 50) 4 | } 5 |
6 | function hasFragment(link, fragment) { 7 | return $(link).attr("href").indexOf("#" + fragment) != -1 8 | } 9 | 10 | function findLinkByFragment(elements, fragment) { 11 | return $(elements).filter(function(i, e) { return hasFragment(e, fragment)}).first() 12 | } 13 | 14 | function setCurrentVarLink() { 15 | $('#vars li').removeClass('current') 16 | $('.public'). 17 | filter(function(index) { return visibleInParent(this) }). 18 | each(function(index, element) { 19 | findLinkByFragment("#vars a", element.id). 20 | parent(). 21 | addClass('current') 22 | }) 23 | } 24 | 25 | var hasStorage = (function() { try { return localStorage.getItem } catch(e) {} }()) 26 | 27 | function scrollPositionId(element) { 28 | var directory = window.location.href.replace(/[^\/]+\.html$/, '') 29 | return 'scroll::' + $(element).attr('id') + '::' + directory 30 | } 31 | 32 | function storeScrollPosition(element) { 33 | if (!hasStorage) return; 34 | localStorage.setItem(scrollPositionId(element) + "::x", $(element).scrollLeft()) 35 | localStorage.setItem(scrollPositionId(element) + "::y", $(element).scrollTop()) 36 | } 37 | 38 | function recallScrollPosition(element) { 39 | if (!hasStorage) return; 40 | $(element).scrollLeft(localStorage.getItem(scrollPositionId(element) + "::x")) 41 | $(element).scrollTop(localStorage.getItem(scrollPositionId(element) + "::y")) 42 | } 43 | 44 | function persistScrollPosition(element) { 45 | recallScrollPosition(element) 46 | $(element).scroll(function() { storeScrollPosition(element) }) 47 | } 48 | 49 | function sidebarContentWidth(element) { 50 | var widths = $(element).find('span').map(function() { return $(this).width() }) 51 | return Math.max.apply(Math, widths) 52 | } 53 | 54 | function resizeNamespaces() { 55 | var width = sidebarContentWidth('#namespaces') + 40 56 | $('#namespaces').css('width', width) 57 | $('#vars, .namespace-index').css('left', width + 1) 58 | $('.namespace-docs').css('left', $('#vars').width() + width + 2) 59 | } 60 | 61 | 
$(window).ready(resizeNamespaces) 62 | $(window).ready(setCurrentVarLink) 63 | $(window).ready(function() { persistScrollPosition('#namespaces')}) 64 | $(window).ready(function() { 65 | $('#content').scroll(setCurrentVarLink) 66 | $(window).resize(setCurrentVarLink) 67 | }) 68 | -------------------------------------------------------------------------------- /doc/index.html: -------------------------------------------------------------------------------- 1 | 2 | Carmine-sentinel 0.1.0-RC5 API documentation

Carmine-sentinel 0.1.0-RC5 API documentation

A Clojure library designed to connect redis by sentinel, make carmine to support sentinel.

carmine-sentinel.core

-------------------------------------------------------------------------------- /doc/css/default.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: sans-serif; 3 | font-size: 11pt; 4 | } 5 | 6 | pre, code { 7 | font-family: Monaco, DejaVu Sans Mono, Consolas, monospace; 8 | font-size: 9pt; 9 | } 10 | 11 | h2 { 12 | font-weight: normal; 13 | font-size: 18pt; 14 | margin: 10px 0 0.4em 0; 15 | } 16 | 17 | #header, #content, .sidebar { 18 | position: fixed; 19 | } 20 | 21 | #header { 22 | top: 0; 23 | left: 0; 24 | right: 0; 25 | height: 20px; 26 | background: #444; 27 | color: #fff; 28 | padding: 5px; 29 | } 30 | 31 | #content { 32 | top: 30px; 33 | right: 0; 34 | bottom: 0; 35 | overflow: auto; 36 | background: #fff; 37 | color: #333; 38 | padding: 0 1em; 39 | } 40 | 41 | .sidebar { 42 | position: fixed; 43 | top: 30px; 44 | bottom: 0; 45 | overflow: auto; 46 | } 47 | 48 | #namespaces { 49 | background: #e6e6e6; 50 | border-right: solid 1px #bbb; 51 | left: 0; 52 | width: 250px; 53 | } 54 | 55 | #vars { 56 | background: #eeeeee; 57 | border-right: solid 1px #ccc; 58 | left: 251px; 59 | width: 200px; 60 | } 61 | 62 | .namespace-index { 63 | left: 251px; 64 | } 65 | 66 | .namespace-docs { 67 | left: 452px; 68 | } 69 | 70 | #header { 71 | background: -moz-linear-gradient(top, #555 0%, #222 100%); 72 | background: -webkit-linear-gradient(top, #555 0%, #333 100%); 73 | background: -o-linear-gradient(top, #555 0%, #222 100%); 74 | background: -ms-linear-gradient(top, #555 0%, #222 100%); 75 | background: linear-gradient(top, #555 0%, #222 100%); 76 | box-shadow: 0 0 10px #555; 77 | z-index: 100; 78 | } 79 | 80 | #header h1 { 81 | margin: 0; 82 | padding: 0; 83 | font-size: 12pt; 84 | font-weight: normal; 85 | text-shadow: -1px -1px 0px #333; 86 | } 87 | 88 | #header a, .sidebar a { 89 | display: block; 90 | text-decoration: none; 91 | } 92 | 93 | #header a { 94 | color: #fff; 95 | } 96 | 97 | .sidebar a { 98 | 
color: #333; 99 | } 100 | 101 | #header h2 { 102 | float: right; 103 | font-size: 9pt; 104 | font-weight: normal; 105 | margin: 3px 3px; 106 | color: #bbb; 107 | } 108 | 109 | #header h2 a { 110 | display: inline; 111 | } 112 | 113 | .sidebar h3 { 114 | margin: 0; 115 | padding: 10px 0.5em 0 0.5em; 116 | font-size: 14pt; 117 | font-weight: normal 118 | } 119 | 120 | .sidebar ul { 121 | padding: 0.5em 0em; 122 | margin: 0; 123 | } 124 | 125 | .sidebar li { 126 | display: block; 127 | } 128 | 129 | .sidebar li a { 130 | padding: 7px 10px; 131 | } 132 | 133 | #namespaces li.current a { 134 | border-left: 3px solid #a33; 135 | padding-left: 7px; 136 | color: #a33; 137 | } 138 | 139 | #vars li.current a { 140 | border-left: 3px solid #33a; 141 | padding-left: 7px; 142 | color: #33a; 143 | } 144 | 145 | #content h3 { 146 | margin-bottom: 0.5em; 147 | font-size: 13pt; 148 | font-weight: bold; 149 | } 150 | 151 | .public h3, h4.macro { 152 | margin: 0; 153 | float: left; 154 | } 155 | 156 | .usage { 157 | clear: both; 158 | } 159 | 160 | h4.macro { 161 | font-variant: small-caps; 162 | font-size: 13px; 163 | font-weight: bold; 164 | color: #717171; 165 | margin-top: 3px; 166 | margin-left: 10px; 167 | } 168 | 169 | .public { 170 | margin-top: 1.5em; 171 | margin-bottom: 2.0em; 172 | } 173 | 174 | .public:last-child { 175 | margin-bottom: 20%; 176 | } 177 | 178 | .namespace:last-child { 179 | margin-bottom: 10%; 180 | } 181 | 182 | .index { 183 | padding: 0; 184 | } 185 | 186 | .index * { 187 | display: inline; 188 | } 189 | 190 | .index li { 191 | padding: 0 .5em; 192 | } 193 | 194 | .index ul { 195 | padding-left: 0; 196 | } 197 | 198 | .usage code { 199 | display: block; 200 | color: #008; 201 | } 202 | 203 | .doc { 204 | margin-bottom: .5em; 205 | } 206 | 207 | .src-link a { 208 | font-size: 9pt; 209 | } 210 | -------------------------------------------------------------------------------- /test/carmine_sentinel/core_test.clj: 
-------------------------------------------------------------------------------- 1 | (ns carmine-sentinel.core-test 2 | (:require [clojure.test :refer :all] 3 | [taoensso.carmine :as car] 4 | [carmine-sentinel.core :refer :all])) 5 | 6 | 7 | ;;; NOTE: 8 | ;;; add the following to `redis.conf`: 9 | ;;; requirepass foobar 10 | 11 | ;;; NOTE: 12 | ;;; SENTINEL CONF `PORT` SHOULD BE DIFFERENT FOR EACH SENTINEL 13 | ;;; MAKE SURE TO REMOVE `myid` OR HARDCODE DIFFERENT IDS FOR EACH SENTINEL 14 | ;;; NOTE: 15 | ;;; Use different files for each sentinel 16 | 17 | ;;; > cat sentinel.conf 18 | ;;; port 5000 19 | ;;; daemonize no 20 | ;;; pidfile "/var/run/redis-sentinel.pid" 21 | ;;; logfile "" 22 | ;;; dir "/tmp" 23 | ;;; sentinel deny-scripts-reconfig yes 24 | ;;; sentinel monitor mymaster 127.0.0.1 6379 2 25 | ;;; protected-mode no 26 | ;;; requirepass "foobar" 27 | 28 | 29 | (def token "foobar") 30 | (def host "127.0.0.1") 31 | (def conn {:pool {} 32 | :spec {:password token} 33 | :sentinel-group :group1 34 | :master-name "mymaster"}) 35 | 36 | (set-sentinel-groups! 
37 | {:group1 38 | {:specs [{:host host :port 5000 :password token} 39 | {:host host :port 5001 :password token} 40 | {:host host :port 5002 :password token}]}}) 41 | 42 | 43 | (deftest resolve-master-spec 44 | (testing "Try to resolve the master's spec using the sentinels' specs" 45 | (is (= 46 | [{:password "foobar", :host "127.0.0.1", :port 6379} ()] 47 | (let [server-conn {:pool {}, 48 | :spec {:password "foobar"}, 49 | :sentinel-group :group1, 50 | :master-name "mymaster"} 51 | specs [{:host "127.0.0.1", :port 5002, :password "foobar"} 52 | {:host "127.0.0.1", :port 5001, :password "foobar"} 53 | {:host "127.0.0.1", :port 5000, :password "foobar"}] 54 | sentinel-group :group1 55 | master-name "mymaster"] 56 | (@#'carmine-sentinel.core/try-resolve-master-spec 57 | server-conn specs sentinel-group master-name)))))) 58 | 59 | (deftest subscribing-all-sentinels 60 | (testing "Check if sentinels are subscribed to correctly" 61 | (is (= 62 | [{:password "foobar", :port 5002, :host "127.0.0.1"} 63 | {:password "foobar", :port 5001, :host "127.0.0.1"} 64 | {:password "foobar", :port 5000, :host "127.0.0.1"}] 65 | (let [sentinel-group :group1 66 | master-name "mymaster" 67 | server-conn conn] 68 | (@#'carmine-sentinel.core/subscribe-all-sentinels 69 | sentinel-group 70 | master-name)))))) 71 | 72 | (deftest asking-sentinel-master 73 | (testing "Testing if master is found through sentinel" 74 | (is (= {:password "foobar", :host "127.0.0.1", :port 6379} 75 | (let [sentinel-group :group1 76 | master-name "mymaster" 77 | server-conn conn] 78 | (@#'carmine-sentinel.core/ask-sentinel-master sentinel-group 79 | master-name 80 | server-conn)))))) 81 | 82 | (deftest sentinel-redis-spec 83 | (testing "Trying to get redis spec by sentinel-group and master name" 84 | (is (= {:password "foobar", :host "127.0.0.1", :port 6379} 85 | (let [server-conn conn] 86 | (get-sentinel-redis-spec (:sentinel-group server-conn) 87 | (:master-name server-conn) 88 | server-conn)))))) 89 | 90 | (try 
91 | (defmacro test-wcar* [& body] `(wcar conn ~@body)) 92 | (catch Exception e 93 | (println "WARNING: caught exception while defining wcar*," 94 | "can occur when re-running tests in the same repl." 95 | "Please verify and check if it isn't the cause of your tests' failure:" 96 | e))) 97 | 98 | 99 | (deftest ping 100 | (testing "Checking if ping works." 101 | (is (= "PONG" (test-wcar* (car/ping)))))) 102 | 103 | -------------------------------------------------------------------------------- /doc/carmine-sentinel.core.html: -------------------------------------------------------------------------------- 1 | 2 | carmine-sentinel.core documentation

carmine-sentinel.core documentation

add-sentinel-groups!

(add-sentinel-groups! conf)
Add sentinel groups,it will be merged into current conf:
 3 |  {:group-name {:specs  [{ :host host
 4 |                         :port port
 5 |                         :password password
 6 |                         :timeout-ms timeout-ms },
 7 |                         ...other sentinel instances...]
 8 |                :pool {<opts>}}}
 9 | The conf is a map of sentinel group to connection spec.

get-sentinel-redis-spec

(get-sentinel-redis-spec sg master-name {:keys [prefer-slave? slaves-balancer], :or {prefer-slave? false, slaves-balancer first}, :as opts})
Get redis spec by sentinel-group and master name.
10 | If it is not resolved, it will query from sentinel and
11 | cache the result in memory.
12 | Recommend to call this function at your app startup  to reduce
13 | resolving cost.

notify-event-listeners

(notify-event-listeners event)

register-listener!

(register-listener! listener)
Register listener for switching master.
14 | The listener will be called with an event:
15 |   {:event "+switch-master"
16 |    :old {:host old-master-ip
17 |          :port old-master-port
18 |    :new {:host new-master-ip
19 |          :port new-master-port}}}
20 | 

remove-last-resolved-spec!

(remove-last-resolved-spec! sg master-name)
Remove last resolved master spec by sentinel group and master name.
21 | 

remove-sentinel-group!

(remove-sentinel-group! group-name)
Remove a sentinel group configuration by name.
22 | 

sentinel-get-master-addr-by-name

(sentinel-get-master-addr-by-name name)
SENTINEL get-master-addr-by-name name
23 | 
24 | get master address by master name..
25 | 
26 | Available since: .
27 | 
28 | Time complexity: O(1)

sentinel-sentinels

(sentinel-sentinels name)
SENTINEL sentinels name
29 | 
30 | get sentinel instances by mater name..
31 | 
32 | Available since: .
33 | 
34 | Time complexity: O(1)

sentinel-slaves

(sentinel-slaves name)
SENTINEL slaves name
35 | 
36 | get slaves address by master name..
37 | 
38 | Available since: .
39 | 
40 | Time complexity: O(1)

set-sentinel-groups!

(set-sentinel-groups! conf)
Configure sentinel groups, it will replace current conf:
41 |  {:group-name {:specs  [{ :host host
42 |                         :port port
43 |                         :password password
44 |                         :timeout-ms timeout-ms },
45 |                        ...other sentinel instances...]
46 |                :pool {<opts>}}}
47 | The conf is a map of sentinel group to connection spec.

sync-on

macro

(sync-on sg mn & body)

unregister-listener!

(unregister-listener! listener)
Remove the listener for switching master.
48 | 

update-conn-spec

(update-conn-spec conn)
Cast a carmine-sentinel conn to carmine raw conn spec.
49 | It will resolve master from sentinel first time,then cache the result in
50 | memory for reusing.

wcar

macro

(wcar conn :as-pipeline & body)(wcar conn & body)
It's the same as taoensso.carmine/wcar, but supports
51 |    :master-name "mymaster"
52 |    :sentinel-group :default
53 | in conn for redis sentinel cluster.
54 | 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # carmine-sentinel 2 | 3 | A Clojure library designed to connect to redis via [sentinel](https://redis.io/topics/sentinel), making [carmine](https://github.com/ptaoussanis/carmine) support [sentinel](https://redis.io/topics/sentinel). 4 | 5 | ## Usage 6 | 7 | ```clojure 8 | [net.fnil/carmine-sentinel "1.0.0"] 9 | ``` 10 | 11 | **Carmine-sentinel requires carmine version `>= 2.15.0` right now.** 12 | 13 | First, require carmine and carmine-sentinel: 14 | 15 | ```clojure 16 | (ns my-app 17 | (:require [taoensso.carmine :as car] 18 | [carmine-sentinel.core :as cs :refer [set-sentinel-groups!]])) 19 | ``` 20 | 21 | The only difference compared with carmine is that we use `carmine-sentinel.core/wcar` to replace `taoensso.carmine/wcar` and add a new function `set-sentinel-groups!`. 22 | 23 | Second, configure sentinel groups: 24 | 25 | ```clojure 26 | (set-sentinel-groups! 27 | {:group1 28 | {:specs [{:host "127.0.0.1" :port 5000} {:host "127.0.0.1" :port 5001} {:host "127.0.0.1" :port 5002}] 29 | :pool {} }}) 30 | ``` 31 | 32 | There is only one group named `:group1` above, and it has three sentinel instances (ports 5000 to 5002 at 127.0.0.1). Optionally, you can set the pool option values and add more sentinel groups. 33 | 34 | You can use `add-sentinel-groups!` and `remove-sentinel-group!` to manage the configuration at any time.
35 | 36 | Next, we can define the `wcar*`: 37 | 38 | ```clojure 39 | (def server1-conn {:pool {} :spec {} :sentinel-group :group1 :master-name "mymaster"}) 40 | (defmacro wcar* [& body] `(cs/wcar server1-conn ~@body)) 41 | ``` 42 | 43 | The spec in `server1-conn` can be left empty or contain general configurations, such as a password or an ssl function, and there are two new options in server1-conn: 44 | 45 | * `:sentinel-group` Which sentinel instance group to use to resolve the master address. Here it is `:group1`. 46 | * `:master-name` Master name configured in that sentinel group. Here it is `mymaster`. 47 | 48 | The `spec` in server1-conn will be merged into the resolved master spec at runtime. 49 | So you can set `:password`, `:timeout-ms` and other options in it. 50 | 51 | Also, you can define many `wcar*`-like macros to use other sentinel groups and master names. 52 | 53 | Finally, you can use `wcar*` the same way as in carmine. 54 | 55 | ```clojure 56 | (wcar* (car/set "key" 1)) 57 | (wcar* (car/get "key")) 58 | ``` 59 | 60 | If you want to bypass sentinel and connect to a redis server directly, such as when testing on your local machine, you can omit `sentinel-group` and `master-name` and just provide the redis server connection spec you want to connect to directly, like this: 61 | 62 | ```clojure 63 | (def server1-conn {:pool {} :spec {:host "127.0.0.1" :port 6379}}) 64 | (defmacro wcar* [& body] `(cs/wcar server1-conn ~@body)) 65 | ``` 66 | 67 | ### Authentication 68 | 69 | Due to a bug fix in version `1.0.0`, authentication requires slight modifications to the settings. 70 | Notice both the server connection and the sentinel group require passing the authentication token: 71 | 72 | ```clojure 73 | (let [token "foobar" 74 | host "127.0.0.1"] 75 | 76 | (def server1-conn 77 | {:pool {} 78 | :spec {:password token} 79 | :sentinel-group :group1 80 | :master-name "mymaster"}) 81 | 82 | (set-sentinel-groups!
83 | {:group1 84 | {:specs [{:host host :port 5000 :password token} 85 | {:host host :port 5001 :password token} 86 | {:host host :port 5002 :password token}]}})) 87 | ``` 88 | 89 | `wcar*` is defined normally 90 | 91 | ## Pub/Sub 92 | 93 | Please use `carmine-sentinel.core/with-new-pubsub-listener` to replace `taoensso.carmine/with-new-pubsub-listener` and provide `master-name`, `sentinel-group` to take advantage of sentinel cluster like this: 94 | 95 | ```clojure 96 | (def server1-conn {:sentinel-group :group1 :master-name "mymaster"}) 97 | 98 | ;;Pub/Sub 99 | (def listener 100 | (with-new-pubsub-listener server1-conn 101 | {"foobar" (fn f1 [msg] (println "Channel match: " msg)) 102 | "foo*" (fn f2 [msg] (println "Pattern match: " msg))} 103 | (car/subscribe "foobar" "foobaz") 104 | (car/psubscribe "foo*"))) 105 | ``` 106 | 107 | `carmine-sentinel.core/with-new-pubsub-listener` also support bypass sentinel and connect to redis server directly. You just need to provide the redis server spec you want connect to while ignore `sentinel-group` and `master-name`: 108 | 109 | ```clojure 110 | (def server1-conn {:spec {:host "127.0.0.1" :port 6379}}) 111 | 112 | ;;Pub/Sub 113 | (def listener 114 | (with-new-pubsub-listener server1-conn 115 | {"foobar" (fn f1 [msg] (println "Channel match: " msg)) 116 | "foo*" (fn f2 [msg] (println "Pattern match: " msg))} 117 | (car/subscribe "foobar" "foobaz") 118 | (car/psubscribe "foo*"))) 119 | ``` 120 | 121 | ## MessageQueue and Lock 122 | 123 | You have to invoke `update-conn-spec` before using other APIs in carmine: 124 | 125 | ```clojure 126 | (def server1-conn {:pool {} :spec {} :sentinel-group :group1 :master-name "mymaster"}) 127 | 128 | 129 | ;;Message queue 130 | (def my-worker 131 | (car-mq/worker (cs/update-conn-spec server1-conn) "my-queue" 132 | {:handler (fn [{:keys [message attempt]}] 133 | (println "Received" message) 134 | {:status :success})})) 135 | 136 | 137 | ;;;Lock 138 | (locks/with-lock (cs/update-conn-spec 
server1-conn) "my-lock" 139 | 1000 ; Time to hold lock 140 | 500 ; Time to wait (block) for lock acquisition 141 | (println "This was printed under lock!")) 142 | ``` 143 | 144 | ## Reading From Slaves 145 | 146 | If you want to read data from a slave, you can set `prefer-slave?` to true: 147 | 148 | ```clojure 149 | (def slave-conn {:pool {} :spec {} 150 | :sentinel-group :group1 :master-name "mymaster" 151 | :prefer-slave? true}) 152 | 153 | (defmacro wcars* [& body] `(cs/wcar slave-conn ~@body)) 154 | 155 | (wcars* (car/set "key" 1)) ;; ExceptionInfo READONLY You can't write against a read only slave 156 | ``` 157 | 158 | If you have many slaves for one master, the default balancer is the `first` function, but you can customize it with `slaves-balancer`, 159 | for example, using a random strategy: 160 | 161 | ```clojure 162 | (def slave-conn {:pool {} :spec {} 163 | :sentinel-group :group1 164 | :master-name "mymaster" 165 | :prefer-slave? true 166 | :slaves-balancer rand-nth}) 167 | ``` 168 | 169 | ## Listen on carmine-sentinel events 170 | 171 | You can register a listener to listen to carmine-sentinel events such as `error`, `get-master-addr-by-name` 172 | and `+switch-master` etc.: 173 | 174 | ```clojure 175 | (cs/register-listener! (fn [e] (println "Event " e " happens"))) 176 | ``` 177 | 178 | ## Failover 179 | 180 | At startup, carmine-sentinel will connect to the first sentinel instance to resolve the master address; if it fails, carmine-sentinel will try the next sentinel until it finds a resolved master address or throws an exception. The resolved address will be cached in memory. 181 | 182 | Carmine-sentinel also subscribes to the `+switch-master` channel in sentinel. When the master redis instance is down, sentinel will publish a `+switch-master` message; when carmine-sentinel receives this message, it will clean the last cached result and try to connect to the new redis master at once.
183 | 184 | At last, carmine-sentinel will refresh the sentinel instance list by the response from command `SENTINEL sentinels [name]`. 185 | 186 | ## API docs 187 | 188 | * [Carmine-sentinel APIs](http://fnil.net/docs/carmine_sentinel/) 189 | 190 | ## Testing 191 | 192 | Running the Makefile tests requires having `make`, `lein` and a Linux or MacOS machine. 193 | The test scenario in the is comprised of three sentinels and one master. 194 | 195 | To run the tests simply run in shell: 196 | ```shell 197 | make test 198 | ``` 199 | 200 | ## License 201 | 202 | Copyright © 2016 killme2008 203 | 204 | Distributed under the Eclipse Public License either version 1.0 or (at 205 | your option) any later version. 206 | -------------------------------------------------------------------------------- /raw-sentinel.conf: -------------------------------------------------------------------------------- 1 | # Example sentinel.conf 2 | 3 | # *** IMPORTANT *** 4 | # 5 | # By default Sentinel will not be reachable from interfaces different than 6 | # localhost, either use the 'bind' directive to bind to a list of network 7 | # interfaces, or disable protected mode with "protected-mode no" by 8 | # adding it to this configuration file. 9 | # 10 | # Before doing that MAKE SURE the instance is protected from the outside 11 | # world via firewalling or other means. 12 | # 13 | # For example you may use one of the following: 14 | # 15 | # bind 127.0.0.1 192.168.1.1 16 | # 17 | # protected-mode no 18 | 19 | # port 20 | # The port that this sentinel instance will run on 21 | port 26379 22 | 23 | # By default Redis Sentinel does not run as a daemon. Use 'yes' if you need it. 24 | # Note that Redis will write a pid file in /var/run/redis-sentinel.pid when 25 | # daemonized. 26 | daemonize no 27 | 28 | # When running daemonized, Redis Sentinel writes a pid file in 29 | # /var/run/redis-sentinel.pid by default. You can specify a custom pid file 30 | # location here. 
31 | pidfile /var/run/redis-sentinel.pid 32 | 33 | # Specify the log file name. Also the empty string can be used to force 34 | # Sentinel to log on the standard output. Note that if you use standard 35 | # output for logging but daemonize, logs will be sent to /dev/null 36 | logfile "" 37 | 38 | # sentinel announce-ip 39 | # sentinel announce-port 40 | # 41 | # The above two configuration directives are useful in environments where, 42 | # because of NAT, Sentinel is reachable from outside via a non-local address. 43 | # 44 | # When announce-ip is provided, the Sentinel will claim the specified IP address 45 | # in HELLO messages used to gossip its presence, instead of auto-detecting the 46 | # local address as it usually does. 47 | # 48 | # Similarly when announce-port is provided and is valid and non-zero, Sentinel 49 | # will announce the specified TCP port. 50 | # 51 | # The two options don't need to be used together, if only announce-ip is 52 | # provided, the Sentinel will announce the specified IP and the server port 53 | # as specified by the "port" option. If only announce-port is provided, the 54 | # Sentinel will announce the auto-detected local IP and the specified port. 55 | # 56 | # Example: 57 | # 58 | # sentinel announce-ip 1.2.3.4 59 | 60 | # dir 61 | # Every long running process should have a well-defined working directory. 62 | # For Redis Sentinel to chdir to /tmp at startup is the simplest thing 63 | # for the process to don't interfere with administrative tasks such as 64 | # unmounting filesystems. 65 | dir /tmp 66 | 67 | # sentinel monitor 68 | # 69 | # Tells Sentinel to monitor this master, and to consider it in O_DOWN 70 | # (Objectively Down) state only if at least sentinels agree. 71 | # 72 | # Note that whatever is the ODOWN quorum, a Sentinel will require to 73 | # be elected by the majority of the known Sentinels in order to 74 | # start a failover, so no failover can be performed in minority. 
75 | # 76 | # Replicas are auto-discovered, so you don't need to specify replicas in 77 | # any way. Sentinel itself will rewrite this configuration file adding 78 | # the replicas using additional configuration options. 79 | # Also note that the configuration file is rewritten when a 80 | # replica is promoted to master. 81 | # 82 | # Note: master name should not include special characters or spaces. 83 | # The valid charset is A-z 0-9 and the three characters ".-_". 84 | sentinel monitor mymaster 127.0.0.1 6379 2 85 | 86 | # sentinel auth-pass 87 | # 88 | # Set the password to use to authenticate with the master and replicas. 89 | # Useful if there is a password set in the Redis instances to monitor. 90 | # 91 | # Note that the master password is also used for replicas, so it is not 92 | # possible to set a different password in masters and replicas instances 93 | # if you want to be able to monitor these instances with Sentinel. 94 | # 95 | # However you can have Redis instances without the authentication enabled 96 | # mixed with Redis instances requiring the authentication (as long as the 97 | # password set is the same for all the instances requiring the password) as 98 | # the AUTH command will have no effect in Redis instances with authentication 99 | # switched off. 100 | # 101 | # Example: 102 | # 103 | # sentinel auth-pass mymaster MySUPER--secret-0123passw0rd 104 | 105 | # sentinel down-after-milliseconds 106 | # 107 | # Number of milliseconds the master (or any attached replica or sentinel) should 108 | # be unreachable (as in, not acceptable reply to PING, continuously, for the 109 | # specified period) in order to consider it in S_DOWN state (Subjectively 110 | # Down). 111 | # 112 | # Default is 30 seconds. 113 | sentinel down-after-milliseconds mymaster 30000 114 | 115 | # sentinel parallel-syncs 116 | # 117 | # How many replicas we can reconfigure to point to the new replica simultaneously 118 | # during the failover. 
Use a low number if you use the replicas to serve query 119 | # to avoid that all the replicas will be unreachable at about the same 120 | # time while performing the synchronization with the master. 121 | sentinel parallel-syncs mymaster 1 122 | 123 | # sentinel failover-timeout 124 | # 125 | # Specifies the failover timeout in milliseconds. It is used in many ways: 126 | # 127 | # - The time needed to re-start a failover after a previous failover was 128 | # already tried against the same master by a given Sentinel, is two 129 | # times the failover timeout. 130 | # 131 | # - The time needed for a replica replicating to a wrong master according 132 | # to a Sentinel current configuration, to be forced to replicate 133 | # with the right master, is exactly the failover timeout (counting since 134 | # the moment a Sentinel detected the misconfiguration). 135 | # 136 | # - The time needed to cancel a failover that is already in progress but 137 | # did not produced any configuration change (SLAVEOF NO ONE yet not 138 | # acknowledged by the promoted replica). 139 | # 140 | # - The maximum time a failover in progress waits for all the replicas to be 141 | # reconfigured as replicas of the new master. However even after this time 142 | # the replicas will be reconfigured by the Sentinels anyway, but not with 143 | # the exact parallel-syncs progression as specified. 144 | # 145 | # Default is 3 minutes. 146 | sentinel failover-timeout mymaster 180000 147 | 148 | # SCRIPTS EXECUTION 149 | # 150 | # sentinel notification-script and sentinel reconfig-script are used in order 151 | # to configure scripts that are called to notify the system administrator 152 | # or to reconfigure clients after a failover. The scripts are executed 153 | # with the following rules for error handling: 154 | # 155 | # If script exits with "1" the execution is retried later (up to a maximum 156 | # number of times currently set to 10). 
157 | # 158 | # If script exits with "2" (or an higher value) the script execution is 159 | # not retried. 160 | # 161 | # If script terminates because it receives a signal the behavior is the same 162 | # as exit code 1. 163 | # 164 | # A script has a maximum running time of 60 seconds. After this limit is 165 | # reached the script is terminated with a SIGKILL and the execution retried. 166 | 167 | # NOTIFICATION SCRIPT 168 | # 169 | # sentinel notification-script 170 | # 171 | # Call the specified notification script for any sentinel event that is 172 | # generated in the WARNING level (for instance -sdown, -odown, and so forth). 173 | # This script should notify the system administrator via email, SMS, or any 174 | # other messaging system, that there is something wrong with the monitored 175 | # Redis systems. 176 | # 177 | # The script is called with just two arguments: the first is the event type 178 | # and the second the event description. 179 | # 180 | # The script must exist and be executable in order for sentinel to start if 181 | # this option is provided. 182 | # 183 | # Example: 184 | # 185 | # sentinel notification-script mymaster /var/redis/notify.sh 186 | 187 | # CLIENTS RECONFIGURATION SCRIPT 188 | # 189 | # sentinel client-reconfig-script 190 | # 191 | # When the master changed because of a failover a script can be called in 192 | # order to perform application-specific tasks to notify the clients that the 193 | # configuration has changed and the master is at a different address. 194 | # 195 | # The following arguments are passed to the script: 196 | # 197 | # 198 | # 199 | # is currently always "failover" 200 | # is either "leader" or "observer" 201 | # 202 | # The arguments from-ip, from-port, to-ip, to-port are used to communicate 203 | # the old address of the master and the new address of the elected replica 204 | # (now a master). 205 | # 206 | # This script should be resistant to multiple invocations. 
207 | # 208 | # Example: 209 | # 210 | # sentinel client-reconfig-script mymaster /var/redis/reconfig.sh 211 | 212 | # SECURITY 213 | # 214 | # By default SENTINEL SET will not be able to change the notification-script 215 | # and client-reconfig-script at runtime. This avoids a trivial security issue 216 | # where clients can set the script to anything and trigger a failover in order 217 | # to get the program executed. 218 | 219 | sentinel deny-scripts-reconfig yes 220 | 221 | # REDIS COMMANDS RENAMING 222 | # 223 | # Sometimes the Redis server has certain commands, that are needed for Sentinel 224 | # to work correctly, renamed to unguessable strings. This is often the case 225 | # of CONFIG and SLAVEOF in the context of providers that provide Redis as 226 | # a service, and don't want the customers to reconfigure the instances outside 227 | # of the administration console. 228 | # 229 | # In such case it is possible to tell Sentinel to use different command names 230 | # instead of the normal ones. For example if the master "mymaster", and the 231 | # associated replicas, have "CONFIG" all renamed to "GUESSME", I could use: 232 | # 233 | # SENTINEL rename-command mymaster CONFIG GUESSME 234 | # 235 | # After such configuration is set, every time Sentinel would use CONFIG it will 236 | # use GUESSME instead. Note that there is no actual need to respect the command 237 | # case, so writing "config guessme" is the same in the example above. 238 | # 239 | # SENTINEL SET can also be used in order to perform this configuration at runtime. 
240 | # 241 | # In order to set a command back to its original name (undo the renaming), it 242 | # is possible to just rename a command to itsef: 243 | # 244 | # SENTINEL rename-command mymaster CONFIG CONFIG 245 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC 2 | LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM 3 | CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. 4 | 5 | 1. DEFINITIONS 6 | 7 | "Contribution" means: 8 | 9 | a) in the case of the initial Contributor, the initial code and 10 | documentation distributed under this Agreement, and 11 | 12 | b) in the case of each subsequent Contributor: 13 | 14 | i) changes to the Program, and 15 | 16 | ii) additions to the Program; 17 | 18 | where such changes and/or additions to the Program originate from and are 19 | distributed by that particular Contributor. A Contribution 'originates' from 20 | a Contributor if it was added to the Program by such Contributor itself or 21 | anyone acting on such Contributor's behalf. Contributions do not include 22 | additions to the Program which: (i) are separate modules of software 23 | distributed in conjunction with the Program under their own license 24 | agreement, and (ii) are not derivative works of the Program. 25 | 26 | "Contributor" means any person or entity that distributes the Program. 27 | 28 | "Licensed Patents" mean patent claims licensable by a Contributor which are 29 | necessarily infringed by the use or sale of its Contribution alone or when 30 | combined with the Program. 31 | 32 | "Program" means the Contributions distributed in accordance with this 33 | Agreement. 34 | 35 | "Recipient" means anyone who receives the Program under this Agreement, 36 | including all Contributors. 37 | 38 | 2. 
GRANT OF RIGHTS 39 | 40 | a) Subject to the terms of this Agreement, each Contributor hereby grants 41 | Recipient a non-exclusive, worldwide, royalty-free copyright license to 42 | reproduce, prepare derivative works of, publicly display, publicly perform, 43 | distribute and sublicense the Contribution of such Contributor, if any, and 44 | such derivative works, in source code and object code form. 45 | 46 | b) Subject to the terms of this Agreement, each Contributor hereby grants 47 | Recipient a non-exclusive, worldwide, royalty-free patent license under 48 | Licensed Patents to make, use, sell, offer to sell, import and otherwise 49 | transfer the Contribution of such Contributor, if any, in source code and 50 | object code form. This patent license shall apply to the combination of the 51 | Contribution and the Program if, at the time the Contribution is added by the 52 | Contributor, such addition of the Contribution causes such combination to be 53 | covered by the Licensed Patents. The patent license shall not apply to any 54 | other combinations which include the Contribution. No hardware per se is 55 | licensed hereunder. 56 | 57 | c) Recipient understands that although each Contributor grants the licenses 58 | to its Contributions set forth herein, no assurances are provided by any 59 | Contributor that the Program does not infringe the patent or other 60 | intellectual property rights of any other entity. Each Contributor disclaims 61 | any liability to Recipient for claims brought by any other entity based on 62 | infringement of intellectual property rights or otherwise. As a condition to 63 | exercising the rights and licenses granted hereunder, each Recipient hereby 64 | assumes sole responsibility to secure any other intellectual property rights 65 | needed, if any. 
For example, if a third party patent license is required to 66 | allow Recipient to distribute the Program, it is Recipient's responsibility 67 | to acquire that license before distributing the Program. 68 | 69 | d) Each Contributor represents that to its knowledge it has sufficient 70 | copyright rights in its Contribution, if any, to grant the copyright license 71 | set forth in this Agreement. 72 | 73 | 3. REQUIREMENTS 74 | 75 | A Contributor may choose to distribute the Program in object code form under 76 | its own license agreement, provided that: 77 | 78 | a) it complies with the terms and conditions of this Agreement; and 79 | 80 | b) its license agreement: 81 | 82 | i) effectively disclaims on behalf of all Contributors all warranties and 83 | conditions, express and implied, including warranties or conditions of title 84 | and non-infringement, and implied warranties or conditions of merchantability 85 | and fitness for a particular purpose; 86 | 87 | ii) effectively excludes on behalf of all Contributors all liability for 88 | damages, including direct, indirect, special, incidental and consequential 89 | damages, such as lost profits; 90 | 91 | iii) states that any provisions which differ from this Agreement are offered 92 | by that Contributor alone and not by any other party; and 93 | 94 | iv) states that source code for the Program is available from such 95 | Contributor, and informs licensees how to obtain it in a reasonable manner on 96 | or through a medium customarily used for software exchange. 97 | 98 | When the Program is made available in source code form: 99 | 100 | a) it must be made available under this Agreement; and 101 | 102 | b) a copy of this Agreement must be included with each copy of the Program. 103 | 104 | Contributors may not remove or alter any copyright notices contained within 105 | the Program. 
106 | 107 | Each Contributor must identify itself as the originator of its Contribution, 108 | if any, in a manner that reasonably allows subsequent Recipients to identify 109 | the originator of the Contribution. 110 | 111 | 4. COMMERCIAL DISTRIBUTION 112 | 113 | Commercial distributors of software may accept certain responsibilities with 114 | respect to end users, business partners and the like. While this license is 115 | intended to facilitate the commercial use of the Program, the Contributor who 116 | includes the Program in a commercial product offering should do so in a 117 | manner which does not create potential liability for other Contributors. 118 | Therefore, if a Contributor includes the Program in a commercial product 119 | offering, such Contributor ("Commercial Contributor") hereby agrees to defend 120 | and indemnify every other Contributor ("Indemnified Contributor") against any 121 | losses, damages and costs (collectively "Losses") arising from claims, 122 | lawsuits and other legal actions brought by a third party against the 123 | Indemnified Contributor to the extent caused by the acts or omissions of such 124 | Commercial Contributor in connection with its distribution of the Program in 125 | a commercial product offering. The obligations in this section do not apply 126 | to any claims or Losses relating to any actual or alleged intellectual 127 | property infringement. In order to qualify, an Indemnified Contributor must: 128 | a) promptly notify the Commercial Contributor in writing of such claim, and 129 | b) allow the Commercial Contributor tocontrol, and cooperate with the 130 | Commercial Contributor in, the defense and any related settlement 131 | negotiations. The Indemnified Contributor may participate in any such claim 132 | at its own expense. 133 | 134 | For example, a Contributor might include the Program in a commercial product 135 | offering, Product X. That Contributor is then a Commercial Contributor. 
If 136 | that Commercial Contributor then makes performance claims, or offers 137 | warranties related to Product X, those performance claims and warranties are 138 | such Commercial Contributor's responsibility alone. Under this section, the 139 | Commercial Contributor would have to defend claims against the other 140 | Contributors related to those performance claims and warranties, and if a 141 | court requires any other Contributor to pay any damages as a result, the 142 | Commercial Contributor must pay those damages. 143 | 144 | 5. NO WARRANTY 145 | 146 | EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON 147 | AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER 148 | EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR 149 | CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A 150 | PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the 151 | appropriateness of using and distributing the Program and assumes all risks 152 | associated with its exercise of rights under this Agreement , including but 153 | not limited to the risks and costs of program errors, compliance with 154 | applicable laws, damage to or loss of data, programs or equipment, and 155 | unavailability or interruption of operations. 156 | 157 | 6. DISCLAIMER OF LIABILITY 158 | 159 | EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY 160 | CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, 161 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION 162 | LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 163 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 164 | ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE 165 | EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY 166 | OF SUCH DAMAGES. 167 | 168 | 7. 
GENERAL 169 | 170 | If any provision of this Agreement is invalid or unenforceable under 171 | applicable law, it shall not affect the validity or enforceability of the 172 | remainder of the terms of this Agreement, and without further action by the 173 | parties hereto, such provision shall be reformed to the minimum extent 174 | necessary to make such provision valid and enforceable. 175 | 176 | If Recipient institutes patent litigation against any entity (including a 177 | cross-claim or counterclaim in a lawsuit) alleging that the Program itself 178 | (excluding combinations of the Program with other software or hardware) 179 | infringes such Recipient's patent(s), then such Recipient's rights granted 180 | under Section 2(b) shall terminate as of the date such litigation is filed. 181 | 182 | All Recipient's rights under this Agreement shall terminate if it fails to 183 | comply with any of the material terms or conditions of this Agreement and 184 | does not cure such failure in a reasonable period of time after becoming 185 | aware of such noncompliance. If all Recipient's rights under this Agreement 186 | terminate, Recipient agrees to cease use and distribution of the Program as 187 | soon as reasonably practicable. However, Recipient's obligations under this 188 | Agreement and any licenses granted by Recipient relating to the Program shall 189 | continue and survive. 190 | 191 | Everyone is permitted to copy and distribute copies of this Agreement, but in 192 | order to avoid inconsistency the Agreement is copyrighted and may only be 193 | modified in the following manner. The Agreement Steward reserves the right to 194 | publish new versions (including revisions) of this Agreement from time to 195 | time. No one other than the Agreement Steward has the right to modify this 196 | Agreement. The Eclipse Foundation is the initial Agreement Steward. 
The 197 | Eclipse Foundation may assign the responsibility to serve as the Agreement 198 | Steward to a suitable separate entity. Each new version of the Agreement will 199 | be given a distinguishing version number. The Program (including 200 | Contributions) may always be distributed subject to the version of the 201 | Agreement under which it was received. In addition, after a new version of 202 | the Agreement is published, Contributor may elect to distribute the Program 203 | (including its Contributions) under the new version. Except as expressly 204 | stated in Sections 2(a) and 2(b) above, Recipient receives no rights or 205 | licenses to the intellectual property of any Contributor under this 206 | Agreement, whether expressly, by implication, estoppel or otherwise. All 207 | rights in the Program not expressly granted under this Agreement are 208 | reserved. 209 | 210 | This Agreement is governed by the laws of the State of New York and the 211 | intellectual property laws of the United States of America. No party to this 212 | Agreement will bring a legal action under this Agreement more than one year 213 | after the cause of action arose. Each party waives its rights to a jury trial 214 | in any resulting litigation. 215 | -------------------------------------------------------------------------------- /src/carmine_sentinel/core.clj: -------------------------------------------------------------------------------- 1 | (ns carmine-sentinel.core 2 | (:require [taoensso.carmine :as car] 3 | [taoensso.carmine.commands :as cmds]) 4 | (:import (java.io EOFException) 5 | (taoensso.carmine Listener))) 6 | 7 | ;; {Sentinel group -> master-name -> spec} 8 | (defonce ^:private sentinel-resolved-specs (atom nil)) 9 | ;; {Sentinel group -> specs} 10 | (defonce ^:private sentinel-groups (volatile! 
nil)) 11 | ;; Sentinel event listeners 12 | (defonce ^:private sentinel-listeners (atom nil)) 13 | ;; Carmine-sentinel event listeners 14 | (defonce ^:private event-listeners (volatile! [])) 15 | ;; Locks for resolving spec 16 | (defonce ^:private locks (atom nil)) 17 | 18 | (defn- get-lock [sg mn] 19 | (if-let [lock (get @locks (str sg "/" mn))] 20 | lock 21 | (let [lock (Object.) 22 | curr @locks] 23 | (if (compare-and-set! locks curr (assoc curr (str sg "/" mn) lock)) 24 | lock 25 | (recur sg mn))))) 26 | 27 | (defmacro sync-on [sg mn & body] 28 | `(locking (get-lock ~sg ~mn) 29 | ~@body)) 30 | 31 | ;;define commands for sentinel 32 | (cmds/defcommand "SENTINEL get-master-addr-by-name" 33 | {:fn-name "sentinel-get-master-addr-by-name" 34 | :fn-params-fixed [name] 35 | :fn-params-more nil 36 | :req-args-fixed ["SENTINEL" "get-master-addr-by-name" name] 37 | :cluster-key-idx 2 38 | :fn-docstring "get master address by master name. complexity O(1)"}) 39 | 40 | (cmds/defcommand "SENTINEL slaves" 41 | {:fn-name "sentinel-slaves" 42 | :fn-params-fixed [name] 43 | :fn-params-more nil 44 | :req-args-fixed ["SENTINEL" "slaves" name] 45 | :cluster-key-idx 2 46 | :fn-docstring "get slaves address by master name. complexity O(1)"}) 47 | 48 | (cmds/defcommand "SENTINEL sentinels" 49 | {:fn-name "sentinel-sentinels" 50 | :fn-params-fixed [name] 51 | :fn-params-more nil 52 | :req-args-fixed ["SENTINEL" "sentinels" name] 53 | :cluster-key-idx 2 54 | :fn-docstring "get sentinel instances by master name. complexity O(1)"}) 55 | 56 | (defn- master-role? [spec] 57 | (= "master" 58 | (first (car/wcar {:spec spec} 59 | (car/role))))) 60 | 61 | (defn- make-sure-master-role 62 | "Make sure the spec is a master role." 63 | [spec] 64 | (when-not (master-role? spec) 65 | (throw (IllegalStateException. 66 | (format "Spec %s is not master role." 
spec))))) 67 | 68 | (defn- dissoc-in 69 | [m [k & ks :as keys]] 70 | (if ks 71 | (if-let [nextmap (get m k)] 72 | (let [newmap (dissoc-in nextmap ks)] 73 | (if (seq newmap) 74 | (assoc m k newmap) 75 | (dissoc m k))) 76 | m) 77 | (dissoc m k))) 78 | 79 | (defmacro silently [& body] 80 | `(try ~@body (catch Exception _#))) 81 | 82 | (defn notify-event-listeners [event] 83 | (doseq [listener @event-listeners] 84 | (silently (listener event)))) 85 | 86 | (defn- handle-switch-master [sg msg] 87 | (when (= "message" (first msg)) 88 | (let [[master-name old-ip old-port new-ip new-port] 89 | (clojure.string/split (-> msg nnext first) #" ")] 90 | (when master-name 91 | ;;remove last resolved spec 92 | (swap! sentinel-resolved-specs dissoc-in [sg master-name]) 93 | (notify-event-listeners {:event "+switch-master" 94 | :old {:host old-ip 95 | :port (Integer/valueOf ^String old-port)} 96 | :new {:host new-ip 97 | :port (Integer/valueOf ^String new-port)}}))))) 98 | 99 | (defrecord SentinelListener [internal-pubsub-listener stopped-mark] 100 | java.io.Closeable 101 | (close [_] 102 | (reset! stopped-mark true) 103 | (some->> @internal-pubsub-listener (car/close-listener)))) 104 | 105 | (defn- subscribe-switch-master! [sg spec] 106 | (if-let [^SentinelListener sentinel-listener (get @sentinel-listeners spec)] 107 | (deref (.internal-pubsub-listener sentinel-listener)) 108 | (do 109 | (let [stop? (atom false) 110 | listener (atom nil)] 111 | (swap! sentinel-listeners assoc spec (SentinelListener. listener stop?)) 112 | (future 113 | (while (not @stop?) 114 | (silently 115 | ;; It's unusual to use timeout in redis pub/sub but due to Carmine does not 116 | ;; support ping/pong test for a connection waiting for an event publishing 117 | ;; from redis, we do need this to maintain liveness in case redis server 118 | ;; crash unintentionally. Ref. 
https://github.com/antirez/redis/issues/420 119 | (let [spec-with-timeout (update spec :timeout-ms #(or % 10000)) 120 | f (->> (car/with-new-pubsub-listener spec-with-timeout 121 | {"+switch-master" (partial handle-switch-master sg)} 122 | (car/subscribe "+switch-master")) 123 | (reset! listener) 124 | :future)] 125 | (when (not @stop?) 126 | (deref f)))) 127 | (silently (when-let [l @listener] (.close ^Listener l))) 128 | (silently (Thread/sleep 1000))))) 129 | (recur sg spec)))) 130 | 131 | (defn- unsubscribe-switch-master! [sentinel-spec] 132 | (silently 133 | (when-let [^SentinelListener sentinel-listener (get @sentinel-listeners sentinel-spec)] 134 | (.close sentinel-listener) 135 | (swap! sentinel-listeners dissoc sentinel-spec) 136 | true))) 137 | 138 | (defn- pick-specs-from-sentinel-raw-states 139 | [raw-states] 140 | (->> raw-states 141 | (map (partial apply hash-map)) 142 | (map (fn [{:strs [ip port]}] 143 | {:host ip 144 | :port (Integer/valueOf ^String port)})))) 145 | 146 | (defn- subscribe-all-sentinels [sentinel-group master-name] 147 | (when-let [old-sentinel-specs (not-empty (get-in @sentinel-groups [sentinel-group :specs]))] 148 | (let [valid-specs 149 | (->> old-sentinel-specs 150 | (mapv 151 | (fn [spec] 152 | (try 153 | (->> (car/wcar {:spec spec} (sentinel-sentinels master-name)) 154 | (pick-specs-from-sentinel-raw-states) 155 | (map (fn [new-raw-spec] (merge spec new-raw-spec))) 156 | (#(conj % spec))) 157 | (catch Exception _ [])))) 158 | (flatten) 159 | ;; remove duplicate sentinel spec 160 | (set)) 161 | invalid-specs (remove valid-specs old-sentinel-specs) 162 | ;; still keeping the invalid specs but append them to tail then 163 | ;; convert spec list to vector to take advantage of their order later 164 | all-specs (vec (concat valid-specs invalid-specs))] 165 | 166 | (doseq [spec valid-specs] 167 | (subscribe-switch-master! sentinel-group spec)) 168 | 169 | (vswap! 
sentinel-groups assoc-in [sentinel-group :specs] all-specs) 170 | 171 | (not-empty all-specs)))) 172 | 173 | (defn- try-resolve-master-spec [server-conn specs sentinel-group master-name] 174 | (let [sentinel-spec (first specs)] 175 | (try 176 | (when-let [[master slaves] 177 | (car/wcar {:spec sentinel-spec} :as-pipeline 178 | (sentinel-get-master-addr-by-name master-name) 179 | (sentinel-slaves master-name))] 180 | (let [master-spec (merge (:spec server-conn) 181 | {:host (first master) 182 | :port (Integer/valueOf ^String (second master))}) 183 | slaves (pick-specs-from-sentinel-raw-states slaves)] 184 | (make-sure-master-role master-spec) 185 | (swap! sentinel-resolved-specs assoc-in [sentinel-group master-name] 186 | {:master master-spec 187 | :slaves slaves}) 188 | (notify-event-listeners {:event "get-master-addr-by-name" 190 | :sentinel-group sentinel-group 191 | :master-name master-name 192 | :master master 193 | :slaves slaves}) 194 | [master-spec slaves])) 195 | (catch Exception e 196 | (swap! sentinel-resolved-specs dissoc-in [sentinel-group master-name]) 197 | (notify-event-listeners 198 | {:event "error" 199 | :sentinel-group sentinel-group 200 | :master-name master-name 201 | :sentinel-spec sentinel-spec 202 | :exception e}) 203 | ;;Close the listener 204 | (unsubscribe-switch-master! sentinel-spec) 205 | nil)))) 206 | 207 | (defn- choose-spec [mn master slaves prefer-slave? slaves-balancer] 208 | (when (= :error master) 209 | (throw (IllegalStateException. 210 | (str "Specs not found by master name: " mn)))) 211 | (if (and prefer-slave? (seq slaves)) 212 | (slaves-balancer slaves) 213 | master)) 214 | 215 | (defn- ask-sentinel-master [sentinel-group master-name 216 | {:keys [prefer-slave? 
slaves-balancer] 217 | :as server-conn}] 218 | (if-let [all-specs (subscribe-all-sentinels sentinel-group master-name)] 219 | (loop [specs all-specs 220 | tried-specs []] 221 | (if (seq specs) 222 | (if-let [[master-spec slaves] 223 | (try-resolve-master-spec server-conn specs sentinel-group master-name)] 224 | (do 225 | ;;Move the sentinel instance to the first position of sentinel list 226 | ;;to speedup next time resolving. 227 | (vswap! sentinel-groups assoc-in [sentinel-group :specs] 228 | (vec (concat specs tried-specs))) 229 | (choose-spec master-name master-spec slaves prefer-slave? slaves-balancer)) 230 | ;;Try next sentinel 231 | (recur (next specs) 232 | (conj tried-specs (first specs)))) 233 | ;;Tried all sentinel instancs, we don't get any valid specs 234 | ;;Set a :error mark for this situation. 235 | (do 236 | (swap! sentinel-resolved-specs assoc-in [sentinel-group master-name] 237 | {:master :error 238 | :slaves :error}) 239 | (notify-event-listeners {:event "get-master-addr-by-name" 240 | :sentinel-group sentinel-group 241 | :master-name master-name 242 | :master :error 243 | :slaves :error}) 244 | (throw (IllegalStateException. 245 | (str "Specs not found by master name: " master-name)))))) 246 | (throw (IllegalStateException. 247 | (str "Missing specs for sentinel group: " sentinel-group))))) 248 | 249 | ;;APIs 250 | (defn remove-invalid-resolved-master-specs! 251 | "Iterate all the resolved master specs and remove any invalid 252 | master spec found by checking role on redis. 253 | Please call this periodically to keep safe." 254 | [] 255 | (doseq [[group-id resolved-specs] @sentinel-resolved-specs] 256 | (doseq [[master-name master-specs] resolved-specs] 257 | (try 258 | (when-not (master-role? (:master master-specs)) 259 | (swap! sentinel-resolved-specs dissoc-in [group-id master-name])) 260 | (catch EOFException _ 261 | (swap! sentinel-resolved-specs dissoc-in [group-id master-name])))))) 262 | 263 | (defn register-listener! 
264 | "Register listener for switching master. 265 | The listener will be called with an event: 266 | {:event \"+switch-master\" 267 | :old {:host old-master-ip 268 | :port old-master-port} 269 | :new {:host new-master-ip 270 | :port new-master-port}} 271 | " 272 | [listener] 273 | (vswap! event-listeners conj listener)) 274 | 275 | (defn unregister-listener! 276 | "Remove the listener for switching master." 277 | [listener] 278 | (vswap! event-listeners remove (partial = listener))) 279 | 280 | (defn get-sentinel-redis-spec 281 | "Get redis spec by sentinel-group and master name. 282 | If it is not resolved, it will query from sentinel and 283 | cache the result in memory. 284 | Recommend to call this function at your app startup to reduce 285 | resolving cost." 286 | [sentinel-group master-name 287 | {:keys [prefer-slave? slaves-balancer] 288 | :or {prefer-slave? false 289 | slaves-balancer first} 290 | :as server-conn}] 291 | {:pre [(not (nil? sentinel-group)) 292 | (not (empty? master-name))]} 293 | (if-let [resolved-spec (get-in @sentinel-resolved-specs [sentinel-group master-name])] 294 | (if-let [s (choose-spec master-name 295 | (:master resolved-spec) 296 | (:slaves resolved-spec) 297 | prefer-slave? 298 | slaves-balancer)] 299 | s 300 | (throw (IllegalStateException. 301 | (str "Spec not found: " sentinel-group "/" master-name ", " server-conn)))) 302 | ;;Synchronized on [sentinel-group master-name] lock 303 | (sync-on sentinel-group master-name 304 | ;;Double checking 305 | (if (nil? (get-in @sentinel-resolved-specs [sentinel-group master-name])) 306 | (ask-sentinel-master sentinel-group master-name server-conn) 307 | (get-sentinel-redis-spec sentinel-group master-name server-conn))))) 308 | 309 | (defn set-sentinel-groups! 310 | "Configure sentinel groups, it will replace current conf: 311 | {:group-name {:specs [{ :host host 312 | :port port 313 | :password password 314 | :timeout-ms timeout-ms }, 315 | ...other sentinel instances...] 
316 | :pool {}}} 317 | The conf is a map of sentinel group to connection spec." 318 | [conf] 319 | (doseq [[_ group-conf] @sentinel-groups] 320 | (doseq [spec (:specs group-conf)] 321 | (unsubscribe-switch-master! spec))) 322 | (vreset! sentinel-groups conf) 323 | (reset! sentinel-resolved-specs nil)) 324 | 325 | (defn add-sentinel-groups! 326 | "Add sentinel groups,it will be merged into current conf: 327 | {:group-name {:specs [{ :host host 328 | :port port 329 | :password password 330 | :timeout-ms timeout-ms }, 331 | ...other sentinel instances...] 332 | :pool {}}} 333 | The conf is a map of sentinel group to connection spec." 334 | [conf] 335 | (vswap! sentinel-groups merge conf)) 336 | 337 | (defn remove-sentinel-group! 338 | "Remove a sentinel group configuration by name." 339 | [group-name] 340 | (doseq [sentinel-spec (get-in @sentinel-groups [group-name :specs])] 341 | (unsubscribe-switch-master! sentinel-spec)) 342 | (vswap! sentinel-groups dissoc group-name) 343 | (swap! sentinel-resolved-specs dissoc group-name)) 344 | 345 | (defn remove-last-resolved-spec! 346 | "Remove last resolved master spec by sentinel group and master name." 347 | [sg master-name] 348 | (swap! sentinel-resolved-specs dissoc-in [sg master-name])) 349 | 350 | (defn update-conn-spec 351 | "Cast a carmine-sentinel conn to carmine raw conn spec. 352 | It will resolve master from sentinel first time,then cache the result in 353 | memory for reusing." 354 | [server-conn] 355 | (if (and (:sentinel-group server-conn) (:master-name server-conn)) 356 | (update server-conn 357 | :spec 358 | merge 359 | (get-sentinel-redis-spec (:sentinel-group server-conn) 360 | (:master-name server-conn) 361 | server-conn)) 362 | server-conn)) 363 | 364 | (defmacro wcar 365 | "It's the same as taoensso.carmine/wcar, but supports 366 | :master-name \"mymaster\" 367 | :sentinel-group :default 368 | in conn for redis sentinel cluster. 
369 | " 370 | {:arglists '([conn :as-pipeline & body] [conn & body])} 371 | [conn & sigs] 372 | `(car/wcar 373 | (update-conn-spec ~conn) 374 | ~@sigs)) 375 | 376 | (defmacro with-new-pubsub-listener 377 | "It's the same as taoensso.carmine/with-new-pubsub-listener, 378 | but supports 379 | :master-name \"mymaster\" 380 | :sentinel-group :default 381 | in conn for redis sentinel cluster. 382 | 383 | Please note that you can only pass connection spec like 384 | hostname and port to taoensso.carmine/with-new-pubsub-listener 385 | like: 386 | 387 | (taoensso.carmine/with-new-pubsub-listener 388 | {:host \"127.0.0.1\" :port 6379} 389 | {... channel and handler stuff ... } 390 | ... publish and subscribe stuff ... ) 391 | 392 | but for with-new-pubsub-listener in carmine-sentinel, you need 393 | to wrap connection spec with another layer along with master-name 394 | and sentinel-group to take advantage of sentinel cluster like: 395 | 396 | (carmine-sentinel/with-new-pubsub-listener 397 | {:spec {:host \"127.0.0.1\" :port 6379} 398 | :master-name \"mymaster\" 399 | :sentinel-group :default} 400 | {... channel and handler stuff ... } 401 | ... publish and subscribe stuff ... ) 402 | " 403 | [conn-spec & others] 404 | `(car/with-new-pubsub-listener 405 | (:spec (update-conn-spec ~conn-spec)) 406 | ~@others)) 407 | 408 | (defn sentinel-group-status 409 | "Get the status of all the registered sentinel groups and resolved redis cluster specs. 410 | 411 | For example, firstly we set sentinel groups: 412 | 413 | (set-sentinel-groups! 414 | {:group1 {:specs [{:host \"127.0.0.1\" :port 5000} 415 | {:host \"127.0.0.1\" :port 5001} 416 | {:host \"127.0.0.1\" :port 5002}]}}) 417 | 418 | Then do something to trigger the resolving of the redis cluster specs. 
419 | 420 | (let [server1-conn {:pool {} :spec {} :sentinel-group :group1 :master-name \"mymaster\"}] 421 | (wcar server1-conn 422 | (car/set \"a\" 100))) 423 | 424 | At last we execute (sentinel-group-status), then got things like: 425 | 426 | {:group1 {:redis-clusters [{:master-name \"mymaster\", 427 | :master-spec {:host \"127.0.0.1\", :port 6379}, 428 | :slave-specs ({:host \"127.0.0.1\", :port 6380})}], 429 | :sentinels [{:host \"127.0.0.1\", :port 5000, :with-active-sentinel-listener? true} 430 | {:host \"127.0.0.1\", :port 5001, :with-active-sentinel-listener? true} 431 | {:host \"127.0.0.1\", :port 5002, :with-active-sentinel-listener? true}]}}" 432 | [] 433 | (reduce (fn [cur [group-name sentinel-specs]] 434 | (assoc cur 435 | group-name 436 | {:redis-clusters (->> (get @sentinel-resolved-specs group-name) 437 | (mapv (fn [[master-name specs]] 438 | {:master-name master-name 439 | :master-spec (:master specs) 440 | :slave-specs (:slaves specs)}))) 441 | :sentinels (->> (:specs sentinel-specs) 442 | (map #(let [^SentinelListener listener (get @sentinel-listeners %)] 443 | (assoc % 444 | :with-active-sentinel-listener? 445 | (and (some? listener) 446 | (not @(.stopped-mark listener)))))))})) 447 | {} 448 | @sentinel-groups)) 449 | 450 | (comment 451 | (set-sentinel-groups! 452 | {:group1 453 | {:specs [{:host "127.0.0.1" :port 5000} {:host "127.0.0.1" :port 5001} {:host "127.0.0.1" :port 5002}]}}) 454 | (let [server1-conn {:pool {} :spec {} :sentinel-group :group1 :master-name "mymaster"}] 455 | (println 456 | (wcar server1-conn 457 | (car/set "a" 100) 458 | (car/get "a"))))) 459 | 460 | (comment 461 | 462 | (do ;; reset environment 463 | (reset! sentinel-resolved-specs nil) 464 | (vreset! sentinel-groups nil) 465 | (reset! sentinel-listeners nil) 466 | (vreset! event-listeners []) 467 | (reset! 
locks nil) 468 | 469 | (let [token "foobar" 470 | host "127.0.0.1"] 471 | 472 | (def server1-conn 473 | {:pool {} 474 | :spec {:password token} 475 | :sentinel-group :group1 476 | :master-name "mymaster"}) 477 | 478 | (set-sentinel-groups! 479 | {:group1 480 | {:specs [{:host host :port 5000 :password token} 481 | {:host host :port 5001 :password token} 482 | {:host host :port 5002 :password token}]}}))) 483 | 484 | (defmacro wcar* [& body] `(wcar server1-conn ~@body)) 485 | 486 | (wcar* (car/ping)) 487 | 488 | (wcar* (car/set "key" 1)) 489 | 490 | (wcar* (car/get "key")) 491 | 492 | ) 493 | -------------------------------------------------------------------------------- /raw-redis.conf: -------------------------------------------------------------------------------- 1 | # Redis configuration file example. 2 | # 3 | # Note that in order to read the configuration file, Redis must be 4 | # started with the file path as first argument: 5 | # 6 | # ./redis-server /path/to/redis.conf 7 | 8 | # Note on units: when memory size is needed, it is possible to specify 9 | # it in the usual form of 1k 5GB 4M and so forth: 10 | # 11 | # 1k => 1000 bytes 12 | # 1kb => 1024 bytes 13 | # 1m => 1000000 bytes 14 | # 1mb => 1024*1024 bytes 15 | # 1g => 1000000000 bytes 16 | # 1gb => 1024*1024*1024 bytes 17 | # 18 | # units are case insensitive so 1GB 1Gb 1gB are all the same. 19 | 20 | ################################## INCLUDES ################################### 21 | 22 | # Include one or more other config files here. This is useful if you 23 | # have a standard template that goes to all Redis servers but also need 24 | # to customize a few per-server settings. Include files can include 25 | # other files, so use this wisely. 26 | # 27 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE" 28 | # from admin or Redis Sentinel. 
Since Redis always uses the last processed 29 | # line as value of a configuration directive, you'd better put includes 30 | # at the beginning of this file to avoid overwriting config change at runtime. 31 | # 32 | # If instead you are interested in using includes to override configuration 33 | # options, it is better to use include as the last line. 34 | # 35 | # include /path/to/local.conf 36 | # include /path/to/other.conf 37 | 38 | ################################## MODULES ##################################### 39 | 40 | # Load modules at startup. If the server is not able to load modules 41 | # it will abort. It is possible to use multiple loadmodule directives. 42 | # 43 | # loadmodule /path/to/my_module.so 44 | # loadmodule /path/to/other_module.so 45 | 46 | ################################## NETWORK ##################################### 47 | 48 | # By default, if no "bind" configuration directive is specified, Redis listens 49 | # for connections from all the network interfaces available on the server. 50 | # It is possible to listen to just one or multiple selected interfaces using 51 | # the "bind" configuration directive, followed by one or more IP addresses. 52 | # 53 | # Examples: 54 | # 55 | # bind 192.168.1.100 10.0.0.1 56 | # bind 127.0.0.1 ::1 57 | # 58 | # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the 59 | # internet, binding to all the interfaces is dangerous and will expose the 60 | # instance to everybody on the internet. So by default we uncomment the 61 | # following bind directive, that will force Redis to listen only into 62 | # the IPv4 loopback interface address (this means Redis will be able to 63 | # accept connections only from clients running into the same computer it 64 | # is running). 65 | # 66 | # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES 67 | # JUST COMMENT THE FOLLOWING LINE. 
68 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | bind 127.0.0.1 70 | 71 | # Protected mode is a layer of security protection, in order to avoid that 72 | # Redis instances left open on the internet are accessed and exploited. 73 | # 74 | # When protected mode is on and if: 75 | # 76 | # 1) The server is not binding explicitly to a set of addresses using the 77 | # "bind" directive. 78 | # 2) No password is configured. 79 | # 80 | # The server only accepts connections from clients connecting from the 81 | # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain 82 | # sockets. 83 | # 84 | # By default protected mode is enabled. You should disable it only if 85 | # you are sure you want clients from other hosts to connect to Redis 86 | # even if no authentication is configured, nor a specific set of interfaces 87 | # are explicitly listed using the "bind" directive. 88 | protected-mode yes 89 | 90 | # Accept connections on the specified port, default is 6379 (IANA #815344). 91 | # If port 0 is specified Redis will not listen on a TCP socket. 92 | port 6379 93 | 94 | # TCP listen() backlog. 95 | # 96 | # In high requests-per-second environments you need an high backlog in order 97 | # to avoid slow clients connections issues. Note that the Linux kernel 98 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so 99 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog 100 | # in order to get the desired effect. 101 | tcp-backlog 511 102 | 103 | # Unix socket. 104 | # 105 | # Specify the path for the Unix socket that will be used to listen for 106 | # incoming connections. There is no default, so Redis will not listen 107 | # on a unix socket when not specified. 108 | # 109 | # unixsocket /tmp/redis.sock 110 | # unixsocketperm 700 111 | 112 | # Close the connection after a client is idle for N seconds (0 to disable) 113 | timeout 0 114 | 115 | # TCP keepalive. 
116 | # 117 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 118 | # of communication. This is useful for two reasons: 119 | # 120 | # 1) Detect dead peers. 121 | # 2) Take the connection alive from the point of view of network 122 | # equipment in the middle. 123 | # 124 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 125 | # Note that to close the connection the double of the time is needed. 126 | # On other kernels the period depends on the kernel configuration. 127 | # 128 | # A reasonable value for this option is 300 seconds, which is the new 129 | # Redis default starting with Redis 3.2.1. 130 | tcp-keepalive 300 131 | 132 | ################################# GENERAL ##################################### 133 | 134 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 135 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 136 | daemonize no 137 | 138 | # If you run Redis from upstart or systemd, Redis can interact with your 139 | # supervision tree. Options: 140 | # supervised no - no supervision interaction 141 | # supervised upstart - signal upstart by putting Redis into SIGSTOP mode 142 | # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET 143 | # supervised auto - detect upstart or systemd method based on 144 | # UPSTART_JOB or NOTIFY_SOCKET environment variables 145 | # Note: these supervision methods only signal "process is ready." 146 | # They do not enable continuous liveness pings back to your supervisor. 147 | supervised no 148 | 149 | # If a pid file is specified, Redis writes it where specified at startup 150 | # and removes it at exit. 151 | # 152 | # When the server runs non daemonized, no pid file is created if none is 153 | # specified in the configuration. When the server is daemonized, the pid file 154 | # is used even if not specified, defaulting to "/var/run/redis.pid". 
155 | # 156 | # Creating a pid file is best effort: if Redis is not able to create it 157 | # nothing bad happens, the server will start and run normally. 158 | pidfile /var/run/redis_6379.pid 159 | 160 | # Specify the server verbosity level. 161 | # This can be one of: 162 | # debug (a lot of information, useful for development/testing) 163 | # verbose (many rarely useful info, but not a mess like the debug level) 164 | # notice (moderately verbose, what you want in production probably) 165 | # warning (only very important / critical messages are logged) 166 | loglevel notice 167 | 168 | # Specify the log file name. Also the empty string can be used to force 169 | # Redis to log on the standard output. Note that if you use standard 170 | # output for logging but daemonize, logs will be sent to /dev/null 171 | logfile "" 172 | 173 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 174 | # and optionally update the other syslog parameters to suit your needs. 175 | # syslog-enabled no 176 | 177 | # Specify the syslog identity. 178 | # syslog-ident redis 179 | 180 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 181 | # syslog-facility local0 182 | 183 | # Set the number of databases. The default database is DB 0, you can select 184 | # a different one on a per-connection basis using SELECT where 185 | # dbid is a number between 0 and 'databases'-1 186 | databases 16 187 | 188 | # By default Redis shows an ASCII art logo only when started to log to the 189 | # standard output and if the standard output is a TTY. Basically this means 190 | # that normally a logo is displayed only in interactive sessions. 191 | # 192 | # However it is possible to force the pre-4.0 behavior and always show a 193 | # ASCII art logo in startup logs by setting the following option to yes. 
194 | always-show-logo yes 195 | 196 | ################################ SNAPSHOTTING ################################ 197 | # 198 | # Save the DB on disk: 199 | # 200 | # save 201 | # 202 | # Will save the DB if both the given number of seconds and the given 203 | # number of write operations against the DB occurred. 204 | # 205 | # In the example below the behaviour will be to save: 206 | # after 900 sec (15 min) if at least 1 key changed 207 | # after 300 sec (5 min) if at least 10 keys changed 208 | # after 60 sec if at least 10000 keys changed 209 | # 210 | # Note: you can disable saving completely by commenting out all "save" lines. 211 | # 212 | # It is also possible to remove all the previously configured save 213 | # points by adding a save directive with a single empty string argument 214 | # like in the following example: 215 | # 216 | # save "" 217 | 218 | save 900 1 219 | save 300 10 220 | save 60 10000 221 | 222 | # By default Redis will stop accepting writes if RDB snapshots are enabled 223 | # (at least one save point) and the latest background save failed. 224 | # This will make the user aware (in a hard way) that data is not persisting 225 | # on disk properly, otherwise chances are that no one will notice and some 226 | # disaster will happen. 227 | # 228 | # If the background saving process will start working again Redis will 229 | # automatically allow writes again. 230 | # 231 | # However if you have setup your proper monitoring of the Redis server 232 | # and persistence, you may want to disable this feature so that Redis will 233 | # continue to work as usual even if there are problems with disk, 234 | # permissions, and so forth. 235 | stop-writes-on-bgsave-error yes 236 | 237 | # Compress string objects using LZF when dump .rdb databases? 238 | # For default that's set to 'yes' as it's almost always a win. 
239 | # If you want to save some CPU in the saving child set it to 'no' but 240 | # the dataset will likely be bigger if you have compressible values or keys. 241 | rdbcompression yes 242 | 243 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 244 | # This makes the format more resistant to corruption but there is a performance 245 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it 246 | # for maximum performances. 247 | # 248 | # RDB files created with checksum disabled have a checksum of zero that will 249 | # tell the loading code to skip the check. 250 | rdbchecksum yes 251 | 252 | # The filename where to dump the DB 253 | dbfilename dump.rdb 254 | 255 | # The working directory. 256 | # 257 | # The DB will be written inside this directory, with the filename specified 258 | # above using the 'dbfilename' configuration directive. 259 | # 260 | # The Append Only File will also be created inside this directory. 261 | # 262 | # Note that you must specify a directory here, not a file name. 263 | dir ./ 264 | 265 | ################################# REPLICATION ################################# 266 | 267 | # Master-Replica replication. Use replicaof to make a Redis instance a copy of 268 | # another Redis server. A few things to understand ASAP about Redis replication. 269 | # 270 | # +------------------+ +---------------+ 271 | # | Master | ---> | Replica | 272 | # | (receive writes) | | (exact copy) | 273 | # +------------------+ +---------------+ 274 | # 275 | # 1) Redis replication is asynchronous, but you can configure a master to 276 | # stop accepting writes if it appears to be not connected with at least 277 | # a given number of replicas. 278 | # 2) Redis replicas are able to perform a partial resynchronization with the 279 | # master if the replication link is lost for a relatively small amount of 280 | # time. 
You may want to configure the replication backlog size (see the next 281 | # sections of this file) with a sensible value depending on your needs. 282 | # 3) Replication is automatic and does not need user intervention. After a 283 | # network partition replicas automatically try to reconnect to masters 284 | # and resynchronize with them. 285 | # 286 | # replicaof 287 | 288 | # If the master is password protected (using the "requirepass" configuration 289 | # directive below) it is possible to tell the replica to authenticate before 290 | # starting the replication synchronization process, otherwise the master will 291 | # refuse the replica request. 292 | # 293 | # masterauth 294 | # 295 | # However this is not enough if you are using Redis ACLs (for Redis version 296 | # 6 or greater), and the default user is not capable of running the PSYNC 297 | # command and/or other commands needed for replication. In this case it's 298 | # better to configure a special user to use with replication, and specify the 299 | # masteruser configuration as such: 300 | # 301 | # masteruser 302 | # 303 | # When masteruser is specified, the replica will authenticate against its 304 | # master using the new AUTH form: AUTH . 305 | 306 | # When a replica loses its connection with the master, or when the replication 307 | # is still in progress, the replica can act in two different ways: 308 | # 309 | # 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will 310 | # still reply to client requests, possibly with out of date data, or the 311 | # data set may just be empty if this is the first synchronization. 312 | # 313 | # 2) if replica-serve-stale-data is set to 'no' the replica will reply with 314 | # an error "SYNC with master in progress" to all the kind of commands 315 | # but to INFO, replicaOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, 316 | # SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, 317 | # COMMAND, POST, HOST: and LATENCY. 
318 | # 319 | replica-serve-stale-data yes 320 | 321 | # You can configure a replica instance to accept writes or not. Writing against 322 | # a replica instance may be useful to store some ephemeral data (because data 323 | # written on a replica will be easily deleted after resync with the master) but 324 | # may also cause problems if clients are writing to it because of a 325 | # misconfiguration. 326 | # 327 | # Since Redis 2.6 by default replicas are read-only. 328 | # 329 | # Note: read only replicas are not designed to be exposed to untrusted clients 330 | # on the internet. It's just a protection layer against misuse of the instance. 331 | # Still a read only replica exports by default all the administrative commands 332 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve 333 | # security of read only replicas using 'rename-command' to shadow all the 334 | # administrative / dangerous commands. 335 | replica-read-only yes 336 | 337 | # Replication SYNC strategy: disk or socket. 338 | # 339 | # ------------------------------------------------------- 340 | # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY 341 | # ------------------------------------------------------- 342 | # 343 | # New replicas and reconnecting replicas that are not able to continue the replication 344 | # process just receiving differences, need to do what is called a "full 345 | # synchronization". An RDB file is transmitted from the master to the replicas. 346 | # The transmission can happen in two different ways: 347 | # 348 | # 1) Disk-backed: The Redis master creates a new process that writes the RDB 349 | # file on disk. Later the file is transferred by the parent 350 | # process to the replicas incrementally. 351 | # 2) Diskless: The Redis master creates a new process that directly writes the 352 | # RDB file to replica sockets, without touching the disk at all. 
353 | # 354 | # With disk-backed replication, while the RDB file is generated, more replicas 355 | # can be queued and served with the RDB file as soon as the current child producing 356 | # the RDB file finishes its work. With diskless replication instead once 357 | # the transfer starts, new replicas arriving will be queued and a new transfer 358 | # will start when the current one terminates. 359 | # 360 | # When diskless replication is used, the master waits a configurable amount of 361 | # time (in seconds) before starting the transfer in the hope that multiple replicas 362 | # will arrive and the transfer can be parallelized. 363 | # 364 | # With slow disks and fast (large bandwidth) networks, diskless replication 365 | # works better. 366 | repl-diskless-sync no 367 | 368 | # When diskless replication is enabled, it is possible to configure the delay 369 | # the server waits in order to spawn the child that transfers the RDB via socket 370 | # to the replicas. 371 | # 372 | # This is important since once the transfer starts, it is not possible to serve 373 | # new replicas arriving, that will be queued for the next RDB transfer, so the server 374 | # waits a delay in order to let more replicas arrive. 375 | # 376 | # The delay is specified in seconds, and by default is 5 seconds. To disable 377 | # it entirely just set it to 0 seconds and the transfer will start ASAP. 378 | repl-diskless-sync-delay 5 379 | 380 | # Replicas send PINGs to server in a predefined interval. It's possible to change 381 | # this interval with the repl_ping_replica_period option. The default value is 10 382 | # seconds. 383 | # 384 | # repl-ping-replica-period 10 385 | 386 | # The following option sets the replication timeout for: 387 | # 388 | # 1) Bulk transfer I/O during SYNC, from the point of view of replica. 389 | # 2) Master timeout from the point of view of replicas (data, pings). 390 | # 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). 
391 | # 392 | # It is important to make sure that this value is greater than the value 393 | # specified for repl-ping-replica-period otherwise a timeout will be detected 394 | # every time there is low traffic between the master and the replica. 395 | # 396 | # repl-timeout 60 397 | 398 | # Disable TCP_NODELAY on the replica socket after SYNC? 399 | # 400 | # If you select "yes" Redis will use a smaller number of TCP packets and 401 | # less bandwidth to send data to replicas. But this can add a delay for 402 | # the data to appear on the replica side, up to 40 milliseconds with 403 | # Linux kernels using a default configuration. 404 | # 405 | # If you select "no" the delay for data to appear on the replica side will 406 | # be reduced but more bandwidth will be used for replication. 407 | # 408 | # By default we optimize for low latency, but in very high traffic conditions 409 | # or when the master and replicas are many hops away, turning this to "yes" may 410 | # be a good idea. 411 | repl-disable-tcp-nodelay no 412 | 413 | # Set the replication backlog size. The backlog is a buffer that accumulates 414 | # replica data when replicas are disconnected for some time, so that when a replica 415 | # wants to reconnect again, often a full resync is not needed, but a partial 416 | # resync is enough, just passing the portion of data the replica missed while 417 | # disconnected. 418 | # 419 | # The bigger the replication backlog, the longer the time the replica can be 420 | # disconnected and later be able to perform a partial resynchronization. 421 | # 422 | # The backlog is only allocated once there is at least a replica connected. 423 | # 424 | # repl-backlog-size 1mb 425 | 426 | # After a master has no longer connected replicas for some time, the backlog 427 | # will be freed. The following option configures the amount of seconds that 428 | # need to elapse, starting from the time the last replica disconnected, for 429 | # the backlog buffer to be freed. 
430 | # 431 | # Note that replicas never free the backlog for timeout, since they may be 432 | # promoted to masters later, and should be able to correctly "partially 433 | # resynchronize" with the replicas: hence they should always accumulate backlog. 434 | # 435 | # A value of 0 means to never release the backlog. 436 | # 437 | # repl-backlog-ttl 3600 438 | 439 | # The replica priority is an integer number published by Redis in the INFO output. 440 | # It is used by Redis Sentinel in order to select a replica to promote into a 441 | # master if the master is no longer working correctly. 442 | # 443 | # A replica with a low priority number is considered better for promotion, so 444 | # for instance if there are three replicas with priority 10, 100, 25 Sentinel will 445 | # pick the one with priority 10, that is the lowest. 446 | # 447 | # However a special priority of 0 marks the replica as not able to perform the 448 | # role of master, so a replica with priority of 0 will never be selected by 449 | # Redis Sentinel for promotion. 450 | # 451 | # By default the priority is 100. 452 | replica-priority 100 453 | 454 | # It is possible for a master to stop accepting writes if there are less than 455 | # N replicas connected, having a lag less or equal than M seconds. 456 | # 457 | # The N replicas need to be in "online" state. 458 | # 459 | # The lag in seconds, that must be <= the specified value, is calculated from 460 | # the last ping received from the replica, that is usually sent every second. 461 | # 462 | # This option does not GUARANTEE that N replicas will accept the write, but 463 | # will limit the window of exposure for lost writes in case not enough replicas 464 | # are available, to the specified number of seconds. 465 | # 466 | # For example to require at least 3 replicas with a lag <= 10 seconds use: 467 | # 468 | # min-replicas-to-write 3 469 | # min-replicas-max-lag 10 470 | # 471 | # Setting one or the other to 0 disables the feature. 
472 | # 473 | # By default min-replicas-to-write is set to 0 (feature disabled) and 474 | # min-replicas-max-lag is set to 10. 475 | 476 | # A Redis master is able to list the address and port of the attached 477 | # replicas in different ways. For example the "INFO replication" section 478 | # offers this information, which is used, among other tools, by 479 | # Redis Sentinel in order to discover replica instances. 480 | # Another place where this info is available is in the output of the 481 | # "ROLE" command of a master. 482 | # 483 | # The listed IP and address normally reported by a replica is obtained 484 | # in the following way: 485 | # 486 | # IP: The address is auto detected by checking the peer address 487 | # of the socket used by the replica to connect with the master. 488 | # 489 | # Port: The port is communicated by the replica during the replication 490 | # handshake, and is normally the port that the replica is using to 491 | # listen for connections. 492 | # 493 | # However when port forwarding or Network Address Translation (NAT) is 494 | # used, the replica may be actually reachable via different IP and port 495 | # pairs. The following two options can be used by a replica in order to 496 | # report to its master a specific set of IP and port, so that both INFO 497 | # and ROLE will report those values. 498 | # 499 | # There is no need to use both the options if you need to override just 500 | # the port or the IP address. 501 | # 502 | # replica-announce-ip 5.5.5.5 503 | # replica-announce-port 1234 504 | 505 | ################################## SECURITY ################################### 506 | 507 | # Warning: since Redis is pretty fast an outside user can try up to 508 | # 1 million passwords per second against a modern box. This means that you 509 | # should use very strong passwords, otherwise they will be very easy to break. 
510 | # Note that because the password is really a shared secret between the client 511 | # and the server, and should not be memorized by any human, the password 512 | # can be easily a long string from /dev/urandom or whatever, so by using a 513 | # long and unguessable password no brute force attack will be possible. 514 | 515 | # Redis ACL users are defined in the following format: 516 | # 517 | # user ... acl rules ... 518 | # 519 | # For example: 520 | # 521 | # user worker +@list +@connection ~jobs:* on >ffa9203c493aa99 522 | # 523 | # The special username "default" is used for new connections. If this user 524 | # has the "nopass" rule, then new connections will be immediately authenticated 525 | # as the "default" user without the need of any password provided via the 526 | # AUTH command. Otherwise if the "default" user is not flagged with "nopass" 527 | # the connections will start in not authenticated state, and will require 528 | # AUTH (or the HELLO command AUTH option) in order to be authenticated and 529 | # start to work. 530 | # 531 | # The ACL rules that describe what an user can do are the following: 532 | # 533 | # on Enable the user: it is possible to authenticate as this user. 534 | # off Disable the user: it's no longer possible to authenticate 535 | # with this user, however the already authenticated connections 536 | # will still work. 537 | # + Allow the execution of that command 538 | # - Disallow the execution of that command 539 | # +@ Allow the execution of all the commands in such category 540 | # with valid categories are like @admin, @set, @sortedset, ... 541 | # and so forth, see the full list in the server.c file where 542 | # the Redis command table is described and defined. 543 | # The special category @all means all the commands, but currently 544 | # present in the server, and that will be loaded in the future 545 | # via modules. 546 | # +|subcommand Allow a specific subcommand of an otherwise 547 | # disabled command. 
Note that this form is not 548 | # allowed as negative like -DEBUG|SEGFAULT, but 549 | # only additive starting with "+". 550 | # allcommands Alias for +@all. Note that it implies the ability to execute 551 | # all the future commands loaded via the modules system. 552 | # nocommands Alias for -@all. 553 | # ~ Add a pattern of keys that can be mentioned as part of 554 | # commands. For instance ~* allows all the keys. The pattern 555 | # is a glob-style pattern like the one of KEYS. 556 | # It is possible to specify multiple patterns. 557 | # allkeys Alias for ~* 558 | # resetkeys Flush the list of allowed keys patterns. 559 | # > Add this passowrd to the list of valid password for the user. 560 | # For example >mypass will add "mypass" to the list. 561 | # This directive clears the "nopass" flag (see later). 562 | # < Remove this password from the list of valid passwords. 563 | # nopass All the set passwords of the user are removed, and the user 564 | # is flagged as requiring no password: it means that every 565 | # password will work against this user. If this directive is 566 | # used for the default user, every new connection will be 567 | # immediately authenticated with the default user without 568 | # any explicit AUTH command required. Note that the "resetpass" 569 | # directive will clear this condition. 570 | # resetpass Flush the list of allowed passwords. Moreover removes the 571 | # "nopass" status. After "resetpass" the user has no associated 572 | # passwords and there is no way to authenticate without adding 573 | # some password (or setting it as "nopass" later). 574 | # reset Performs the following actions: resetpass, resetkeys, off, 575 | # -@all. The user returns to the same state it has immediately 576 | # after its creation. 577 | # 578 | # ACL rules can be specified in any order: for instance you can start with 579 | # passwords, then flags, or key patterns. 
However note that the additive 580 | # and subtractive rules will CHANGE MEANING depending on the ordering. 581 | # For instance see the following example: 582 | # 583 | # user alice on +@all -DEBUG ~* >somepassword 584 | # 585 | # This will allow "alice" to use all the commands with the exception of the 586 | # DEBUG command, since +@all added all the commands to the set of the commands 587 | # alice can use, and later DEBUG was removed. However if we invert the order 588 | # of two ACL rules the result will be different: 589 | # 590 | # user alice on -DEBUG +@all ~* >somepassword 591 | # 592 | # Now DEBUG was removed when alice had yet no commands in the set of allowed 593 | # commands, later all the commands are added, so the user will be able to 594 | # execute everything. 595 | # 596 | # Basically ACL rules are processed left-to-right. 597 | # 598 | # For more information about ACL configuration please refer to 599 | # the Redis web site at https://redis.io/topics/acl 600 | 601 | # Using an external ACL file 602 | # 603 | # Instead of configuring users here in this file, it is possible to use 604 | # a stand-alone file just listing users. The two methods cannot be mixed: 605 | # if you configure users here and at the same time you activate the exteranl 606 | # ACL file, the server will refuse to start. 607 | # 608 | # The format of the external ACL user file is exactly the same as the 609 | # format that is used inside redis.conf to describe users. 610 | # 611 | # aclfile /etc/redis/users.acl 612 | 613 | # IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatiblity 614 | # layer on top of the new ACL system. The option effect will be just setting 615 | # the password for the default user. Clients will still authenticate using 616 | # AUTH as usually, or more explicitly with AUTH default 617 | # if they follow the new protocol: both will work. 618 | # 619 | # requirepass foobared 620 | 621 | # Command renaming (DEPRECATED). 
622 | # 623 | # ------------------------------------------------------------------------ 624 | # WARNING: avoid using this option if possible. Instead use ACLs to remove 625 | # commands from the default user, and put them only in some admin user you 626 | # create for administrative purposes. 627 | # ------------------------------------------------------------------------ 628 | # 629 | # It is possible to change the name of dangerous commands in a shared 630 | # environment. For instance the CONFIG command may be renamed into something 631 | # hard to guess so that it will still be available for internal-use tools 632 | # but not available for general clients. 633 | # 634 | # Example: 635 | # 636 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 637 | # 638 | # It is also possible to completely kill a command by renaming it into 639 | # an empty string: 640 | # 641 | # rename-command CONFIG "" 642 | # 643 | # Please note that changing the name of commands that are logged into the 644 | # AOF file or transmitted to replicas may cause problems. 645 | 646 | ################################### CLIENTS #################################### 647 | 648 | # Set the max number of connected clients at the same time. By default 649 | # this limit is set to 10000 clients, however if the Redis server is not 650 | # able to configure the process file limit to allow for the specified limit 651 | # the max number of allowed clients is set to the current file limit 652 | # minus 32 (as Redis reserves a few file descriptors for internal uses). 653 | # 654 | # Once the limit is reached Redis will close all the new connections sending 655 | # an error 'max number of clients reached'. 656 | # 657 | # maxclients 10000 658 | 659 | ############################## MEMORY MANAGEMENT ################################ 660 | 661 | # Set a memory usage limit to the specified amount of bytes. 
662 | # When the memory limit is reached Redis will try to remove keys 663 | # according to the eviction policy selected (see maxmemory-policy). 664 | # 665 | # If Redis can't remove keys according to the policy, or if the policy is 666 | # set to 'noeviction', Redis will start to reply with errors to commands 667 | # that would use more memory, like SET, LPUSH, and so on, and will continue 668 | # to reply to read-only commands like GET. 669 | # 670 | # This option is usually useful when using Redis as an LRU or LFU cache, or to 671 | # set a hard memory limit for an instance (using the 'noeviction' policy). 672 | # 673 | # WARNING: If you have replicas attached to an instance with maxmemory on, 674 | # the size of the output buffers needed to feed the replicas are subtracted 675 | # from the used memory count, so that network problems / resyncs will 676 | # not trigger a loop where keys are evicted, and in turn the output 677 | # buffer of replicas is full with DELs of keys evicted triggering the deletion 678 | # of more keys, and so forth until the database is completely emptied. 679 | # 680 | # In short... if you have replicas attached it is suggested that you set a lower 681 | # limit for maxmemory so that there is some free RAM on the system for replica 682 | # output buffers (but this is not needed if the policy is 'noeviction'). 683 | # 684 | # maxmemory 685 | 686 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 687 | # is reached. You can select among five behaviors: 688 | # 689 | # volatile-lru -> Evict using approximated LRU among the keys with an expire set. 690 | # allkeys-lru -> Evict any key using approximated LRU. 691 | # volatile-lfu -> Evict using approximated LFU among the keys with an expire set. 692 | # allkeys-lfu -> Evict any key using approximated LFU. 693 | # volatile-random -> Remove a random key among the ones with an expire set. 694 | # allkeys-random -> Remove a random key, any key. 
695 | # volatile-ttl -> Remove the key with the nearest expire time (minor TTL) 696 | # noeviction -> Don't evict anything, just return an error on write operations. 697 | # 698 | # LRU means Least Recently Used 699 | # LFU means Least Frequently Used 700 | # 701 | # Both LRU, LFU and volatile-ttl are implemented using approximated 702 | # randomized algorithms. 703 | # 704 | # Note: with any of the above policies, Redis will return an error on write 705 | # operations, when there are no suitable keys for eviction. 706 | # 707 | # At the date of writing these commands are: set setnx setex append 708 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd 709 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby 710 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby 711 | # getset mset msetnx exec sort 712 | # 713 | # The default is: 714 | # 715 | # maxmemory-policy noeviction 716 | 717 | # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated 718 | # algorithms (in order to save memory), so you can tune it for speed or 719 | # accuracy. By default Redis will check five keys and pick the one that was 720 | # used less recently, you can change the sample size using the following 721 | # configuration directive. 722 | # 723 | # The default of 5 produces good enough results. 10 Approximates very closely 724 | # true LRU but costs more CPU. 3 is faster but not very accurate. 725 | # 726 | # maxmemory-samples 5 727 | 728 | # Starting from Redis 5, by default a replica will ignore its maxmemory setting 729 | # (unless it is promoted to master after a failover or manually). It means 730 | # that the eviction of keys will be just handled by the master, sending the 731 | # DEL commands to the replica as keys evict in the master side. 
732 | # 733 | # This behavior ensures that masters and replicas stay consistent, and is usually 734 | # what you want, however if your replica is writable, or you want the replica to have 735 | # a different memory setting, and you are sure all the writes performed to the 736 | # replica are idempotent, then you may change this default (but be sure to understand 737 | # what you are doing). 738 | # 739 | # Note that since the replica by default does not evict, it may end using more 740 | # memory than the one set via maxmemory (there are certain buffers that may 741 | # be larger on the replica, or data structures may sometimes take more memory and so 742 | # forth). So make sure you monitor your replicas and make sure they have enough 743 | # memory to never hit a real out-of-memory condition before the master hits 744 | # the configured maxmemory setting. 745 | # 746 | # replica-ignore-maxmemory yes 747 | 748 | ############################# LAZY FREEING #################################### 749 | 750 | # Redis has two primitives to delete keys. One is called DEL and is a blocking 751 | # deletion of the object. It means that the server stops processing new commands 752 | # in order to reclaim all the memory associated with an object in a synchronous 753 | # way. If the key deleted is associated with a small object, the time needed 754 | # in order to execute the DEL command is very small and comparable to most other 755 | # O(1) or O(log_N) commands in Redis. However if the key is associated with an 756 | # aggregated value containing millions of elements, the server can block for 757 | # a long time (even seconds) in order to complete the operation. 758 | # 759 | # For the above reasons Redis also offers non blocking deletion primitives 760 | # such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and 761 | # FLUSHDB commands, in order to reclaim memory in background. Those commands 762 | # are executed in constant time. 
Another thread will incrementally free the 763 | # object in the background as fast as possible. 764 | # 765 | # DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. 766 | # It's up to the design of the application to understand when it is a good 767 | # idea to use one or the other. However the Redis server sometimes has to 768 | # delete keys or flush the whole database as a side effect of other operations. 769 | # Specifically Redis deletes objects independently of a user call in the 770 | # following scenarios: 771 | # 772 | # 1) On eviction, because of the maxmemory and maxmemory policy configurations, 773 | # in order to make room for new data, without going over the specified 774 | # memory limit. 775 | # 2) Because of expire: when a key with an associated time to live (see the 776 | # EXPIRE command) must be deleted from memory. 777 | # 3) Because of a side effect of a command that stores data on a key that may 778 | # already exist. For example the RENAME command may delete the old key 779 | # content when it is replaced with another one. Similarly SUNIONSTORE 780 | # or SORT with STORE option may delete existing keys. The SET command 781 | # itself removes any old content of the specified key in order to replace 782 | # it with the specified string. 783 | # 4) During replication, when a replica performs a full resynchronization with 784 | # its master, the content of the whole database is removed in order to 785 | # load the RDB file just transferred. 786 | # 787 | # In all the above cases the default is to delete objects in a blocking way, 788 | # like if DEL was called. 
However you can configure each case specifically 789 | # in order to instead release memory in a non-blocking way like if UNLINK 790 | # was called, using the following configuration directives: 791 | 792 | lazyfree-lazy-eviction no 793 | lazyfree-lazy-expire no 794 | lazyfree-lazy-server-del no 795 | replica-lazy-flush no 796 | 797 | ############################## APPEND ONLY MODE ############################### 798 | 799 | # By default Redis asynchronously dumps the dataset on disk. This mode is 800 | # good enough in many applications, but an issue with the Redis process or 801 | # a power outage may result into a few minutes of writes lost (depending on 802 | # the configured save points). 803 | # 804 | # The Append Only File is an alternative persistence mode that provides 805 | # much better durability. For instance using the default data fsync policy 806 | # (see later in the config file) Redis can lose just one second of writes in a 807 | # dramatic event like a server power outage, or a single write if something 808 | # wrong with the Redis process itself happens, but the operating system is 809 | # still running correctly. 810 | # 811 | # AOF and RDB persistence can be enabled at the same time without problems. 812 | # If the AOF is enabled on startup Redis will load the AOF, that is the file 813 | # with the better durability guarantees. 814 | # 815 | # Please check http://redis.io/topics/persistence for more information. 816 | 817 | appendonly no 818 | 819 | # The name of the append only file (default: "appendonly.aof") 820 | 821 | appendfilename "appendonly.aof" 822 | 823 | # The fsync() call tells the Operating System to actually write data on disk 824 | # instead of waiting for more data in the output buffer. Some OS will really flush 825 | # data on disk, some other OS will just try to do it ASAP. 826 | # 827 | # Redis supports three different modes: 828 | # 829 | # no: don't fsync, just let the OS flush the data when it wants. Faster. 
830 | # always: fsync after every write to the append only log. Slow, Safest. 831 | # everysec: fsync only one time every second. Compromise. 832 | # 833 | # The default is "everysec", as that's usually the right compromise between 834 | # speed and data safety. It's up to you to understand if you can relax this to 835 | # "no" that will let the operating system flush the output buffer when 836 | # it wants, for better performances (but if you can live with the idea of 837 | # some data loss consider the default persistence mode that's snapshotting), 838 | # or on the contrary, use "always" that's very slow but a bit safer than 839 | # everysec. 840 | # 841 | # More details please check the following article: 842 | # http://antirez.com/post/redis-persistence-demystified.html 843 | # 844 | # If unsure, use "everysec". 845 | 846 | # appendfsync always 847 | appendfsync everysec 848 | # appendfsync no 849 | 850 | # When the AOF fsync policy is set to always or everysec, and a background 851 | # saving process (a background save or AOF log background rewriting) is 852 | # performing a lot of I/O against the disk, in some Linux configurations 853 | # Redis may block too long on the fsync() call. Note that there is no fix for 854 | # this currently, as even performing fsync in a different thread will block 855 | # our synchronous write(2) call. 856 | # 857 | # In order to mitigate this problem it's possible to use the following option 858 | # that will prevent fsync() from being called in the main process while a 859 | # BGSAVE or BGREWRITEAOF is in progress. 860 | # 861 | # This means that while another child is saving, the durability of Redis is 862 | # the same as "appendfsync none". In practical terms, this means that it is 863 | # possible to lose up to 30 seconds of log in the worst scenario (with the 864 | # default Linux settings). 865 | # 866 | # If you have latency problems turn this to "yes". 
Otherwise leave it as 867 | # "no" that is the safest pick from the point of view of durability. 868 | 869 | no-appendfsync-on-rewrite no 870 | 871 | # Automatic rewrite of the append only file. 872 | # Redis is able to automatically rewrite the log file implicitly calling 873 | # BGREWRITEAOF when the AOF log size grows by the specified percentage. 874 | # 875 | # This is how it works: Redis remembers the size of the AOF file after the 876 | # latest rewrite (if no rewrite has happened since the restart, the size of 877 | # the AOF at startup is used). 878 | # 879 | # This base size is compared to the current size. If the current size is 880 | # bigger than the specified percentage, the rewrite is triggered. Also 881 | # you need to specify a minimal size for the AOF file to be rewritten, this 882 | # is useful to avoid rewriting the AOF file even if the percentage increase 883 | # is reached but it is still pretty small. 884 | # 885 | # Specify a percentage of zero in order to disable the automatic AOF 886 | # rewrite feature. 887 | 888 | auto-aof-rewrite-percentage 100 889 | auto-aof-rewrite-min-size 64mb 890 | 891 | # An AOF file may be found to be truncated at the end during the Redis 892 | # startup process, when the AOF data gets loaded back into memory. 893 | # This may happen when the system where Redis is running 894 | # crashes, especially when an ext4 filesystem is mounted without the 895 | # data=ordered option (however this can't happen when Redis itself 896 | # crashes or aborts but the operating system still works correctly). 897 | # 898 | # Redis can either exit with an error when this happens, or load as much 899 | # data as possible (the default now) and start if the AOF file is found 900 | # to be truncated at the end. The following option controls this behavior. 901 | # 902 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and 903 | # the Redis server starts emitting a log to inform the user of the event. 
904 | # Otherwise if the option is set to no, the server aborts with an error 905 | # and refuses to start. When the option is set to no, the user requires 906 | # to fix the AOF file using the "redis-check-aof" utility before to restart 907 | # the server. 908 | # 909 | # Note that if the AOF file will be found to be corrupted in the middle 910 | # the server will still exit with an error. This option only applies when 911 | # Redis will try to read more data from the AOF file but not enough bytes 912 | # will be found. 913 | aof-load-truncated yes 914 | 915 | # When rewriting the AOF file, Redis is able to use an RDB preamble in the 916 | # AOF file for faster rewrites and recoveries. When this option is turned 917 | # on the rewritten AOF file is composed of two different stanzas: 918 | # 919 | # [RDB file][AOF tail] 920 | # 921 | # When loading Redis recognizes that the AOF file starts with the "REDIS" 922 | # string and loads the prefixed RDB file, and continues loading the AOF 923 | # tail. 924 | aof-use-rdb-preamble yes 925 | 926 | ################################ LUA SCRIPTING ############################### 927 | 928 | # Max execution time of a Lua script in milliseconds. 929 | # 930 | # If the maximum execution time is reached Redis will log that a script is 931 | # still in execution after the maximum allowed time and will start to 932 | # reply to queries with an error. 933 | # 934 | # When a long running script exceeds the maximum execution time only the 935 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be 936 | # used to stop a script that did not yet call write commands. The second 937 | # is the only way to shut down the server in the case a write command was 938 | # already issued by the script but the user doesn't want to wait for the natural 939 | # termination of the script. 940 | # 941 | # Set it to 0 or a negative value for unlimited execution without warnings. 
942 | lua-time-limit 5000 943 | 944 | ################################ REDIS CLUSTER ############################### 945 | # 946 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 947 | # WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however 948 | # in order to mark it as "mature" we need to wait for a non trivial percentage 949 | # of users to deploy it in production. 950 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 951 | # 952 | # Normal Redis instances can't be part of a Redis Cluster; only nodes that are 953 | # started as cluster nodes can. In order to start a Redis instance as a 954 | # cluster node enable the cluster support uncommenting the following: 955 | # 956 | # cluster-enabled yes 957 | 958 | # Every cluster node has a cluster configuration file. This file is not 959 | # intended to be edited by hand. It is created and updated by Redis nodes. 960 | # Every Redis Cluster node requires a different cluster configuration file. 961 | # Make sure that instances running in the same system do not have 962 | # overlapping cluster configuration file names. 963 | # 964 | # cluster-config-file nodes-6379.conf 965 | 966 | # Cluster node timeout is the amount of milliseconds a node must be unreachable 967 | # for it to be considered in failure state. 968 | # Most other internal time limits are multiple of the node timeout. 969 | # 970 | # cluster-node-timeout 15000 971 | 972 | # A replica of a failing master will avoid to start a failover if its data 973 | # looks too old. 974 | # 975 | # There is no simple way for a replica to actually have an exact measure of 976 | # its "data age", so the following two checks are performed: 977 | # 978 | # 1) If there are multiple replicas able to failover, they exchange messages 979 | # in order to try to give an advantage to the replica with the best 980 | # replication offset (more data from the master processed). 
981 | # Replicas will try to get their rank by offset, and apply to the start 982 | # of the failover a delay proportional to their rank. 983 | # 984 | # 2) Every single replica computes the time of the last interaction with 985 | # its master. This can be the last ping or command received (if the master 986 | # is still in the "connected" state), or the time that elapsed since the 987 | # disconnection with the master (if the replication link is currently down). 988 | # If the last interaction is too old, the replica will not try to failover 989 | # at all. 990 | # 991 | # The point "2" can be tuned by user. Specifically a replica will not perform 992 | # the failover if, since the last interaction with the master, the time 993 | # elapsed is greater than: 994 | # 995 | # (node-timeout * replica-validity-factor) + repl-ping-replica-period 996 | # 997 | # So for example if node-timeout is 30 seconds, and the replica-validity-factor 998 | # is 10, and assuming a default repl-ping-replica-period of 10 seconds, the 999 | # replica will not try to failover if it was not able to talk with the master 1000 | # for longer than 310 seconds. 1001 | # 1002 | # A large replica-validity-factor may allow replicas with too old data to failover 1003 | # a master, while a too small value may prevent the cluster from being able to 1004 | # elect a replica at all. 1005 | # 1006 | # For maximum availability, it is possible to set the replica-validity-factor 1007 | # to a value of 0, which means, that replicas will always try to failover the 1008 | # master regardless of the last time they interacted with the master. 1009 | # (However they'll always try to apply a delay proportional to their 1010 | # offset rank). 1011 | # 1012 | # Zero is the only value able to guarantee that when all the partitions heal 1013 | # the cluster will always be able to continue. 
1014 | # 1015 | # cluster-replica-validity-factor 10 1016 | 1017 | # Cluster replicas are able to migrate to orphaned masters, that are masters 1018 | # that are left without working replicas. This improves the cluster ability 1019 | # to resist to failures as otherwise an orphaned master can't be failed over 1020 | # in case of failure if it has no working replicas. 1021 | # 1022 | # Replicas migrate to orphaned masters only if there are still at least a 1023 | # given number of other working replicas for their old master. This number 1024 | # is the "migration barrier". A migration barrier of 1 means that a replica 1025 | # will migrate only if there is at least 1 other working replica for its master 1026 | # and so forth. It usually reflects the number of replicas you want for every 1027 | # master in your cluster. 1028 | # 1029 | # Default is 1 (replicas migrate only if their masters remain with at least 1030 | # one replica). To disable migration just set it to a very large value. 1031 | # A value of 0 can be set but is useful only for debugging and dangerous 1032 | # in production. 1033 | # 1034 | # cluster-migration-barrier 1 1035 | 1036 | # By default Redis Cluster nodes stop accepting queries if they detect there 1037 | # is at least an hash slot uncovered (no available node is serving it). 1038 | # This way if the cluster is partially down (for example a range of hash slots 1039 | # are no longer covered) all the cluster becomes, eventually, unavailable. 1040 | # It automatically returns available as soon as all the slots are covered again. 1041 | # 1042 | # However sometimes you want the subset of the cluster which is working, 1043 | # to continue to accept queries for the part of the key space that is still 1044 | # covered. In order to do so, just set the cluster-require-full-coverage 1045 | # option to no. 
1046 | # 1047 | # cluster-require-full-coverage yes 1048 | 1049 | # This option, when set to yes, prevents replicas from trying to failover its 1050 | # master during master failures. However the master can still perform a 1051 | # manual failover, if forced to do so. 1052 | # 1053 | # This is useful in different scenarios, especially in the case of multiple 1054 | # data center operations, where we want one side to never be promoted if not 1055 | # in the case of a total DC failure. 1056 | # 1057 | # cluster-replica-no-failover no 1058 | 1059 | # In order to setup your cluster make sure to read the documentation 1060 | # available at http://redis.io web site. 1061 | 1062 | ########################## CLUSTER DOCKER/NAT support ######################## 1063 | 1064 | # In certain deployments, Redis Cluster nodes address discovery fails, because 1065 | # addresses are NAT-ted or because ports are forwarded (the typical case is 1066 | # Docker and other containers). 1067 | # 1068 | # In order to make Redis Cluster working in such environments, a static 1069 | # configuration where each node knows its public address is needed. The 1070 | # following two options are used for this scope, and are: 1071 | # 1072 | # * cluster-announce-ip 1073 | # * cluster-announce-port 1074 | # * cluster-announce-bus-port 1075 | # 1076 | # Each instruct the node about its address, client port, and cluster message 1077 | # bus port. The information is then published in the header of the bus packets 1078 | # so that other nodes will be able to correctly map the address of the node 1079 | # publishing the information. 1080 | # 1081 | # If the above options are not used, the normal Redis Cluster auto-detection 1082 | # will be used instead. 1083 | # 1084 | # Note that when remapped, the bus port may not be at the fixed offset of 1085 | # clients port + 10000, so you can specify any port and bus-port depending 1086 | # on how they get remapped. 
If the bus-port is not set, a fixed offset of 1087 | # 10000 will be used as usually. 1088 | # 1089 | # Example: 1090 | # 1091 | # cluster-announce-ip 10.1.1.5 1092 | # cluster-announce-port 6379 1093 | # cluster-announce-bus-port 6380 1094 | 1095 | ################################## SLOW LOG ################################### 1096 | 1097 | # The Redis Slow Log is a system to log queries that exceeded a specified 1098 | # execution time. The execution time does not include the I/O operations 1099 | # like talking with the client, sending the reply and so forth, 1100 | # but just the time needed to actually execute the command (this is the only 1101 | # stage of command execution where the thread is blocked and can not serve 1102 | # other requests in the meantime). 1103 | # 1104 | # You can configure the slow log with two parameters: one tells Redis 1105 | # what is the execution time, in microseconds, to exceed in order for the 1106 | # command to get logged, and the other parameter is the length of the 1107 | # slow log. When a new command is logged the oldest one is removed from the 1108 | # queue of logged commands. 1109 | 1110 | # The following time is expressed in microseconds, so 1000000 is equivalent 1111 | # to one second. Note that a negative number disables the slow log, while 1112 | # a value of zero forces the logging of every command. 1113 | slowlog-log-slower-than 10000 1114 | 1115 | # There is no limit to this length. Just be aware that it will consume memory. 1116 | # You can reclaim memory used by the slow log with SLOWLOG RESET. 1117 | slowlog-max-len 128 1118 | 1119 | ################################ LATENCY MONITOR ############################## 1120 | 1121 | # The Redis latency monitoring subsystem samples different operations 1122 | # at runtime in order to collect data related to possible sources of 1123 | # latency of a Redis instance. 
1124 | # 1125 | # Via the LATENCY command this information is available to the user that can 1126 | # print graphs and obtain reports. 1127 | # 1128 | # The system only logs operations that were performed in a time equal or 1129 | # greater than the amount of milliseconds specified via the 1130 | # latency-monitor-threshold configuration directive. When its value is set 1131 | # to zero, the latency monitor is turned off. 1132 | # 1133 | # By default latency monitoring is disabled since it is mostly not needed 1134 | # if you don't have latency issues, and collecting data has a performance 1135 | # impact, that while very small, can be measured under big load. Latency 1136 | # monitoring can easily be enabled at runtime using the command 1137 | # "CONFIG SET latency-monitor-threshold " if needed. 1138 | latency-monitor-threshold 0 1139 | 1140 | ############################# EVENT NOTIFICATION ############################## 1141 | 1142 | # Redis can notify Pub/Sub clients about events happening in the key space. 1143 | # This feature is documented at http://redis.io/topics/notifications 1144 | # 1145 | # For instance if keyspace events notification is enabled, and a client 1146 | # performs a DEL operation on key "foo" stored in the Database 0, two 1147 | # messages will be published via Pub/Sub: 1148 | # 1149 | # PUBLISH __keyspace@0__:foo del 1150 | # PUBLISH __keyevent@0__:del foo 1151 | # 1152 | # It is possible to select the events that Redis will notify among a set 1153 | # of classes. Every class is identified by a single character: 1154 | # 1155 | # K Keyspace events, published with __keyspace@__ prefix. 1156 | # E Keyevent events, published with __keyevent@__ prefix. 1157 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... 
1158 | # $ String commands 1159 | # l List commands 1160 | # s Set commands 1161 | # h Hash commands 1162 | # z Sorted set commands 1163 | # x Expired events (events generated every time a key expires) 1164 | # e Evicted events (events generated when a key is evicted for maxmemory) 1165 | # A Alias for g$lshzxe, so that the "AKE" string means all the events. 1166 | # 1167 | # The "notify-keyspace-events" takes as argument a string that is composed 1168 | # of zero or multiple characters. The empty string means that notifications 1169 | # are disabled. 1170 | # 1171 | # Example: to enable list and generic events, from the point of view of the 1172 | # event name, use: 1173 | # 1174 | # notify-keyspace-events Elg 1175 | # 1176 | # Example 2: to get the stream of the expired keys subscribing to channel 1177 | # name __keyevent@0__:expired use: 1178 | # 1179 | # notify-keyspace-events Ex 1180 | # 1181 | # By default all notifications are disabled because most users don't need 1182 | # this feature and the feature has some overhead. Note that if you don't 1183 | # specify at least one of K or E, no events will be delivered. 1184 | notify-keyspace-events "" 1185 | 1186 | ############################### GOPHER SERVER ################################# 1187 | 1188 | # Redis contains an implementation of the Gopher protocol, as specified in 1189 | # the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt). 1190 | # 1191 | # The Gopher protocol was very popular in the late '90s. It is an alternative 1192 | # to the web, and the implementation both server and client side is so simple 1193 | # that the Redis server has just 100 lines of code in order to implement this 1194 | # support. 1195 | # 1196 | # What do you do with Gopher nowadays? Well Gopher never *really* died, and 1197 | # lately there is a movement in order for the Gopher more hierarchical content 1198 | # composed of just plain text documents to be resurrected. 
Some want a simpler 1199 | # internet, others believe that the mainstream internet became too much 1200 | # controlled, and it's cool to create an alternative space for people that 1201 | # want a bit of fresh air. 1202 | # 1203 | # Anyway for the 10th birthday of the Redis, we gave it the Gopher protocol 1204 | # as a gift. 1205 | # 1206 | # --- HOW IT WORKS? --- 1207 | # 1208 | # The Redis Gopher support uses the inline protocol of Redis, and specifically 1209 | # two kind of inline requests that were anyway illegal: an empty request 1210 | # or any request that starts with "/" (there are no Redis commands starting 1211 | # with such a slash). Normal RESP2/RESP3 requests are completely out of the 1212 | # path of the Gopher protocol implementation and are served as usually as well. 1213 | # 1214 | # If you open a connection to Redis when Gopher is enabled and send it 1215 | # a string like "/foo", if there is a key named "/foo" it is served via the 1216 | # Gopher protocol. 1217 | # 1218 | # In order to create a real Gopher "hole" (the name of a Gopher site in Gopher 1219 | # talking), you likely need a script like the following: 1220 | # 1221 | # https://github.com/antirez/gopher2redis 1222 | # 1223 | # --- SECURITY WARNING --- 1224 | # 1225 | # If you plan to put Redis on the internet in a publicly accessible address 1226 | # to serve Gopher pages MAKE SURE TO SET A PASSWORD to the instance. 1227 | # Once a password is set: 1228 | # 1229 | # 1. The Gopher server (when enabled, not by default) will still serve 1230 | # content via Gopher. 1231 | # 2. However other commands cannot be called before the client will 1232 | # authenticate. 1233 | # 1234 | # So use the 'requirepass' option to protect your instance. 1235 | # 1236 | # To enable Gopher support uncomment the following line and set 1237 | # the option from no (the default) to yes. 
1238 | # 1239 | # gopher-enabled no 1240 | 1241 | ############################### ADVANCED CONFIG ############################### 1242 | 1243 | # Hashes are encoded using a memory efficient data structure when they have a 1244 | # small number of entries, and the biggest entry does not exceed a given 1245 | # threshold. These thresholds can be configured using the following directives. 1246 | hash-max-ziplist-entries 512 1247 | hash-max-ziplist-value 64 1248 | 1249 | # Lists are also encoded in a special way to save a lot of space. 1250 | # The number of entries allowed per internal list node can be specified 1251 | # as a fixed maximum size or a maximum number of elements. 1252 | # For a fixed maximum size, use -5 through -1, meaning: 1253 | # -5: max size: 64 Kb <-- not recommended for normal workloads 1254 | # -4: max size: 32 Kb <-- not recommended 1255 | # -3: max size: 16 Kb <-- probably not recommended 1256 | # -2: max size: 8 Kb <-- good 1257 | # -1: max size: 4 Kb <-- good 1258 | # Positive numbers mean store up to _exactly_ that number of elements 1259 | # per list node. 1260 | # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), 1261 | # but if your use case is unique, adjust the settings as necessary. 1262 | list-max-ziplist-size -2 1263 | 1264 | # Lists may also be compressed. 1265 | # Compress depth is the number of quicklist ziplist nodes from *each* side of 1266 | # the list to *exclude* from compression. The head and tail of the list 1267 | # are always uncompressed for fast push/pop operations. Settings are: 1268 | # 0: disable all list compression 1269 | # 1: depth 1 means "don't start compressing until after 1 node into the list, 1270 | # going from either the head or tail" 1271 | # So: [head]->node->node->...->node->[tail] 1272 | # [head], [tail] will always be uncompressed; inner nodes will compress. 
1273 | # 2: [head]->[next]->node->node->...->node->[prev]->[tail] 1274 | # 2 here means: don't compress head or head->next or tail->prev or tail, 1275 | # but compress all nodes between them. 1276 | # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] 1277 | # etc. 1278 | list-compress-depth 0 1279 | 1280 | # Sets have a special encoding in just one case: when a set is composed 1281 | # of just strings that happen to be integers in radix 10 in the range 1282 | # of 64 bit signed integers. 1283 | # The following configuration setting sets the limit in the size of the 1284 | # set in order to use this special memory saving encoding. 1285 | set-max-intset-entries 512 1286 | 1287 | # Similarly to hashes and lists, sorted sets are also specially encoded in 1288 | # order to save a lot of space. This encoding is only used when the length and 1289 | # elements of a sorted set are below the following limits: 1290 | zset-max-ziplist-entries 128 1291 | zset-max-ziplist-value 64 1292 | 1293 | # HyperLogLog sparse representation bytes limit. The limit includes the 1294 | # 16 bytes header. When an HyperLogLog using the sparse representation crosses 1295 | # this limit, it is converted into the dense representation. 1296 | # 1297 | # A value greater than 16000 is totally useless, since at that point the 1298 | # dense representation is more memory efficient. 1299 | # 1300 | # The suggested value is ~ 3000 in order to have the benefits of 1301 | # the space efficient encoding without slowing down too much PFADD, 1302 | # which is O(N) with the sparse encoding. The value can be raised to 1303 | # ~ 10000 when CPU is not a concern, but space is, and the data set is 1304 | # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. 1305 | hll-sparse-max-bytes 3000 1306 | 1307 | # Streams macro node max size / items. The stream data structure is a radix 1308 | # tree of big nodes that encode multiple items inside. 
Using this configuration 1309 | # it is possible to configure how big a single node can be in bytes, and the 1310 | # maximum number of items it may contain before switching to a new node when 1311 | # appending new stream entries. If any of the following settings are set to 1312 | # zero, the limit is ignored, so for instance it is possible to set just a 1313 | # max entries limit by setting max-bytes to 0 and max-entries to the desired 1314 | # value. 1315 | stream-node-max-bytes 4096 1316 | stream-node-max-entries 100 1317 | 1318 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in 1319 | # order to help rehashing the main Redis hash table (the one mapping top-level 1320 | # keys to values). The hash table implementation Redis uses (see dict.c) 1321 | # performs a lazy rehashing: the more operations you run into a hash table 1322 | # that is rehashing, the more rehashing "steps" are performed, so if the 1323 | # server is idle the rehashing is never complete and some more memory is used 1324 | # by the hash table. 1325 | # 1326 | # The default is to use this millisecond 10 times every second in order to 1327 | # actively rehash the main dictionaries, freeing memory when possible. 1328 | # 1329 | # If unsure: 1330 | # use "activerehashing no" if you have hard latency requirements and it is 1331 | # not a good thing in your environment that Redis can reply from time to time 1332 | # to queries with 2 milliseconds delay. 1333 | # 1334 | # use "activerehashing yes" if you don't have such hard requirements but 1335 | # want to free memory asap when possible. 1336 | activerehashing yes 1337 | 1338 | # The client output buffer limits can be used to force disconnection of clients 1339 | # that are not reading data from the server fast enough for some reason (a 1340 | # common reason is that a Pub/Sub client can't consume messages as fast as the 1341 | # publisher can produce them).
1342 | # 1343 | # The limit can be set differently for the three different classes of clients: 1344 | # 1345 | # normal -> normal clients including MONITOR clients 1346 | # replica -> replica clients 1347 | # pubsub -> clients subscribed to at least one pubsub channel or pattern 1348 | # 1349 | # The syntax of every client-output-buffer-limit directive is the following: 1350 | # 1351 | # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds> 1352 | # 1353 | # A client is immediately disconnected once the hard limit is reached, or if 1354 | # the soft limit is reached and remains reached for the specified number of 1355 | # seconds (continuously). 1356 | # So for instance if the hard limit is 32 megabytes and the soft limit is 1357 | # 16 megabytes / 10 seconds, the client will get disconnected immediately 1358 | # if the size of the output buffers reach 32 megabytes, but will also get 1359 | # disconnected if the client reaches 16 megabytes and continuously overcomes 1360 | # the limit for 10 seconds. 1361 | # 1362 | # By default normal clients are not limited because they don't receive data 1363 | # without asking (in a push way), but just after a request, so only 1364 | # asynchronous clients may create a scenario where data is requested faster 1365 | # than it can read. 1366 | # 1367 | # Instead there is a default limit for pubsub and replica clients, since 1368 | # subscribers and replicas receive data in a push fashion. 1369 | # 1370 | # Both the hard or the soft limit can be disabled by setting them to zero. 1371 | client-output-buffer-limit normal 0 0 0 1372 | client-output-buffer-limit replica 256mb 64mb 60 1373 | client-output-buffer-limit pubsub 32mb 8mb 60 1374 | 1375 | # Client query buffers accumulate new commands. They are limited to a fixed 1376 | # amount by default in order to avoid that a protocol desynchronization (for 1377 | # instance due to a bug in the client) will lead to unbound memory usage in 1378 | # the query buffer.
However you can configure it here if you have very special 1379 | # needs, such as huge multi/exec requests or alike. 1380 | # 1381 | # client-query-buffer-limit 1gb 1382 | 1383 | # In the Redis protocol, bulk requests, that are, elements representing single 1384 | # strings, are normally limited to 512 mb. However you can change this limit 1385 | # here. 1386 | # 1387 | # proto-max-bulk-len 512mb 1388 | 1389 | # Redis calls an internal function to perform many background tasks, like 1390 | # closing connections of clients in timeout, purging expired keys that are 1391 | # never requested, and so forth. 1392 | # 1393 | # Not all tasks are performed with the same frequency, but Redis checks for 1394 | # tasks to perform according to the specified "hz" value. 1395 | # 1396 | # By default "hz" is set to 10. Raising the value will use more CPU when 1397 | # Redis is idle, but at the same time will make Redis more responsive when 1398 | # there are many keys expiring at the same time, and timeouts may be 1399 | # handled with more precision. 1400 | # 1401 | # The range is between 1 and 500, however a value over 100 is usually not 1402 | # a good idea. Most users should use the default of 10 and raise this up to 1403 | # 100 only in environments where very low latency is required. 1404 | hz 10 1405 | 1406 | # Normally it is useful to have an HZ value which is proportional to the 1407 | # number of clients connected. This is useful in order, for instance, to 1408 | # avoid too many clients are processed for each background task invocation 1409 | # in order to avoid latency spikes. 1410 | # 1411 | # Since the default HZ value by default is conservatively set to 10, Redis 1412 | # offers, and enables by default, the ability to use an adaptive HZ value 1413 | # which will temporarily raise when there are many connected clients.
1414 | # 1415 | # When dynamic HZ is enabled, the actual configured HZ will be used 1416 | # as a baseline, but multiples of the configured HZ value will be actually 1417 | # used as needed once more clients are connected. In this way an idle 1418 | # instance will use very little CPU time while a busy instance will be 1419 | # more responsive. 1420 | dynamic-hz yes 1421 | 1422 | # When a child rewrites the AOF file, if the following option is enabled 1423 | # the file will be fsync-ed every 32 MB of data generated. This is useful 1424 | # in order to commit the file to the disk more incrementally and avoid 1425 | # big latency spikes. 1426 | aof-rewrite-incremental-fsync yes 1427 | 1428 | # When redis saves RDB file, if the following option is enabled 1429 | # the file will be fsync-ed every 32 MB of data generated. This is useful 1430 | # in order to commit the file to the disk more incrementally and avoid 1431 | # big latency spikes. 1432 | rdb-save-incremental-fsync yes 1433 | 1434 | # Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good 1435 | # idea to start with the default settings and only change them after investigating 1436 | # how to improve the performances and how the keys LFU change over time, which 1437 | # is possible to inspect via the OBJECT FREQ command. 1438 | # 1439 | # There are two tunable parameters in the Redis LFU implementation: the 1440 | # counter logarithm factor and the counter decay time. It is important to 1441 | # understand what the two parameters mean before changing them. 1442 | # 1443 | # The LFU counter is just 8 bits per key, its maximum value is 255, so Redis 1444 | # uses a probabilistic increment with logarithmic behavior. Given the value 1445 | # of the old counter, when a key is accessed, the counter is incremented in 1446 | # this way: 1447 | # 1448 | # 1. A random number R between 0 and 1 is extracted. 1449 | # 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). 1450 | # 3.
The counter is incremented only if R < P. 1451 | # 1452 | # The default lfu-log-factor is 10. This is a table of how the frequency 1453 | # counter changes with a different number of accesses with different 1454 | # logarithmic factors: 1455 | # 1456 | # +--------+------------+------------+------------+------------+------------+ 1457 | # | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | 1458 | # +--------+------------+------------+------------+------------+------------+ 1459 | # | 0 | 104 | 255 | 255 | 255 | 255 | 1460 | # +--------+------------+------------+------------+------------+------------+ 1461 | # | 1 | 18 | 49 | 255 | 255 | 255 | 1462 | # +--------+------------+------------+------------+------------+------------+ 1463 | # | 10 | 10 | 18 | 142 | 255 | 255 | 1464 | # +--------+------------+------------+------------+------------+------------+ 1465 | # | 100 | 8 | 11 | 49 | 143 | 255 | 1466 | # +--------+------------+------------+------------+------------+------------+ 1467 | # 1468 | # NOTE: The above table was obtained by running the following commands: 1469 | # 1470 | # redis-benchmark -n 1000000 incr foo 1471 | # redis-cli object freq foo 1472 | # 1473 | # NOTE 2: The counter initial value is 5 in order to give new objects a chance 1474 | # to accumulate hits. 1475 | # 1476 | # The counter decay time is the time, in minutes, that must elapse in order 1477 | # for the key counter to be divided by two (or decremented if it has a value 1478 | # less <= 10). 1479 | # 1480 | # The default value for the lfu-decay-time is 1. A Special value of 0 means to 1481 | # decay the counter every time it happens to be scanned. 1482 | # 1483 | # lfu-log-factor 10 1484 | # lfu-decay-time 1 1485 | 1486 | ########################### ACTIVE DEFRAGMENTATION ####################### 1487 | # 1488 | # WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested 1489 | # even in production and manually tested by multiple engineers for some 1490 | # time. 
1491 | # 1492 | # What is active defragmentation? 1493 | # ------------------------------- 1494 | # 1495 | # Active (online) defragmentation allows a Redis server to compact the 1496 | # spaces left between small allocations and deallocations of data in memory, 1497 | # thus allowing to reclaim back memory. 1498 | # 1499 | # Fragmentation is a natural process that happens with every allocator (but 1500 | # less so with Jemalloc, fortunately) and certain workloads. Normally a server 1501 | # restart is needed in order to lower the fragmentation, or at least to flush 1502 | # away all the data and create it again. However thanks to this feature 1503 | # implemented by Oran Agra for Redis 4.0 this process can happen at runtime 1504 | # in an "hot" way, while the server is running. 1505 | # 1506 | # Basically when the fragmentation is over a certain level (see the 1507 | # configuration options below) Redis will start to create new copies of the 1508 | # values in contiguous memory regions by exploiting certain specific Jemalloc 1509 | # features (in order to understand if an allocation is causing fragmentation 1510 | # and to allocate it in a better place), and at the same time, will release the 1511 | # old copies of the data. This process, repeated incrementally for all the keys 1512 | # will cause the fragmentation to drop back to normal values. 1513 | # 1514 | # Important things to understand: 1515 | # 1516 | # 1. This feature is disabled by default, and only works if you compiled Redis 1517 | # to use the copy of Jemalloc we ship with the source code of Redis. 1518 | # This is the default with Linux builds. 1519 | # 1520 | # 2. You never need to enable this feature if you don't have fragmentation 1521 | # issues. 1522 | # 1523 | # 3. Once you experience fragmentation, you can enable this feature when 1524 | # needed with the command "CONFIG SET activedefrag yes". 
1525 | # 1526 | # The configuration parameters are able to fine tune the behavior of the 1527 | # defragmentation process. If you are not sure about what they mean it is 1528 | # a good idea to leave the defaults untouched. 1529 | 1530 | # Enable active defragmentation 1531 | # activedefrag yes 1532 | 1533 | # Minimum amount of fragmentation waste to start active defrag 1534 | # active-defrag-ignore-bytes 100mb 1535 | 1536 | # Minimum percentage of fragmentation to start active defrag 1537 | # active-defrag-threshold-lower 10 1538 | 1539 | # Maximum percentage of fragmentation at which we use maximum effort 1540 | # active-defrag-threshold-upper 100 1541 | 1542 | # Minimal effort for defrag in CPU percentage 1543 | # active-defrag-cycle-min 5 1544 | 1545 | # Maximal effort for defrag in CPU percentage 1546 | # active-defrag-cycle-max 75 1547 | 1548 | # Maximum number of set/hash/zset/list fields that will be processed from 1549 | # the main dictionary scan 1550 | # active-defrag-max-scan-fields 1000 1551 | 1552 | --------------------------------------------------------------------------------