├── example
│   ├── Dockerfile
│   ├── example
│   ├── go.mod
│   ├── go.sum
│   └── main.go
├── openshift
│   ├── is-output.yaml
│   ├── is-base.yaml
│   ├── redis-master-service.yaml
│   ├── redis-sentinel-services.yaml
│   ├── build.yaml
│   ├── redis.yaml
│   ├── redis-sentinel-dc.yaml
│   └── redis-master-dc.yaml
├── Makefile
├── image
│   ├── Dockerfile
│   ├── run.sh
│   ├── redis-slave.conf
│   └── redis-master.conf
├── README.md
└── list.yaml
/example/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | 3 | ADD example ./app 4 | 5 | ENTRYPOINT [ "./app" ] 6 | -------------------------------------------------------------------------------- /example/example: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjudeikis/redis-openshift/HEAD/example/example -------------------------------------------------------------------------------- /example/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/mjudeikis/redis-openshift/example 2 | 3 | go 1.13 4 | 5 | require github.com/go-redis/redis v6.15.8+incompatible 6 | -------------------------------------------------------------------------------- /openshift/is-output.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ImageStream 3 | metadata: 4 | creationTimestamp: null 5 | generation: 1 6 | labels: 7 | app: redis 8 | name: redis 9 | name: redis 10 | spec: {} -------------------------------------------------------------------------------- /example/go.sum: -------------------------------------------------------------------------------- 1 | github.com/go-redis/redis v6.15.8+incompatible h1:BKZuG6mCnRj5AOaWJXoCgf6rqTYnYJLe4en2hxT7r9o= 2 | github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | build: 2 | cd image && podman build . -t quay.io/${USER}/redis-openshift 3 | 4 | build-example: 5 | cd example && go build 6 | 7 | build-example-image: 8 | cd example && podman build . 
-t quay.io/${USER}/redis-openshift-example 9 | -------------------------------------------------------------------------------- /openshift/is-base.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ImageStream 3 | metadata: 4 | generation: 2 5 | labels: 6 | app: rhel7-base 7 | name: rhel7-base 8 | name: rhel7 9 | spec: 10 | dockerImageRepository: registry.access.redhat.com/rhel7 11 | -------------------------------------------------------------------------------- /openshift/redis-master-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: master 6 | role: service 7 | name: redis-master 8 | spec: 9 | ports: 10 | - port: 6379 11 | targetPort: 6379 12 | selector: 13 | redis-master: "true" 14 | -------------------------------------------------------------------------------- /openshift/redis-sentinel-services.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: sentinel 6 | role: service 7 | name: redis-sentinel 8 | spec: 9 | ports: 10 | - port: 26379 11 | targetPort: 26379 12 | selector: 13 | redis-sentinel: "true" 14 | -------------------------------------------------------------------------------- /image/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | 3 | RUN yum install epel-release -y 4 | 5 | RUN yum install redis hostname -y ; yum clean all 6 | 7 | COPY redis-master.conf /redis-master/redis.conf 8 | COPY redis-slave.conf /redis-slave/redis.conf 9 | RUN mkdir -p /redis-sentinel ; chmod -R 777 /redis-sentinel /redis-slave 10 | COPY run.sh /run.sh 11 | 12 | CMD [ "/run.sh" ] 13 | 14 | ENTRYPOINT [ "bash", "-c" ] 15 | -------------------------------------------------------------------------------- /openshift/build.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: BuildConfig 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | app: redis-build 7 | name: redis 8 | name: redis-build 9 | spec: 10 | nodeSelector: null 11 | output: 12 | to: 13 | kind: ImageStreamTag 14 | name: redis:latest 15 | postCommit: {} 16 | resources: {} 17 | runPolicy: Serial 18 | source: 19 | contextDir: image 20 | git: 21 | uri: https://github.com/mangirdaz/redis-openshift.git 22 | type: Git 23 | strategy: 24 | dockerStrategy: 25 | from: 26 | kind: ImageStreamTag 27 | name: rhel7:latest 28 | namespace: openshift 29 | type: Docker 30 | triggers: [] 31 | status: 32 | lastVersion: 0 33 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Reliable, Scalable Redis on OpenShift 2 | 3 | This document describes the deployment of a reliable, multi-node Redis on OpenShift. It deploys a master with replicated slaves, as well as replicated Redis Sentinels, which are used for health checking and failover. 4 | 5 | ### Prerequisites 6 | 7 | This example assumes that you have an OpenShift cluster installed and running, and that you have installed the ```oc``` command-line tool somewhere in your path. 
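### Connecting from an application (sketch)

The ```example``` client shown further down (```/example/main.go```) connects directly to the ```redis-master``` service. Because the deployment also runs replicated Sentinels, a client can instead ask the ```redis-sentinel``` service for the current master and follow failovers automatically. The snippet below is only a sketch, not part of this repository: it assumes the go-redis failover client, the ```REDIS_SENTINEL_SERVICE_HOST```/```REDIS_SENTINEL_SERVICE_PORT``` variables OpenShift injects for the ```redis-sentinel``` service (the same ones ```run.sh``` relies on), and the master name ```mymaster``` that ```run.sh``` writes into the Sentinel configuration.

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-redis/redis"
)

func main() {
	// Service discovery variables injected by OpenShift for the
	// redis-sentinel service (also used by run.sh).
	host := os.Getenv("REDIS_SENTINEL_SERVICE_HOST")
	port := os.Getenv("REDIS_SENTINEL_SERVICE_PORT")

	// Resolve the current master of "mymaster" via Sentinel and
	// reconnect transparently after a failover.
	client := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{host + ":" + port},
	})

	if err := client.Set("name", "MJ", 0).Err(); err != nil {
		fmt.Println("set failed:", err)
	}

	val, err := client.Get("name").Result()
	fmt.Println(val, err)
}
```

Writes always have to go through the elected master; the slaves are configured read-only (```slave-read-only yes``` in ```redis-slave.conf```), so they can only serve reads.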
8 | 9 | 10 | ### Run 11 | 12 | #make sure you have base image available 13 | oc create -f https://raw.githubusercontent.com/mjudeikis/redis-openshift/master/openshift/is-base.yaml -n openshift 14 | #create all components 15 | oc create -f https://raw.githubusercontent.com/mjudeikis/redis-openshift/master/list.yaml 16 | #start build and watch 17 | oc start-build redis-build 18 | -------------------------------------------------------------------------------- /example/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "time" 7 | 8 | "github.com/go-redis/redis" 9 | ) 10 | 11 | func main() { 12 | fmt.Println("Go Redis Example") 13 | 14 | server := os.Getenv("REDIS_SERVER") 15 | passwd := os.Getenv("REDIS_PASSWORD") 16 | 17 | client := redis.NewClient(&redis.Options{ 18 | Addr: server + ":6379", 19 | Password: passwd, 20 | DB: 0, 21 | }) 22 | 23 | for { 24 | pong, err := client.Ping().Result() 25 | fmt.Println(pong, err) 26 | 27 | err = client.Set("name", "MJ", 0).Err() 28 | if err != nil { 29 | fmt.Println(err) 30 | } 31 | 32 | val, err := client.Get("name").Result() 33 | if err != nil { 34 | fmt.Println(err) 35 | } 36 | fmt.Println(val) 37 | 38 | err = client.Del("name").Err() 39 | if err != nil { 40 | fmt.Println(err) 41 | } 42 | 43 | time.Sleep(time.Second * 10) 44 | } 45 | 46 | } 47 | -------------------------------------------------------------------------------- /openshift/redis.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: v1 4 | kind: DeploymentConfig 5 | metadata: 6 | labels: 7 | app: redis 8 | name: redis 9 | spec: 10 | replicas: 3 11 | selector: 12 | app: redis 13 | template: 14 | metadata: 15 | labels: 16 | app: redis 17 | spec: 18 | containers: 19 | - image: 172.30.151.14:5000/redis/redis@sha256:e508f686a4f50eb54350c091f3df8fc6a9649dd8c613482088534c5ca70cfc37 20 | imagePullPolicy: Always 21 | name: redis 22 | volumeMounts: 23 | - mountPath: /redis-master-data 24 | name: data 25 | ports: 26 | - containerPort: 6379 27 | resources: {} 28 | dnsPolicy: ClusterFirst 29 | restartPolicy: Always 30 | securityContext: {} 31 | terminationGracePeriodSeconds: 30 32 | volumes: 33 | - name: data 34 | emptyDir: {} 35 | test: false 36 | triggers: 37 | - type: ConfigChange 38 | - imageChangeParams: 39 | automatic: true 40 | containerNames: 41 | - redis 42 | from: 43 | kind: ImageStreamTag 44 | name: redis:latest 45 | namespace: redis 46 | type: ImageChange 47 | status: {} 48 | kind: List 49 | metadata: {} 50 | -------------------------------------------------------------------------------- /openshift/redis-sentinel-dc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: v1 4 | kind: DeploymentConfig 5 | metadata: 6 | labels: 7 | app: redis-sentinel 8 | redis-sentinel: "true" 9 | name: redis-sentinel 10 | spec: 11 | replicas: 3 12 | selector: 13 | app: redis-sentinel 14 | redis-sentinel: "true" 15 | template: 16 | metadata: 17 | labels: 18 | app: redis-sentinel 19 | redis-sentinel: "true" 20 | spec: 21 | containers: 22 | - image: 172.30.151.14:5000/redis/redis@sha256:e508f686a4f50eb54350c091f3df8fc6a9649dd8c613482088534c5ca70cfc37 23 | imagePullPolicy: Always 24 | name: sentinel 25 | env: 26 | - name: SENTINEL 27 | value: "true" 28 | volumeMounts: 29 | - mountPath: /redis-master-data 30 | name: data 31 | ports: 32 | - containerPort: 
26379 33 | resources: {} 34 | dnsPolicy: ClusterFirst 35 | restartPolicy: Always 36 | securityContext: {} 37 | terminationGracePeriodSeconds: 30 38 | volumes: 39 | - name: data 40 | emptyDir: {} 41 | test: false 42 | triggers: 43 | - type: ConfigChange 44 | - imageChangeParams: 45 | automatic: true 46 | containerNames: 47 | - sentinel 48 | from: 49 | kind: ImageStreamTag 50 | name: redis:latest 51 | namespace: redis 52 | type: ImageChange 53 | status: {} 54 | kind: List 55 | metadata: {} 56 | -------------------------------------------------------------------------------- /openshift/redis-master-dc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: v1 4 | kind: DeploymentConfig 5 | metadata: 6 | labels: 7 | app: redis-master 8 | redis-sentinel: "true" 9 | redis-master: "true" 10 | role: master 11 | name: redis-master 12 | spec: 13 | replicas: 1 14 | selector: 15 | app: redis-master 16 | redis-sentinel: "true" 17 | redis-master: "true" 18 | role: master 19 | strategy: 20 | resources: {} 21 | rollingParams: 22 | intervalSeconds: 1 23 | maxSurge: 25% 24 | maxUnavailable: 25% 25 | timeoutSeconds: 600 26 | updatePeriodSeconds: 1 27 | type: Rolling 28 | template: 29 | metadata: 30 | labels: 31 | app: redis-master 32 | redis-sentinel: "true" 33 | redis-master: "true" 34 | role: master 35 | spec: 36 | containers: 37 | - image: 172.30.151.14:5000/redis/redis@sha256:e508f686a4f50eb54350c091f3df8fc6a9649dd8c613482088534c5ca70cfc37 38 | imagePullPolicy: Always 39 | name: redis 40 | env: 41 | - name: MASTER 42 | value: "true" 43 | volumeMounts: 44 | - mountPath: /redis-master-data 45 | name: data 46 | ports: 47 | - containerPort: 6379 48 | resources: {} 49 | - image: 172.30.151.14:5000/redis/redis@sha256:e508f686a4f50eb54350c091f3df8fc6a9649dd8c613482088534c5ca70cfc37 50 | imagePullPolicy: Always 51 | name: sentinel 52 | resources: {} 53 | ports: 54 | - containerPort: 26379 55 | env: 56 | - name: SENTINEL 57 | value: "true" 58 | dnsPolicy: ClusterFirst 59 | restartPolicy: Always 60 | securityContext: {} 61 | terminationGracePeriodSeconds: 30 62 | volumes: 63 | - name: data 64 | emptyDir: {} 65 | test: false 66 | triggers: 67 | - type: ConfigChange 68 | - imageChangeParams: 69 | automatic: true 70 | containerNames: 71 | - redis 72 | from: 73 | kind: ImageStreamTag 74 | name: redis:latest 75 | namespace: redis 76 | type: ImageChange 77 | status: {} 78 | kind: List 79 | metadata: {} 80 | -------------------------------------------------------------------------------- /image/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2014 The Kubernetes Authors All rights reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | function launchmaster() { 18 | if [[ ! -e /redis-master-data ]]; then 19 | echo "Redis master data doesn't exist, data won't be persistent!" 
20 | mkdir /redis-master-data 21 | fi 22 | redis-server /redis-master/redis.conf 23 | } 24 | 25 | function launchsentinel() { 26 | while true; do 27 | master=$(redis-cli -h ${REDIS_SENTINEL_SERVICE_HOST} -p ${REDIS_SENTINEL_SERVICE_PORT} --csv SENTINEL get-master-addr-by-name mymaster | tr ',' ' ' | cut -d' ' -f1) 28 | if [[ -n ${master} ]]; then 29 | master="${master//\"}" 30 | else 31 | master="${REDIS_MASTER_SERVICE_HOST}" 32 | fi 33 | 34 | redis-cli -h ${master} INFO 35 | if [[ "$?" == "0" ]]; then 36 | break 37 | fi 38 | echo "Connecting to master failed. Waiting..." 39 | sleep 10 40 | done 41 | 42 | sentinel_conf=/redis-sentinel/sentinel.conf 43 | 44 | echo "sentinel monitor mymaster ${master} 6379 2" > ${sentinel_conf} 45 | echo "sentinel down-after-milliseconds mymaster 60000" >> ${sentinel_conf} 46 | echo "sentinel failover-timeout mymaster 180000" >> ${sentinel_conf} 47 | echo "sentinel parallel-syncs mymaster 1" >> ${sentinel_conf} 48 | 49 | redis-sentinel ${sentinel_conf} 50 | } 51 | 52 | function launchslave() { 53 | while true; do 54 | master=$(redis-cli -h ${REDIS_SENTINEL_SERVICE_HOST} -p ${REDIS_SENTINEL_SERVICE_PORT} --csv SENTINEL get-master-addr-by-name mymaster | tr ',' ' ' | cut -d' ' -f1) 55 | if [[ -n ${master} ]]; then 56 | master="${master//\"}" 57 | else 58 | echo "Failed to find master." 59 | sleep 60 60 | exit 1 61 | fi 62 | redis-cli -h ${master} INFO 63 | if [[ "$?" == "0" ]]; then 64 | break 65 | fi 66 | echo "Connecting to master failed. Waiting..." 67 | sleep 10 68 | done 69 | sed -i "s/%master-ip%/${master}/" /redis-slave/redis.conf 70 | sed -i "s/%master-port%/6379/" /redis-slave/redis.conf 71 | redis-server /redis-slave/redis.conf 72 | } 73 | 74 | if [[ "${MASTER}" == "true" ]]; then 75 | launchmaster 76 | exit 0 77 | fi 78 | 79 | if [[ "${SENTINEL}" == "true" ]]; then 80 | launchsentinel 81 | exit 0 82 | fi 83 | 84 | launchslave 85 | -------------------------------------------------------------------------------- /list.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: v1 4 | kind: BuildConfig 5 | metadata: 6 | creationTimestamp: null 7 | labels: 8 | app: redis-build 9 | name: redis 10 | name: redis-build 11 | spec: 12 | nodeSelector: null 13 | output: 14 | to: 15 | kind: ImageStreamTag 16 | name: redis:latest 17 | postCommit: {} 18 | resources: {} 19 | runPolicy: Serial 20 | source: 21 | contextDir: image 22 | git: 23 | uri: https://github.com/mangirdaz/redis-openshift.git 24 | type: Git 25 | strategy: 26 | dockerStrategy: 27 | from: 28 | kind: ImageStreamTag 29 | name: rhel7:latest 30 | namespace: openshift 31 | type: Docker 32 | triggers: [] 33 | status: 34 | lastVersion: 0 35 | - apiVersion: v1 36 | kind: ImageStream 37 | metadata: 38 | labels: 39 | build: redis 40 | name: redis 41 | status: 42 | dockerImageRepository: "" 43 | - apiVersion: v1 44 | kind: DeploymentConfig 45 | metadata: 46 | creationTimestamp: null 47 | generation: 1 48 | labels: 49 | app: redis 50 | name: redis 51 | spec: 52 | replicas: 3 53 | selector: 54 | app: redis 55 | strategy: 56 | resources: {} 57 | rollingParams: 58 | intervalSeconds: 1 59 | maxSurge: 25% 60 | maxUnavailable: 25% 61 | timeoutSeconds: 600 62 | updatePeriodSeconds: 1 63 | type: Rolling 64 | template: 65 | metadata: 66 | creationTimestamp: null 67 | labels: 68 | app: redis 69 | spec: 70 | containers: 71 | - image: ' ' 72 | imagePullPolicy: Always 73 | name: redis 74 | ports: 75 | - containerPort: 6379 76 | protocol: TCP 77 | 
resources: {} 78 | terminationMessagePath: /dev/termination-log 79 | volumeMounts: 80 | - mountPath: /redis-master-data 81 | name: data 82 | dnsPolicy: ClusterFirst 83 | restartPolicy: Always 84 | securityContext: {} 85 | terminationGracePeriodSeconds: 30 86 | volumes: 87 | - emptyDir: {} 88 | name: data 89 | test: false 90 | triggers: 91 | - type: ConfigChange 92 | - imageChangeParams: 93 | automatic: true 94 | containerNames: 95 | - redis 96 | from: 97 | kind: ImageStreamTag 98 | name: redis:latest 99 | namespace: redis 100 | type: ImageChange 101 | status: {} 102 | - apiVersion: v1 103 | kind: DeploymentConfig 104 | metadata: 105 | creationTimestamp: null 106 | generation: 1 107 | labels: 108 | app: redis-master 109 | redis-master: "true" 110 | redis-sentinel: "true" 111 | role: master 112 | name: redis-master 113 | spec: 114 | replicas: 1 115 | selector: 116 | app: redis-master 117 | redis-master: "true" 118 | redis-sentinel: "true" 119 | role: master 120 | strategy: 121 | resources: {} 122 | rollingParams: 123 | intervalSeconds: 1 124 | maxSurge: 25% 125 | maxUnavailable: 25% 126 | timeoutSeconds: 600 127 | updatePeriodSeconds: 1 128 | type: Rolling 129 | template: 130 | metadata: 131 | creationTimestamp: null 132 | labels: 133 | app: redis-master 134 | redis-master: "true" 135 | redis-sentinel: "true" 136 | role: master 137 | spec: 138 | containers: 139 | - env: 140 | - name: MASTER 141 | value: "true" 142 | image: ' ' 143 | imagePullPolicy: Always 144 | name: redis 145 | ports: 146 | - containerPort: 6379 147 | protocol: TCP 148 | resources: {} 149 | terminationMessagePath: /dev/termination-log 150 | volumeMounts: 151 | - mountPath: /redis-master-data 152 | name: data 153 | - env: 154 | - name: SENTINEL 155 | value: "true" 156 | image: ' ' 157 | imagePullPolicy: Always 158 | name: sentinel 159 | ports: 160 | - containerPort: 26379 161 | protocol: TCP 162 | resources: {} 163 | terminationMessagePath: /dev/termination-log 164 | dnsPolicy: ClusterFirst 165 | restartPolicy: Always 166 | securityContext: {} 167 | terminationGracePeriodSeconds: 30 168 | volumes: 169 | - emptyDir: {} 170 | name: data 171 | test: false 172 | triggers: 173 | - imageChangeParams: 174 | automatic: true 175 | containerNames: 176 | - redis 177 | from: 178 | kind: ImageStreamTag 179 | name: redis:latest 180 | namespace: redis 181 | type: ImageChange 182 | - imageChangeParams: 183 | automatic: true 184 | containerNames: 185 | - sentinel 186 | from: 187 | kind: ImageStreamTag 188 | name: redis:latest 189 | namespace: redis 190 | type: ImageChange 191 | - type: ConfigChange 192 | status: {} 193 | - apiVersion: v1 194 | kind: DeploymentConfig 195 | metadata: 196 | creationTimestamp: null 197 | generation: 1 198 | labels: 199 | app: redis-sentinel 200 | redis-sentinel: "true" 201 | name: redis-sentinel 202 | spec: 203 | replicas: 3 204 | selector: 205 | app: redis-sentinel 206 | redis-sentinel: "true" 207 | strategy: 208 | resources: {} 209 | rollingParams: 210 | intervalSeconds: 1 211 | maxSurge: 25% 212 | maxUnavailable: 25% 213 | timeoutSeconds: 600 214 | updatePeriodSeconds: 1 215 | type: Rolling 216 | template: 217 | metadata: 218 | creationTimestamp: null 219 | labels: 220 | app: redis-sentinel 221 | redis-sentinel: "true" 222 | spec: 223 | containers: 224 | - env: 225 | - name: SENTINEL 226 | value: "true" 227 | image: ' ' 228 | imagePullPolicy: Always 229 | name: sentinel 230 | ports: 231 | - containerPort: 26379 232 | protocol: TCP 233 | resources: {} 234 | terminationMessagePath: /dev/termination-log 235 | 
volumeMounts: 236 | - mountPath: /redis-master-data 237 | name: data 238 | dnsPolicy: ClusterFirst 239 | restartPolicy: Always 240 | securityContext: {} 241 | terminationGracePeriodSeconds: 30 242 | volumes: 243 | - emptyDir: {} 244 | name: data 245 | test: false 246 | triggers: 247 | - type: ConfigChange 248 | - imageChangeParams: 249 | automatic: true 250 | containerNames: 251 | - sentinel 252 | from: 253 | kind: ImageStreamTag 254 | name: redis:latest 255 | namespace: redis 256 | type: ImageChange 257 | status: {} 258 | - apiVersion: v1 259 | kind: Service 260 | metadata: 261 | creationTimestamp: null 262 | labels: 263 | name: master 264 | role: service 265 | name: redis-master 266 | spec: 267 | ports: 268 | - port: 6379 269 | protocol: TCP 270 | targetPort: 6379 271 | selector: 272 | redis-master: "true" 273 | sessionAffinity: None 274 | type: ClusterIP 275 | status: 276 | loadBalancer: {} 277 | - apiVersion: v1 278 | kind: Service 279 | metadata: 280 | creationTimestamp: null 281 | labels: 282 | name: sentinel 283 | role: service 284 | name: redis-sentinel 285 | spec: 286 | ports: 287 | - port: 26379 288 | protocol: TCP 289 | targetPort: 26379 290 | selector: 291 | redis-sentinel: "true" 292 | sessionAffinity: None 293 | type: ClusterIP 294 | status: 295 | loadBalancer: {} 296 | kind: List 297 | metadata: {} 298 | -------------------------------------------------------------------------------- /image/redis-slave.conf: -------------------------------------------------------------------------------- 1 | # Redis configuration file example 2 | 3 | # Note on units: when memory size is needed, it is possible to specify 4 | # it in the usual form of 1k 5GB 4M and so forth: 5 | # 6 | # 1k => 1000 bytes 7 | # 1kb => 1024 bytes 8 | # 1m => 1000000 bytes 9 | # 1mb => 1024*1024 bytes 10 | # 1g => 1000000000 bytes 11 | # 1gb => 1024*1024*1024 bytes 12 | # 13 | # units are case insensitive so 1GB 1Gb 1gB are all the same. 14 | 15 | ################################## INCLUDES ################################### 16 | 17 | # Include one or more other config files here. This is useful if you 18 | # have a standard template that goes to all Redis servers but also need 19 | # to customize a few per-server settings. Include files can include 20 | # other files, so use this wisely. 21 | # 22 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE" 23 | # from admin or Redis Sentinel. Since Redis always uses the last processed 24 | # line as value of a configuration directive, you'd better put includes 25 | # at the beginning of this file to avoid overwriting config change at runtime. 26 | # 27 | # If instead you are interested in using includes to override configuration 28 | # options, it is better to use include as the last line. 29 | # 30 | # include /path/to/local.conf 31 | # include /path/to/other.conf 32 | 33 | ################################ GENERAL ##################################### 34 | 35 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 36 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 37 | daemonize no 38 | 39 | # When running daemonized, Redis writes a pid file in /var/run/redis.pid by 40 | # default. You can specify a custom pid file location here. 41 | pidfile /var/run/redis.pid 42 | 43 | # Accept connections on the specified port, default is 6379. 44 | # If port 0 is specified Redis will not listen on a TCP socket. 45 | port 6379 46 | 47 | # TCP listen() backlog. 
48 | # 49 | # In high requests-per-second environments you need an high backlog in order 50 | # to avoid slow clients connections issues. Note that the Linux kernel 51 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so 52 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog 53 | # in order to get the desired effect. 54 | tcp-backlog 511 55 | 56 | # By default Redis listens for connections from all the network interfaces 57 | # available on the server. It is possible to listen to just one or multiple 58 | # interfaces using the "bind" configuration directive, followed by one or 59 | # more IP addresses. 60 | # 61 | # Examples: 62 | # 63 | # bind 192.168.1.100 10.0.0.1 64 | 65 | bind 0.0.0.0 66 | 67 | # Specify the path for the Unix socket that will be used to listen for 68 | # incoming connections. There is no default, so Redis will not listen 69 | # on a unix socket when not specified. 70 | # 71 | # unixsocket /tmp/redis.sock 72 | # unixsocketperm 700 73 | 74 | # Close the connection after a client is idle for N seconds (0 to disable) 75 | timeout 0 76 | 77 | # TCP keepalive. 78 | # 79 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 80 | # of communication. This is useful for two reasons: 81 | # 82 | # 1) Detect dead peers. 83 | # 2) Take the connection alive from the point of view of network 84 | # equipment in the middle. 85 | # 86 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 87 | # Note that to close the connection the double of the time is needed. 88 | # On other kernels the period depends on the kernel configuration. 89 | # 90 | # A reasonable value for this option is 60 seconds. 91 | tcp-keepalive 60 92 | 93 | # Specify the server verbosity level. 94 | # This can be one of: 95 | # debug (a lot of information, useful for development/testing) 96 | # verbose (many rarely useful info, but not a mess like the debug level) 97 | # notice (moderately verbose, what you want in production probably) 98 | # warning (only very important / critical messages are logged) 99 | loglevel notice 100 | 101 | # Specify the log file name. Also the empty string can be used to force 102 | # Redis to log on the standard output. Note that if you use standard 103 | # output for logging but daemonize, logs will be sent to /dev/null 104 | logfile "" 105 | 106 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 107 | # and optionally update the other syslog parameters to suit your needs. 108 | # syslog-enabled no 109 | 110 | # Specify the syslog identity. 111 | # syslog-ident redis 112 | 113 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 114 | # syslog-facility local0 115 | 116 | # Set the number of databases. The default database is DB 0, you can select 117 | # a different one on a per-connection basis using SELECT where 118 | # dbid is a number between 0 and 'databases'-1 119 | databases 16 120 | 121 | ################################ SNAPSHOTTING ################################ 122 | # 123 | # Save the DB on disk: 124 | # 125 | # save 126 | # 127 | # Will save the DB if both the given number of seconds and the given 128 | # number of write operations against the DB occurred. 
129 | # 130 | # In the example below the behaviour will be to save: 131 | # after 900 sec (15 min) if at least 1 key changed 132 | # after 300 sec (5 min) if at least 10 keys changed 133 | # after 60 sec if at least 10000 keys changed 134 | # 135 | # Note: you can disable saving completely by commenting out all "save" lines. 136 | # 137 | # It is also possible to remove all the previously configured save 138 | # points by adding a save directive with a single empty string argument 139 | # like in the following example: 140 | # 141 | # save "" 142 | 143 | save 900 1 144 | save 300 10 145 | save 60 10000 146 | 147 | # By default Redis will stop accepting writes if RDB snapshots are enabled 148 | # (at least one save point) and the latest background save failed. 149 | # This will make the user aware (in a hard way) that data is not persisting 150 | # on disk properly, otherwise chances are that no one will notice and some 151 | # disaster will happen. 152 | # 153 | # If the background saving process will start working again Redis will 154 | # automatically allow writes again. 155 | # 156 | # However if you have setup your proper monitoring of the Redis server 157 | # and persistence, you may want to disable this feature so that Redis will 158 | # continue to work as usual even if there are problems with disk, 159 | # permissions, and so forth. 160 | stop-writes-on-bgsave-error yes 161 | 162 | # Compress string objects using LZF when dump .rdb databases? 163 | # For default that's set to 'yes' as it's almost always a win. 164 | # If you want to save some CPU in the saving child set it to 'no' but 165 | # the dataset will likely be bigger if you have compressible values or keys. 166 | rdbcompression yes 167 | 168 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 169 | # This makes the format more resistant to corruption but there is a performance 170 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it 171 | # for maximum performances. 172 | # 173 | # RDB files created with checksum disabled have a checksum of zero that will 174 | # tell the loading code to skip the check. 175 | rdbchecksum yes 176 | 177 | # The filename where to dump the DB 178 | dbfilename dump.rdb 179 | 180 | # The working directory. 181 | # 182 | # The DB will be written inside this directory, with the filename specified 183 | # above using the 'dbfilename' configuration directive. 184 | # 185 | # The Append Only File will also be created inside this directory. 186 | # 187 | # Note that you must specify a directory here, not a file name. 188 | dir "/redis-slave/" 189 | 190 | ################################# REPLICATION ################################# 191 | 192 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of 193 | # another Redis server. A few things to understand ASAP about Redis replication. 194 | # 195 | # 1) Redis replication is asynchronous, but you can configure a master to 196 | # stop accepting writes if it appears to be not connected with at least 197 | # a given number of slaves. 198 | # 2) Redis slaves are able to perform a partial resynchronization with the 199 | # master if the replication link is lost for a relatively small amount of 200 | # time. You may want to configure the replication backlog size (see the next 201 | # sections of this file) with a sensible value depending on your needs. 202 | # 3) Replication is automatic and does not need user intervention. 
After a 203 | # network partition slaves automatically try to reconnect to masters 204 | # and resynchronize with them. 205 | # 206 | slaveof %master-ip% %master-port% 207 | 208 | # If the master is password protected (using the "requirepass" configuration 209 | # directive below) it is possible to tell the slave to authenticate before 210 | # starting the replication synchronization process, otherwise the master will 211 | # refuse the slave request. 212 | # 213 | # masterauth 214 | 215 | # When a slave loses its connection with the master, or when the replication 216 | # is still in progress, the slave can act in two different ways: 217 | # 218 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will 219 | # still reply to client requests, possibly with out of date data, or the 220 | # data set may just be empty if this is the first synchronization. 221 | # 222 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with 223 | # an error "SYNC with master in progress" to all the kind of commands 224 | # but to INFO and SLAVEOF. 225 | # 226 | slave-serve-stale-data yes 227 | 228 | protected-mode no 229 | 230 | # You can configure a slave instance to accept writes or not. Writing against 231 | # a slave instance may be useful to store some ephemeral data (because data 232 | # written on a slave will be easily deleted after resync with the master) but 233 | # may also cause problems if clients are writing to it because of a 234 | # misconfiguration. 235 | # 236 | # Since Redis 2.6 by default slaves are read-only. 237 | # 238 | # Note: read only slaves are not designed to be exposed to untrusted clients 239 | # on the internet. It's just a protection layer against misuse of the instance. 240 | # Still a read only slave exports by default all the administrative commands 241 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve 242 | # security of read only slaves using 'rename-command' to shadow all the 243 | # administrative / dangerous commands. 244 | slave-read-only yes 245 | 246 | # Replication SYNC strategy: disk or socket. 247 | # 248 | # ------------------------------------------------------- 249 | # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY 250 | # ------------------------------------------------------- 251 | # 252 | # New slaves and reconnecting slaves that are not able to continue the replication 253 | # process just receiving differences, need to do what is called a "full 254 | # synchronization". An RDB file is transmitted from the master to the slaves. 255 | # The transmission can happen in two different ways: 256 | # 257 | # 1) Disk-backed: The Redis master creates a new process that writes the RDB 258 | # file on disk. Later the file is transferred by the parent 259 | # process to the slaves incrementally. 260 | # 2) Diskless: The Redis master creates a new process that directly writes the 261 | # RDB file to slave sockets, without touching the disk at all. 262 | # 263 | # With disk-backed replication, while the RDB file is generated, more slaves 264 | # can be queued and served with the RDB file as soon as the current child producing 265 | # the RDB file finishes its work. With diskless replication instead once 266 | # the transfer starts, new slaves arriving will be queued and a new transfer 267 | # will start when the current one terminates. 
268 | # 269 | # When diskless replication is used, the master waits a configurable amount of 270 | # time (in seconds) before starting the transfer in the hope that multiple slaves 271 | # will arrive and the transfer can be parallelized. 272 | # 273 | # With slow disks and fast (large bandwidth) networks, diskless replication 274 | # works better. 275 | repl-diskless-sync no 276 | 277 | # When diskless replication is enabled, it is possible to configure the delay 278 | # the server waits in order to spawn the child that trnasfers the RDB via socket 279 | # to the slaves. 280 | # 281 | # This is important since once the transfer starts, it is not possible to serve 282 | # new slaves arriving, that will be queued for the next RDB transfer, so the server 283 | # waits a delay in order to let more slaves arrive. 284 | # 285 | # The delay is specified in seconds, and by default is 5 seconds. To disable 286 | # it entirely just set it to 0 seconds and the transfer will start ASAP. 287 | repl-diskless-sync-delay 5 288 | 289 | # Slaves send PINGs to server in a predefined interval. It's possible to change 290 | # this interval with the repl_ping_slave_period option. The default value is 10 291 | # seconds. 292 | # 293 | # repl-ping-slave-period 10 294 | 295 | # The following option sets the replication timeout for: 296 | # 297 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave. 298 | # 2) Master timeout from the point of view of slaves (data, pings). 299 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). 300 | # 301 | # It is important to make sure that this value is greater than the value 302 | # specified for repl-ping-slave-period otherwise a timeout will be detected 303 | # every time there is low traffic between the master and the slave. 304 | # 305 | # repl-timeout 60 306 | 307 | # Disable TCP_NODELAY on the slave socket after SYNC? 308 | # 309 | # If you select "yes" Redis will use a smaller number of TCP packets and 310 | # less bandwidth to send data to slaves. But this can add a delay for 311 | # the data to appear on the slave side, up to 40 milliseconds with 312 | # Linux kernels using a default configuration. 313 | # 314 | # If you select "no" the delay for data to appear on the slave side will 315 | # be reduced but more bandwidth will be used for replication. 316 | # 317 | # By default we optimize for low latency, but in very high traffic conditions 318 | # or when the master and slaves are many hops away, turning this to "yes" may 319 | # be a good idea. 320 | repl-disable-tcp-nodelay no 321 | 322 | # Set the replication backlog size. The backlog is a buffer that accumulates 323 | # slave data when slaves are disconnected for some time, so that when a slave 324 | # wants to reconnect again, often a full resync is not needed, but a partial 325 | # resync is enough, just passing the portion of data the slave missed while 326 | # disconnected. 327 | # 328 | # The bigger the replication backlog, the longer the time the slave can be 329 | # disconnected and later be able to perform a partial resynchronization. 330 | # 331 | # The backlog is only allocated once there is at least a slave connected. 332 | # 333 | # repl-backlog-size 1mb 334 | 335 | # After a master has no longer connected slaves for some time, the backlog 336 | # will be freed. The following option configures the amount of seconds that 337 | # need to elapse, starting from the time the last slave disconnected, for 338 | # the backlog buffer to be freed. 
339 | # 340 | # A value of 0 means to never release the backlog. 341 | # 342 | # repl-backlog-ttl 3600 343 | 344 | # The slave priority is an integer number published by Redis in the INFO output. 345 | # It is used by Redis Sentinel in order to select a slave to promote into a 346 | # master if the master is no longer working correctly. 347 | # 348 | # A slave with a low priority number is considered better for promotion, so 349 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will 350 | # pick the one with priority 10, that is the lowest. 351 | # 352 | # However a special priority of 0 marks the slave as not able to perform the 353 | # role of master, so a slave with priority of 0 will never be selected by 354 | # Redis Sentinel for promotion. 355 | # 356 | # By default the priority is 100. 357 | slave-priority 100 358 | 359 | # It is possible for a master to stop accepting writes if there are less than 360 | # N slaves connected, having a lag less or equal than M seconds. 361 | # 362 | # The N slaves need to be in "online" state. 363 | # 364 | # The lag in seconds, that must be <= the specified value, is calculated from 365 | # the last ping received from the slave, that is usually sent every second. 366 | # 367 | # This option does not GUARANTEE that N replicas will accept the write, but 368 | # will limit the window of exposure for lost writes in case not enough slaves 369 | # are available, to the specified number of seconds. 370 | # 371 | # For example to require at least 3 slaves with a lag <= 10 seconds use: 372 | # 373 | # min-slaves-to-write 3 374 | # min-slaves-max-lag 10 375 | # 376 | # Setting one or the other to 0 disables the feature. 377 | # 378 | # By default min-slaves-to-write is set to 0 (feature disabled) and 379 | # min-slaves-max-lag is set to 10. 380 | 381 | ################################## SECURITY ################################### 382 | 383 | # Require clients to issue AUTH before processing any other 384 | # commands. This might be useful in environments in which you do not trust 385 | # others with access to the host running redis-server. 386 | # 387 | # This should stay commented out for backward compatibility and because most 388 | # people do not need auth (e.g. they run their own servers). 389 | # 390 | # Warning: since Redis is pretty fast an outside user can try up to 391 | # 150k passwords per second against a good box. This means that you should 392 | # use a very strong password otherwise it will be very easy to break. 393 | # 394 | # requirepass foobared 395 | 396 | # Command renaming. 397 | # 398 | # It is possible to change the name of dangerous commands in a shared 399 | # environment. For instance the CONFIG command may be renamed into something 400 | # hard to guess so that it will still be available for internal-use tools 401 | # but not available for general clients. 402 | # 403 | # Example: 404 | # 405 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 406 | # 407 | # It is also possible to completely kill a command by renaming it into 408 | # an empty string: 409 | # 410 | # rename-command CONFIG "" 411 | # 412 | # Please note that changing the name of commands that are logged into the 413 | # AOF file or transmitted to slaves may cause problems. 414 | 415 | ################################### LIMITS #################################### 416 | 417 | # Set the max number of connected clients at the same time. 
By default 418 | # this limit is set to 10000 clients, however if the Redis server is not 419 | # able to configure the process file limit to allow for the specified limit 420 | # the max number of allowed clients is set to the current file limit 421 | # minus 32 (as Redis reserves a few file descriptors for internal uses). 422 | # 423 | # Once the limit is reached Redis will close all the new connections sending 424 | # an error 'max number of clients reached'. 425 | # 426 | # maxclients 10000 427 | 428 | # Don't use more memory than the specified amount of bytes. 429 | # When the memory limit is reached Redis will try to remove keys 430 | # according to the eviction policy selected (see maxmemory-policy). 431 | # 432 | # If Redis can't remove keys according to the policy, or if the policy is 433 | # set to 'noeviction', Redis will start to reply with errors to commands 434 | # that would use more memory, like SET, LPUSH, and so on, and will continue 435 | # to reply to read-only commands like GET. 436 | # 437 | # This option is usually useful when using Redis as an LRU cache, or to set 438 | # a hard memory limit for an instance (using the 'noeviction' policy). 439 | # 440 | # WARNING: If you have slaves attached to an instance with maxmemory on, 441 | # the size of the output buffers needed to feed the slaves are subtracted 442 | # from the used memory count, so that network problems / resyncs will 443 | # not trigger a loop where keys are evicted, and in turn the output 444 | # buffer of slaves is full with DELs of keys evicted triggering the deletion 445 | # of more keys, and so forth until the database is completely emptied. 446 | # 447 | # In short... if you have slaves attached it is suggested that you set a lower 448 | # limit for maxmemory so that there is some free RAM on the system for slave 449 | # output buffers (but this is not needed if the policy is 'noeviction'). 450 | # 451 | # maxmemory 452 | 453 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 454 | # is reached. You can select among five behaviors: 455 | # 456 | # volatile-lru -> remove the key with an expire set using an LRU algorithm 457 | # allkeys-lru -> remove any key according to the LRU algorithm 458 | # volatile-random -> remove a random key with an expire set 459 | # allkeys-random -> remove a random key, any key 460 | # volatile-ttl -> remove the key with the nearest expire time (minor TTL) 461 | # noeviction -> don't expire at all, just return an error on write operations 462 | # 463 | # Note: with any of the above policies, Redis will return an error on write 464 | # operations, when there are no suitable keys for eviction. 465 | # 466 | # At the date of writing these commands are: set setnx setex append 467 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd 468 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby 469 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby 470 | # getset mset msetnx exec sort 471 | # 472 | # The default is: 473 | # 474 | # maxmemory-policy volatile-lru 475 | 476 | # LRU and minimal TTL algorithms are not precise algorithms but approximated 477 | # algorithms (in order to save memory), so you can select as well the sample 478 | # size to check. For instance for default Redis will check three keys and 479 | # pick the one that was used less recently, you can change the sample size 480 | # using the following configuration directive. 
481 | # 482 | # maxmemory-samples 3 483 | 484 | ############################## APPEND ONLY MODE ############################### 485 | 486 | # By default Redis asynchronously dumps the dataset on disk. This mode is 487 | # good enough in many applications, but an issue with the Redis process or 488 | # a power outage may result into a few minutes of writes lost (depending on 489 | # the configured save points). 490 | # 491 | # The Append Only File is an alternative persistence mode that provides 492 | # much better durability. For instance using the default data fsync policy 493 | # (see later in the config file) Redis can lose just one second of writes in a 494 | # dramatic event like a server power outage, or a single write if something 495 | # wrong with the Redis process itself happens, but the operating system is 496 | # still running correctly. 497 | # 498 | # AOF and RDB persistence can be enabled at the same time without problems. 499 | # If the AOF is enabled on startup Redis will load the AOF, that is the file 500 | # with the better durability guarantees. 501 | # 502 | # Please check http://redis.io/topics/persistence for more information. 503 | 504 | appendonly yes 505 | 506 | # The name of the append only file (default: "appendonly.aof") 507 | 508 | appendfilename "appendonly.aof" 509 | 510 | # The fsync() call tells the Operating System to actually write data on disk 511 | # instead of waiting for more data in the output buffer. Some OS will really flush 512 | # data on disk, some other OS will just try to do it ASAP. 513 | # 514 | # Redis supports three different modes: 515 | # 516 | # no: don't fsync, just let the OS flush the data when it wants. Faster. 517 | # always: fsync after every write to the append only log. Slow, Safest. 518 | # everysec: fsync only one time every second. Compromise. 519 | # 520 | # The default is "everysec", as that's usually the right compromise between 521 | # speed and data safety. It's up to you to understand if you can relax this to 522 | # "no" that will let the operating system flush the output buffer when 523 | # it wants, for better performances (but if you can live with the idea of 524 | # some data loss consider the default persistence mode that's snapshotting), 525 | # or on the contrary, use "always" that's very slow but a bit safer than 526 | # everysec. 527 | # 528 | # More details please check the following article: 529 | # http://antirez.com/post/redis-persistence-demystified.html 530 | # 531 | # If unsure, use "everysec". 532 | 533 | # appendfsync always 534 | appendfsync everysec 535 | # appendfsync no 536 | 537 | # When the AOF fsync policy is set to always or everysec, and a background 538 | # saving process (a background save or AOF log background rewriting) is 539 | # performing a lot of I/O against the disk, in some Linux configurations 540 | # Redis may block too long on the fsync() call. Note that there is no fix for 541 | # this currently, as even performing fsync in a different thread will block 542 | # our synchronous write(2) call. 543 | # 544 | # In order to mitigate this problem it's possible to use the following option 545 | # that will prevent fsync() from being called in the main process while a 546 | # BGSAVE or BGREWRITEAOF is in progress. 547 | # 548 | # This means that while another child is saving, the durability of Redis is 549 | # the same as "appendfsync none". 
In practical terms, this means that it is 550 | # possible to lose up to 30 seconds of log in the worst scenario (with the 551 | # default Linux settings). 552 | # 553 | # If you have latency problems turn this to "yes". Otherwise leave it as 554 | # "no" that is the safest pick from the point of view of durability. 555 | 556 | no-appendfsync-on-rewrite no 557 | 558 | # Automatic rewrite of the append only file. 559 | # Redis is able to automatically rewrite the log file implicitly calling 560 | # BGREWRITEAOF when the AOF log size grows by the specified percentage. 561 | # 562 | # This is how it works: Redis remembers the size of the AOF file after the 563 | # latest rewrite (if no rewrite has happened since the restart, the size of 564 | # the AOF at startup is used). 565 | # 566 | # This base size is compared to the current size. If the current size is 567 | # bigger than the specified percentage, the rewrite is triggered. Also 568 | # you need to specify a minimal size for the AOF file to be rewritten, this 569 | # is useful to avoid rewriting the AOF file even if the percentage increase 570 | # is reached but it is still pretty small. 571 | # 572 | # Specify a percentage of zero in order to disable the automatic AOF 573 | # rewrite feature. 574 | 575 | auto-aof-rewrite-percentage 100 576 | auto-aof-rewrite-min-size 64mb 577 | 578 | # An AOF file may be found to be truncated at the end during the Redis 579 | # startup process, when the AOF data gets loaded back into memory. 580 | # This may happen when the system where Redis is running 581 | # crashes, especially when an ext4 filesystem is mounted without the 582 | # data=ordered option (however this can't happen when Redis itself 583 | # crashes or aborts but the operating system still works correctly). 584 | # 585 | # Redis can either exit with an error when this happens, or load as much 586 | # data as possible (the default now) and start if the AOF file is found 587 | # to be truncated at the end. The following option controls this behavior. 588 | # 589 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and 590 | # the Redis server starts emitting a log to inform the user of the event. 591 | # Otherwise if the option is set to no, the server aborts with an error 592 | # and refuses to start. When the option is set to no, the user requires 593 | # to fix the AOF file using the "redis-check-aof" utility before to restart 594 | # the server. 595 | # 596 | # Note that if the AOF file will be found to be corrupted in the middle 597 | # the server will still exit with an error. This option only applies when 598 | # Redis will try to read more data from the AOF file but not enough bytes 599 | # will be found. 600 | aof-load-truncated yes 601 | 602 | ################################ LUA SCRIPTING ############################### 603 | 604 | # Max execution time of a Lua script in milliseconds. 605 | # 606 | # If the maximum execution time is reached Redis will log that a script is 607 | # still in execution after the maximum allowed time and will start to 608 | # reply to queries with an error. 609 | # 610 | # When a long running script exceeds the maximum execution time only the 611 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be 612 | # used to stop a script that did not yet called write commands. 
The second 613 | # is the only way to shut down the server in the case a write command was 614 | # already issued by the script but the user doesn't want to wait for the natural 615 | # termination of the script. 616 | # 617 | # Set it to 0 or a negative value for unlimited execution without warnings. 618 | lua-time-limit 5000 619 | 620 | ################################## SLOW LOG ################################### 621 | 622 | # The Redis Slow Log is a system to log queries that exceeded a specified 623 | # execution time. The execution time does not include the I/O operations 624 | # like talking with the client, sending the reply and so forth, 625 | # but just the time needed to actually execute the command (this is the only 626 | # stage of command execution where the thread is blocked and can not serve 627 | # other requests in the meantime). 628 | # 629 | # You can configure the slow log with two parameters: one tells Redis 630 | # what is the execution time, in microseconds, to exceed in order for the 631 | # command to get logged, and the other parameter is the length of the 632 | # slow log. When a new command is logged the oldest one is removed from the 633 | # queue of logged commands. 634 | 635 | # The following time is expressed in microseconds, so 1000000 is equivalent 636 | # to one second. Note that a negative number disables the slow log, while 637 | # a value of zero forces the logging of every command. 638 | slowlog-log-slower-than 10000 639 | 640 | # There is no limit to this length. Just be aware that it will consume memory. 641 | # You can reclaim memory used by the slow log with SLOWLOG RESET. 642 | slowlog-max-len 128 643 | 644 | ################################ LATENCY MONITOR ############################## 645 | 646 | # The Redis latency monitoring subsystem samples different operations 647 | # at runtime in order to collect data related to possible sources of 648 | # latency of a Redis instance. 649 | # 650 | # Via the LATENCY command this information is available to the user that can 651 | # print graphs and obtain reports. 652 | # 653 | # The system only logs operations that were performed in a time equal or 654 | # greater than the amount of milliseconds specified via the 655 | # latency-monitor-threshold configuration directive. When its value is set 656 | # to zero, the latency monitor is turned off. 657 | # 658 | # By default latency monitoring is disabled since it is mostly not needed 659 | # if you don't have latency issues, and collecting data has a performance 660 | # impact, that while very small, can be measured under big load. Latency 661 | # monitoring can easily be enalbed at runtime using the command 662 | # "CONFIG SET latency-monitor-threshold " if needed. 663 | latency-monitor-threshold 0 664 | 665 | ############################# Event notification ############################## 666 | 667 | # Redis can notify Pub/Sub clients about events happening in the key space. 668 | # This feature is documented at http://redis.io/topics/notifications 669 | # 670 | # For instance if keyspace events notification is enabled, and a client 671 | # performs a DEL operation on key "foo" stored in the Database 0, two 672 | # messages will be published via Pub/Sub: 673 | # 674 | # PUBLISH __keyspace@0__:foo del 675 | # PUBLISH __keyevent@0__:del foo 676 | # 677 | # It is possible to select the events that Redis will notify among a set 678 | # of classes. 
Every class is identified by a single character: 679 | # 680 | # K Keyspace events, published with __keyspace@__ prefix. 681 | # E Keyevent events, published with __keyevent@__ prefix. 682 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... 683 | # $ String commands 684 | # l List commands 685 | # s Set commands 686 | # h Hash commands 687 | # z Sorted set commands 688 | # x Expired events (events generated every time a key expires) 689 | # e Evicted events (events generated when a key is evicted for maxmemory) 690 | # A Alias for g$lshzxe, so that the "AKE" string means all the events. 691 | # 692 | # The "notify-keyspace-events" takes as argument a string that is composed 693 | # of zero or multiple characters. The empty string means that notifications 694 | # are disabled. 695 | # 696 | # Example: to enable list and generic events, from the point of view of the 697 | # event name, use: 698 | # 699 | # notify-keyspace-events Elg 700 | # 701 | # Example 2: to get the stream of the expired keys subscribing to channel 702 | # name __keyevent@0__:expired use: 703 | # 704 | # notify-keyspace-events Ex 705 | # 706 | # By default all notifications are disabled because most users don't need 707 | # this feature and the feature has some overhead. Note that if you don't 708 | # specify at least one of K or E, no events will be delivered. 709 | notify-keyspace-events "" 710 | 711 | ############################### ADVANCED CONFIG ############################### 712 | 713 | # Hashes are encoded using a memory efficient data structure when they have a 714 | # small number of entries, and the biggest entry does not exceed a given 715 | # threshold. These thresholds can be configured using the following directives. 716 | hash-max-ziplist-entries 512 717 | hash-max-ziplist-value 64 718 | 719 | # Similarly to hashes, small lists are also encoded in a special way in order 720 | # to save a lot of space. The special representation is only used when 721 | # you are under the following limits: 722 | list-max-ziplist-entries 512 723 | list-max-ziplist-value 64 724 | 725 | # Sets have a special encoding in just one case: when a set is composed 726 | # of just strings that happen to be integers in radix 10 in the range 727 | # of 64 bit signed integers. 728 | # The following configuration setting sets the limit in the size of the 729 | # set in order to use this special memory saving encoding. 730 | set-max-intset-entries 512 731 | 732 | # Similarly to hashes and lists, sorted sets are also specially encoded in 733 | # order to save a lot of space. This encoding is only used when the length and 734 | # elements of a sorted set are below the following limits: 735 | zset-max-ziplist-entries 128 736 | zset-max-ziplist-value 64 737 | 738 | # HyperLogLog sparse representation bytes limit. The limit includes the 739 | # 16 bytes header. When an HyperLogLog using the sparse representation crosses 740 | # this limit, it is converted into the dense representation. 741 | # 742 | # A value greater than 16000 is totally useless, since at that point the 743 | # dense representation is more memory efficient. 744 | # 745 | # The suggested value is ~ 3000 in order to have the benefits of 746 | # the space efficient encoding without slowing down too much PFADD, 747 | # which is O(N) with the sparse encoding. The value can be raised to 748 | # ~ 10000 when CPU is not a concern, but space is, and the data set is 749 | # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. 
750 | hll-sparse-max-bytes 3000 751 | 752 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in 753 | # order to help rehashing the main Redis hash table (the one mapping top-level 754 | # keys to values). The hash table implementation Redis uses (see dict.c) 755 | # performs a lazy rehashing: the more operation you run into a hash table 756 | # that is rehashing, the more rehashing "steps" are performed, so if the 757 | # server is idle the rehashing is never complete and some more memory is used 758 | # by the hash table. 759 | # 760 | # The default is to use this millisecond 10 times every second in order to 761 | # actively rehash the main dictionaries, freeing memory when possible. 762 | # 763 | # If unsure: 764 | # use "activerehashing no" if you have hard latency requirements and it is 765 | # not a good thing in your environment that Redis can reply from time to time 766 | # to queries with 2 milliseconds delay. 767 | # 768 | # use "activerehashing yes" if you don't have such hard requirements but 769 | # want to free memory asap when possible. 770 | activerehashing yes 771 | 772 | # The client output buffer limits can be used to force disconnection of clients 773 | # that are not reading data from the server fast enough for some reason (a 774 | # common reason is that a Pub/Sub client can't consume messages as fast as the 775 | # publisher can produce them). 776 | # 777 | # The limit can be set differently for the three different classes of clients: 778 | # 779 | # normal -> normal clients including MONITOR clients 780 | # slave -> slave clients 781 | # pubsub -> clients subscribed to at least one pubsub channel or pattern 782 | # 783 | # The syntax of every client-output-buffer-limit directive is the following: 784 | # 785 | # client-output-buffer-limit 786 | # 787 | # A client is immediately disconnected once the hard limit is reached, or if 788 | # the soft limit is reached and remains reached for the specified number of 789 | # seconds (continuously). 790 | # So for instance if the hard limit is 32 megabytes and the soft limit is 791 | # 16 megabytes / 10 seconds, the client will get disconnected immediately 792 | # if the size of the output buffers reach 32 megabytes, but will also get 793 | # disconnected if the client reaches 16 megabytes and continuously overcomes 794 | # the limit for 10 seconds. 795 | # 796 | # By default normal clients are not limited because they don't receive data 797 | # without asking (in a push way), but just after a request, so only 798 | # asynchronous clients may create a scenario where data is requested faster 799 | # than it can read. 800 | # 801 | # Instead there is a default limit for pubsub and slave clients, since 802 | # subscribers and slaves receive data in a push fashion. 803 | # 804 | # Both the hard or the soft limit can be disabled by setting them to zero. 805 | client-output-buffer-limit normal 0 0 0 806 | client-output-buffer-limit slave 256mb 64mb 60 807 | client-output-buffer-limit pubsub 32mb 8mb 60 808 | 809 | # Redis calls an internal function to perform many background tasks, like 810 | # closing connections of clients in timeout, purging expired keys that are 811 | # never requested, and so forth. 812 | # 813 | # Not all tasks are performed with the same frequency, but Redis checks for 814 | # tasks to perform according to the specified "hz" value. 815 | # 816 | # By default "hz" is set to 10. 
Raising the value will use more CPU when 817 | # Redis is idle, but at the same time will make Redis more responsive when 818 | # there are many keys expiring at the same time, and timeouts may be 819 | # handled with more precision. 820 | # 821 | # The range is between 1 and 500, however a value over 100 is usually not 822 | # a good idea. Most users should use the default of 10 and raise this up to 823 | # 100 only in environments where very low latency is required. 824 | hz 10 825 | 826 | # When a child rewrites the AOF file, if the following option is enabled 827 | # the file will be fsync-ed every 32 MB of data generated. This is useful 828 | # in order to commit the file to the disk more incrementally and avoid 829 | # big latency spikes. 830 | aof-rewrite-incremental-fsync yes 831 | -------------------------------------------------------------------------------- /image/redis-master.conf: -------------------------------------------------------------------------------- 1 | # Redis configuration file example 2 | 3 | # Note on units: when memory size is needed, it is possible to specify 4 | # it in the usual form of 1k 5GB 4M and so forth: 5 | # 6 | # 1k => 1000 bytes 7 | # 1kb => 1024 bytes 8 | # 1m => 1000000 bytes 9 | # 1mb => 1024*1024 bytes 10 | # 1g => 1000000000 bytes 11 | # 1gb => 1024*1024*1024 bytes 12 | # 13 | # units are case insensitive so 1GB 1Gb 1gB are all the same. 14 | 15 | ################################## INCLUDES ################################### 16 | 17 | # Include one or more other config files here. This is useful if you 18 | # have a standard template that goes to all Redis servers but also need 19 | # to customize a few per-server settings. Include files can include 20 | # other files, so use this wisely. 21 | # 22 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE" 23 | # from admin or Redis Sentinel. Since Redis always uses the last processed 24 | # line as value of a configuration directive, you'd better put includes 25 | # at the beginning of this file to avoid overwriting config change at runtime. 26 | # 27 | # If instead you are interested in using includes to override configuration 28 | # options, it is better to use include as the last line. 29 | # 30 | # include /path/to/local.conf 31 | # include /path/to/other.conf 32 | 33 | ################################ GENERAL ##################################### 34 | 35 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 36 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 37 | daemonize no 38 | 39 | # When running daemonized, Redis writes a pid file in /var/run/redis.pid by 40 | # default. You can specify a custom pid file location here. 41 | pidfile /var/run/redis.pid 42 | 43 | # Accept connections on the specified port, default is 6379. 44 | # If port 0 is specified Redis will not listen on a TCP socket. 45 | port 6379 46 | 47 | # TCP listen() backlog. 48 | # 49 | # In high requests-per-second environments you need an high backlog in order 50 | # to avoid slow clients connections issues. Note that the Linux kernel 51 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so 52 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog 53 | # in order to get the desired effect. 54 | tcp-backlog 511 55 | 56 | # By default Redis listens for connections from all the network interfaces 57 | # available on the server. 
It is possible to listen to just one or multiple 58 | # interfaces using the "bind" configuration directive, followed by one or 59 | # more IP addresses. 60 | # 61 | # Examples: 62 | # 63 | # bind 192.168.1.100 10.0.0.1 64 | 65 | bind 0.0.0.0 66 | 67 | # Protected mode is a layer of security protection, in order to avoid that 68 | # Redis instances left open on the internet are accessed and exploited. 69 | # 70 | # When protected mode is on and if: 71 | # 72 | # 1) The server is not binding explicitly to a set of addresses using the 73 | # "bind" directive. 74 | # 2) No password is configured. 75 | # 76 | # The server only accepts connections from clients connecting from the 77 | # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain 78 | # sockets. 79 | # 80 | # By default protected mode is enabled. You should disable it only if 81 | # you are sure you want clients from other hosts to connect to Redis 82 | # even if no authentication is configured, nor a specific set of interfaces 83 | # are explicitly listed using the "bind" directive. 84 | protected-mode no 85 | 86 | # Specify the path for the Unix socket that will be used to listen for 87 | # incoming connections. There is no default, so Redis will not listen 88 | # on a unix socket when not specified. 89 | # 90 | # unixsocket /tmp/redis.sock 91 | # unixsocketperm 700 92 | 93 | # Close the connection after a client is idle for N seconds (0 to disable) 94 | timeout 0 95 | 96 | # TCP keepalive. 97 | # 98 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 99 | # of communication. This is useful for two reasons: 100 | # 101 | # 1) Detect dead peers. 102 | # 2) Take the connection alive from the point of view of network 103 | # equipment in the middle. 104 | # 105 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 106 | # Note that to close the connection the double of the time is needed. 107 | # On other kernels the period depends on the kernel configuration. 108 | # 109 | # A reasonable value for this option is 60 seconds. 110 | tcp-keepalive 60 111 | 112 | # Specify the server verbosity level. 113 | # This can be one of: 114 | # debug (a lot of information, useful for development/testing) 115 | # verbose (many rarely useful info, but not a mess like the debug level) 116 | # notice (moderately verbose, what you want in production probably) 117 | # warning (only very important / critical messages are logged) 118 | loglevel notice 119 | 120 | # Specify the log file name. Also the empty string can be used to force 121 | # Redis to log on the standard output. Note that if you use standard 122 | # output for logging but daemonize, logs will be sent to /dev/null 123 | logfile "" 124 | 125 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 126 | # and optionally update the other syslog parameters to suit your needs. 127 | # syslog-enabled no 128 | 129 | # Specify the syslog identity. 130 | # syslog-ident redis 131 | 132 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 133 | # syslog-facility local0 134 | 135 | # Set the number of databases. 
The default database is DB 0, you can select 136 | # a different one on a per-connection basis using SELECT where 137 | # dbid is a number between 0 and 'databases'-1 138 | databases 16 139 | 140 | ################################ SNAPSHOTTING ################################ 141 | # 142 | # Save the DB on disk: 143 | # 144 | # save 145 | # 146 | # Will save the DB if both the given number of seconds and the given 147 | # number of write operations against the DB occurred. 148 | # 149 | # In the example below the behaviour will be to save: 150 | # after 900 sec (15 min) if at least 1 key changed 151 | # after 300 sec (5 min) if at least 10 keys changed 152 | # after 60 sec if at least 10000 keys changed 153 | # 154 | # Note: you can disable saving completely by commenting out all "save" lines. 155 | # 156 | # It is also possible to remove all the previously configured save 157 | # points by adding a save directive with a single empty string argument 158 | # like in the following example: 159 | # 160 | # save "" 161 | 162 | save 900 1 163 | save 300 10 164 | save 60 10000 165 | 166 | # By default Redis will stop accepting writes if RDB snapshots are enabled 167 | # (at least one save point) and the latest background save failed. 168 | # This will make the user aware (in a hard way) that data is not persisting 169 | # on disk properly, otherwise chances are that no one will notice and some 170 | # disaster will happen. 171 | # 172 | # If the background saving process will start working again Redis will 173 | # automatically allow writes again. 174 | # 175 | # However if you have setup your proper monitoring of the Redis server 176 | # and persistence, you may want to disable this feature so that Redis will 177 | # continue to work as usual even if there are problems with disk, 178 | # permissions, and so forth. 179 | stop-writes-on-bgsave-error yes 180 | 181 | # Compress string objects using LZF when dump .rdb databases? 182 | # For default that's set to 'yes' as it's almost always a win. 183 | # If you want to save some CPU in the saving child set it to 'no' but 184 | # the dataset will likely be bigger if you have compressible values or keys. 185 | rdbcompression yes 186 | 187 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 188 | # This makes the format more resistant to corruption but there is a performance 189 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it 190 | # for maximum performances. 191 | # 192 | # RDB files created with checksum disabled have a checksum of zero that will 193 | # tell the loading code to skip the check. 194 | rdbchecksum yes 195 | 196 | # The filename where to dump the DB 197 | dbfilename dump.rdb 198 | 199 | # The working directory. 200 | # 201 | # The DB will be written inside this directory, with the filename specified 202 | # above using the 'dbfilename' configuration directive. 203 | # 204 | # The Append Only File will also be created inside this directory. 205 | # 206 | # Note that you must specify a directory here, not a file name. 207 | dir /redis-master-data 208 | 209 | ################################# REPLICATION ################################# 210 | 211 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of 212 | # another Redis server. A few things to understand ASAP about Redis replication. 
213 | # 214 | # 1) Redis replication is asynchronous, but you can configure a master to 215 | # stop accepting writes if it appears to be not connected with at least 216 | # a given number of slaves. 217 | # 2) Redis slaves are able to perform a partial resynchronization with the 218 | # master if the replication link is lost for a relatively small amount of 219 | # time. You may want to configure the replication backlog size (see the next 220 | # sections of this file) with a sensible value depending on your needs. 221 | # 3) Replication is automatic and does not need user intervention. After a 222 | # network partition slaves automatically try to reconnect to masters 223 | # and resynchronize with them. 224 | # 225 | # slaveof <masterip> <masterport> 226 | 227 | # If the master is password protected (using the "requirepass" configuration 228 | # directive below) it is possible to tell the slave to authenticate before 229 | # starting the replication synchronization process, otherwise the master will 230 | # refuse the slave request. 231 | # 232 | # masterauth <master-password> 233 | 234 | # When a slave loses its connection with the master, or when the replication 235 | # is still in progress, the slave can act in two different ways: 236 | # 237 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will 238 | # still reply to client requests, possibly with out of date data, or the 239 | # data set may just be empty if this is the first synchronization. 240 | # 241 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with 242 | # an error "SYNC with master in progress" to all the kind of commands 243 | # but to INFO and SLAVEOF. 244 | # 245 | slave-serve-stale-data yes 246 | 247 | # You can configure a slave instance to accept writes or not. Writing against 248 | # a slave instance may be useful to store some ephemeral data (because data 249 | # written on a slave will be easily deleted after resync with the master) but 250 | # may also cause problems if clients are writing to it because of a 251 | # misconfiguration. 252 | # 253 | # Since Redis 2.6 by default slaves are read-only. 254 | # 255 | # Note: read only slaves are not designed to be exposed to untrusted clients 256 | # on the internet. It's just a protection layer against misuse of the instance. 257 | # Still a read only slave exports by default all the administrative commands 258 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve 259 | # security of read only slaves using 'rename-command' to shadow all the 260 | # administrative / dangerous commands. 261 | slave-read-only yes 262 | 263 | # Replication SYNC strategy: disk or socket. 264 | # 265 | # ------------------------------------------------------- 266 | # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY 267 | # ------------------------------------------------------- 268 | # 269 | # New slaves and reconnecting slaves that are not able to continue the replication 270 | # process just receiving differences, need to do what is called a "full 271 | # synchronization". An RDB file is transmitted from the master to the slaves. 272 | # The transmission can happen in two different ways: 273 | # 274 | # 1) Disk-backed: The Redis master creates a new process that writes the RDB 275 | # file on disk. Later the file is transferred by the parent 276 | # process to the slaves incrementally. 277 | # 2) Diskless: The Redis master creates a new process that directly writes the 278 | # RDB file to slave sockets, without touching the disk at all.
279 | # 280 | # With disk-backed replication, while the RDB file is generated, more slaves 281 | # can be queued and served with the RDB file as soon as the current child producing 282 | # the RDB file finishes its work. With diskless replication instead once 283 | # the transfer starts, new slaves arriving will be queued and a new transfer 284 | # will start when the current one terminates. 285 | # 286 | # When diskless replication is used, the master waits a configurable amount of 287 | # time (in seconds) before starting the transfer in the hope that multiple slaves 288 | # will arrive and the transfer can be parallelized. 289 | # 290 | # With slow disks and fast (large bandwidth) networks, diskless replication 291 | # works better. 292 | repl-diskless-sync no 293 | 294 | # When diskless replication is enabled, it is possible to configure the delay 295 | # the server waits in order to spawn the child that transfers the RDB via socket 296 | # to the slaves. 297 | # 298 | # This is important since once the transfer starts, it is not possible to serve 299 | # new slaves arriving, that will be queued for the next RDB transfer, so the server 300 | # waits a delay in order to let more slaves arrive. 301 | # 302 | # The delay is specified in seconds, and by default is 5 seconds. To disable 303 | # it entirely just set it to 0 seconds and the transfer will start ASAP. 304 | repl-diskless-sync-delay 5 305 | 306 | # Slaves send PINGs to server in a predefined interval. It's possible to change 307 | # this interval with the repl_ping_slave_period option. The default value is 10 308 | # seconds. 309 | # 310 | # repl-ping-slave-period 10 311 | 312 | # The following option sets the replication timeout for: 313 | # 314 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave. 315 | # 2) Master timeout from the point of view of slaves (data, pings). 316 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). 317 | # 318 | # It is important to make sure that this value is greater than the value 319 | # specified for repl-ping-slave-period otherwise a timeout will be detected 320 | # every time there is low traffic between the master and the slave. 321 | # 322 | # repl-timeout 60 323 | 324 | # Disable TCP_NODELAY on the slave socket after SYNC? 325 | # 326 | # If you select "yes" Redis will use a smaller number of TCP packets and 327 | # less bandwidth to send data to slaves. But this can add a delay for 328 | # the data to appear on the slave side, up to 40 milliseconds with 329 | # Linux kernels using a default configuration. 330 | # 331 | # If you select "no" the delay for data to appear on the slave side will 332 | # be reduced but more bandwidth will be used for replication. 333 | # 334 | # By default we optimize for low latency, but in very high traffic conditions 335 | # or when the master and slaves are many hops away, turning this to "yes" may 336 | # be a good idea. 337 | repl-disable-tcp-nodelay no 338 | 339 | # Set the replication backlog size. The backlog is a buffer that accumulates 340 | # slave data when slaves are disconnected for some time, so that when a slave 341 | # wants to reconnect again, often a full resync is not needed, but a partial 342 | # resync is enough, just passing the portion of data the slave missed while 343 | # disconnected. 344 | # 345 | # The bigger the replication backlog, the longer the time the slave can be 346 | # disconnected and later be able to perform a partial resynchronization.
347 | # 348 | # The backlog is only allocated once there is at least a slave connected. 349 | # 350 | # repl-backlog-size 1mb 351 | 352 | # After a master has no longer connected slaves for some time, the backlog 353 | # will be freed. The following option configures the amount of seconds that 354 | # need to elapse, starting from the time the last slave disconnected, for 355 | # the backlog buffer to be freed. 356 | # 357 | # A value of 0 means to never release the backlog. 358 | # 359 | # repl-backlog-ttl 3600 360 | 361 | # The slave priority is an integer number published by Redis in the INFO output. 362 | # It is used by Redis Sentinel in order to select a slave to promote into a 363 | # master if the master is no longer working correctly. 364 | # 365 | # A slave with a low priority number is considered better for promotion, so 366 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will 367 | # pick the one with priority 10, that is the lowest. 368 | # 369 | # However a special priority of 0 marks the slave as not able to perform the 370 | # role of master, so a slave with priority of 0 will never be selected by 371 | # Redis Sentinel for promotion. 372 | # 373 | # By default the priority is 100. 374 | slave-priority 100 375 | 376 | # It is possible for a master to stop accepting writes if there are less than 377 | # N slaves connected, having a lag less or equal than M seconds. 378 | # 379 | # The N slaves need to be in "online" state. 380 | # 381 | # The lag in seconds, that must be <= the specified value, is calculated from 382 | # the last ping received from the slave, that is usually sent every second. 383 | # 384 | # This option does not GUARANTEE that N replicas will accept the write, but 385 | # will limit the window of exposure for lost writes in case not enough slaves 386 | # are available, to the specified number of seconds. 387 | # 388 | # For example to require at least 3 slaves with a lag <= 10 seconds use: 389 | # 390 | # min-slaves-to-write 3 391 | # min-slaves-max-lag 10 392 | # 393 | # Setting one or the other to 0 disables the feature. 394 | # 395 | # By default min-slaves-to-write is set to 0 (feature disabled) and 396 | # min-slaves-max-lag is set to 10. 397 | 398 | ################################## SECURITY ################################### 399 | 400 | # Require clients to issue AUTH before processing any other 401 | # commands. This might be useful in environments in which you do not trust 402 | # others with access to the host running redis-server. 403 | # 404 | # This should stay commented out for backward compatibility and because most 405 | # people do not need auth (e.g. they run their own servers). 406 | # 407 | # Warning: since Redis is pretty fast an outside user can try up to 408 | # 150k passwords per second against a good box. This means that you should 409 | # use a very strong password otherwise it will be very easy to break. 410 | # 411 | # requirepass foobared 412 | 413 | # Command renaming. 414 | # 415 | # It is possible to change the name of dangerous commands in a shared 416 | # environment. For instance the CONFIG command may be renamed into something 417 | # hard to guess so that it will still be available for internal-use tools 418 | # but not available for general clients. 
419 | # 420 | # Example: 421 | # 422 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 423 | # 424 | # It is also possible to completely kill a command by renaming it into 425 | # an empty string: 426 | # 427 | # rename-command CONFIG "" 428 | # 429 | # Please note that changing the name of commands that are logged into the 430 | # AOF file or transmitted to slaves may cause problems. 431 | 432 | ################################### LIMITS #################################### 433 | 434 | # Set the max number of connected clients at the same time. By default 435 | # this limit is set to 10000 clients, however if the Redis server is not 436 | # able to configure the process file limit to allow for the specified limit 437 | # the max number of allowed clients is set to the current file limit 438 | # minus 32 (as Redis reserves a few file descriptors for internal uses). 439 | # 440 | # Once the limit is reached Redis will close all the new connections sending 441 | # an error 'max number of clients reached'. 442 | # 443 | # maxclients 10000 444 | 445 | # Don't use more memory than the specified amount of bytes. 446 | # When the memory limit is reached Redis will try to remove keys 447 | # according to the eviction policy selected (see maxmemory-policy). 448 | # 449 | # If Redis can't remove keys according to the policy, or if the policy is 450 | # set to 'noeviction', Redis will start to reply with errors to commands 451 | # that would use more memory, like SET, LPUSH, and so on, and will continue 452 | # to reply to read-only commands like GET. 453 | # 454 | # This option is usually useful when using Redis as an LRU cache, or to set 455 | # a hard memory limit for an instance (using the 'noeviction' policy). 456 | # 457 | # WARNING: If you have slaves attached to an instance with maxmemory on, 458 | # the size of the output buffers needed to feed the slaves are subtracted 459 | # from the used memory count, so that network problems / resyncs will 460 | # not trigger a loop where keys are evicted, and in turn the output 461 | # buffer of slaves is full with DELs of keys evicted triggering the deletion 462 | # of more keys, and so forth until the database is completely emptied. 463 | # 464 | # In short... if you have slaves attached it is suggested that you set a lower 465 | # limit for maxmemory so that there is some free RAM on the system for slave 466 | # output buffers (but this is not needed if the policy is 'noeviction'). 467 | # 468 | # maxmemory 469 | 470 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 471 | # is reached. You can select among five behaviors: 472 | # 473 | # volatile-lru -> remove the key with an expire set using an LRU algorithm 474 | # allkeys-lru -> remove any key according to the LRU algorithm 475 | # volatile-random -> remove a random key with an expire set 476 | # allkeys-random -> remove a random key, any key 477 | # volatile-ttl -> remove the key with the nearest expire time (minor TTL) 478 | # noeviction -> don't expire at all, just return an error on write operations 479 | # 480 | # Note: with any of the above policies, Redis will return an error on write 481 | # operations, when there are no suitable keys for eviction. 
482 | # 483 | # At the date of writing these commands are: set setnx setex append 484 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd 485 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby 486 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby 487 | # getset mset msetnx exec sort 488 | # 489 | # The default is: 490 | # 491 | # maxmemory-policy volatile-lru 492 | 493 | # LRU and minimal TTL algorithms are not precise algorithms but approximated 494 | # algorithms (in order to save memory), so you can select as well the sample 495 | # size to check. For instance for default Redis will check three keys and 496 | # pick the one that was used less recently, you can change the sample size 497 | # using the following configuration directive. 498 | # 499 | # maxmemory-samples 3 500 | 501 | ############################## APPEND ONLY MODE ############################### 502 | 503 | # By default Redis asynchronously dumps the dataset on disk. This mode is 504 | # good enough in many applications, but an issue with the Redis process or 505 | # a power outage may result into a few minutes of writes lost (depending on 506 | # the configured save points). 507 | # 508 | # The Append Only File is an alternative persistence mode that provides 509 | # much better durability. For instance using the default data fsync policy 510 | # (see later in the config file) Redis can lose just one second of writes in a 511 | # dramatic event like a server power outage, or a single write if something 512 | # wrong with the Redis process itself happens, but the operating system is 513 | # still running correctly. 514 | # 515 | # AOF and RDB persistence can be enabled at the same time without problems. 516 | # If the AOF is enabled on startup Redis will load the AOF, that is the file 517 | # with the better durability guarantees. 518 | # 519 | # Please check http://redis.io/topics/persistence for more information. 520 | 521 | appendonly yes 522 | 523 | # The name of the append only file (default: "appendonly.aof") 524 | 525 | appendfilename "appendonly.aof" 526 | 527 | # The fsync() call tells the Operating System to actually write data on disk 528 | # instead of waiting for more data in the output buffer. Some OS will really flush 529 | # data on disk, some other OS will just try to do it ASAP. 530 | # 531 | # Redis supports three different modes: 532 | # 533 | # no: don't fsync, just let the OS flush the data when it wants. Faster. 534 | # always: fsync after every write to the append only log. Slow, Safest. 535 | # everysec: fsync only one time every second. Compromise. 536 | # 537 | # The default is "everysec", as that's usually the right compromise between 538 | # speed and data safety. It's up to you to understand if you can relax this to 539 | # "no" that will let the operating system flush the output buffer when 540 | # it wants, for better performances (but if you can live with the idea of 541 | # some data loss consider the default persistence mode that's snapshotting), 542 | # or on the contrary, use "always" that's very slow but a bit safer than 543 | # everysec. 544 | # 545 | # More details please check the following article: 546 | # http://antirez.com/post/redis-persistence-demystified.html 547 | # 548 | # If unsure, use "everysec". 
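# For illustration: the fsync policy can be inspected and changed at runtime
# without editing this file, e.g.
#
#   redis-cli CONFIG GET appendfsync
#   redis-cli CONFIG SET appendfsync everysec
#
# and "redis-cli INFO persistence" reports the current AOF status.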
549 | 550 | # appendfsync always 551 | appendfsync everysec 552 | # appendfsync no 553 | 554 | # When the AOF fsync policy is set to always or everysec, and a background 555 | # saving process (a background save or AOF log background rewriting) is 556 | # performing a lot of I/O against the disk, in some Linux configurations 557 | # Redis may block too long on the fsync() call. Note that there is no fix for 558 | # this currently, as even performing fsync in a different thread will block 559 | # our synchronous write(2) call. 560 | # 561 | # In order to mitigate this problem it's possible to use the following option 562 | # that will prevent fsync() from being called in the main process while a 563 | # BGSAVE or BGREWRITEAOF is in progress. 564 | # 565 | # This means that while another child is saving, the durability of Redis is 566 | # the same as "appendfsync none". In practical terms, this means that it is 567 | # possible to lose up to 30 seconds of log in the worst scenario (with the 568 | # default Linux settings). 569 | # 570 | # If you have latency problems turn this to "yes". Otherwise leave it as 571 | # "no" that is the safest pick from the point of view of durability. 572 | 573 | no-appendfsync-on-rewrite no 574 | 575 | # Automatic rewrite of the append only file. 576 | # Redis is able to automatically rewrite the log file implicitly calling 577 | # BGREWRITEAOF when the AOF log size grows by the specified percentage. 578 | # 579 | # This is how it works: Redis remembers the size of the AOF file after the 580 | # latest rewrite (if no rewrite has happened since the restart, the size of 581 | # the AOF at startup is used). 582 | # 583 | # This base size is compared to the current size. If the current size is 584 | # bigger than the specified percentage, the rewrite is triggered. Also 585 | # you need to specify a minimal size for the AOF file to be rewritten, this 586 | # is useful to avoid rewriting the AOF file even if the percentage increase 587 | # is reached but it is still pretty small. 588 | # 589 | # Specify a percentage of zero in order to disable the automatic AOF 590 | # rewrite feature. 591 | 592 | auto-aof-rewrite-percentage 100 593 | auto-aof-rewrite-min-size 64mb 594 | 595 | # An AOF file may be found to be truncated at the end during the Redis 596 | # startup process, when the AOF data gets loaded back into memory. 597 | # This may happen when the system where Redis is running 598 | # crashes, especially when an ext4 filesystem is mounted without the 599 | # data=ordered option (however this can't happen when Redis itself 600 | # crashes or aborts but the operating system still works correctly). 601 | # 602 | # Redis can either exit with an error when this happens, or load as much 603 | # data as possible (the default now) and start if the AOF file is found 604 | # to be truncated at the end. The following option controls this behavior. 605 | # 606 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and 607 | # the Redis server starts emitting a log to inform the user of the event. 608 | # Otherwise if the option is set to no, the server aborts with an error 609 | # and refuses to start. When the option is set to no, the user requires 610 | # to fix the AOF file using the "redis-check-aof" utility before to restart 611 | # the server. 612 | # 613 | # Note that if the AOF file will be found to be corrupted in the middle 614 | # the server will still exit with an error. 
This option only applies when 615 | # Redis will try to read more data from the AOF file but not enough bytes 616 | # will be found. 617 | aof-load-truncated yes 618 | 619 | ################################ LUA SCRIPTING ############################### 620 | 621 | # Max execution time of a Lua script in milliseconds. 622 | # 623 | # If the maximum execution time is reached Redis will log that a script is 624 | # still in execution after the maximum allowed time and will start to 625 | # reply to queries with an error. 626 | # 627 | # When a long running script exceeds the maximum execution time only the 628 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be 629 | # used to stop a script that did not yet call write commands. The second 630 | # is the only way to shut down the server in the case a write command was 631 | # already issued by the script but the user doesn't want to wait for the natural 632 | # termination of the script. 633 | # 634 | # Set it to 0 or a negative value for unlimited execution without warnings. 635 | lua-time-limit 5000 636 | 637 | ################################## SLOW LOG ################################### 638 | 639 | # The Redis Slow Log is a system to log queries that exceeded a specified 640 | # execution time. The execution time does not include the I/O operations 641 | # like talking with the client, sending the reply and so forth, 642 | # but just the time needed to actually execute the command (this is the only 643 | # stage of command execution where the thread is blocked and can not serve 644 | # other requests in the meantime). 645 | # 646 | # You can configure the slow log with two parameters: one tells Redis 647 | # what is the execution time, in microseconds, to exceed in order for the 648 | # command to get logged, and the other parameter is the length of the 649 | # slow log. When a new command is logged the oldest one is removed from the 650 | # queue of logged commands. 651 | 652 | # The following time is expressed in microseconds, so 1000000 is equivalent 653 | # to one second. Note that a negative number disables the slow log, while 654 | # a value of zero forces the logging of every command. 655 | slowlog-log-slower-than 10000 656 | 657 | # There is no limit to this length. Just be aware that it will consume memory. 658 | # You can reclaim memory used by the slow log with SLOWLOG RESET. 659 | slowlog-max-len 128 660 | 661 | ################################ LATENCY MONITOR ############################## 662 | 663 | # The Redis latency monitoring subsystem samples different operations 664 | # at runtime in order to collect data related to possible sources of 665 | # latency of a Redis instance. 666 | # 667 | # Via the LATENCY command this information is available to the user that can 668 | # print graphs and obtain reports. 669 | # 670 | # The system only logs operations that were performed in a time equal or 671 | # greater than the amount of milliseconds specified via the 672 | # latency-monitor-threshold configuration directive. When its value is set 673 | # to zero, the latency monitor is turned off. 674 | # 675 | # By default latency monitoring is disabled since it is mostly not needed 676 | # if you don't have latency issues, and collecting data has a performance 677 | # impact, that while very small, can be measured under big load. Latency 678 | # monitoring can easily be enabled at runtime using the command 679 | # "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
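# For illustration: the monitor can be switched on and queried at runtime,
# e.g. (the 100 ms threshold below is an arbitrary example value)
#
#   redis-cli CONFIG SET latency-monitor-threshold 100
#   redis-cli LATENCY LATEST
#   redis-cli LATENCY RESET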
680 | latency-monitor-threshold 0 681 | 682 | ############################# Event notification ############################## 683 | 684 | # Redis can notify Pub/Sub clients about events happening in the key space. 685 | # This feature is documented at http://redis.io/topics/notifications 686 | # 687 | # For instance if keyspace events notification is enabled, and a client 688 | # performs a DEL operation on key "foo" stored in the Database 0, two 689 | # messages will be published via Pub/Sub: 690 | # 691 | # PUBLISH __keyspace@0__:foo del 692 | # PUBLISH __keyevent@0__:del foo 693 | # 694 | # It is possible to select the events that Redis will notify among a set 695 | # of classes. Every class is identified by a single character: 696 | # 697 | # K Keyspace events, published with __keyspace@__ prefix. 698 | # E Keyevent events, published with __keyevent@__ prefix. 699 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... 700 | # $ String commands 701 | # l List commands 702 | # s Set commands 703 | # h Hash commands 704 | # z Sorted set commands 705 | # x Expired events (events generated every time a key expires) 706 | # e Evicted events (events generated when a key is evicted for maxmemory) 707 | # A Alias for g$lshzxe, so that the "AKE" string means all the events. 708 | # 709 | # The "notify-keyspace-events" takes as argument a string that is composed 710 | # of zero or multiple characters. The empty string means that notifications 711 | # are disabled. 712 | # 713 | # Example: to enable list and generic events, from the point of view of the 714 | # event name, use: 715 | # 716 | # notify-keyspace-events Elg 717 | # 718 | # Example 2: to get the stream of the expired keys subscribing to channel 719 | # name __keyevent@0__:expired use: 720 | # 721 | # notify-keyspace-events Ex 722 | # 723 | # By default all notifications are disabled because most users don't need 724 | # this feature and the feature has some overhead. Note that if you don't 725 | # specify at least one of K or E, no events will be delivered. 726 | notify-keyspace-events "" 727 | 728 | ############################### ADVANCED CONFIG ############################### 729 | 730 | # Hashes are encoded using a memory efficient data structure when they have a 731 | # small number of entries, and the biggest entry does not exceed a given 732 | # threshold. These thresholds can be configured using the following directives. 733 | hash-max-ziplist-entries 512 734 | hash-max-ziplist-value 64 735 | 736 | # Similarly to hashes, small lists are also encoded in a special way in order 737 | # to save a lot of space. The special representation is only used when 738 | # you are under the following limits: 739 | list-max-ziplist-entries 512 740 | list-max-ziplist-value 64 741 | 742 | # Sets have a special encoding in just one case: when a set is composed 743 | # of just strings that happen to be integers in radix 10 in the range 744 | # of 64 bit signed integers. 745 | # The following configuration setting sets the limit in the size of the 746 | # set in order to use this special memory saving encoding. 747 | set-max-intset-entries 512 748 | 749 | # Similarly to hashes and lists, sorted sets are also specially encoded in 750 | # order to save a lot of space. This encoding is only used when the length and 751 | # elements of a sorted set are below the following limits: 752 | zset-max-ziplist-entries 128 753 | zset-max-ziplist-value 64 754 | 755 | # HyperLogLog sparse representation bytes limit. 
The limit includes the 756 | # 16 bytes header. When an HyperLogLog using the sparse representation crosses 757 | # this limit, it is converted into the dense representation. 758 | # 759 | # A value greater than 16000 is totally useless, since at that point the 760 | # dense representation is more memory efficient. 761 | # 762 | # The suggested value is ~ 3000 in order to have the benefits of 763 | # the space efficient encoding without slowing down too much PFADD, 764 | # which is O(N) with the sparse encoding. The value can be raised to 765 | # ~ 10000 when CPU is not a concern, but space is, and the data set is 766 | # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. 767 | hll-sparse-max-bytes 3000 768 | 769 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in 770 | # order to help rehashing the main Redis hash table (the one mapping top-level 771 | # keys to values). The hash table implementation Redis uses (see dict.c) 772 | # performs a lazy rehashing: the more operation you run into a hash table 773 | # that is rehashing, the more rehashing "steps" are performed, so if the 774 | # server is idle the rehashing is never complete and some more memory is used 775 | # by the hash table. 776 | # 777 | # The default is to use this millisecond 10 times every second in order to 778 | # actively rehash the main dictionaries, freeing memory when possible. 779 | # 780 | # If unsure: 781 | # use "activerehashing no" if you have hard latency requirements and it is 782 | # not a good thing in your environment that Redis can reply from time to time 783 | # to queries with 2 milliseconds delay. 784 | # 785 | # use "activerehashing yes" if you don't have such hard requirements but 786 | # want to free memory asap when possible. 787 | activerehashing yes 788 | 789 | # The client output buffer limits can be used to force disconnection of clients 790 | # that are not reading data from the server fast enough for some reason (a 791 | # common reason is that a Pub/Sub client can't consume messages as fast as the 792 | # publisher can produce them). 793 | # 794 | # The limit can be set differently for the three different classes of clients: 795 | # 796 | # normal -> normal clients including MONITOR clients 797 | # slave -> slave clients 798 | # pubsub -> clients subscribed to at least one pubsub channel or pattern 799 | # 800 | # The syntax of every client-output-buffer-limit directive is the following: 801 | # 802 | # client-output-buffer-limit 803 | # 804 | # A client is immediately disconnected once the hard limit is reached, or if 805 | # the soft limit is reached and remains reached for the specified number of 806 | # seconds (continuously). 807 | # So for instance if the hard limit is 32 megabytes and the soft limit is 808 | # 16 megabytes / 10 seconds, the client will get disconnected immediately 809 | # if the size of the output buffers reach 32 megabytes, but will also get 810 | # disconnected if the client reaches 16 megabytes and continuously overcomes 811 | # the limit for 10 seconds. 812 | # 813 | # By default normal clients are not limited because they don't receive data 814 | # without asking (in a push way), but just after a request, so only 815 | # asynchronous clients may create a scenario where data is requested faster 816 | # than it can read. 817 | # 818 | # Instead there is a default limit for pubsub and slave clients, since 819 | # subscribers and slaves receive data in a push fashion. 
820 | # 821 | # Both the hard or the soft limit can be disabled by setting them to zero. 822 | client-output-buffer-limit normal 0 0 0 823 | client-output-buffer-limit slave 256mb 64mb 60 824 | client-output-buffer-limit pubsub 32mb 8mb 60 825 | 826 | # Redis calls an internal function to perform many background tasks, like 827 | # closing connections of clients in timeout, purging expired keys that are 828 | # never requested, and so forth. 829 | # 830 | # Not all tasks are performed with the same frequency, but Redis checks for 831 | # tasks to perform according to the specified "hz" value. 832 | # 833 | # By default "hz" is set to 10. Raising the value will use more CPU when 834 | # Redis is idle, but at the same time will make Redis more responsive when 835 | # there are many keys expiring at the same time, and timeouts may be 836 | # handled with more precision. 837 | # 838 | # The range is between 1 and 500, however a value over 100 is usually not 839 | # a good idea. Most users should use the default of 10 and raise this up to 840 | # 100 only in environments where very low latency is required. 841 | hz 10 842 | 843 | # When a child rewrites the AOF file, if the following option is enabled 844 | # the file will be fsync-ed every 32 MB of data generated. This is useful 845 | # in order to commit the file to the disk more incrementally and avoid 846 | # big latency spikes. 847 | aof-rewrite-incremental-fsync yes --------------------------------------------------------------------------------
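With the master and slave configuration files above in place, a client normally reaches the cluster through the `redis-sentinel` service rather than the master pod directly. The snippet below is only a rough sketch, not code shipped in this repository, of how the go-redis library used by the bundled example could be pointed at the sentinels; the master name `mymaster` is an assumption and has to match whatever name the sentinels were configured to monitor.

```go
package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	// Connect through the sentinels so writes always reach the current master.
	// "mymaster" is an assumed name; it must match the sentinel configuration,
	// which is defined elsewhere in this repository.
	client := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"redis-sentinel:26379"},
	})
	defer client.Close()

	// Simple round trip to verify that replication and failover wiring work.
	if err := client.Set("hello", "world", 0).Err(); err != nil {
		panic(err)
	}
	val, err := client.Get("hello").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("hello =", val)
}
```

Inside the cluster the `redis-sentinel` service name resolves automatically; from a developer machine the sentinel port would first have to be exposed, for example with `oc port-forward`.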