├── .gitignore
├── README.md
├── apache2
├── Dockerfile
├── sites
│ ├── .gitignore
│ ├── default.apache.conf
│ └── sample.conf.example
└── vhost.conf
├── caddy
├── Dockerfile
└── caddy
│ ├── Caddyfile
│ └── authlist.conf
├── couchdb
├── Dockerfile
└── local.ini
├── docker-compose.yml
├── elasticsearch
├── Dockerfile
└── elasticsearch.yml
├── env.example
├── kafka
└── Dockerfile
├── kibana
└── Dockerfile
├── mariadb
├── Dockerfile
├── docker-entrypoint-initdb.d
│ ├── .gitignore
│ └── createdb.sql.example
└── my.cnf
├── memcached
└── Dockerfile
├── mongodb
└── Dockerfile
├── mysql
├── Dockerfile
├── docker-entrypoint-initdb.d
│ ├── .gitignore
│ └── createdb.sql.example
└── my.cnf
├── nacos-standalone
└── Dockerfile
├── nginx
├── Dockerfile
├── nginx.conf
├── sites
│ ├── .gitignore
│ └── default.conf.example
└── ssl
│ └── .gitignore
├── postgres
└── Dockerfile
├── rabbitmq
├── Dockerfile
├── management_agent.disable_metrics_collector.conf
└── rabbitmq.conf
├── redis-cluster
└── Dockerfile
├── redis
├── Dockerfile
└── redis.conf
└── zookeeper
└── Dockerfile
/.gitignore:
--------------------------------------------------------------------------------
1 | /logs/*
2 | .env
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## 采用docker-compose构建跨平台的服务容器
2 |
3 | #### 目前支持:
4 |
5 | - php5.6~php7.2
6 | - nginx
7 | - apache2
8 | - couchdb
9 | - mongodb
10 | - mysql
11 | - mariadb
12 | - redis
13 | - memcached
14 | - rabbitmq
15 | - elasticsearch
16 | - tomcat
17 | - nexus3
18 |
--------------------------------------------------------------------------------
/apache2/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM webdevops/apache:ubuntu-16.04
2 |
3 | LABEL maintainer="Leo Qin"
4 |
5 | # update timezone
6 | ARG TZ=UTC
7 | ENV TZ ${TZ}
8 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
9 |
10 | EXPOSE 80 443
11 |
12 | WORKDIR /var/www/
13 |
14 | COPY vhost.conf /etc/apache2/sites-enabled/vhost.conf
15 |
16 | ENTRYPOINT ["/opt/docker/bin/entrypoint.sh"]
17 |
18 | CMD ["supervisord"]
19 |
--------------------------------------------------------------------------------
/apache2/sites/.gitignore:
--------------------------------------------------------------------------------
1 | *.conf
2 | !default.conf
3 | !default.apache.conf
4 |
--------------------------------------------------------------------------------
/apache2/sites/default.apache.conf:
--------------------------------------------------------------------------------
1 | <VirtualHost *:80>
2 | ServerName laradock.test
3 | DocumentRoot /var/www/
4 | Options Indexes FollowSymLinks
5 |
6 | <Directory "/var/www/">
7 | AllowOverride All
8 | <IfVersion < 2.4>
9 | Allow from all
10 | </IfVersion>
11 | <IfVersion >= 2.4>
12 | Require all granted
13 | </IfVersion>
14 | </Directory>
15 |
16 | </VirtualHost>
17 |
--------------------------------------------------------------------------------
/apache2/sites/sample.conf.example:
--------------------------------------------------------------------------------
1 | <VirtualHost *:80>
2 | ServerName sample.test
3 | DocumentRoot /var/www/sample/public/
4 | Options Indexes FollowSymLinks
5 |
6 | <Directory "/var/www/sample/public/">
7 | AllowOverride All
8 | <IfVersion < 2.4>
9 | Allow from all
10 | </IfVersion>
11 | <IfVersion >= 2.4>
12 | Require all granted
13 | </IfVersion>
14 | </Directory>
15 |
16 | </VirtualHost>
17 |
--------------------------------------------------------------------------------
/apache2/vhost.conf:
--------------------------------------------------------------------------------
1 | Include /etc/apache2/sites-available/*.conf
2 |
--------------------------------------------------------------------------------
/caddy/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM abiosoft/caddy:no-stats
2 |
3 | CMD ["--conf", "/etc/caddy/Caddyfile", "--log", "stdout", "--agree=true"]
4 |
5 | EXPOSE 80 443 2015
6 |
--------------------------------------------------------------------------------
/caddy/caddy/Caddyfile:
--------------------------------------------------------------------------------
1 | # Docs: https://caddyserver.com/docs/caddyfile
2 | 0.0.0.0:80 {
3 | root /var/www/public
4 | fastcgi / php-fpm:9000 php {
5 | index index.php
6 | }
7 |
8 | # To handle .html extensions with laravel change ext to
9 | # ext / .html
10 |
11 | rewrite {
12 | to {path} {path}/ /index.php?{query}
13 | }
14 | gzip
15 | browse
16 | log /var/logs/caddy/access.log
17 | errors /var/logs/caddy/error.log
18 | # Uncomment to enable TLS (HTTPS)
19 | # Change the first line to listen on port 443 when enabling TLS
20 | #tls self_signed
21 |
22 | # To use Let's Encrypt TLS with a DNS provider, uncomment these
23 | # lines and change the provider as required
24 | #tls {
25 | # dns cloudflare
26 | #}
27 | }
28 |
29 | laradock1.demo:80 {
30 | root /var/www/public
31 | # Create a Webhook in git.
32 | #git {
33 | #repo https://github.com/xxx/xxx
34 | # path /home/xxx
35 | # #interval 60
36 | # hook webhook laradock
37 | # hook_type generic
38 | #}
39 |
40 | }
41 |
42 | laradock2.demo:80 {
43 | # Create a Proxy and cors.
44 | #proxy domain.com
45 | #cors
46 | }
47 |
48 | laradock3.demo:80 {
49 | import authlist.conf
50 | root /var/www/public
51 | }
--------------------------------------------------------------------------------
/caddy/caddy/authlist.conf:
--------------------------------------------------------------------------------
1 | basicauth / laradock laradock
2 |
--------------------------------------------------------------------------------
/couchdb/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM apache/couchdb:latest
2 |
3 | LABEL maintainer="Leo Qin"
4 |
5 | # update timezone
6 | ARG TZ=UTC
7 | ENV TZ ${TZ}
8 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
9 |
10 | # Add configuration
11 | COPY local.ini /opt/couchdb/etc/
12 |
13 | EXPOSE 5984
14 |
--------------------------------------------------------------------------------
/couchdb/local.ini:
--------------------------------------------------------------------------------
1 | ; CouchDB Configuration Settings
2 |
3 | ; Custom settings should be made in this file. They will override settings
4 | ; in default.ini, but unlike changes made to default.ini, this file won't be
5 | ; overwritten on server upgrade.
6 |
7 | [chttpd]
8 | bind_address = any
9 |
10 | [httpd]
11 | bind_address = any
12 |
13 | [log]
14 | file = /usr/local/var/logs/couchdb/couch.log
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.5"
2 |
3 | networks:
4 | frontend:
5 | driver: ${NETWORKS_DRIVER}
6 | backend:
7 | driver: ${NETWORKS_DRIVER}
8 |
9 | volumes:
10 | mysql:
11 | driver: ${VOLUMES_DRIVER}
12 | mariadb:
13 | driver: ${VOLUMES_DRIVER}
14 | postgres:
15 | driver: ${VOLUMES_DRIVER}
16 | memcached:
17 | driver: ${VOLUMES_DRIVER}
18 | redis:
19 | driver: ${VOLUMES_DRIVER}
20 | mongodb:
21 | driver: ${VOLUMES_DRIVER}
22 | couchdb:
23 | driver: ${VOLUMES_DRIVER}
24 | elasticsearch:
25 | driver: ${VOLUMES_DRIVER}
26 |
27 | services:
28 |
29 | ### NGINX Server Container ################################
30 | nginx:
31 | build:
32 | context: ./nginx
33 | args:
34 | - TZ=${TZ}
35 | volumes:
36 | - ${APP_CODE_PATH_HOST}/nginx:${APP_CODE_PATH_CONTAINER}${APP_CODE_CONTAINER_FLAG}
37 | - ${NGINX_HOST_LOG_PATH}:/var/logs/nginx
38 | - ${NGINX_SITES_PATH}:/etc/nginx/sites-available
39 | - ${NGINX_SSL_PATH}:/etc/nginx/ssl
40 | ports:
41 | - "${NGINX_HOST_HTTP_PORT}:80"
42 | - "${NGINX_HOST_HTTPS_PORT}:443"
43 | networks:
44 | - frontend
45 | - backend
46 | # network_mode: host  # NOTE: host network mode cannot be combined with the "ports:" mappings above; Compose rejects both together
47 |
48 | ### Apache Server ########################################
49 | apache2:
50 | build:
51 | context: ./apache2
52 | args:
53 | - TZ=${TZ}
54 | volumes:
55 | - ${APP_CODE_PATH_HOST}/apache2:${APP_CODE_PATH_CONTAINER}${APP_CODE_CONTAINER_FLAG}
56 | - ${APACHE_HOST_LOG_PATH}:/var/logs/apache2
57 | - ${APACHE_SITES_PATH}:/etc/apache2/sites-available
58 | ports:
59 | - "${APACHE_HOST_HTTP_PORT}:80"
60 | - "${APACHE_HOST_HTTPS_PORT}:443"
61 | networks:
62 | - frontend
63 | - backend
64 |
65 | ### Caddy Server #########################################
66 | caddy:
67 | build: ./caddy
68 | volumes:
69 | - ${APP_CODE_PATH_HOST}/caddy:${APP_CODE_PATH_CONTAINER}${APP_CODE_CONTAINER_FLAG}
70 | - ${CADDY_CONFIG_PATH}:/etc/caddy
71 | - ${CADDY_HOST_LOG_PATH}:/var/logs/caddy
72 | - ${DATA_PATH_HOST}:/root/.caddy
73 | ports:
74 | - "${CADDY_HOST_HTTP_PORT}:80"
75 | - "${CADDY_HOST_HTTPS_PORT}:443"
76 | networks:
77 | - frontend
78 | - backend
79 |
80 | ### MySQL Container #########################################
81 | mysql:
82 | build:
83 | context: ./mysql
84 | args:
85 | - MYSQL_VERSION=${MYSQL_VERSION}
86 | - TZ=${TZ}
87 | environment:
88 | - MYSQL_DATABASE=${MYSQL_DATABASE}
89 | - MYSQL_USER=${MYSQL_USER}
90 | - MYSQL_PASSWORD=${MYSQL_PASSWORD}
91 | - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
92 | volumes:
93 | - ${DATA_PATH_HOST}/mysql:/var/lib/mysql
94 | - ${MYSQL_ENTRYPOINT_INITDB}:/docker-entrypoint-initdb.d
95 | ports:
96 | - "${MYSQL_PORT}:3306"
97 | networks:
98 | - backend
99 |
100 | ### MariaDB Container #######################################
101 | mariadb:
102 | build:
103 | context: ./mariadb
104 | args:
105 | - MARIADB_VERSION=${MARIADB_VERSION}
106 | - TZ=${TZ}
107 | environment:
108 | - MYSQL_DATABASE=${MARIADB_DATABASE}
109 | - MYSQL_USER=${MARIADB_USER}
110 | - MYSQL_PASSWORD=${MARIADB_PASSWORD}
111 | - MYSQL_ROOT_PASSWORD=${MARIADB_ROOT_PASSWORD}
112 | volumes:
113 | - ${DATA_PATH_HOST}/mariadb:/var/lib/mysql
114 | - ${MARIADB_ENTRYPOINT_INITDB}:/docker-entrypoint-initdb.d
115 | ports:
116 | - "${MARIADB_PORT}:3306"
117 | networks:
118 | - backend
119 |
120 | ### PostgreSQL Container ####################################
121 | postgres:
122 | build:
123 | context: ./postgres
124 | args:
125 | - TZ=${TZ}
126 | environment:
127 | - POSTGRES_DB=${POSTGRES_DB}
128 | - POSTGRES_USER=${POSTGRES_USER}
129 | - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
130 | volumes:
131 | - ${DATA_PATH_HOST}/postgres:/var/lib/postgresql/data
132 | ports:
133 | - "${POSTGRES_PORT}:5432"
134 | networks:
135 | - backend
136 |
137 | ### MongoDB Container #######################################
138 | mongodb:
139 | build:
140 | context: ./mongodb
141 | args:
142 | - TZ=${TZ}
143 | ports:
144 | - "${MONGODB_PORT}:27017"
145 | volumes:
146 | - ${DATA_PATH_HOST}/mongodb:/data/db
147 | - ${DATA_PATH_HOST}/mongo_config:/data/configdb
148 | networks:
149 | - backend
150 |
151 | ### couchdb Container #######################################
152 | couchdb:
153 | build:
154 | context: ./couchdb
155 | args:
156 | - TZ=${TZ}
157 | environment:
158 | - COUCHDB_USER=${COUCHDB_USER}
159 | - COUCHDB_PASSWORD=${COUCHDB_PASSWORD}
160 | volumes:
161 | - ${DATA_PATH_HOST}/couchdb:/opt/couchdb/data
162 | - ${COUCHDB_HOST_LOG_PATH}:/usr/local/var/logs/couchdb
163 | ports:
164 | - "${COUCHDB_PORT}:5984"
165 | networks:
166 | - backend
167 |
168 | ### Redis Container #########################################
169 | redis:
170 | build:
171 | context: ./redis
172 | args:
173 | - TZ=${TZ}
174 | volumes:
175 | - ${DATA_PATH_HOST}/redis:/data
176 | ports:
177 | - "${REDIS_PORT}:6379"
178 | networks:
179 | - backend
180 |
181 | ### Redis Cluster ###########################################
182 | redis-cluster:
183 | build: ./redis-cluster
184 | volumes:
185 | - ${DATA_PATH_HOST}/redis-cluster:/redis-data
186 | ports:
187 | - "${REDIS_CLUSTER_PORT_RANGE}:7000-7005"
188 | networks:
189 | - backend
190 |
191 | ### Memcached Container #####################################
192 | memcached:
193 | build:
194 | context: ./memcached
195 | args:
196 | - TZ=${TZ}
197 | volumes:
198 | - ${DATA_PATH_HOST}/memcached:/var/lib/memcached
199 | ports:
200 | - "${MEMCACHED_HOST_PORT}:11211"
201 | networks:
202 | - backend
203 |
204 | ### RabbitMQ #############################################
205 | rabbitmq:
206 | build:
207 | context: ./rabbitmq
208 | args:
209 | - TZ=${TZ}
210 | ports:
211 | - "${RABBITMQ_NODE_HOST_PORT}:5672"
212 | - "${RABBITMQ_MANAGEMENT_HTTP_HOST_PORT}:15672"
213 | - "${RABBITMQ_MANAGEMENT_HTTPS_HOST_PORT}:15671"
214 | privileged: true
215 | volumes:
216 | - ${DATA_PATH_HOST}/rabbitmq:/var/lib/rabbitmq
217 | - ./rabbitmq/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf
218 | - ./rabbitmq/management_agent.disable_metrics_collector.conf:/etc/rabbitmq/conf.d/management_agent.disable_metrics_collector.conf
219 | networks:
220 | - backend
221 |
222 | ### ElasticSearch ########################################
223 | elasticsearch:
224 | build:
225 | context: ./elasticsearch
226 | args:
227 | - TZ=${TZ}
228 | volumes:
229 | - ${DATA_PATH_HOST}/elasticsearch:/usr/share/elasticsearch/data
230 | - ./elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
231 | environment:
232 | - cluster.name=cluster
233 | - node.name=node
234 | - bootstrap.memory_lock=true
235 | - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
236 | - cluster.initial_master_nodes=node
237 | ulimits:
238 | memlock:
239 | soft: -1
240 | hard: -1
241 | ports:
242 | - "${ELASTICSEARCH_HOST_HTTP_PORT}:9200"
243 | - "${ELASTICSEARCH_HOST_TRANSPORT_PORT}:9300"
244 | networks:
245 | - backend
246 |
247 | ### Kibana ########################################
248 | kibana:
249 | build:
250 | context: ./kibana
251 | # args:
252 | # - TZ=${TZ}
253 | # restart: always
254 | environment:
255 | ELASTICSEARCH_HOSTS: http://${HOST_IP}:${ELASTICSEARCH_HOST_HTTP_PORT}
256 | I18N_LOCALE: zh-CN
257 | # depends_on:
258 | # - elasticsearch
259 | ports:
260 | - ${KIBANA_HOST_HTTP_PORT}:5601
261 |
262 | ### Zookeeper ###########################################
263 | zookeeper:
264 | build:
265 | context: ./zookeeper
266 | hostname: zookeeper
267 | # restart: always
268 | volumes:
269 | - ${DATA_PATH_HOST}/zookeeper/data:/data
270 | - ${DATA_PATH_HOST}/zookeeper/datalog:/datalog
271 | - ${ZOOKEEPER_HOST_LOG_PATH}:/logs
272 | ports:
273 | - "${ZOOKEEPER_PORT}:2181"
274 | networks:
275 | - backend
276 |
277 | ### Kafka1 ###########################################
278 | kafka1:
279 | build:
280 | context: ./kafka
281 | ports:
282 | - "${KAFKA_PORT1}:9092"
283 | environment:
284 | - KAFKA_ZOOKEEPER_CONNECT=zookeeper:${ZOOKEEPER_PORT}
285 | # 非必须,设置自动创建 topic
286 | - KAFKA_AUTO_CREATE_TOPICS_ENABLE=${KAFKA_AUTO_CREATE_TOPICS_ENABLE}
287 | - KAFKA_ADVERTISED_HOST_NAME=${HOST_IP}
288 | - KAFKA_ADVERTISED_PORT=${KAFKA_PORT1}
289 | - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://${HOST_IP}:${KAFKA_PORT1}
290 | # - KAFKA_LISTENERS=PLAINTEXT://:${KAFKA_PORT1}
291 | # 非必须,设置对内存
292 | - KAFKA_HEAP_OPTS=${KAFKA_HEAP_OPTS}
293 | # 非必须,设置保存7天数据,为默认值
294 | - KAFKA_LOG_RETENTION_HOURS=${KAFKA_LOG_RETENTION_HOURS}
295 | - KAFKA_BROKER_ID=1
296 | - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
297 | volumes:
298 | # 将 kafka 的数据文件映射出来
299 | - ${DATA_PATH_HOST}/kafka1:/kafka
300 | # - /var/run/docker.sock:/var/run/docker.sock
301 | depends_on:
302 | - zookeeper
303 | networks:
304 | - backend
305 |
306 | ### Kafka2 ###########################################
307 | kafka2:
308 | build:
309 | context: ./kafka
310 | ports:
311 | - "${KAFKA_PORT2}:9092"
312 | environment:
313 | - KAFKA_ZOOKEEPER_CONNECT=zookeeper:${ZOOKEEPER_PORT}
314 | # 非必须,设置自动创建 topic
315 | - KAFKA_AUTO_CREATE_TOPICS_ENABLE=${KAFKA_AUTO_CREATE_TOPICS_ENABLE}
316 | - KAFKA_ADVERTISED_HOST_NAME=${HOST_IP}
317 | - KAFKA_ADVERTISED_PORT=${KAFKA_PORT2}
318 | - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://${HOST_IP}:${KAFKA_PORT2}
319 | # - KAFKA_LISTENERS=PLAINTEXT://:${KAFKA_PORT2}
320 | # 非必须,设置对内存
321 | - KAFKA_HEAP_OPTS=${KAFKA_HEAP_OPTS}
322 | # 非必须,设置保存7天数据,为默认值
323 | - KAFKA_LOG_RETENTION_HOURS=${KAFKA_LOG_RETENTION_HOURS}
324 | - KAFKA_BROKER_ID=2
325 | - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
326 | volumes:
327 | # 将 kafka 的数据文件映射出来
328 | - ${DATA_PATH_HOST}/kafka2:/kafka
329 | # - /var/run/docker.sock:/var/run/docker.sock
330 | depends_on:
331 | - zookeeper
332 | networks:
333 | - backend
334 |
335 | ### Kafka-manager #####################################
336 | kafka-manager:
337 | image: sheepkiller/kafka-manager
338 | environment:
339 | - ZK_HOSTS=${HOST_IP}
340 | ports:
341 | - "${KAFKA_MAMAGER_PORT}:9000"
342 |
343 | ### nacos #####################################
344 | nacos-standalone:
345 | build:
346 | context: ./nacos-standalone
347 | args:
348 | - NACOS_STANDALONE_VERSION=${NACOS_STANDALONE_VERSION}
349 | container_name: nacos-standalone
350 | environment:
351 | - MODE=standalone
352 | - JVM_XMS=128M
353 | - JVM_XMX=128M
354 | - JVM_XMN=32M
355 | volumes:
356 | - ${NACOS_STANDALONE_HOST_LOG_PATH}:/home/nacos/logs
357 | ports:
358 | - "${NACOS_STANDALONE_CLIENT_PORT}:8848"
359 | - "${NACOS_STANDALONE_SERVER_PORT}:9848"
--------------------------------------------------------------------------------
/elasticsearch/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM elasticsearch:7.17.10
2 |
3 | LABEL maintainer="Leo Qin"
4 |
5 | # update timezone
6 | ARG TZ=UTC
7 | ENV TZ ${TZ}
8 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
9 |
10 | EXPOSE 9200 9300
11 |
--------------------------------------------------------------------------------
/elasticsearch/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | cluster.name: "docker-cluster"
2 | network.host: 0.0.0.0
--------------------------------------------------------------------------------
/env.example:
--------------------------------------------------------------------------------
1 | ### Path ######################################
2 |
3 | # Point to the path of your applications code on your host
4 | APP_CODE_PATH_HOST=../.app_code
5 |
6 | # Point to where the `APP_CODE_PATH_HOST` should be in the container
7 | APP_CODE_PATH_CONTAINER=/var/www
8 |
9 | # You may add flags to the path `:cached`, `:delegated`. When using Docker Sync add `:nocopy`
10 | APP_CODE_CONTAINER_FLAG=:cached
11 |
12 | DATA_PATH_HOST=../.data
13 | TZ=PRC
14 | HOST_IP=127.0.0.1
15 |
16 | ### All volumes driver #####################################
17 | VOLUMES_DRIVER=local
18 |
19 | ### All Networks driver ####################################
20 | NETWORKS_DRIVER=bridge
21 |
22 | ###########################################################
23 | # Containers Customization
24 | ###########################################################
25 |
26 | ### NGINX #################################################
27 |
28 | NGINX_HOST_HTTP_PORT=80
29 | NGINX_HOST_HTTPS_PORT=443
30 | NGINX_HOST_LOG_PATH=~/logs/nginx/
31 | NGINX_SITES_PATH=./nginx/sites/
32 | NGINX_PHP_UPSTREAM_PORT=9000
33 | NGINX_SSL_PATH=./nginx/ssl/
34 |
35 | ### APACHE ################################################
36 |
37 | APACHE_HOST_HTTP_PORT=80
38 | APACHE_HOST_HTTPS_PORT=443
39 | APACHE_HOST_LOG_PATH=~/logs/apache2
40 | APACHE_SITES_PATH=./apache2/sites
41 | APACHE_DOCUMENT_ROOT=/var/www/
42 |
43 | ### CADDY #################################################
44 |
45 | CADDY_HOST_HTTP_PORT=80
46 | CADDY_HOST_HTTPS_PORT=443
47 | CADDY_HOST_LOG_PATH=~/logs/caddy
48 | CADDY_CONFIG_PATH=./caddy/caddy
49 |
50 | ### MYSQL #################################################
51 |
52 | MYSQL_VERSION=latest
53 | MYSQL_DATABASE=default
54 | MYSQL_USER=default
55 | MYSQL_PASSWORD=secret
56 | MYSQL_PORT=3306
57 | MYSQL_ROOT_PASSWORD=root
58 | MYSQL_ENTRYPOINT_INITDB=./mysql/docker-entrypoint-initdb.d
59 |
60 | ### MARIADB ###############################################
61 |
62 | MARIADB_VERSION=latest
63 | MARIADB_DATABASE=default
64 | MARIADB_USER=default
65 | MARIADB_PASSWORD=secret
66 | MARIADB_PORT=3306
67 | MARIADB_ROOT_PASSWORD=root
68 | MARIADB_ENTRYPOINT_INITDB=./mariadb/docker-entrypoint-initdb.d
69 |
70 | ### POSTGRES ##############################################
71 |
72 | POSTGRESQL_VERSION=latest
73 | POSTGRES_DB=default
74 | POSTGRES_USER=default
75 | POSTGRES_PASSWORD=secret
76 | POSTGRES_PORT=5432
77 |
78 | ### RABBITMQ ##############################################
79 |
80 | RABBITMQ_NODE_HOST_PORT=5672
81 | RABBITMQ_MANAGEMENT_HTTP_HOST_PORT=15672
82 | RABBITMQ_MANAGEMENT_HTTPS_HOST_PORT=15671
83 |
84 | ### ELASTICSEARCH #########################################
85 |
86 | ELASTICSEARCH_HOST_HTTP_PORT=9200
87 | ELASTICSEARCH_HOST_TRANSPORT_PORT=9300
88 |
89 | ### KIBANA ################################################
90 |
91 | KIBANA_HOST_HTTP_PORT=5601
92 |
93 | ### MEMCACHED #############################################
94 |
95 | MEMCACHED_HOST_PORT=11211
96 |
97 | ### REDIS #################################################
98 |
99 | REDIS_PORT=6379
100 |
101 | ### REDIS CLUSTER #########################################
102 |
103 | REDIS_CLUSTER_PORT_RANGE=7000-7005
104 |
105 | ### COUCHDB ###############################################
106 |
107 | COUCHDB_USER=admin
108 | COUCHDB_PASSWORD=admin
109 | COUCHDB_PORT=5984
110 | COUCHDB_HOST_LOG_PATH=~/logs/couchdb/
111 |
112 | ### MONGODB ###############################################
113 |
114 | MONGODB_PORT=27017
115 |
116 | ### ZOOKEEPER ###############################################
117 |
118 | ZOOKEEPER_PORT=2181
119 | ZOOKEEPER_HOST_LOG_PATH=~/logs/zookeeper/
120 |
121 | ### KAFKA ###############################################
122 |
123 | KAFKA_AUTO_CREATE_TOPICS_ENABLE=true
124 | KAFKA_HEAP_OPTS=-Xmx1G -Xms1G
125 | KAFKA_LOG_RETENTION_HOURS=168
126 | KAFKA_PORT1=9092
127 | KAFKA_PORT2=9093
128 | KAFKA_MAMAGER_PORT=9000
129 |
130 | ### NACOS-STANDALONE ####################################
131 | NACOS_STANDALONE_VERSION=v2.1.0
132 | NACOS_STANDALONE_HOST_LOG_PATH=~/logs/nacos-standalone/
133 | NACOS_STANDALONE_CLIENT_PORT=8848
134 | NACOS_STANDALONE_SERVER_PORT=9848
135 |
136 |
--------------------------------------------------------------------------------
/kafka/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM wurstmeister/kafka:latest
2 |
3 | LABEL maintainer="Leo Qin"
4 |
5 | # update timezone
6 | ARG TZ=UTC
7 | ENV TZ ${TZ}
8 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
9 |
10 |
--------------------------------------------------------------------------------
/kibana/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM kibana:7.17.10
2 |
3 | LABEL maintainer="Leo Qin"
4 |
5 | # update timezone
6 | # ARG TZ=UTC
7 | # ENV TZ ${TZ}
8 | # RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
9 |
10 | EXPOSE 5601
11 |
--------------------------------------------------------------------------------
/mariadb/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG MARIADB_VERSION=latest
2 | FROM mariadb:${MARIADB_VERSION}
3 |
4 | LABEL maintainer="Leo Qin"
5 |
6 | # update timezone
7 | ARG TZ=UTC
8 | ENV TZ ${TZ}
9 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
10 |
11 | ADD my.cnf /etc/mysql/conf.d/my.cnf
12 |
13 | CMD ["mysqld"]
14 |
15 | EXPOSE 3306
16 |
--------------------------------------------------------------------------------
/mariadb/docker-entrypoint-initdb.d/.gitignore:
--------------------------------------------------------------------------------
1 | *.sql
2 |
--------------------------------------------------------------------------------
/mariadb/docker-entrypoint-initdb.d/createdb.sql.example:
--------------------------------------------------------------------------------
1 | ###
2 | ### Copy createdb.sql.example to createdb.sql
3 | ### then uncomment it and set the database name and username for the databases you need
4 | #
5 | # example: .env MARIADB_USER=appuser and need db name is myshop_db
6 | #
7 | # CREATE DATABASE IF NOT EXISTS `myshop_db` ;
8 | # GRANT ALL ON `myshop_db`.* TO 'appuser'@'%' ;
9 | #
10 | ###
11 | ### this sql script is auto run when mariadb container start and $DATA_SAVE_PATH/mariadb not exists.
12 | ###
13 | ### if your $DATA_SAVE_PATH/mariadb is exists and you do not want to delete it, you can run by manual execution:
14 | ###
15 | ### docker-compose exec mariadb bash
16 | ### mysql -u root -p < /docker-entrypoint-initdb.d/createdb.sql
17 | ###
18 |
19 | #CREATE DATABASE IF NOT EXISTS `dev_db_1` COLLATE 'utf8_general_ci' ;
20 | #GRANT ALL ON `dev_db_1`.* TO 'default'@'%' ;
21 |
22 | #CREATE DATABASE IF NOT EXISTS `dev_db_2` COLLATE 'utf8_general_ci' ;
23 | #GRANT ALL ON `dev_db_2`.* TO 'default'@'%' ;
24 |
25 | #CREATE DATABASE IF NOT EXISTS `dev_db_3` COLLATE 'utf8_general_ci' ;
26 | #GRANT ALL ON `dev_db_3`.* TO 'default'@'%' ;
27 |
28 | FLUSH PRIVILEGES ;
29 |
--------------------------------------------------------------------------------
/mariadb/my.cnf:
--------------------------------------------------------------------------------
1 | # MariaDB database server configuration file.
2 | #
3 | # You can use this file to overwrite the default configuration
4 | #
5 | # For explanations see
6 | # http://dev.mysql.com/doc/mysql/en/server-system-variables.html
7 |
8 |
--------------------------------------------------------------------------------
/memcached/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM memcached:latest
2 |
3 | LABEL maintainer="Leo Qin"
4 |
5 | # update timezone
6 | ARG TZ=UTC
7 | ENV TZ ${TZ}
8 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
9 |
10 | CMD ["memcached"]
11 |
12 | EXPOSE 11211
13 |
--------------------------------------------------------------------------------
/mongodb/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mongo:latest
2 |
3 | LABEL maintainer="Leo Qin"
4 |
5 | # update timezone
6 | ARG TZ=UTC
7 | ENV TZ ${TZ}
8 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
9 |
10 | #COPY mongo.conf /usr/local/etc/mongo/mongo.conf
11 |
12 | VOLUME /data/db /data/configdb
13 |
14 | CMD ["mongod"]
15 |
16 | EXPOSE 27017
17 |
18 |
--------------------------------------------------------------------------------
/mysql/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG MYSQL_VERSION=latest
2 | FROM mysql:${MYSQL_VERSION}
3 |
4 | LABEL maintainer="Leo Qin"
5 |
6 | # update timezone
7 | ARG TZ=UTC
8 | ENV TZ ${TZ}
9 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
10 |
11 | RUN chown -R mysql:root /var/lib/mysql/
12 |
13 | COPY my.cnf /etc/mysql/conf.d/my.cnf
14 |
15 | CMD ["mysqld"]
16 |
17 | EXPOSE 3306
18 |
--------------------------------------------------------------------------------
/mysql/docker-entrypoint-initdb.d/.gitignore:
--------------------------------------------------------------------------------
1 | *.sql
2 |
--------------------------------------------------------------------------------
/mysql/docker-entrypoint-initdb.d/createdb.sql.example:
--------------------------------------------------------------------------------
1 | #
2 | # Copy createdb.sql.example to createdb.sql
3 | # then uncomment it and set the database name and username for the databases you need
4 | #
5 | # example: .env MYSQL_USER=appuser and need db name is myshop_db
6 | #
7 | # CREATE DATABASE IF NOT EXISTS `myshop_db` ;
8 | # GRANT ALL ON `myshop_db`.* TO 'appuser'@'%' ;
9 | #
10 | #
11 | # this sql script will auto run when the mysql container starts and the $DATA_SAVE_PATH/mysql not found.
12 | #
13 | # if your $DATA_SAVE_PATH/mysql exists and you do not want to delete it, you can run by manual execution:
14 | #
15 | # docker-compose exec mysql bash
16 | # mysql -u root -p < /docker-entrypoint-initdb.d/createdb.sql
17 | #
18 |
19 | #CREATE DATABASE IF NOT EXISTS `dev_db_1` COLLATE 'utf8_general_ci' ;
20 | #GRANT ALL ON `dev_db_1`.* TO 'default'@'%' ;
21 |
22 | #CREATE DATABASE IF NOT EXISTS `dev_db_2` COLLATE 'utf8_general_ci' ;
23 | #GRANT ALL ON `dev_db_2`.* TO 'default'@'%' ;
24 |
25 | #CREATE DATABASE IF NOT EXISTS `dev_db_3` COLLATE 'utf8_general_ci' ;
26 | #GRANT ALL ON `dev_db_3`.* TO 'default'@'%' ;
27 |
28 | FLUSH PRIVILEGES ;
29 |
--------------------------------------------------------------------------------
/mysql/my.cnf:
--------------------------------------------------------------------------------
1 | # The MySQL Client configuration file.
2 | #
3 | # For explanations see
4 | # http://dev.mysql.com/doc/mysql/en/server-system-variables.html
5 |
6 | [mysql]
7 |
8 | [mysqld]
9 | sql-mode="STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION"
10 | character-set-server=utf8
11 | default-authentication-plugin=mysql_native_password
12 | performance_schema = OFF
13 | table_definition_cache=400
14 | table_open_cache=256
--------------------------------------------------------------------------------
/nacos-standalone/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG NACOS_STANDALONE_VERSION=latest
2 | FROM nacos/nacos-server:${NACOS_STANDALONE_VERSION}
3 |
4 | LABEL maintainer="Leo Qin"
5 |
6 | # update timezone
7 | ARG TZ=UTC
8 | ENV TZ ${TZ}
9 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
--------------------------------------------------------------------------------
/nginx/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nginx:alpine
2 |
3 | LABEL maintainer="Leo Qin"
4 |
5 | # update timezone
6 | ARG TZ=UTC
7 | ENV TZ ${TZ}
8 |
9 | COPY nginx.conf /etc/nginx/
10 |
11 | RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
12 |
13 | RUN apk update \
14 | && apk upgrade \
15 | && apk add --no-cache bash \
16 | && rm /etc/nginx/conf.d/default.conf \
17 | && ln -snf /usr/share/zoneinfo/$TZ /etc/localtime \
18 | && echo $TZ > /etc/timezone
19 |
20 | CMD ["nginx"]
21 |
22 | EXPOSE 80 443
23 |
--------------------------------------------------------------------------------
/nginx/nginx.conf:
--------------------------------------------------------------------------------
1 | # Number of worker processes; usually equal to the number of CPU cores.
2 | worker_processes 2;
3 | # worker_processes auto;
4 | # worker_cpu_affinity pins each worker to a CPU core; normally not needed.
5 | # e.g. for a 4-core CPU:
6 | # worker_processes 4;
7 | # worker_cpu_affinity 0001 0010 0100 1000;
8 | 
9 | # File that stores the master process ID.
10 | pid /run/nginx.pid;
11 | 
12 | # Run in the foreground (required when nginx is the container's main process).
13 | daemon off;
14 | 
15 | # Global error log [debug|info|notice|warn|crit]
16 | error_log /var/logs/nginx/error.log;
17 | # error_log logs/error.log notice;
18 | # error_log logs/error.log info;
19 | 
20 | # Max open file descriptors per worker process. In theory this is the
21 | # system limit (`ulimit -n`) divided by the number of workers, but since
22 | # requests are not distributed evenly it is best kept equal to `ulimit -n`.
23 | # Raise the system limit in /etc/security/limits.conf:
24 | # * soft nproc 65535
25 | # * hard nproc 65535
26 | # * soft nofile 65535
27 | # * hard nofile 65535
28 | worker_rlimit_nofile 65535;
29 | 
30 | # Event processing model and connection limits.
31 | events {
32 |     # use [ kqueue | rtsig | epoll | /dev/poll | select | poll ];
33 |     # epoll is the high-performance model on Linux 2.6+ kernels
34 |     # (use kqueue on FreeBSD).
35 |     use epoll;
36 | 
37 |     # Max concurrent connections per worker process.
38 |     # Total capacity: max_clients = worker_processes * worker_connections
39 |     # (as a rule of thumb, divide by 4 when acting as a reverse proxy).
40 |     # max_clients must stay below the system-wide open-file limit —
41 |     # see /proc/sys/fs/file-max and `ulimit -SHn 65535`. The practical
42 |     # value therefore depends on the host's CPU count and memory.
43 |     worker_connections 2048;
44 | 
45 |     # Accept as many new connections as possible at once
46 |     # (serial accept can lower load, but hurts throughput on busy servers).
47 |     multi_accept on;
48 | }
49 | 
50 | http {
51 |     # Hide the nginx version in responses and error pages.
52 |     server_tokens off;
53 | 
54 |     # Size of the hash tables used for MIME type lookup.
55 |     types_hash_max_size 2048;
56 | 
57 |     # MIME type mapping; types are defined in mime.types.
58 |     include /etc/nginx/mime.types;
59 |     default_type application/octet-stream;
60 | 
61 |     # Log format reference ($remote_addr / $http_x_forwarded_for = client IP,
62 |     # $remote_user = client user name, $time_local = time and zone,
63 |     # $request = request URL and protocol, $status, $body_bytes_sent,
64 |     # $http_referer = referring page, $http_user_agent = client browser):
65 |     # log_format main '$remote_addr - $remote_user [$time_local] "$request" '
66 |     #                 '$status $body_bytes_sent "$http_referer" '
67 |     #                 '"$http_user_agent" "$http_x_forwarded_for"';
68 |     # access_log logs/access.log main;
69 |     # Log only severe errors to reduce I/O pressure:
70 |     # error_log logs/error.log crit;
71 |     # Disable access logging entirely:
72 |     # access_log off;
73 |     access_log /var/logs/nginx/access.log;
74 | 
75 |     # Default charset.
76 |     charset utf-8;
77 | 
78 |     # Hash table size for server names.
79 |     # server_names_hash_bucket_size 128;
80 | 
81 |     # Max size of a single client request body (e.g. uploads).
82 |     client_max_body_size 20m;
83 | 
84 |     # Buffer size for client request headers:
85 |     # client_header_buffer_size 32k;
86 |     # Max number and size of buffers for large request headers:
87 |     # large_client_header_buffers 4 64k;
88 | 
89 |     # Use sendfile() (zero-copy) to serve files. Keep it on for normal
90 |     # workloads; consider turning it off for heavy disk-I/O download
91 |     # workloads to balance disk vs. network throughput.
92 |     sendfile on;
93 | 
94 |     # Send response headers in one packet and do not buffer small writes.
95 |     tcp_nopush on;
96 |     tcp_nodelay on;
97 | 
98 |     # Keep-alive timeout for client connections, in seconds.
99 |     keepalive_timeout 15;
100 |     # Timeout for reading the client request header:
101 |     # client_header_timeout 10;
102 |     # Timeout for reading the client request body:
103 |     # client_body_timeout 10;
104 |     # Timeout for transmitting a response to the client:
105 |     # send_timeout 10;
106 | 
107 |     # FastCGI timeouts and buffers (upstream PHP/CGI backends),
108 |     # tuned to reduce resource usage and improve response time.
109 |     fastcgi_connect_timeout 300;
110 |     fastcgi_send_timeout 300;
111 |     fastcgi_read_timeout 300;
112 |     fastcgi_buffer_size 64k;
113 |     fastcgi_buffers 4 64k;
114 |     fastcgi_busy_buffers_size 128k;
115 |     fastcgi_temp_file_write_size 128k;
116 | 
117 |     # gzip compression of responses.
118 |     gzip on;
119 |     # Minimum response size to compress:
120 |     # gzip_min_length 1k;
121 |     # Compression buffers:
122 |     # gzip_buffers 4 16k;
123 |     # Protocol version (default 1.1; use 1.0 behind squid 2.5):
124 |     # gzip_http_version 1.0;
125 |     # Level 1-9: higher saves bandwidth but costs more CPU:
126 |     # gzip_comp_level 2;
127 |     # text/html is always compressed; listing it again only causes a warning:
128 |     # gzip_types text/plain application/x-javascript text/css application/xml;
129 |     # Let front-end caches store compressed and uncompressed variants:
130 |     # gzip_vary on;
131 |     gzip_disable "msie6";
132 | 
133 |     # Only allow modern TLS: 1.0/1.1 are deprecated (RFC 8996).
134 |     # Drop TLSv1.3 from this list if the bundled OpenSSL is older than 1.1.1.
135 |     ssl_protocols TLSv1.2 TLSv1.3;
136 |     ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS';
137 | 
138 |     include /etc/nginx/conf.d/*.conf;
139 |     include /etc/nginx/sites-available/*.conf;
140 |     open_file_cache off; # Disabled for issue 619
141 | }
142 | 
--------------------------------------------------------------------------------
/nginx/sites/.gitignore:
--------------------------------------------------------------------------------
1 | *.conf
2 | !default.conf.example
--------------------------------------------------------------------------------
/nginx/sites/default.conf.example:
--------------------------------------------------------------------------------
1 | # upstream server-api {
2 | #     # address of the proxied API service
3 | #     server 127.0.0.1:81;
4 | # }
5 | server {
6 |     # Listening ports (IPv4 and IPv6).
7 |     listen 80 default_server;
8 |     listen [::]:80 default_server ipv6only=on;
9 | 
10 |     # For https
11 |     # listen 443 ssl;
12 |     # listen [::]:443 ssl ipv6only=on;
13 |     # ssl_certificate /etc/nginx/ssl/default.crt;
14 |     # ssl_certificate_key /etc/nginx/ssl/default.key;
15 | 
16 |     server_name localhost;
17 |     root /var/www/public;
18 |     index index.php index.html index.htm;
19 | 
20 |     location / {
21 |         # Serve the file or directory if present, otherwise return 404.
22 |         # (Without the explicit =404 fallback, a missing directory index
23 |         # produces a 403 from the index module instead.)
24 |         try_files $uri $uri/ =404;
25 |     }
26 | 
27 |     # Reverse-proxy /api/ routes to the API service.
28 |     # location ^~/api/ {
29 |     #     proxy_pass http://server-api/;
30 |     # }
31 | 
32 |     # location ^~/storage/ {
33 |     #     proxy_pass http://server-api/storage/;
34 |     # }
35 | 
36 |     # Deny access to .htaccess / .htpasswd files.
37 |     location ~ /\.ht {
38 |         deny all;
39 |     }
40 | 
41 |     # Let's Encrypt HTTP-01 challenge files.
42 |     location /.well-known/acme-challenge/ {
43 |         root /var/www/letsencrypt/;
44 |         log_not_found off;
45 |     }
46 | 
47 |     error_log /var/logs/nginx/app_error.log;
48 |     access_log /var/logs/nginx/app_access.log;
49 | }
50 | 
--------------------------------------------------------------------------------
/nginx/ssl/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | !.gitignore
--------------------------------------------------------------------------------
/postgres/Dockerfile:
--------------------------------------------------------------------------------
1 | # Version is selectable at build time, e.g. --build-arg POSTGRESQL_VERSION=16
2 | ARG POSTGRESQL_VERSION=latest
3 | FROM postgres:${POSTGRESQL_VERSION}
4 | 
5 | LABEL maintainer="Leo Qin"
6 | 
7 | # update timezone
8 | ARG TZ=UTC
9 | # Use ENV key=value: the whitespace-separated form is legacy and deprecated.
10 | ENV TZ=${TZ}
11 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
12 | 
13 | CMD ["postgres"]
14 | 
15 | EXPOSE 5432
16 | 
--------------------------------------------------------------------------------
/rabbitmq/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM rabbitmq:alpine
2 | 
3 | LABEL maintainer="Leo Qin"
4 | 
5 | # update timezone
6 | ARG TZ=UTC
7 | # Use ENV key=value: the whitespace-separated form is legacy and deprecated.
8 | ENV TZ=${TZ}
9 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
10 | 
11 | # Enable the web management UI (plugin toggled offline at build time).
12 | RUN rabbitmq-plugins enable --offline rabbitmq_management
13 | 
14 | EXPOSE 4369 5671 5672 15671 15672 25672
15 | 
--------------------------------------------------------------------------------
/rabbitmq/management_agent.disable_metrics_collector.conf:
--------------------------------------------------------------------------------
1 | # NOTE(review): despite this file's name, `false` keeps the metrics
2 | # collector ENABLED; set it to `true` to actually disable metrics
3 | # collection (reduces broker memory usage) — confirm which behaviour
4 | # is intended before deploying.
5 | management_agent.disable_metrics_collector = false
6 | 
--------------------------------------------------------------------------------
/rabbitmq/rabbitmq.conf:
--------------------------------------------------------------------------------
1 | # Default broker credentials. Note that RabbitMQ restricts the built-in
2 | # `guest` user to loopback connections by default; change these before
3 | # exposing the broker beyond localhost.
4 | default_user = guest
5 | default_pass = guest
6 | 
--------------------------------------------------------------------------------
/redis-cluster/Dockerfile:
--------------------------------------------------------------------------------
1 | # All-in-one Redis Cluster image (multiple nodes in a single container);
2 | # intended for local development/testing rather than production.
3 | FROM grokzen/redis-cluster:latest
4 | 
5 | LABEL maintainer="Leo Qin"
6 | 
--------------------------------------------------------------------------------
/redis/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM redis:latest
2 | 
3 | LABEL maintainer="Leo Qin"
4 | 
5 | # update timezone
6 | ARG TZ=UTC
7 | # Use ENV key=value: the whitespace-separated form is legacy and deprecated.
8 | ENV TZ=${TZ}
9 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
10 | 
11 | ## For security settings uncomment, make the dir, copy conf, and also start with the conf, to use it
12 | # RUN mkdir -p /usr/local/etc/redis
13 | # COPY redis.conf /usr/local/etc/redis/redis.conf
14 | 
15 | VOLUME /data
16 | 
17 | EXPOSE 6379
18 | 
19 | # CMD ["redis-server", "/usr/local/etc/redis/redis.conf"]
20 | CMD ["redis-server"]
21 | 
--------------------------------------------------------------------------------
/redis/redis.conf:
--------------------------------------------------------------------------------
1 | # Redis configuration file example.
2 | #
3 | # Note that in order to read the configuration file, Redis must be
4 | # started with the file path as first argument:
5 | #
6 | # ./redis-server /path/to/redis.conf
7 |
8 | # Note on units: when memory size is needed, it is possible to specify
9 | # it in the usual form of 1k 5GB 4M and so forth:
10 | #
11 | # 1k => 1000 bytes
12 | # 1kb => 1024 bytes
13 | # 1m => 1000000 bytes
14 | # 1mb => 1024*1024 bytes
15 | # 1g => 1000000000 bytes
16 | # 1gb => 1024*1024*1024 bytes
17 | #
18 | # units are case insensitive so 1GB 1Gb 1gB are all the same.
19 |
20 | ################################## INCLUDES ###################################
21 |
22 | # Include one or more other config files here. This is useful if you
23 | # have a standard template that goes to all Redis servers but also need
24 | # to customize a few per-server settings. Include files can include
25 | # other files, so use this wisely.
26 | #
27 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE"
28 | # from admin or Redis Sentinel. Since Redis always uses the last processed
29 | # line as value of a configuration directive, you'd better put includes
30 | # at the beginning of this file to avoid overwriting config change at runtime.
31 | #
32 | # If instead you are interested in using includes to override configuration
33 | # options, it is better to use include as the last line.
34 | #
35 | # include /path/to/local.conf
36 | # include /path/to/other.conf
37 |
38 | ################################## MODULES #####################################
39 |
40 | # Load modules at startup. If the server is not able to load modules
41 | # it will abort. It is possible to use multiple loadmodule directives.
42 | #
43 | # loadmodule /path/to/my_module.so
44 | # loadmodule /path/to/other_module.so
45 |
46 | ################################## NETWORK #####################################
47 |
48 | # By default, if no "bind" configuration directive is specified, Redis listens
49 | # for connections from all the network interfaces available on the server.
50 | # It is possible to listen to just one or multiple selected interfaces using
51 | # the "bind" configuration directive, followed by one or more IP addresses.
52 | #
53 | # Examples:
54 | #
55 | # bind 192.168.1.100 10.0.0.1
56 | # bind 127.0.0.1 ::1
57 | #
58 | # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
59 | # internet, binding to all the interfaces is dangerous and will expose the
60 | # instance to everybody on the internet. So by default we uncomment the
61 | # following bind directive, that will force Redis to listen only into
62 | # the IPv4 loopback interface address (this means Redis will be able to
63 | # accept connections only from clients running into the same computer it
64 | # is running).
65 | #
66 | # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
67 | # JUST COMMENT THE FOLLOWING LINE.
68 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
69 | bind 127.0.0.1
70 |
71 | # Protected mode is a layer of security protection, in order to avoid that
72 | # Redis instances left open on the internet are accessed and exploited.
73 | #
74 | # When protected mode is on and if:
75 | #
76 | # 1) The server is not binding explicitly to a set of addresses using the
77 | # "bind" directive.
78 | # 2) No password is configured.
79 | #
80 | # The server only accepts connections from clients connecting from the
81 | # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
82 | # sockets.
83 | #
84 | # By default protected mode is enabled. You should disable it only if
85 | # you are sure you want clients from other hosts to connect to Redis
86 | # even if no authentication is configured, nor a specific set of interfaces
87 | # are explicitly listed using the "bind" directive.
88 | protected-mode yes
89 |
90 | # Accept connections on the specified port, default is 6379 (IANA #815344).
91 | # If port 0 is specified Redis will not listen on a TCP socket.
92 | port 6379
93 |
94 | # TCP listen() backlog.
95 | #
96 | # In high requests-per-second environments you need an high backlog in order
97 | # to avoid slow clients connections issues. Note that the Linux kernel
98 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so
99 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog
100 | # in order to get the desired effect.
101 | tcp-backlog 511
102 |
103 | # Unix socket.
104 | #
105 | # Specify the path for the Unix socket that will be used to listen for
106 | # incoming connections. There is no default, so Redis will not listen
107 | # on a unix socket when not specified.
108 | #
109 | # unixsocket /tmp/redis.sock
110 | # unixsocketperm 700
111 |
112 | # Close the connection after a client is idle for N seconds (0 to disable)
113 | timeout 0
114 |
115 | # TCP keepalive.
116 | #
117 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
118 | # of communication. This is useful for two reasons:
119 | #
120 | # 1) Detect dead peers.
121 | # 2) Take the connection alive from the point of view of network
122 | # equipment in the middle.
123 | #
124 | # On Linux, the specified value (in seconds) is the period used to send ACKs.
125 | # Note that to close the connection the double of the time is needed.
126 | # On other kernels the period depends on the kernel configuration.
127 | #
128 | # A reasonable value for this option is 300 seconds, which is the new
129 | # Redis default starting with Redis 3.2.1.
130 | tcp-keepalive 300
131 |
132 | ################################# GENERAL #####################################
133 |
134 | # By default Redis does not run as a daemon. Use 'yes' if you need it.
135 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
136 | daemonize no
137 |
138 | # If you run Redis from upstart or systemd, Redis can interact with your
139 | # supervision tree. Options:
140 | # supervised no - no supervision interaction
141 | # supervised upstart - signal upstart by putting Redis into SIGSTOP mode
142 | # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
143 | # supervised auto - detect upstart or systemd method based on
144 | # UPSTART_JOB or NOTIFY_SOCKET environment variables
145 | # Note: these supervision methods only signal "process is ready."
146 | # They do not enable continuous liveness pings back to your supervisor.
147 | supervised no
148 |
149 | # If a pid file is specified, Redis writes it where specified at startup
150 | # and removes it at exit.
151 | #
152 | # When the server runs non daemonized, no pid file is created if none is
153 | # specified in the configuration. When the server is daemonized, the pid file
154 | # is used even if not specified, defaulting to "/var/run/redis.pid".
155 | #
156 | # Creating a pid file is best effort: if Redis is not able to create it
157 | # nothing bad happens, the server will start and run normally.
158 | pidfile /var/run/redis_6379.pid
159 |
160 | # Specify the server verbosity level.
161 | # This can be one of:
162 | # debug (a lot of information, useful for development/testing)
163 | # verbose (many rarely useful info, but not a mess like the debug level)
164 | # notice (moderately verbose, what you want in production probably)
165 | # warning (only very important / critical messages are logged)
166 | loglevel notice
167 |
168 | # Specify the log file name. Also the empty string can be used to force
169 | # Redis to log on the standard output. Note that if you use standard
170 | # output for logging but daemonize, logs will be sent to /dev/null
171 | logfile ""
172 |
173 | # To enable logging to the system logger, just set 'syslog-enabled' to yes,
174 | # and optionally update the other syslog parameters to suit your needs.
175 | # syslog-enabled no
176 |
177 | # Specify the syslog identity.
178 | # syslog-ident redis
179 |
180 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
181 | # syslog-facility local0
182 |
183 | # Set the number of databases. The default database is DB 0, you can select
184 | # a different one on a per-connection basis using SELECT where
185 | # dbid is a number between 0 and 'databases'-1
186 | databases 16
187 |
188 | # By default Redis shows an ASCII art logo only when started to log to the
189 | # standard output and if the standard output is a TTY. Basically this means
190 | # that normally a logo is displayed only in interactive sessions.
191 | #
192 | # However it is possible to force the pre-4.0 behavior and always show a
193 | # ASCII art logo in startup logs by setting the following option to yes.
194 | always-show-logo yes
195 |
196 | ################################ SNAPSHOTTING ################################
197 | #
198 | # Save the DB on disk:
199 | #
200 | # save
201 | #
202 | # Will save the DB if both the given number of seconds and the given
203 | # number of write operations against the DB occurred.
204 | #
205 | # In the example below the behaviour will be to save:
206 | # after 900 sec (15 min) if at least 1 key changed
207 | # after 300 sec (5 min) if at least 10 keys changed
208 | # after 60 sec if at least 10000 keys changed
209 | #
210 | # Note: you can disable saving completely by commenting out all "save" lines.
211 | #
212 | # It is also possible to remove all the previously configured save
213 | # points by adding a save directive with a single empty string argument
214 | # like in the following example:
215 | #
216 | # save ""
217 |
218 | save 900 1
219 | save 300 10
220 | save 60 10000
221 |
222 | # By default Redis will stop accepting writes if RDB snapshots are enabled
223 | # (at least one save point) and the latest background save failed.
224 | # This will make the user aware (in a hard way) that data is not persisting
225 | # on disk properly, otherwise chances are that no one will notice and some
226 | # disaster will happen.
227 | #
228 | # If the background saving process will start working again Redis will
229 | # automatically allow writes again.
230 | #
231 | # However if you have setup your proper monitoring of the Redis server
232 | # and persistence, you may want to disable this feature so that Redis will
233 | # continue to work as usual even if there are problems with disk,
234 | # permissions, and so forth.
235 | stop-writes-on-bgsave-error yes
236 |
237 | # Compress string objects using LZF when dump .rdb databases?
238 | # For default that's set to 'yes' as it's almost always a win.
239 | # If you want to save some CPU in the saving child set it to 'no' but
240 | # the dataset will likely be bigger if you have compressible values or keys.
241 | rdbcompression yes
242 |
243 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
244 | # This makes the format more resistant to corruption but there is a performance
245 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it
246 | # for maximum performances.
247 | #
248 | # RDB files created with checksum disabled have a checksum of zero that will
249 | # tell the loading code to skip the check.
250 | rdbchecksum yes
251 |
252 | # The filename where to dump the DB
253 | dbfilename dump.rdb
254 |
255 | # The working directory.
256 | #
257 | # The DB will be written inside this directory, with the filename specified
258 | # above using the 'dbfilename' configuration directive.
259 | #
260 | # The Append Only File will also be created inside this directory.
261 | #
262 | # Note that you must specify a directory here, not a file name.
263 | dir ./
264 |
265 | ################################# REPLICATION #################################
266 |
267 | # Master-Replica replication. Use replicaof to make a Redis instance a copy of
268 | # another Redis server. A few things to understand ASAP about Redis replication.
269 | #
270 | # +------------------+ +---------------+
271 | # | Master | ---> | Replica |
272 | # | (receive writes) | | (exact copy) |
273 | # +------------------+ +---------------+
274 | #
275 | # 1) Redis replication is asynchronous, but you can configure a master to
276 | # stop accepting writes if it appears to be not connected with at least
277 | # a given number of replicas.
278 | # 2) Redis replicas are able to perform a partial resynchronization with the
279 | # master if the replication link is lost for a relatively small amount of
280 | # time. You may want to configure the replication backlog size (see the next
281 | # sections of this file) with a sensible value depending on your needs.
282 | # 3) Replication is automatic and does not need user intervention. After a
283 | # network partition replicas automatically try to reconnect to masters
284 | # and resynchronize with them.
285 | #
286 | # replicaof
287 |
288 | # If the master is password protected (using the "requirepass" configuration
289 | # directive below) it is possible to tell the replica to authenticate before
290 | # starting the replication synchronization process, otherwise the master will
291 | # refuse the replica request.
292 | #
293 | # masterauth
294 |
295 | # When a replica loses its connection with the master, or when the replication
296 | # is still in progress, the replica can act in two different ways:
297 | #
298 | # 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
299 | # still reply to client requests, possibly with out of date data, or the
300 | # data set may just be empty if this is the first synchronization.
301 | #
302 | # 2) if replica-serve-stale-data is set to 'no' the replica will reply with
303 | # an error "SYNC with master in progress" to all the kind of commands
304 | # but to INFO, replicaOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG,
305 | # SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB,
306 | # COMMAND, POST, HOST: and LATENCY.
307 | #
308 | replica-serve-stale-data yes
309 |
310 | # You can configure a replica instance to accept writes or not. Writing against
311 | # a replica instance may be useful to store some ephemeral data (because data
312 | # written on a replica will be easily deleted after resync with the master) but
313 | # may also cause problems if clients are writing to it because of a
314 | # misconfiguration.
315 | #
316 | # Since Redis 2.6 by default replicas are read-only.
317 | #
318 | # Note: read only replicas are not designed to be exposed to untrusted clients
319 | # on the internet. It's just a protection layer against misuse of the instance.
320 | # Still a read only replica exports by default all the administrative commands
321 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
322 | # security of read only replicas using 'rename-command' to shadow all the
323 | # administrative / dangerous commands.
324 | replica-read-only yes
325 |
326 | # Replication SYNC strategy: disk or socket.
327 | #
328 | # -------------------------------------------------------
329 | # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
330 | # -------------------------------------------------------
331 | #
332 | # New replicas and reconnecting replicas that are not able to continue the replication
333 | # process just receiving differences, need to do what is called a "full
334 | # synchronization". An RDB file is transmitted from the master to the replicas.
335 | # The transmission can happen in two different ways:
336 | #
337 | # 1) Disk-backed: The Redis master creates a new process that writes the RDB
338 | # file on disk. Later the file is transferred by the parent
339 | # process to the replicas incrementally.
340 | # 2) Diskless: The Redis master creates a new process that directly writes the
341 | # RDB file to replica sockets, without touching the disk at all.
342 | #
343 | # With disk-backed replication, while the RDB file is generated, more replicas
344 | # can be queued and served with the RDB file as soon as the current child producing
345 | # the RDB file finishes its work. With diskless replication instead once
346 | # the transfer starts, new replicas arriving will be queued and a new transfer
347 | # will start when the current one terminates.
348 | #
349 | # When diskless replication is used, the master waits a configurable amount of
350 | # time (in seconds) before starting the transfer in the hope that multiple replicas
351 | # will arrive and the transfer can be parallelized.
352 | #
353 | # With slow disks and fast (large bandwidth) networks, diskless replication
354 | # works better.
355 | repl-diskless-sync no
356 |
357 | # When diskless replication is enabled, it is possible to configure the delay
358 | # the server waits in order to spawn the child that transfers the RDB via socket
359 | # to the replicas.
360 | #
361 | # This is important since once the transfer starts, it is not possible to serve
362 | # new replicas arriving, that will be queued for the next RDB transfer, so the server
363 | # waits a delay in order to let more replicas arrive.
364 | #
365 | # The delay is specified in seconds, and by default is 5 seconds. To disable
366 | # it entirely just set it to 0 seconds and the transfer will start ASAP.
367 | repl-diskless-sync-delay 5
368 |
369 | # Replicas send PINGs to server in a predefined interval. It's possible to change
370 | # this interval with the repl_ping_replica_period option. The default value is 10
371 | # seconds.
372 | #
373 | # repl-ping-replica-period 10
374 |
375 | # The following option sets the replication timeout for:
376 | #
377 | # 1) Bulk transfer I/O during SYNC, from the point of view of replica.
378 | # 2) Master timeout from the point of view of replicas (data, pings).
379 | # 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
380 | #
381 | # It is important to make sure that this value is greater than the value
382 | # specified for repl-ping-replica-period otherwise a timeout will be detected
383 | # every time there is low traffic between the master and the replica.
384 | #
385 | # repl-timeout 60
386 |
387 | # Disable TCP_NODELAY on the replica socket after SYNC?
388 | #
389 | # If you select "yes" Redis will use a smaller number of TCP packets and
390 | # less bandwidth to send data to replicas. But this can add a delay for
391 | # the data to appear on the replica side, up to 40 milliseconds with
392 | # Linux kernels using a default configuration.
393 | #
394 | # If you select "no" the delay for data to appear on the replica side will
395 | # be reduced but more bandwidth will be used for replication.
396 | #
397 | # By default we optimize for low latency, but in very high traffic conditions
398 | # or when the master and replicas are many hops away, turning this to "yes" may
399 | # be a good idea.
400 | repl-disable-tcp-nodelay no
401 |
402 | # Set the replication backlog size. The backlog is a buffer that accumulates
403 | # replica data when replicas are disconnected for some time, so that when a replica
404 | # wants to reconnect again, often a full resync is not needed, but a partial
405 | # resync is enough, just passing the portion of data the replica missed while
406 | # disconnected.
407 | #
408 | # The bigger the replication backlog, the longer the time the replica can be
409 | # disconnected and later be able to perform a partial resynchronization.
410 | #
411 | # The backlog is only allocated once there is at least a replica connected.
412 | #
413 | # repl-backlog-size 1mb
414 |
415 | # After a master has no longer connected replicas for some time, the backlog
416 | # will be freed. The following option configures the amount of seconds that
417 | # need to elapse, starting from the time the last replica disconnected, for
418 | # the backlog buffer to be freed.
419 | #
420 | # Note that replicas never free the backlog for timeout, since they may be
421 | # promoted to masters later, and should be able to correctly "partially
422 | # resynchronize" with the replicas: hence they should always accumulate backlog.
423 | #
424 | # A value of 0 means to never release the backlog.
425 | #
426 | # repl-backlog-ttl 3600
427 |
428 | # The replica priority is an integer number published by Redis in the INFO output.
429 | # It is used by Redis Sentinel in order to select a replica to promote into a
430 | # master if the master is no longer working correctly.
431 | #
432 | # A replica with a low priority number is considered better for promotion, so
433 | # for instance if there are three replicas with priority 10, 100, 25 Sentinel will
434 | # pick the one with priority 10, that is the lowest.
435 | #
436 | # However a special priority of 0 marks the replica as not able to perform the
437 | # role of master, so a replica with priority of 0 will never be selected by
438 | # Redis Sentinel for promotion.
439 | #
440 | # By default the priority is 100.
441 | replica-priority 100
442 |
443 | # It is possible for a master to stop accepting writes if there are less than
444 | # N replicas connected, having a lag less or equal than M seconds.
445 | #
446 | # The N replicas need to be in "online" state.
447 | #
448 | # The lag in seconds, that must be <= the specified value, is calculated from
449 | # the last ping received from the replica, that is usually sent every second.
450 | #
451 | # This option does not GUARANTEE that N replicas will accept the write, but
452 | # will limit the window of exposure for lost writes in case not enough replicas
453 | # are available, to the specified number of seconds.
454 | #
455 | # For example to require at least 3 replicas with a lag <= 10 seconds use:
456 | #
457 | # min-replicas-to-write 3
458 | # min-replicas-max-lag 10
459 | #
460 | # Setting one or the other to 0 disables the feature.
461 | #
462 | # By default min-replicas-to-write is set to 0 (feature disabled) and
463 | # min-replicas-max-lag is set to 10.
464 |
465 | # A Redis master is able to list the address and port of the attached
466 | # replicas in different ways. For example the "INFO replication" section
467 | # offers this information, which is used, among other tools, by
468 | # Redis Sentinel in order to discover replica instances.
469 | # Another place where this info is available is in the output of the
470 | # "ROLE" command of a master.
471 | #
472 | # The listed IP and address normally reported by a replica is obtained
473 | # in the following way:
474 | #
475 | # IP: The address is auto detected by checking the peer address
476 | # of the socket used by the replica to connect with the master.
477 | #
478 | # Port: The port is communicated by the replica during the replication
479 | # handshake, and is normally the port that the replica is using to
480 | # listen for connections.
481 | #
482 | # However when port forwarding or Network Address Translation (NAT) is
483 | # used, the replica may be actually reachable via different IP and port
484 | # pairs. The following two options can be used by a replica in order to
485 | # report to its master a specific set of IP and port, so that both INFO
486 | # and ROLE will report those values.
487 | #
488 | # There is no need to use both the options if you need to override just
489 | # the port or the IP address.
490 | #
491 | # replica-announce-ip 5.5.5.5
492 | # replica-announce-port 1234
493 |
494 | ################################## SECURITY ###################################
495 |
496 | # Require clients to issue AUTH before processing any other
497 | # commands. This might be useful in environments in which you do not trust
498 | # others with access to the host running redis-server.
499 | #
500 | # This should stay commented out for backward compatibility and because most
501 | # people do not need auth (e.g. they run their own servers).
502 | #
503 | # Warning: since Redis is pretty fast an outside user can try up to
504 | # 150k passwords per second against a good box. This means that you should
505 | # use a very strong password otherwise it will be very easy to break.
506 | #
507 | # requirepass foobared
508 |
509 | # Command renaming.
510 | #
511 | # It is possible to change the name of dangerous commands in a shared
512 | # environment. For instance the CONFIG command may be renamed into something
513 | # hard to guess so that it will still be available for internal-use tools
514 | # but not available for general clients.
515 | #
516 | # Example:
517 | #
518 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
519 | #
520 | # It is also possible to completely kill a command by renaming it into
521 | # an empty string:
522 | #
523 | # rename-command CONFIG ""
524 | #
525 | # Please note that changing the name of commands that are logged into the
526 | # AOF file or transmitted to replicas may cause problems.
527 |
528 | ################################### CLIENTS ####################################
529 |
530 | # Set the max number of connected clients at the same time. By default
531 | # this limit is set to 10000 clients, however if the Redis server is not
532 | # able to configure the process file limit to allow for the specified limit
533 | # the max number of allowed clients is set to the current file limit
534 | # minus 32 (as Redis reserves a few file descriptors for internal uses).
535 | #
536 | # Once the limit is reached Redis will close all the new connections sending
537 | # an error 'max number of clients reached'.
538 | #
539 | # maxclients 10000
540 |
541 | ############################## MEMORY MANAGEMENT ################################
542 |
543 | # Set a memory usage limit to the specified amount of bytes.
544 | # When the memory limit is reached Redis will try to remove keys
545 | # according to the eviction policy selected (see maxmemory-policy).
546 | #
547 | # If Redis can't remove keys according to the policy, or if the policy is
548 | # set to 'noeviction', Redis will start to reply with errors to commands
549 | # that would use more memory, like SET, LPUSH, and so on, and will continue
550 | # to reply to read-only commands like GET.
551 | #
552 | # This option is usually useful when using Redis as an LRU or LFU cache, or to
553 | # set a hard memory limit for an instance (using the 'noeviction' policy).
554 | #
555 | # WARNING: If you have replicas attached to an instance with maxmemory on,
556 | # the size of the output buffers needed to feed the replicas are subtracted
557 | # from the used memory count, so that network problems / resyncs will
558 | # not trigger a loop where keys are evicted, and in turn the output
559 | # buffer of replicas is full with DELs of keys evicted triggering the deletion
560 | # of more keys, and so forth until the database is completely emptied.
561 | #
562 | # In short... if you have replicas attached it is suggested that you set a lower
563 | # limit for maxmemory so that there is some free RAM on the system for replica
564 | # output buffers (but this is not needed if the policy is 'noeviction').
565 | #
# maxmemory <bytes>
567 |
568 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
569 | # is reached. You can select among five behaviors:
570 | #
571 | # volatile-lru -> Evict using approximated LRU among the keys with an expire set.
572 | # allkeys-lru -> Evict any key using approximated LRU.
573 | # volatile-lfu -> Evict using approximated LFU among the keys with an expire set.
574 | # allkeys-lfu -> Evict any key using approximated LFU.
575 | # volatile-random -> Remove a random key among the ones with an expire set.
576 | # allkeys-random -> Remove a random key, any key.
577 | # volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
578 | # noeviction -> Don't evict anything, just return an error on write operations.
579 | #
580 | # LRU means Least Recently Used
581 | # LFU means Least Frequently Used
582 | #
583 | # Both LRU, LFU and volatile-ttl are implemented using approximated
584 | # randomized algorithms.
585 | #
586 | # Note: with any of the above policies, Redis will return an error on write
587 | # operations, when there are no suitable keys for eviction.
588 | #
589 | # At the date of writing these commands are: set setnx setex append
590 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
591 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
592 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
593 | # getset mset msetnx exec sort
594 | #
595 | # The default is:
596 | #
597 | # maxmemory-policy noeviction
598 |
599 | # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
600 | # algorithms (in order to save memory), so you can tune it for speed or
# accuracy. By default Redis will check five keys and pick the one that was
602 | # used less recently, you can change the sample size using the following
603 | # configuration directive.
604 | #
605 | # The default of 5 produces good enough results. 10 Approximates very closely
606 | # true LRU but costs more CPU. 3 is faster but not very accurate.
607 | #
608 | # maxmemory-samples 5
609 |
610 | # Starting from Redis 5, by default a replica will ignore its maxmemory setting
611 | # (unless it is promoted to master after a failover or manually). It means
612 | # that the eviction of keys will be just handled by the master, sending the
613 | # DEL commands to the replica as keys evict in the master side.
614 | #
615 | # This behavior ensures that masters and replicas stay consistent, and is usually
616 | # what you want, however if your replica is writable, or you want the replica to have
617 | # a different memory setting, and you are sure all the writes performed to the
618 | # replica are idempotent, then you may change this default (but be sure to understand
619 | # what you are doing).
620 | #
621 | # Note that since the replica by default does not evict, it may end using more
622 | # memory than the one set via maxmemory (there are certain buffers that may
623 | # be larger on the replica, or data structures may sometimes take more memory and so
624 | # forth). So make sure you monitor your replicas and make sure they have enough
625 | # memory to never hit a real out-of-memory condition before the master hits
626 | # the configured maxmemory setting.
627 | #
628 | # replica-ignore-maxmemory yes
629 |
630 | ############################# LAZY FREEING ####################################
631 |
632 | # Redis has two primitives to delete keys. One is called DEL and is a blocking
633 | # deletion of the object. It means that the server stops processing new commands
634 | # in order to reclaim all the memory associated with an object in a synchronous
635 | # way. If the key deleted is associated with a small object, the time needed
636 | # in order to execute the DEL command is very small and comparable to most other
637 | # O(1) or O(log_N) commands in Redis. However if the key is associated with an
638 | # aggregated value containing millions of elements, the server can block for
639 | # a long time (even seconds) in order to complete the operation.
640 | #
641 | # For the above reasons Redis also offers non blocking deletion primitives
642 | # such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
643 | # FLUSHDB commands, in order to reclaim memory in background. Those commands
644 | # are executed in constant time. Another thread will incrementally free the
645 | # object in the background as fast as possible.
646 | #
647 | # DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
648 | # It's up to the design of the application to understand when it is a good
649 | # idea to use one or the other. However the Redis server sometimes has to
650 | # delete keys or flush the whole database as a side effect of other operations.
651 | # Specifically Redis deletes objects independently of a user call in the
652 | # following scenarios:
653 | #
654 | # 1) On eviction, because of the maxmemory and maxmemory policy configurations,
655 | # in order to make room for new data, without going over the specified
656 | # memory limit.
657 | # 2) Because of expire: when a key with an associated time to live (see the
658 | # EXPIRE command) must be deleted from memory.
659 | # 3) Because of a side effect of a command that stores data on a key that may
660 | # already exist. For example the RENAME command may delete the old key
661 | # content when it is replaced with another one. Similarly SUNIONSTORE
662 | # or SORT with STORE option may delete existing keys. The SET command
663 | # itself removes any old content of the specified key in order to replace
664 | # it with the specified string.
665 | # 4) During replication, when a replica performs a full resynchronization with
666 | # its master, the content of the whole database is removed in order to
667 | # load the RDB file just transferred.
668 | #
669 | # In all the above cases the default is to delete objects in a blocking way,
670 | # like if DEL was called. However you can configure each case specifically
671 | # in order to instead release memory in a non-blocking way like if UNLINK
672 | # was called, using the following configuration directives:
673 |
674 | lazyfree-lazy-eviction no
675 | lazyfree-lazy-expire no
676 | lazyfree-lazy-server-del no
677 | replica-lazy-flush no
678 |
679 | ############################## APPEND ONLY MODE ###############################
680 |
681 | # By default Redis asynchronously dumps the dataset on disk. This mode is
682 | # good enough in many applications, but an issue with the Redis process or
683 | # a power outage may result into a few minutes of writes lost (depending on
684 | # the configured save points).
685 | #
686 | # The Append Only File is an alternative persistence mode that provides
687 | # much better durability. For instance using the default data fsync policy
688 | # (see later in the config file) Redis can lose just one second of writes in a
689 | # dramatic event like a server power outage, or a single write if something
690 | # wrong with the Redis process itself happens, but the operating system is
691 | # still running correctly.
692 | #
693 | # AOF and RDB persistence can be enabled at the same time without problems.
694 | # If the AOF is enabled on startup Redis will load the AOF, that is the file
695 | # with the better durability guarantees.
696 | #
697 | # Please check http://redis.io/topics/persistence for more information.
698 |
699 | appendonly no
700 |
701 | # The name of the append only file (default: "appendonly.aof")
702 |
703 | appendfilename "appendonly.aof"
704 |
705 | # The fsync() call tells the Operating System to actually write data on disk
706 | # instead of waiting for more data in the output buffer. Some OS will really flush
707 | # data on disk, some other OS will just try to do it ASAP.
708 | #
709 | # Redis supports three different modes:
710 | #
711 | # no: don't fsync, just let the OS flush the data when it wants. Faster.
712 | # always: fsync after every write to the append only log. Slow, Safest.
713 | # everysec: fsync only one time every second. Compromise.
714 | #
715 | # The default is "everysec", as that's usually the right compromise between
716 | # speed and data safety. It's up to you to understand if you can relax this to
717 | # "no" that will let the operating system flush the output buffer when
718 | # it wants, for better performances (but if you can live with the idea of
719 | # some data loss consider the default persistence mode that's snapshotting),
720 | # or on the contrary, use "always" that's very slow but a bit safer than
721 | # everysec.
722 | #
# For more details please check the following article:
724 | # http://antirez.com/post/redis-persistence-demystified.html
725 | #
726 | # If unsure, use "everysec".
727 |
728 | # appendfsync always
729 | appendfsync everysec
730 | # appendfsync no
731 |
732 | # When the AOF fsync policy is set to always or everysec, and a background
733 | # saving process (a background save or AOF log background rewriting) is
734 | # performing a lot of I/O against the disk, in some Linux configurations
735 | # Redis may block too long on the fsync() call. Note that there is no fix for
736 | # this currently, as even performing fsync in a different thread will block
737 | # our synchronous write(2) call.
738 | #
739 | # In order to mitigate this problem it's possible to use the following option
740 | # that will prevent fsync() from being called in the main process while a
741 | # BGSAVE or BGREWRITEAOF is in progress.
742 | #
743 | # This means that while another child is saving, the durability of Redis is
744 | # the same as "appendfsync none". In practical terms, this means that it is
745 | # possible to lose up to 30 seconds of log in the worst scenario (with the
746 | # default Linux settings).
747 | #
748 | # If you have latency problems turn this to "yes". Otherwise leave it as
749 | # "no" that is the safest pick from the point of view of durability.
750 |
751 | no-appendfsync-on-rewrite no
752 |
753 | # Automatic rewrite of the append only file.
754 | # Redis is able to automatically rewrite the log file implicitly calling
755 | # BGREWRITEAOF when the AOF log size grows by the specified percentage.
756 | #
757 | # This is how it works: Redis remembers the size of the AOF file after the
758 | # latest rewrite (if no rewrite has happened since the restart, the size of
759 | # the AOF at startup is used).
760 | #
761 | # This base size is compared to the current size. If the current size is
762 | # bigger than the specified percentage, the rewrite is triggered. Also
763 | # you need to specify a minimal size for the AOF file to be rewritten, this
764 | # is useful to avoid rewriting the AOF file even if the percentage increase
765 | # is reached but it is still pretty small.
766 | #
767 | # Specify a percentage of zero in order to disable the automatic AOF
768 | # rewrite feature.
769 |
770 | auto-aof-rewrite-percentage 100
771 | auto-aof-rewrite-min-size 64mb
772 |
773 | # An AOF file may be found to be truncated at the end during the Redis
774 | # startup process, when the AOF data gets loaded back into memory.
775 | # This may happen when the system where Redis is running
776 | # crashes, especially when an ext4 filesystem is mounted without the
777 | # data=ordered option (however this can't happen when Redis itself
778 | # crashes or aborts but the operating system still works correctly).
779 | #
780 | # Redis can either exit with an error when this happens, or load as much
781 | # data as possible (the default now) and start if the AOF file is found
782 | # to be truncated at the end. The following option controls this behavior.
783 | #
784 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and
785 | # the Redis server starts emitting a log to inform the user of the event.
786 | # Otherwise if the option is set to no, the server aborts with an error
# and refuses to start. When the option is set to no, the user is required
788 | # to fix the AOF file using the "redis-check-aof" utility before to restart
789 | # the server.
790 | #
791 | # Note that if the AOF file will be found to be corrupted in the middle
792 | # the server will still exit with an error. This option only applies when
793 | # Redis will try to read more data from the AOF file but not enough bytes
794 | # will be found.
795 | aof-load-truncated yes
796 |
797 | # When rewriting the AOF file, Redis is able to use an RDB preamble in the
798 | # AOF file for faster rewrites and recoveries. When this option is turned
799 | # on the rewritten AOF file is composed of two different stanzas:
800 | #
801 | # [RDB file][AOF tail]
802 | #
803 | # When loading Redis recognizes that the AOF file starts with the "REDIS"
804 | # string and loads the prefixed RDB file, and continues loading the AOF
805 | # tail.
806 | aof-use-rdb-preamble yes
807 |
808 | ################################ LUA SCRIPTING ###############################
809 |
810 | # Max execution time of a Lua script in milliseconds.
811 | #
812 | # If the maximum execution time is reached Redis will log that a script is
813 | # still in execution after the maximum allowed time and will start to
814 | # reply to queries with an error.
815 | #
816 | # When a long running script exceeds the maximum execution time only the
817 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called any write commands. The second
819 | # is the only way to shut down the server in the case a write command was
820 | # already issued by the script but the user doesn't want to wait for the natural
821 | # termination of the script.
822 | #
823 | # Set it to 0 or a negative value for unlimited execution without warnings.
824 | lua-time-limit 5000
825 |
826 | ################################ REDIS CLUSTER ###############################
827 | #
828 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
829 | # WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however
830 | # in order to mark it as "mature" we need to wait for a non trivial percentage
831 | # of users to deploy it in production.
832 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
833 | #
834 | # Normal Redis instances can't be part of a Redis Cluster; only nodes that are
835 | # started as cluster nodes can. In order to start a Redis instance as a
836 | # cluster node enable the cluster support uncommenting the following:
837 | #
838 | # cluster-enabled yes
839 |
840 | # Every cluster node has a cluster configuration file. This file is not
841 | # intended to be edited by hand. It is created and updated by Redis nodes.
842 | # Every Redis Cluster node requires a different cluster configuration file.
843 | # Make sure that instances running in the same system do not have
844 | # overlapping cluster configuration file names.
845 | #
846 | # cluster-config-file nodes-6379.conf
847 |
848 | # Cluster node timeout is the amount of milliseconds a node must be unreachable
849 | # for it to be considered in failure state.
850 | # Most other internal time limits are multiple of the node timeout.
851 | #
852 | # cluster-node-timeout 15000
853 |
854 | # A replica of a failing master will avoid to start a failover if its data
855 | # looks too old.
856 | #
857 | # There is no simple way for a replica to actually have an exact measure of
858 | # its "data age", so the following two checks are performed:
859 | #
860 | # 1) If there are multiple replicas able to failover, they exchange messages
861 | # in order to try to give an advantage to the replica with the best
862 | # replication offset (more data from the master processed).
863 | # Replicas will try to get their rank by offset, and apply to the start
864 | # of the failover a delay proportional to their rank.
865 | #
866 | # 2) Every single replica computes the time of the last interaction with
867 | # its master. This can be the last ping or command received (if the master
868 | # is still in the "connected" state), or the time that elapsed since the
869 | # disconnection with the master (if the replication link is currently down).
870 | # If the last interaction is too old, the replica will not try to failover
871 | # at all.
872 | #
873 | # The point "2" can be tuned by user. Specifically a replica will not perform
874 | # the failover if, since the last interaction with the master, the time
875 | # elapsed is greater than:
876 | #
877 | # (node-timeout * replica-validity-factor) + repl-ping-replica-period
878 | #
879 | # So for example if node-timeout is 30 seconds, and the replica-validity-factor
880 | # is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
881 | # replica will not try to failover if it was not able to talk with the master
882 | # for longer than 310 seconds.
883 | #
884 | # A large replica-validity-factor may allow replicas with too old data to failover
885 | # a master, while a too small value may prevent the cluster from being able to
886 | # elect a replica at all.
887 | #
888 | # For maximum availability, it is possible to set the replica-validity-factor
889 | # to a value of 0, which means, that replicas will always try to failover the
890 | # master regardless of the last time they interacted with the master.
891 | # (However they'll always try to apply a delay proportional to their
892 | # offset rank).
893 | #
894 | # Zero is the only value able to guarantee that when all the partitions heal
895 | # the cluster will always be able to continue.
896 | #
897 | # cluster-replica-validity-factor 10
898 |
899 | # Cluster replicas are able to migrate to orphaned masters, that are masters
900 | # that are left without working replicas. This improves the cluster ability
901 | # to resist to failures as otherwise an orphaned master can't be failed over
902 | # in case of failure if it has no working replicas.
903 | #
904 | # Replicas migrate to orphaned masters only if there are still at least a
905 | # given number of other working replicas for their old master. This number
906 | # is the "migration barrier". A migration barrier of 1 means that a replica
907 | # will migrate only if there is at least 1 other working replica for its master
908 | # and so forth. It usually reflects the number of replicas you want for every
909 | # master in your cluster.
910 | #
911 | # Default is 1 (replicas migrate only if their masters remain with at least
912 | # one replica). To disable migration just set it to a very large value.
913 | # A value of 0 can be set but is useful only for debugging and dangerous
914 | # in production.
915 | #
916 | # cluster-migration-barrier 1
917 |
918 | # By default Redis Cluster nodes stop accepting queries if they detect there
919 | # is at least an hash slot uncovered (no available node is serving it).
920 | # This way if the cluster is partially down (for example a range of hash slots
921 | # are no longer covered) all the cluster becomes, eventually, unavailable.
922 | # It automatically returns available as soon as all the slots are covered again.
923 | #
924 | # However sometimes you want the subset of the cluster which is working,
925 | # to continue to accept queries for the part of the key space that is still
926 | # covered. In order to do so, just set the cluster-require-full-coverage
927 | # option to no.
928 | #
929 | # cluster-require-full-coverage yes
930 |
931 | # This option, when set to yes, prevents replicas from trying to failover its
932 | # master during master failures. However the master can still perform a
933 | # manual failover, if forced to do so.
934 | #
935 | # This is useful in different scenarios, especially in the case of multiple
936 | # data center operations, where we want one side to never be promoted if not
937 | # in the case of a total DC failure.
938 | #
939 | # cluster-replica-no-failover no
940 |
941 | # In order to setup your cluster make sure to read the documentation
942 | # available at http://redis.io web site.
943 |
944 | ########################## CLUSTER DOCKER/NAT support ########################
945 |
946 | # In certain deployments, Redis Cluster nodes address discovery fails, because
947 | # addresses are NAT-ted or because ports are forwarded (the typical case is
948 | # Docker and other containers).
949 | #
950 | # In order to make Redis Cluster working in such environments, a static
951 | # configuration where each node knows its public address is needed. The
952 | # following two options are used for this scope, and are:
953 | #
954 | # * cluster-announce-ip
955 | # * cluster-announce-port
956 | # * cluster-announce-bus-port
957 | #
958 | # Each instruct the node about its address, client port, and cluster message
959 | # bus port. The information is then published in the header of the bus packets
960 | # so that other nodes will be able to correctly map the address of the node
961 | # publishing the information.
962 | #
963 | # If the above options are not used, the normal Redis Cluster auto-detection
964 | # will be used instead.
965 | #
966 | # Note that when remapped, the bus port may not be at the fixed offset of
967 | # clients port + 10000, so you can specify any port and bus-port depending
968 | # on how they get remapped. If the bus-port is not set, a fixed offset of
969 | # 10000 will be used as usually.
970 | #
971 | # Example:
972 | #
973 | # cluster-announce-ip 10.1.1.5
974 | # cluster-announce-port 6379
975 | # cluster-announce-bus-port 6380
976 |
977 | ################################## SLOW LOG ###################################
978 |
979 | # The Redis Slow Log is a system to log queries that exceeded a specified
980 | # execution time. The execution time does not include the I/O operations
981 | # like talking with the client, sending the reply and so forth,
982 | # but just the time needed to actually execute the command (this is the only
983 | # stage of command execution where the thread is blocked and can not serve
984 | # other requests in the meantime).
985 | #
986 | # You can configure the slow log with two parameters: one tells Redis
987 | # what is the execution time, in microseconds, to exceed in order for the
988 | # command to get logged, and the other parameter is the length of the
989 | # slow log. When a new command is logged the oldest one is removed from the
990 | # queue of logged commands.
991 |
992 | # The following time is expressed in microseconds, so 1000000 is equivalent
993 | # to one second. Note that a negative number disables the slow log, while
994 | # a value of zero forces the logging of every command.
995 | slowlog-log-slower-than 10000
996 |
997 | # There is no limit to this length. Just be aware that it will consume memory.
998 | # You can reclaim memory used by the slow log with SLOWLOG RESET.
999 | slowlog-max-len 128
1000 |
1001 | ################################ LATENCY MONITOR ##############################
1002 |
1003 | # The Redis latency monitoring subsystem samples different operations
1004 | # at runtime in order to collect data related to possible sources of
1005 | # latency of a Redis instance.
1006 | #
1007 | # Via the LATENCY command this information is available to the user that can
1008 | # print graphs and obtain reports.
1009 | #
1010 | # The system only logs operations that were performed in a time equal or
1011 | # greater than the amount of milliseconds specified via the
1012 | # latency-monitor-threshold configuration directive. When its value is set
1013 | # to zero, the latency monitor is turned off.
1014 | #
1015 | # By default latency monitoring is disabled since it is mostly not needed
1016 | # if you don't have latency issues, and collecting data has a performance
1017 | # impact, that while very small, can be measured under big load. Latency
1018 | # monitoring can easily be enabled at runtime using the command
1019 | # "CONFIG SET latency-monitor-threshold " if needed.
1020 | latency-monitor-threshold 0
1021 |
1022 | ############################# EVENT NOTIFICATION ##############################
1023 |
1024 | # Redis can notify Pub/Sub clients about events happening in the key space.
1025 | # This feature is documented at http://redis.io/topics/notifications
1026 | #
1027 | # For instance if keyspace events notification is enabled, and a client
1028 | # performs a DEL operation on key "foo" stored in the Database 0, two
1029 | # messages will be published via Pub/Sub:
1030 | #
1031 | # PUBLISH __keyspace@0__:foo del
1032 | # PUBLISH __keyevent@0__:del foo
1033 | #
1034 | # It is possible to select the events that Redis will notify among a set
1035 | # of classes. Every class is identified by a single character:
1036 | #
# K Keyspace events, published with __keyspace@<db>__ prefix.
# E Keyevent events, published with __keyevent@<db>__ prefix.
1039 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
1040 | # $ String commands
1041 | # l List commands
1042 | # s Set commands
1043 | # h Hash commands
1044 | # z Sorted set commands
1045 | # x Expired events (events generated every time a key expires)
1046 | # e Evicted events (events generated when a key is evicted for maxmemory)
1047 | # A Alias for g$lshzxe, so that the "AKE" string means all the events.
1048 | #
1049 | # The "notify-keyspace-events" takes as argument a string that is composed
1050 | # of zero or multiple characters. The empty string means that notifications
1051 | # are disabled.
1052 | #
1053 | # Example: to enable list and generic events, from the point of view of the
1054 | # event name, use:
1055 | #
1056 | # notify-keyspace-events Elg
1057 | #
1058 | # Example 2: to get the stream of the expired keys subscribing to channel
1059 | # name __keyevent@0__:expired use:
1060 | #
1061 | # notify-keyspace-events Ex
1062 | #
1063 | # By default all notifications are disabled because most users don't need
1064 | # this feature and the feature has some overhead. Note that if you don't
1065 | # specify at least one of K or E, no events will be delivered.
1066 | notify-keyspace-events ""
1067 |
1068 | ############################### ADVANCED CONFIG ###############################
1069 |
1070 | # Hashes are encoded using a memory efficient data structure when they have a
1071 | # small number of entries, and the biggest entry does not exceed a given
1072 | # threshold. These thresholds can be configured using the following directives.
1073 | hash-max-ziplist-entries 512
1074 | hash-max-ziplist-value 64
1075 |
1076 | # Lists are also encoded in a special way to save a lot of space.
1077 | # The number of entries allowed per internal list node can be specified
1078 | # as a fixed maximum size or a maximum number of elements.
1079 | # For a fixed maximum size, use -5 through -1, meaning:
1080 | # -5: max size: 64 Kb <-- not recommended for normal workloads
1081 | # -4: max size: 32 Kb <-- not recommended
1082 | # -3: max size: 16 Kb <-- probably not recommended
1083 | # -2: max size: 8 Kb <-- good
1084 | # -1: max size: 4 Kb <-- good
1085 | # Positive numbers mean store up to _exactly_ that number of elements
1086 | # per list node.
1087 | # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
1088 | # but if your use case is unique, adjust the settings as necessary.
1089 | list-max-ziplist-size -2
1090 |
1091 | # Lists may also be compressed.
1092 | # Compress depth is the number of quicklist ziplist nodes from *each* side of
1093 | # the list to *exclude* from compression. The head and tail of the list
1094 | # are always uncompressed for fast push/pop operations. Settings are:
1095 | # 0: disable all list compression
1096 | # 1: depth 1 means "don't start compressing until after 1 node into the list,
1097 | # going from either the head or tail"
1098 | # So: [head]->node->node->...->node->[tail]
1099 | # [head], [tail] will always be uncompressed; inner nodes will compress.
1100 | # 2: [head]->[next]->node->node->...->node->[prev]->[tail]
1101 | # 2 here means: don't compress head or head->next or tail->prev or tail,
1102 | # but compress all nodes between them.
1103 | # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
1104 | # etc.
1105 | list-compress-depth 0
1106 |
1107 | # Sets have a special encoding in just one case: when a set is composed
1108 | # of just strings that happen to be integers in radix 10 in the range
1109 | # of 64 bit signed integers.
1110 | # The following configuration setting sets the limit in the size of the
1111 | # set in order to use this special memory saving encoding.
1112 | set-max-intset-entries 512
1113 |
1114 | # Similarly to hashes and lists, sorted sets are also specially encoded in
1115 | # order to save a lot of space. This encoding is only used when the length and
1116 | # elements of a sorted set are below the following limits:
1117 | zset-max-ziplist-entries 128
1118 | zset-max-ziplist-value 64
1119 |
1120 | # HyperLogLog sparse representation bytes limit. The limit includes the
1121 | # 16 bytes header. When an HyperLogLog using the sparse representation crosses
1122 | # this limit, it is converted into the dense representation.
1123 | #
1124 | # A value greater than 16000 is totally useless, since at that point the
1125 | # dense representation is more memory efficient.
1126 | #
1127 | # The suggested value is ~ 3000 in order to have the benefits of
1128 | # the space efficient encoding without slowing down too much PFADD,
1129 | # which is O(N) with the sparse encoding. The value can be raised to
1130 | # ~ 10000 when CPU is not a concern, but space is, and the data set is
1131 | # composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
1132 | hll-sparse-max-bytes 3000
1133 |
1134 | # Streams macro node max size / items. The stream data structure is a radix
1135 | # tree of big nodes that encode multiple items inside. Using this configuration
1136 | # it is possible to configure how big a single node can be in bytes, and the
1137 | # maximum number of items it may contain before switching to a new node when
1138 | # appending new stream entries. If any of the following settings are set to
1139 | # zero, the limit is ignored, so for instance it is possible to set just a
1140 | # max entries limit by setting max-bytes to 0 and max-entries to the desired
1141 | # value.
1142 | stream-node-max-bytes 4096
1143 | stream-node-max-entries 100
1144 |
1145 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
1146 | # order to help rehashing the main Redis hash table (the one mapping top-level
1147 | # keys to values). The hash table implementation Redis uses (see dict.c)
1148 | # performs a lazy rehashing: the more operation you run into a hash table
1149 | # that is rehashing, the more rehashing "steps" are performed, so if the
1150 | # server is idle the rehashing is never complete and some more memory is used
1151 | # by the hash table.
1152 | #
1153 | # The default is to use this millisecond 10 times every second in order to
1154 | # actively rehash the main dictionaries, freeing memory when possible.
1155 | #
1156 | # If unsure:
1157 | # use "activerehashing no" if you have hard latency requirements and it is
1158 | # not a good thing in your environment that Redis can reply from time to time
1159 | # to queries with 2 milliseconds delay.
1160 | #
1161 | # use "activerehashing yes" if you don't have such hard requirements but
1162 | # want to free memory asap when possible.
1163 | activerehashing yes
1164 |
1165 | # The client output buffer limits can be used to force disconnection of clients
1166 | # that are not reading data from the server fast enough for some reason (a
1167 | # common reason is that a Pub/Sub client can't consume messages as fast as the
1168 | # publisher can produce them).
1169 | #
1170 | # The limit can be set differently for the three different classes of clients:
1171 | #
1172 | # normal -> normal clients including MONITOR clients
1173 | # replica -> replica clients
1174 | # pubsub -> clients subscribed to at least one pubsub channel or pattern
1175 | #
1176 | # The syntax of every client-output-buffer-limit directive is the following:
1177 | #
1178 | # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
1179 | #
1180 | # A client is immediately disconnected once the hard limit is reached, or if
1181 | # the soft limit is reached and remains reached for the specified number of
1182 | # seconds (continuously).
1183 | # So for instance if the hard limit is 32 megabytes and the soft limit is
1184 | # 16 megabytes / 10 seconds, the client will get disconnected immediately
1185 | # if the size of the output buffers reach 32 megabytes, but will also get
1186 | # disconnected if the client reaches 16 megabytes and continuously overcomes
1187 | # the limit for 10 seconds.
1188 | #
1189 | # By default normal clients are not limited because they don't receive data
1190 | # without asking (in a push way), but just after a request, so only
1191 | # asynchronous clients may create a scenario where data is requested faster
1192 | # than it can read.
1193 | #
1194 | # Instead there is a default limit for pubsub and replica clients, since
1195 | # subscribers and replicas receive data in a push fashion.
1196 | #
1197 | # Both the hard or the soft limit can be disabled by setting them to zero.
1198 | client-output-buffer-limit normal 0 0 0
1199 | client-output-buffer-limit replica 256mb 64mb 60
1200 | client-output-buffer-limit pubsub 32mb 8mb 60
1201 |
1202 | # Client query buffers accumulate new commands. They are limited to a fixed
1203 | # amount by default in order to avoid that a protocol desynchronization (for
1204 | # instance due to a bug in the client) will lead to unbound memory usage in
1205 | # the query buffer. However you can configure it here if you have very special
1206 | # needs, such as huge multi/exec requests or alike.
1207 | #
1208 | # client-query-buffer-limit 1gb
1209 |
1210 | # In the Redis protocol, bulk requests, that are, elements representing single
1211 | # strings, are normally limited to 512 mb. However you can change this limit
1212 | # here.
1213 | #
1214 | # proto-max-bulk-len 512mb
1215 |
1216 | # Redis calls an internal function to perform many background tasks, like
1217 | # closing connections of clients in timeout, purging expired keys that are
1218 | # never requested, and so forth.
1219 | #
1220 | # Not all tasks are performed with the same frequency, but Redis checks for
1221 | # tasks to perform according to the specified "hz" value.
1222 | #
1223 | # By default "hz" is set to 10. Raising the value will use more CPU when
1224 | # Redis is idle, but at the same time will make Redis more responsive when
1225 | # there are many keys expiring at the same time, and timeouts may be
1226 | # handled with more precision.
1227 | #
1228 | # The range is between 1 and 500, however a value over 100 is usually not
1229 | # a good idea. Most users should use the default of 10 and raise this up to
1230 | # 100 only in environments where very low latency is required.
1231 | hz 10
1232 |
1233 | # Normally it is useful to have an HZ value which is proportional to the
1234 | # number of clients connected. This is useful in order, for instance, to
1235 | # avoid too many clients are processed for each background task invocation
1236 | # in order to avoid latency spikes.
1237 | #
1238 | # Since the default HZ value by default is conservatively set to 10, Redis
1239 | # offers, and enables by default, the ability to use an adaptive HZ value
1240 | # which will temporary raise when there are many connected clients.
1241 | #
1242 | # When dynamic HZ is enabled, the actual configured HZ will be used
1243 | # as a baseline, but multiples of the configured HZ value will be actually
1244 | # used as needed once more clients are connected. In this way an idle
1245 | # instance will use very little CPU time while a busy instance will be
1246 | # more responsive.
1247 | dynamic-hz yes
1248 |
1249 | # When a child rewrites the AOF file, if the following option is enabled
1250 | # the file will be fsync-ed every 32 MB of data generated. This is useful
1251 | # in order to commit the file to the disk more incrementally and avoid
1252 | # big latency spikes.
1253 | aof-rewrite-incremental-fsync yes
1254 |
1255 | # When redis saves RDB file, if the following option is enabled
1256 | # the file will be fsync-ed every 32 MB of data generated. This is useful
1257 | # in order to commit the file to the disk more incrementally and avoid
1258 | # big latency spikes.
1259 | rdb-save-incremental-fsync yes
1260 |
1261 | # Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
1262 | # idea to start with the default settings and only change them after investigating
1263 | # how to improve the performances and how the keys LFU change over time, which
1264 | # is possible to inspect via the OBJECT FREQ command.
1265 | #
1266 | # There are two tunable parameters in the Redis LFU implementation: the
1267 | # counter logarithm factor and the counter decay time. It is important to
1268 | # understand what the two parameters mean before changing them.
1269 | #
1270 | # The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis
1271 | # uses a probabilistic increment with logarithmic behavior. Given the value
1272 | # of the old counter, when a key is accessed, the counter is incremented in
1273 | # this way:
1274 | #
1275 | # 1. A random number R between 0 and 1 is extracted.
1276 | # 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
1277 | # 3. The counter is incremented only if R < P.
1278 | #
1279 | # The default lfu-log-factor is 10. This is a table of how the frequency
1280 | # counter changes with a different number of accesses with different
1281 | # logarithmic factors:
1282 | #
1283 | # +--------+------------+------------+------------+------------+------------+
1284 | # | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits |
1285 | # +--------+------------+------------+------------+------------+------------+
1286 | # | 0 | 104 | 255 | 255 | 255 | 255 |
1287 | # +--------+------------+------------+------------+------------+------------+
1288 | # | 1 | 18 | 49 | 255 | 255 | 255 |
1289 | # +--------+------------+------------+------------+------------+------------+
1290 | # | 10 | 10 | 18 | 142 | 255 | 255 |
1291 | # +--------+------------+------------+------------+------------+------------+
1292 | # | 100 | 8 | 11 | 49 | 143 | 255 |
1293 | # +--------+------------+------------+------------+------------+------------+
1294 | #
1295 | # NOTE: The above table was obtained by running the following commands:
1296 | #
1297 | # redis-benchmark -n 1000000 incr foo
1298 | # redis-cli object freq foo
1299 | #
1300 | # NOTE 2: The counter initial value is 5 in order to give new objects a chance
1301 | # to accumulate hits.
1302 | #
1303 | # The counter decay time is the time, in minutes, that must elapse in order
1304 | # for the key counter to be divided by two (or decremented if it has a value
1305 | # less than or equal to 10).
1306 | #
1307 | # The default value for the lfu-decay-time is 1. A special value of 0 means to
1308 | # decay the counter every time it happens to be scanned.
1309 | #
1310 | # lfu-log-factor 10
1311 | # lfu-decay-time 1
1312 |
1313 | ########################### ACTIVE DEFRAGMENTATION #######################
1314 | #
1315 | # WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested
1316 | # even in production and manually tested by multiple engineers for some
1317 | # time.
1318 | #
1319 | # What is active defragmentation?
1320 | # -------------------------------
1321 | #
1322 | # Active (online) defragmentation allows a Redis server to compact the
1323 | # spaces left between small allocations and deallocations of data in memory,
1324 | # thus allowing to reclaim back memory.
1325 | #
1326 | # Fragmentation is a natural process that happens with every allocator (but
1327 | # less so with Jemalloc, fortunately) and certain workloads. Normally a server
1328 | # restart is needed in order to lower the fragmentation, or at least to flush
1329 | # away all the data and create it again. However thanks to this feature
1330 | # implemented by Oran Agra for Redis 4.0 this process can happen at runtime
1331 | # in an "hot" way, while the server is running.
1332 | #
1333 | # Basically when the fragmentation is over a certain level (see the
1334 | # configuration options below) Redis will start to create new copies of the
1335 | # values in contiguous memory regions by exploiting certain specific Jemalloc
1336 | # features (in order to understand if an allocation is causing fragmentation
1337 | # and to allocate it in a better place), and at the same time, will release the
1338 | # old copies of the data. This process, repeated incrementally for all the keys
1339 | # will cause the fragmentation to drop back to normal values.
1340 | #
1341 | # Important things to understand:
1342 | #
1343 | # 1. This feature is disabled by default, and only works if you compiled Redis
1344 | # to use the copy of Jemalloc we ship with the source code of Redis.
1345 | # This is the default with Linux builds.
1346 | #
1347 | # 2. You never need to enable this feature if you don't have fragmentation
1348 | # issues.
1349 | #
1350 | # 3. Once you experience fragmentation, you can enable this feature when
1351 | # needed with the command "CONFIG SET activedefrag yes".
1352 | #
1353 | # The configuration parameters are able to fine tune the behavior of the
1354 | # defragmentation process. If you are not sure about what they mean it is
1355 | # a good idea to leave the defaults untouched.
1356 |
1357 | # Enable active defragmentation
1358 | # activedefrag yes
1359 |
1360 | # Minimum amount of fragmentation waste to start active defrag
1361 | # active-defrag-ignore-bytes 100mb
1362 |
1363 | # Minimum percentage of fragmentation to start active defrag
1364 | # active-defrag-threshold-lower 10
1365 |
1366 | # Maximum percentage of fragmentation at which we use maximum effort
1367 | # active-defrag-threshold-upper 100
1368 |
1369 | # Minimal effort for defrag in CPU percentage
1370 | # active-defrag-cycle-min 5
1371 |
1372 | # Maximal effort for defrag in CPU percentage
1373 | # active-defrag-cycle-max 75
1374 |
1375 | # Maximum number of set/hash/zset/list fields that will be processed from
1376 | # the main dictionary scan
1377 | # active-defrag-max-scan-fields 1000
1378 |
--------------------------------------------------------------------------------
/zookeeper/Dockerfile:
--------------------------------------------------------------------------------
# Apache ZooKeeper service image, built directly on the official image.
# NOTE(review): "latest" is an unpinned tag — builds are not reproducible and may
# silently change major versions; consider pinning a specific version tag.
1 | FROM zookeeper:latest
2 |
# Exposed ports — assumed to be the standard ZooKeeper defaults:
# 2181 (client connections), 2888 (follower-to-leader), 3888 (leader election),
# 8080 (AdminServer HTTP) — TODO confirm against the deployed ZooKeeper config.
3 | EXPOSE 2181 2888 3888 8080
--------------------------------------------------------------------------------