├── .gitignore ├── mysql ├── Dockerfile └── mysqld.cnf ├── mysql5 ├── Dockerfile └── mysqld.cnf ├── redis ├── Dockerfile └── redis.conf ├── docker-compose.yml ├── inotify └── Dockerfile ├── 1.x-lts └── Dockerfile ├── 2.x-lts └── Dockerfile ├── master └── Dockerfile ├── release └── Dockerfile └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .idea -------------------------------------------------------------------------------- /mysql/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mysql 2 | 3 | RUN mkdir -p \ 4 | /var/lib/mysql \ 5 | /var/run/mysqld && \ 6 | chown mysql /var/lib/mysql && \ 7 | chown mysql /var/run/mysqld 8 | 9 | COPY mysqld.cnf /etc/mysql/mysql.conf.d/mysqld.cnf 10 | 11 | RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ 12 | echo 'Asia/Shanghai' > /etc/timezone 13 | -------------------------------------------------------------------------------- /mysql5/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mysql:5 2 | 3 | RUN mkdir -p \ 4 | /var/lib/mysql \ 5 | /var/run/mysqld && \ 6 | chown mysql /var/lib/mysql && \ 7 | chown mysql /var/run/mysqld 8 | 9 | COPY mysqld.cnf /etc/mysql/mysql.conf.d/mysqld.cnf 10 | 11 | RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ 12 | echo 'Asia/Shanghai' > /etc/timezone 13 | -------------------------------------------------------------------------------- /redis/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM redis:4 2 | 3 | RUN mkdir -p \ 4 | /var/lib/redis \ 5 | /var/run/redis && \ 6 | chown redis /var/lib/redis && \ 7 | chown redis /var/run/redis 8 | 9 | COPY redis.conf /usr/local/etc/redis/redis.conf 10 | 11 | CMD [ "redis-server", "/usr/local/etc/redis/redis.conf" ] 12 | 13 | RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ 14 | echo 
'Asia/Shanghai' > /etc/timezone 15 | -------------------------------------------------------------------------------- /mysql/mysqld.cnf: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | socket = /var/run/mysqld/mysqld.sock 3 | pid-file = /var/run/mysqld/mysqld.pid 4 | 5 | default_authentication_plugin = caching_sha2_password 6 | back_log = 500 7 | max_connections = 3000 8 | max_allowed_packet = 64M 9 | 10 | log-error = /var/lib/mysql/error.log 11 | general_log = 1 12 | general_log_file = /var/lib/mysql/general.log 13 | slow_query_log = 1 14 | slow_query_log_file = /var/lib/mysql/slow.log 15 | long_query_time = 1 16 | log_slow_admin_statements = 1 17 | log_queries_not_using_indexes = 1 18 | 19 | interactive_timeout = 28800 20 | wait_timeout = 28800 21 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.4' 2 | services: 3 | swoole: 4 | image: "twosee/swoole-coroutine" 5 | ports: 6 | - "9501:9501" 7 | volumes: 8 | - ./src:/app/src:rw 9 | restart: always 10 | depends_on: 11 | - mysql 12 | command: php /app/src/server.php start 13 | 14 | mysql: 15 | image: "twosee/swoole-coroutine:mysql" 16 | ports: 17 | - "9502:3306" 18 | volumes: 19 | - ./data/mysql/data:/var/lib/mysql:rw 20 | - ./data/mysql/sock:/var/run/mysqld:rw # remove when windows. 
21 | restart: always 22 | environment: 23 | MYSQL_ROOT_PASSWORD: root_password_here 24 | MYSQL_DATABASE: test 25 | MYSQL_USER: php 26 | MYSQL_PASSWORD: php_user_password_here 27 | 28 | redis: 29 | image: "twosee/swoole-coroutine:redis" 30 | ports: 31 | - "9503:6379" 32 | volumes: 33 | - ./data/redis/data:/var/lib/redis:rw 34 | sysctls: 35 | net.core.somaxconn: 65535 36 | restart: always 37 | 38 | inotify: 39 | image: "twosee/swoole-coroutine:inotify" 40 | volumes: 41 | - ./:/app:rw 42 | restart: always 43 | environment: 44 | APP_ENV: dev # or product 45 | working_dir: /app/util 46 | command: /bin/bash inotify.sh -------------------------------------------------------------------------------- /inotify/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM php:latest 2 | 3 | MAINTAINER twosee 4 | 5 | # install modules : GD iconv 6 | RUN apt-get update && apt-get install -y \ 7 | libfreetype6-dev \ 8 | libjpeg62-turbo-dev \ 9 | libpng-dev \ 10 | openssl \ 11 | libssh-dev \ 12 | libpcre3 \ 13 | libpcre3-dev \ 14 | libnghttp2-dev \ 15 | libhiredis-dev \ 16 | curl \ 17 | wget \ 18 | zip \ 19 | unzip \ 20 | git && \ 21 | docker-php-ext-install iconv && \ 22 | docker-php-ext-configure gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ && \ 23 | docker-php-ext-install gd && \ 24 | apt autoremove && apt clean 25 | 26 | # install php pdo_mysql opcache 27 | # WARNING: Disable opcache-cli if you run you php 28 | RUN docker-php-ext-install pdo_mysql mysqli iconv mbstring json opcache && \ 29 | echo "opcache.enable_cli=1" >> /usr/local/etc/php/conf.d/docker-php-ext-opcache.ini 30 | 31 | #install redis 32 | RUN pecl install redis && docker-php-ext-enable redis 33 | 34 | # install composer 35 | ENV COMPOSER_ALLOW_SUPERUSER 1 36 | RUN curl -sS https://getcomposer.org/installer | php && \ 37 | mv composer.phar /usr/local/bin/composer && \ 38 | composer self-update --clean-backups 39 | 40 | # install inotify 41 | RUN 
apt-get install -y inotify-tools 42 | 43 | # install node npm 44 | RUN apt-get install -my wget gnupg && \ 45 | curl -sL https://deb.nodesource.com/setup_9.x | bash - && \ 46 | apt-get install -y nodejs && \ 47 | npm install -g cnpm --registry=https://registry.npm.taobao.org 48 | 49 | # set China timezone 50 | RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ 51 | echo 'Asia/Shanghai' > /etc/timezone && \ 52 | echo "[Date]\ndate.timezone=Asia/Shanghai" > /usr/local/etc/php/conf.d/timezone.ini -------------------------------------------------------------------------------- /1.x-lts/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM php:latest 2 | 3 | MAINTAINER twosee 4 | 5 | # install modules : GD iconv 6 | RUN apt-get update && apt-get install -y \ 7 | procps \ 8 | libfreetype6-dev \ 9 | libjpeg62-turbo-dev \ 10 | libpng-dev \ 11 | openssl \ 12 | libssh-dev \ 13 | libpcre3 \ 14 | libpcre3-dev \ 15 | libnghttp2-dev \ 16 | libhiredis-dev \ 17 | curl \ 18 | wget \ 19 | zip \ 20 | unzip \ 21 | git && \ 22 | apt autoremove && apt clean 23 | 24 | # install php pdo_mysql opcache 25 | # WARNING: Disable opcache-cli if you run you php 26 | RUN docker-php-ext-configure gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ && \ 27 | docker-php-ext-install \ 28 | iconv \ 29 | gd \ 30 | pdo_mysql \ 31 | mysqli \ 32 | iconv \ 33 | mbstring \ 34 | json \ 35 | opcache \ 36 | sockets \ 37 | pcntl && \ 38 | echo "opcache.enable_cli=1" >> /usr/local/etc/php/conf.d/docker-php-ext-opcache.ini 39 | 40 | #install redis 41 | RUN pecl install redis && docker-php-ext-enable redis 42 | 43 | # install composer 44 | ENV COMPOSER_ALLOW_SUPERUSER 1 45 | RUN curl -sS https://getcomposer.org/installer | php && \ 46 | mv composer.phar /usr/local/bin/composer && \ 47 | composer self-update --clean-backups 48 | 49 | # install swoole 50 | RUN cd /root && \ 51 | curl -o /tmp/swoole.tar.gz 
https://github.com/swoole/swoole-src/archive/1.x-lts.tar.gz -L && \ 52 | tar zxvf /tmp/swoole.tar.gz && cd swoole-src* && \ 53 | phpize && \ 54 | ./configure \ 55 | --enable-openssl \ 56 | --enable-http2 \ 57 | --enable-async-redis \ 58 | --enable-mysqlnd && \ 59 | make && make install && \ 60 | docker-php-ext-enable swoole && \ 61 | echo "swoole.fast_serialize=On" >> /usr/local/etc/php/conf.d/docker-php-ext-swoole-serialize.ini && \ 62 | apt-get install -y gdb && \ 63 | rm -rf /tmp/* 64 | 65 | # set China timezone 66 | RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ 67 | echo 'Asia/Shanghai' > /etc/timezone && \ 68 | echo "[Date]\ndate.timezone=Asia/Shanghai" > /usr/local/etc/php/conf.d/timezone.ini -------------------------------------------------------------------------------- /2.x-lts/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM php:latest 2 | 3 | MAINTAINER twosee 4 | 5 | # install modules : GD iconv 6 | RUN apt-get update && apt-get install -y \ 7 | procps \ 8 | libfreetype6-dev \ 9 | libjpeg62-turbo-dev \ 10 | libpng-dev \ 11 | openssl \ 12 | libssh-dev \ 13 | libpcre3 \ 14 | libpcre3-dev \ 15 | libnghttp2-dev \ 16 | libhiredis-dev \ 17 | curl \ 18 | wget \ 19 | zip \ 20 | unzip \ 21 | git && \ 22 | apt autoremove && apt clean 23 | 24 | # install php pdo_mysql opcache 25 | # WARNING: Disable opcache-cli if you run you php 26 | RUN docker-php-ext-configure gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ && \ 27 | docker-php-ext-install \ 28 | iconv \ 29 | gd \ 30 | pdo_mysql \ 31 | mysqli \ 32 | iconv \ 33 | mbstring \ 34 | json \ 35 | opcache \ 36 | sockets \ 37 | pcntl && \ 38 | echo "opcache.enable_cli=1" >> /usr/local/etc/php/conf.d/docker-php-ext-opcache.ini 39 | 40 | #install redis 41 | RUN pecl install redis && docker-php-ext-enable redis 42 | 43 | # install composer 44 | ENV COMPOSER_ALLOW_SUPERUSER 1 45 | RUN curl -sS https://getcomposer.org/installer | php 
&& \ 46 | mv composer.phar /usr/local/bin/composer && \ 47 | composer self-update --clean-backups 48 | 49 | # install swoole 50 | RUN cd /root && \ 51 | curl -o /tmp/swoole.tar.gz https://github.com/swoole/swoole-src/archive/2.x-lts.tar.gz -L && \ 52 | tar zxvf /tmp/swoole.tar.gz && cd swoole-src* && \ 53 | phpize && \ 54 | ./configure \ 55 | --enable-openssl \ 56 | --enable-http2 \ 57 | --enable-async-redis \ 58 | --enable-mysqlnd && \ 59 | make && make install && \ 60 | docker-php-ext-enable swoole && \ 61 | echo "swoole.fast_serialize=On" >> /usr/local/etc/php/conf.d/docker-php-ext-swoole-serialize.ini && \ 62 | apt-get install -y gdb && \ 63 | rm -rf /tmp/* 64 | 65 | # set China timezone 66 | RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ 67 | echo 'Asia/Shanghai' > /etc/timezone && \ 68 | echo "[Date]\ndate.timezone=Asia/Shanghai" > /usr/local/etc/php/conf.d/timezone.ini -------------------------------------------------------------------------------- /mysql5/mysqld.cnf: -------------------------------------------------------------------------------- 1 | [client] 2 | port = 3306 3 | 4 | [mysqld] 5 | # innodb_buffer_pool_size = 128M 6 | 7 | user = mysql 8 | # basedir = /usr/local/mysql 9 | datadir = /var/lib/mysql 10 | port = 3306 11 | server-id = 1 12 | default-time-zone = +8:00 13 | log_timestamps = SYSTEM 14 | 15 | character-set-server = utf8mb4 16 | collation-server = utf8mb4_unicode_ci 17 | 18 | socket = /var/run/mysqld/mysqld.sock 19 | pid-file = /var/run/mysqld/mysqld.pid 20 | 21 | log-error = /var/lib/mysql/error.log 22 | general_log = 1 23 | general_log_file = /var/lib/mysql/general.log 24 | slow_query_log = 1 25 | slow_query_log_file = /var/lib/mysql/slow.log 26 | long_query_time = 1 27 | log_slow_admin_statements = 1 28 | log_queries_not_using_indexes = 1 29 | 30 | skip-name-resolve 31 | # skip-networking 32 | back_log = 500 33 | 34 | max_connections = 3000 35 | max_connect_errors = 6000 36 | open_files_limit = 65535 37 | 
table_open_cache = 128 38 | max_allowed_packet = 64M 39 | binlog_cache_size = 1M 40 | max_heap_table_size = 8M 41 | tmp_table_size = 16M 42 | 43 | read_buffer_size = 2M 44 | read_rnd_buffer_size = 8M 45 | sort_buffer_size = 8M 46 | join_buffer_size = 28M 47 | key_buffer_size = 4M 48 | 49 | thread_cache_size = 16 50 | 51 | query_cache_type = 1 52 | query_cache_size = 8M 53 | query_cache_limit = 2M 54 | 55 | ft_min_word_len = 4 56 | 57 | log_bin = mysql-bin 58 | binlog_format = mixed 59 | expire_logs_days = 30 60 | 61 | performance_schema = 0 62 | explicit_defaults_for_timestamp 63 | 64 | # lower_case_table_names = 1 65 | 66 | myisam_sort_buffer_size = 8M 67 | myisam_repair_threads = 1 68 | 69 | interactive_timeout = 28800 70 | wait_timeout = 28800 71 | 72 | # Remove leading # to set options mainly useful for reporting servers. 73 | # The server defaults are faster for transactions and fast SELECTs. 74 | # Adjust sizes as needed, experiment to find the optimal values. 75 | # join_buffer_size = 128M 76 | # sort_buffer_size = 2M 77 | # read_rnd_buffer_size = 2M 78 | 79 | # Recommended in standard MySQL setup 80 | sql_mode = NO_ENGINE_SUBSTITUTION,NO_AUTO_CREATE_USER,STRICT_TRANS_TABLES 81 | 82 | [mysqldump] 83 | quick 84 | max_allowed_packet = 64M 85 | 86 | [myisamchk] 87 | key_buffer_size = 8M 88 | sort_buffer_size = 8M 89 | read_buffer = 4M 90 | write_buffer = 4M 91 | -------------------------------------------------------------------------------- /master/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM php:latest 2 | 3 | MAINTAINER twosee 4 | 5 | # install modules : GD iconv 6 | RUN apt-get update && apt-get install -y \ 7 | procps \ 8 | libfreetype6-dev \ 9 | libjpeg62-turbo-dev \ 10 | libpng-dev \ 11 | openssl \ 12 | libssh-dev \ 13 | libpcre3 \ 14 | libpcre3-dev \ 15 | libnghttp2-dev \ 16 | libhiredis-dev \ 17 | curl \ 18 | wget \ 19 | zip \ 20 | unzip \ 21 | git \ 22 | vim && \ 23 | apt autoremove && apt clean 
24 | 25 | # install php pdo_mysql opcache 26 | # WARNING: Disable opcache-cli if you run you php 27 | RUN docker-php-ext-configure gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ && \ 28 | docker-php-ext-install \ 29 | iconv \ 30 | gd \ 31 | pdo_mysql \ 32 | mysqli \ 33 | iconv \ 34 | mbstring \ 35 | json \ 36 | opcache \ 37 | sockets \ 38 | pcntl && \ 39 | echo "opcache.enable_cli=1" >> /usr/local/etc/php/conf.d/docker-php-ext-opcache.ini 40 | 41 | #install redis 42 | RUN pecl install redis && docker-php-ext-enable redis 43 | 44 | # install composer 45 | ENV COMPOSER_ALLOW_SUPERUSER 1 46 | RUN curl -sS https://getcomposer.org/installer | php && \ 47 | mv composer.phar /usr/local/bin/composer && \ 48 | composer self-update --clean-backups 49 | 50 | # dev-tools for master 51 | RUN apt-get install -y gdb valgrind && \ 52 | apt autoremove && apt clean 53 | 54 | # install swoole 55 | RUN cd /root && \ 56 | curl -o /tmp/swoole.tar.gz https://github.com/swoole/swoole-src/archive/master.tar.gz -L && \ 57 | tar zxvf /tmp/swoole.tar.gz && cd swoole-src* && \ 58 | phpize && \ 59 | ./configure \ 60 | --enable-coroutine \ 61 | --enable-openssl \ 62 | --enable-http2 \ 63 | --enable-async-redis \ 64 | --enable-sockets \ 65 | --enable-mysqlnd && \ 66 | make && make install && \ 67 | docker-php-ext-enable swoole && \ 68 | echo "swoole.fast_serialize=On" >> /usr/local/etc/php/conf.d/docker-php-ext-swoole-serialize.ini && \ 69 | rm -rf /tmp/* 70 | 71 | # set China timezone 72 | RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ 73 | echo 'Asia/Shanghai' > /etc/timezone && \ 74 | echo "[Date]\ndate.timezone=Asia/Shanghai" > /usr/local/etc/php/conf.d/timezone.ini -------------------------------------------------------------------------------- /release/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM php:latest 2 | 3 | MAINTAINER twosee 4 | 5 | # install modules : GD iconv 6 | RUN apt-get update && 
apt-get install -y \ 7 | procps \ 8 | libfreetype6-dev \ 9 | libjpeg62-turbo-dev \ 10 | libpng-dev \ 11 | openssl \ 12 | libssh-dev \ 13 | libpcre3 \ 14 | libpcre3-dev \ 15 | libnghttp2-dev \ 16 | libhiredis-dev \ 17 | curl \ 18 | wget \ 19 | zip \ 20 | unzip \ 21 | git && \ 22 | apt autoremove && apt clean 23 | 24 | # install php pdo_mysql opcache 25 | # WARNING: Disable opcache-cli if you run you php 26 | RUN docker-php-ext-configure gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ && \ 27 | docker-php-ext-install \ 28 | iconv \ 29 | gd \ 30 | pdo_mysql \ 31 | mysqli \ 32 | iconv \ 33 | mbstring \ 34 | json \ 35 | opcache \ 36 | sockets \ 37 | pcntl && \ 38 | echo "opcache.enable_cli=1" >> /usr/local/etc/php/conf.d/docker-php-ext-opcache.ini 39 | 40 | #install redis 41 | RUN pecl install redis && docker-php-ext-enable redis 42 | 43 | # install composer 44 | ENV COMPOSER_ALLOW_SUPERUSER 1 45 | RUN curl -sS https://getcomposer.org/installer | php && \ 46 | mv composer.phar /usr/local/bin/composer && \ 47 | composer self-update --clean-backups 48 | 49 | # install swoole 50 | #TIP: it always get last stable version of swoole coroutine. 
51 | RUN cd /root && \ 52 | curl -o /tmp/swoole-releases https://github.com/swoole/swoole-src/releases -L && \ 53 | cat /tmp/swoole-releases | grep 'href=".*archive.*.tar.gz"' | head -1 | \ 54 | awk -F '"' ' {print "curl -o /tmp/swoole.tar.gz https://github.com"$2" -L" > "/tmp/swoole.download"}' && \ 55 | sh /tmp/swoole.download && \ 56 | tar zxvf /tmp/swoole.tar.gz && cd swoole-src* && \ 57 | phpize && \ 58 | ./configure \ 59 | --enable-coroutine \ 60 | --enable-openssl \ 61 | --enable-http2 \ 62 | --enable-async-redis \ 63 | --enable-mysqlnd && \ 64 | make && make install && \ 65 | docker-php-ext-enable swoole && \ 66 | echo "swoole.fast_serialize=On" >> /usr/local/etc/php/conf.d/docker-php-ext-swoole-serialize.ini && \ 67 | rm -rf /tmp/* 68 | 69 | # set China timezone 70 | RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ 71 | echo 'Asia/Shanghai' > /etc/timezone && \ 72 | echo "[Date]\ndate.timezone=Asia/Shanghai" > /usr/local/etc/php/conf.d/timezone.ini 73 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Swoole-coroutine 2 | 3 | [**docker-hub**](https://hub.docker.com/r/twosee/swoole-coroutine/) 4 | 5 | [**github**](https://github.com/twose/swoole-coroutine-docker) 6 | 7 | ### Usage 8 | 9 | ```Bash 10 | docker pull twosee/swoole-coroutine 11 | docker pull twosee/swoole-coroutine:mysql 12 | docker pull twosee/swoole-coroutine:redis 13 | ``` 14 | ```Bash 15 | docker run -d --name=swoole \ 16 | -v /workdir:/workdir \ 17 | -p 9501:9501 \ 18 | twosee/swoole-coroutine \ 19 | php /app/server.php start 20 | ``` 21 | OR 22 | ```Bash 23 | docker-compose up 24 | ``` 25 | ### Introduction 26 | 27 | - 基于最新PHP7.2-cli版本 28 | - 使用swoole1.X~4.X最新版本构建, 所有功能火力全开 29 | - 提供Swoole的绝佳搭档:` MySQL`, `Redis`, `Inotify`, 配合`docker-compose`, 实现开箱即用 30 | - 已安装 ["GD", "iconv", "pdo_mysql", "dom", "xml", "curl", "swoole"]等PHP扩展 31 | - 
已开启["coroutine", "openssl", "http2", "async-redis", "mysqlnd", "swoole-serialize"]等所有功能 32 | - 纯环境 , 0冗余 , 绿色清洁 , 无任何php代码 33 | - 默认中国上海时区 34 | - inotify提供自动化热更新等支持 35 | 36 | --- 37 | 38 | - Based on PHP7.2-cli 39 | - use swoole 1.*, 2.* and 4.*(libco) latest stable version, All functions are fully open 40 | - Provide the perfect partner for Swoole such as ` MySQL`, `Redis`, `Inotify` images, you can also use `docker-compose`, Out of the box. 41 | - PHP extension installed: ["GD", "iconv", "pdo_mysql", "dom", "xml", "curl", "swoole"] 42 | - enable ["coroutine", "openssl", "http2", "async-redis", "mysqlnd", "swoole-serialize"] 43 | - this container has no PHP code or framework included 44 | - Asia/Shanghai timezone default (you can remove it on the last RUN line) 45 | - Inotify provides hot auto updates and other support 46 | 47 | ### Version 48 | 49 | | DIR | INTRO | Tag | 50 | | -------- | ---------------------------------------------------- | ------- | 51 | | /master | Latest master version (Experimental type) | latest | 52 | | /mysql | It's a perfect MySQL8's docker | mysql | 53 | | /mysql5 | It's a perfect MySQL5's docker | mysql5 | 54 | | /redis | It's a perfect Redis's docker | redis | 55 | | /inotify | inotify, composer, git, node, to support hot updates | inotify | 56 | | /release | Latest release version | release | 57 | | /1.x-lts | Latest version from branch 1.x-lts | 1.x-lts | 58 | | /2.x-lts | Latest version from branch 2.x-lts | 2.x-lts | 59 | 60 | ### Docker-compose 61 | 62 | Swoole + mysql + redis 63 | 64 | ```yaml 65 | version: '3.4' 66 | services: 67 | swoole: 68 | image: "twosee/swoole-coroutine" 69 | ports: 70 | - "9501:9501" 71 | volumes: 72 | - ./src:/app/src:rw 73 | restart: always 74 | depends_on: 75 | - mysql 76 | command: php /app/src/server.php start 77 | 78 | mysql: 79 | image: "twosee/swoole-coroutine:mysql" 80 | ports: 81 | - "9502:3306" 82 | volumes: 83 | - ./data/mysql/data:/var/lib/mysql:rw 84 | - 
./data/mysql/sock:/var/run/mysqld:rw # remove when windows. 85 | restart: always 86 | environment: 87 | MYSQL_ROOT_PASSWORD: root_password_here 88 | MYSQL_DATABASE: test 89 | MYSQL_USER: php 90 | MYSQL_PASSWORD: php_user_password_here 91 | 92 | redis: 93 | image: "twosee/swoole-coroutine:redis" 94 | ports: 95 | - "9503:6379" 96 | volumes: 97 | - ./data/redis/data:/var/lib/redis:rw 98 | sysctls: 99 | net.core.somaxconn: 65535 100 | restart: always 101 | 102 | inotify: 103 | image: "twosee/swoole-coroutine:inotify" 104 | volumes: 105 | - ./:/app:rw 106 | restart: always 107 | environment: 108 | APP_ENV: dev # or product 109 | working_dir: /app/util 110 | command: /bin/bash inotify.sh 111 | ``` 112 | You can see [mysqld.cnf](https://github.com/twose/swoole-coroutine-docker/tree/master/mysql). 113 | 114 | ```php 115 | $options = [ 116 | 'host' => 'mysql', //So there. 117 | 'port' => 3306, 118 | 'user' => 'php', 119 | 'password' => 'php_user_password_here', 120 | 'database' => 'test' 121 | ]; 122 | $db = new \Swoole\Coroutine\Mysql(); 123 | $db->connect($options); 124 | ``` 125 | You can see [redis.conf](https://github.com/twose/swoole-coroutine-docker/tree/master/redis). 126 | ```php 127 | $redis = new \Swoole\Coroutine\Redis(); 128 | $redis->connect('redis', 6379); 129 | $val = $redis->get('foo'); 130 | ``` 131 | -------------------------------------------------------------------------------- /redis/redis.conf: -------------------------------------------------------------------------------- 1 | # Redis configuration file example. 
2 | # 3 | # Note that in order to read the configuration file, Redis must be 4 | # started with the file path as first argument: 5 | # 6 | # ./redis-server /path/to/redis.conf 7 | 8 | # Note on units: when memory size is needed, it is possible to specify 9 | # it in the usual form of 1k 5GB 4M and so forth: 10 | # 11 | # 1k => 1000 bytes 12 | # 1kb => 1024 bytes 13 | # 1m => 1000000 bytes 14 | # 1mb => 1024*1024 bytes 15 | # 1g => 1000000000 bytes 16 | # 1gb => 1024*1024*1024 bytes 17 | # 18 | # units are case insensitive so 1GB 1Gb 1gB are all the same. 19 | 20 | ################################## INCLUDES ################################### 21 | 22 | # Include one or more other config files here. This is useful if you 23 | # have a standard template that goes to all Redis servers but also need 24 | # to customize a few per-server settings. Include files can include 25 | # other files, so use this wisely. 26 | # 27 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE" 28 | # from admin or Redis Sentinel. Since Redis always uses the last processed 29 | # line as value of a configuration directive, you'd better put includes 30 | # at the beginning of this file to avoid overwriting config change at runtime. 31 | # 32 | # If instead you are interested in using includes to override configuration 33 | # options, it is better to use include as the last line. 34 | # 35 | # include /path/to/local.conf 36 | # include /path/to/other.conf 37 | 38 | ################################## MODULES ##################################### 39 | 40 | # Load modules at startup. If the server is not able to load modules 41 | # it will abort. It is possible to use multiple loadmodule directives. 
42 | # 43 | # loadmodule /path/to/my_module.so 44 | # loadmodule /path/to/other_module.so 45 | 46 | ################################## NETWORK ##################################### 47 | 48 | # By default, if no "bind" configuration directive is specified, Redis listens 49 | # for connections from all the network interfaces available on the server. 50 | # It is possible to listen to just one or multiple selected interfaces using 51 | # the "bind" configuration directive, followed by one or more IP addresses. 52 | # 53 | # Examples: 54 | # 55 | # bind 192.168.1.100 10.0.0.1 56 | # bind 127.0.0.1 ::1 57 | # 58 | # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the 59 | # internet, binding to all the interfaces is dangerous and will expose the 60 | # instance to everybody on the internet. So by default we uncomment the 61 | # following bind directive, that will force Redis to listen only into 62 | # the IPv4 lookback interface address (this means Redis will be able to 63 | # accept connections only from clients running into the same computer it 64 | # is running). 65 | # 66 | # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES 67 | # JUST COMMENT THE FOLLOWING LINE. 68 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | bind 0.0.0.0 70 | 71 | # Protected mode is a layer of security protection, in order to avoid that 72 | # Redis instances left open on the internet are accessed and exploited. 73 | # 74 | # When protected mode is on and if: 75 | # 76 | # 1) The server is not binding explicitly to a set of addresses using the 77 | # "bind" directive. 78 | # 2) No password is configured. 79 | # 80 | # The server only accepts connections from clients connecting from the 81 | # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain 82 | # sockets. 83 | # 84 | # By default protected mode is enabled. 
You should disable it only if 85 | # you are sure you want clients from other hosts to connect to Redis 86 | # even if no authentication is configured, nor a specific set of interfaces 87 | # are explicitly listed using the "bind" directive. 88 | protected-mode yes 89 | 90 | # Accept connections on the specified port, default is 6379 (IANA #815344). 91 | # If port 0 is specified Redis will not listen on a TCP socket. 92 | port 6379 93 | 94 | # TCP listen() backlog. 95 | # 96 | # In high requests-per-second environments you need an high backlog in order 97 | # to avoid slow clients connections issues. Note that the Linux kernel 98 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so 99 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog 100 | # in order to get the desired effect. 101 | tcp-backlog 511 102 | 103 | # Unix socket. 104 | # 105 | # Specify the path for the Unix socket that will be used to listen for 106 | # incoming connections. There is no default, so Redis will not listen 107 | # on a unix socket when not specified. 108 | # 109 | unixsocket /var/run/redis/redis.sock 110 | # unixsocketperm 700 111 | 112 | # Close the connection after a client is idle for N seconds (0 to disable) 113 | timeout 0 114 | 115 | # TCP keepalive. 116 | # 117 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 118 | # of communication. This is useful for two reasons: 119 | # 120 | # 1) Detect dead peers. 121 | # 2) Take the connection alive from the point of view of network 122 | # equipment in the middle. 123 | # 124 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 125 | # Note that to close the connection the double of the time is needed. 126 | # On other kernels the period depends on the kernel configuration. 127 | # 128 | # A reasonable value for this option is 300 seconds, which is the new 129 | # Redis default starting with Redis 3.2.1. 
130 | tcp-keepalive 300 131 | 132 | ################################# GENERAL ##################################### 133 | 134 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 135 | # Note that Redis will write a pid file in /usr/local/var/run/redis.pid when daemonized. 136 | daemonize no 137 | 138 | # If you run Redis from upstart or systemd, Redis can interact with your 139 | # supervision tree. Options: 140 | # supervised no - no supervision interaction 141 | # supervised upstart - signal upstart by putting Redis into SIGSTOP mode 142 | # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET 143 | # supervised auto - detect upstart or systemd method based on 144 | # UPSTART_JOB or NOTIFY_SOCKET environment variables 145 | # Note: these supervision methods only signal "process is ready." 146 | # They do not enable continuous liveness pings back to your supervisor. 147 | supervised no 148 | 149 | # If a pid file is specified, Redis writes it where specified at startup 150 | # and removes it at exit. 151 | # 152 | # When the server runs non daemonized, no pid file is created if none is 153 | # specified in the configuration. When the server is daemonized, the pid file 154 | # is used even if not specified, defaulting to "/usr/local/var/run/redis.pid". 155 | # 156 | # Creating a pid file is best effort: if Redis is not able to create it 157 | # nothing bad happens, the server will start and run normally. 158 | pidfile /var/lib/redis/redis_6379.pid 159 | 160 | # Specify the server verbosity level. 161 | # This can be one of: 162 | # debug (a lot of information, useful for development/testing) 163 | # verbose (many rarely useful info, but not a mess like the debug level) 164 | # notice (moderately verbose, what you want in production probably) 165 | # warning (only very important / critical messages are logged) 166 | loglevel notice 167 | 168 | # Specify the log file name. 
Also the empty string can be used to force 169 | # Redis to log on the standard output. Note that if you use standard 170 | # output for logging but daemonize, logs will be sent to /dev/null 171 | logfile "/var/lib/redis/redis.log" 172 | 173 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 174 | # and optionally update the other syslog parameters to suit your needs. 175 | # syslog-enabled no 176 | 177 | # Specify the syslog identity. 178 | # syslog-ident redis 179 | 180 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 181 | # syslog-facility local0 182 | 183 | # Set the number of databases. The default database is DB 0, you can select 184 | # a different one on a per-connection basis using SELECT where 185 | # dbid is a number between 0 and 'databases'-1 186 | databases 16 187 | 188 | # By default Redis shows an ASCII art logo only when started to log to the 189 | # standard output and if the standard output is a TTY. Basically this means 190 | # that normally a logo is displayed only in interactive sessions. 191 | # 192 | # However it is possible to force the pre-4.0 behavior and always show a 193 | # ASCII art logo in startup logs by setting the following option to yes. 194 | always-show-logo yes 195 | 196 | ################################ SNAPSHOTTING ################################ 197 | # 198 | # Save the DB on disk: 199 | # 200 | # save 201 | # 202 | # Will save the DB if both the given number of seconds and the given 203 | # number of write operations against the DB occurred. 204 | # 205 | # In the example below the behaviour will be to save: 206 | # after 900 sec (15 min) if at least 1 key changed 207 | # after 300 sec (5 min) if at least 10 keys changed 208 | # after 60 sec if at least 10000 keys changed 209 | # 210 | # Note: you can disable saving completely by commenting out all "save" lines. 
211 | # 212 | # It is also possible to remove all the previously configured save 213 | # points by adding a save directive with a single empty string argument 214 | # like in the following example: 215 | # 216 | # save "" 217 | 218 | save 300 1 219 | save 100 10 220 | save 10 10000 221 | 222 | # By default Redis will stop accepting writes if RDB snapshots are enabled 223 | # (at least one save point) and the latest background save failed. 224 | # This will make the user aware (in a hard way) that data is not persisting 225 | # on disk properly, otherwise chances are that no one will notice and some 226 | # disaster will happen. 227 | # 228 | # If the background saving process will start working again Redis will 229 | # automatically allow writes again. 230 | # 231 | # However if you have setup your proper monitoring of the Redis server 232 | # and persistence, you may want to disable this feature so that Redis will 233 | # continue to work as usual even if there are problems with disk, 234 | # permissions, and so forth. 235 | stop-writes-on-bgsave-error yes 236 | 237 | # Compress string objects using LZF when dump .rdb databases? 238 | # For default that's set to 'yes' as it's almost always a win. 239 | # If you want to save some CPU in the saving child set it to 'no' but 240 | # the dataset will likely be bigger if you have compressible values or keys. 241 | rdbcompression yes 242 | 243 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 244 | # This makes the format more resistant to corruption but there is a performance 245 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it 246 | # for maximum performances. 247 | # 248 | # RDB files created with checksum disabled have a checksum of zero that will 249 | # tell the loading code to skip the check. 250 | rdbchecksum yes 251 | 252 | # The filename where to dump the DB 253 | dbfilename dump.rdb 254 | 255 | # The working directory. 
256 | # 257 | # The DB will be written inside this directory, with the filename specified 258 | # above using the 'dbfilename' configuration directive. 259 | # 260 | # The Append Only File will also be created inside this directory. 261 | # 262 | # Note that you must specify a directory here, not a file name. 263 | dir /var/lib/redis/ 264 | 265 | ################################# REPLICATION ################################# 266 | 267 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of 268 | # another Redis server. A few things to understand ASAP about Redis replication. 269 | # 270 | # 1) Redis replication is asynchronous, but you can configure a master to 271 | # stop accepting writes if it appears to be not connected with at least 272 | # a given number of slaves. 273 | # 2) Redis slaves are able to perform a partial resynchronization with the 274 | # master if the replication link is lost for a relatively small amount of 275 | # time. You may want to configure the replication backlog size (see the next 276 | # sections of this file) with a sensible value depending on your needs. 277 | # 3) Replication is automatic and does not need user intervention. After a 278 | # network partition slaves automatically try to reconnect to masters 279 | # and resynchronize with them. 280 | # 281 | # slaveof <masterip> <masterport> 282 | 283 | # If the master is password protected (using the "requirepass" configuration 284 | # directive below) it is possible to tell the slave to authenticate before 285 | # starting the replication synchronization process, otherwise the master will 286 | # refuse the slave request. 
287 | # 288 | # masterauth <master-password> 289 | 290 | # When a slave loses its connection with the master, or when the replication 291 | # is still in progress, the slave can act in two different ways: 292 | # 293 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will 294 | # still reply to client requests, possibly with out of date data, or the 295 | # data set may just be empty if this is the first synchronization. 296 | # 297 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with 298 | # an error "SYNC with master in progress" to all the kind of commands 299 | # but to INFO and SLAVEOF. 300 | # 301 | slave-serve-stale-data yes 302 | 303 | # You can configure a slave instance to accept writes or not. Writing against 304 | # a slave instance may be useful to store some ephemeral data (because data 305 | # written on a slave will be easily deleted after resync with the master) but 306 | # may also cause problems if clients are writing to it because of a 307 | # misconfiguration. 308 | # 309 | # Since Redis 2.6 by default slaves are read-only. 310 | # 311 | # Note: read only slaves are not designed to be exposed to untrusted clients 312 | # on the internet. It's just a protection layer against misuse of the instance. 313 | # Still a read only slave exports by default all the administrative commands 314 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve 315 | # security of read only slaves using 'rename-command' to shadow all the 316 | # administrative / dangerous commands. 317 | slave-read-only yes 318 | 319 | # Replication SYNC strategy: disk or socket. 
320 | # 321 | # ------------------------------------------------------- 322 | # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY 323 | # ------------------------------------------------------- 324 | # 325 | # New slaves and reconnecting slaves that are not able to continue the replication 326 | # process just receiving differences, need to do what is called a "full 327 | # synchronization". An RDB file is transmitted from the master to the slaves. 328 | # The transmission can happen in two different ways: 329 | # 330 | # 1) Disk-backed: The Redis master creates a new process that writes the RDB 331 | # file on disk. Later the file is transferred by the parent 332 | # process to the slaves incrementally. 333 | # 2) Diskless: The Redis master creates a new process that directly writes the 334 | # RDB file to slave sockets, without touching the disk at all. 335 | # 336 | # With disk-backed replication, while the RDB file is generated, more slaves 337 | # can be queued and served with the RDB file as soon as the current child producing 338 | # the RDB file finishes its work. With diskless replication instead once 339 | # the transfer starts, new slaves arriving will be queued and a new transfer 340 | # will start when the current one terminates. 341 | # 342 | # When diskless replication is used, the master waits a configurable amount of 343 | # time (in seconds) before starting the transfer in the hope that multiple slaves 344 | # will arrive and the transfer can be parallelized. 345 | # 346 | # With slow disks and fast (large bandwidth) networks, diskless replication 347 | # works better. 348 | repl-diskless-sync no 349 | 350 | # When diskless replication is enabled, it is possible to configure the delay 351 | # the server waits in order to spawn the child that transfers the RDB via socket 352 | # to the slaves. 
353 | # 354 | # This is important since once the transfer starts, it is not possible to serve 355 | # new slaves arriving, that will be queued for the next RDB transfer, so the server 356 | # waits a delay in order to let more slaves arrive. 357 | # 358 | # The delay is specified in seconds, and by default is 5 seconds. To disable 359 | # it entirely just set it to 0 seconds and the transfer will start ASAP. 360 | repl-diskless-sync-delay 5 361 | 362 | # Slaves send PINGs to server in a predefined interval. It's possible to change 363 | # this interval with the repl_ping_slave_period option. The default value is 10 364 | # seconds. 365 | # 366 | # repl-ping-slave-period 10 367 | 368 | # The following option sets the replication timeout for: 369 | # 370 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave. 371 | # 2) Master timeout from the point of view of slaves (data, pings). 372 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). 373 | # 374 | # It is important to make sure that this value is greater than the value 375 | # specified for repl-ping-slave-period otherwise a timeout will be detected 376 | # every time there is low traffic between the master and the slave. 377 | # 378 | # repl-timeout 60 379 | 380 | # Disable TCP_NODELAY on the slave socket after SYNC? 381 | # 382 | # If you select "yes" Redis will use a smaller number of TCP packets and 383 | # less bandwidth to send data to slaves. But this can add a delay for 384 | # the data to appear on the slave side, up to 40 milliseconds with 385 | # Linux kernels using a default configuration. 386 | # 387 | # If you select "no" the delay for data to appear on the slave side will 388 | # be reduced but more bandwidth will be used for replication. 389 | # 390 | # By default we optimize for low latency, but in very high traffic conditions 391 | # or when the master and slaves are many hops away, turning this to "yes" may 392 | # be a good idea. 
393 | repl-disable-tcp-nodelay no 394 | 395 | # Set the replication backlog size. The backlog is a buffer that accumulates 396 | # slave data when slaves are disconnected for some time, so that when a slave 397 | # wants to reconnect again, often a full resync is not needed, but a partial 398 | # resync is enough, just passing the portion of data the slave missed while 399 | # disconnected. 400 | # 401 | # The bigger the replication backlog, the longer the time the slave can be 402 | # disconnected and later be able to perform a partial resynchronization. 403 | # 404 | # The backlog is only allocated once there is at least a slave connected. 405 | # 406 | # repl-backlog-size 1mb 407 | 408 | # After a master has no longer connected slaves for some time, the backlog 409 | # will be freed. The following option configures the amount of seconds that 410 | # need to elapse, starting from the time the last slave disconnected, for 411 | # the backlog buffer to be freed. 412 | # 413 | # Note that slaves never free the backlog for timeout, since they may be 414 | # promoted to masters later, and should be able to correctly "partially 415 | # resynchronize" with the slaves: hence they should always accumulate backlog. 416 | # 417 | # A value of 0 means to never release the backlog. 418 | # 419 | # repl-backlog-ttl 3600 420 | 421 | # The slave priority is an integer number published by Redis in the INFO output. 422 | # It is used by Redis Sentinel in order to select a slave to promote into a 423 | # master if the master is no longer working correctly. 424 | # 425 | # A slave with a low priority number is considered better for promotion, so 426 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will 427 | # pick the one with priority 10, that is the lowest. 
428 | # 429 | # However a special priority of 0 marks the slave as not able to perform the 430 | # role of master, so a slave with priority of 0 will never be selected by 431 | # Redis Sentinel for promotion. 432 | # 433 | # By default the priority is 100. 434 | slave-priority 100 435 | 436 | # It is possible for a master to stop accepting writes if there are less than 437 | # N slaves connected, having a lag less or equal than M seconds. 438 | # 439 | # The N slaves need to be in "online" state. 440 | # 441 | # The lag in seconds, that must be <= the specified value, is calculated from 442 | # the last ping received from the slave, that is usually sent every second. 443 | # 444 | # This option does not GUARANTEE that N replicas will accept the write, but 445 | # will limit the window of exposure for lost writes in case not enough slaves 446 | # are available, to the specified number of seconds. 447 | # 448 | # For example to require at least 3 slaves with a lag <= 10 seconds use: 449 | # 450 | # min-slaves-to-write 3 451 | # min-slaves-max-lag 10 452 | # 453 | # Setting one or the other to 0 disables the feature. 454 | # 455 | # By default min-slaves-to-write is set to 0 (feature disabled) and 456 | # min-slaves-max-lag is set to 10. 457 | 458 | # A Redis master is able to list the address and port of the attached 459 | # slaves in different ways. For example the "INFO replication" section 460 | # offers this information, which is used, among other tools, by 461 | # Redis Sentinel in order to discover slave instances. 462 | # Another place where this info is available is in the output of the 463 | # "ROLE" command of a master. 464 | # 465 | # The listed IP and address normally reported by a slave is obtained 466 | # in the following way: 467 | # 468 | # IP: The address is auto detected by checking the peer address 469 | # of the socket used by the slave to connect with the master. 
470 | # 471 | # Port: The port is communicated by the slave during the replication 472 | # handshake, and is normally the port that the slave is using to 473 | # listen for connections. 474 | # 475 | # However when port forwarding or Network Address Translation (NAT) is 476 | # used, the slave may be actually reachable via different IP and port 477 | # pairs. The following two options can be used by a slave in order to 478 | # report to its master a specific set of IP and port, so that both INFO 479 | # and ROLE will report those values. 480 | # 481 | # There is no need to use both the options if you need to override just 482 | # the port or the IP address. 483 | # 484 | # slave-announce-ip 5.5.5.5 485 | # slave-announce-port 1234 486 | 487 | ################################## SECURITY ################################### 488 | 489 | # Require clients to issue AUTH before processing any other 490 | # commands. This might be useful in environments in which you do not trust 491 | # others with access to the host running redis-server. 492 | # 493 | # This should stay commented out for backward compatibility and because most 494 | # people do not need auth (e.g. they run their own servers). 495 | # 496 | # Warning: since Redis is pretty fast an outside user can try up to 497 | # 150k passwords per second against a good box. This means that you should 498 | # use a very strong password otherwise it will be very easy to break. 499 | # 500 | # requirepass foobared 501 | 502 | # Command renaming. 503 | # 504 | # It is possible to change the name of dangerous commands in a shared 505 | # environment. For instance the CONFIG command may be renamed into something 506 | # hard to guess so that it will still be available for internal-use tools 507 | # but not available for general clients. 
508 | # 509 | # Example: 510 | # 511 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 512 | # 513 | # It is also possible to completely kill a command by renaming it into 514 | # an empty string: 515 | # 516 | # rename-command CONFIG "" 517 | # 518 | # Please note that changing the name of commands that are logged into the 519 | # AOF file or transmitted to slaves may cause problems. 520 | 521 | ################################### CLIENTS #################################### 522 | 523 | # Set the max number of connected clients at the same time. By default 524 | # this limit is set to 10000 clients, however if the Redis server is not 525 | # able to configure the process file limit to allow for the specified limit 526 | # the max number of allowed clients is set to the current file limit 527 | # minus 32 (as Redis reserves a few file descriptors for internal uses). 528 | # 529 | # Once the limit is reached Redis will close all the new connections sending 530 | # an error 'max number of clients reached'. 531 | # 532 | # maxclients 10000 533 | 534 | ############################## MEMORY MANAGEMENT ################################ 535 | 536 | # Set a memory usage limit to the specified amount of bytes. 537 | # When the memory limit is reached Redis will try to remove keys 538 | # according to the eviction policy selected (see maxmemory-policy). 539 | # 540 | # If Redis can't remove keys according to the policy, or if the policy is 541 | # set to 'noeviction', Redis will start to reply with errors to commands 542 | # that would use more memory, like SET, LPUSH, and so on, and will continue 543 | # to reply to read-only commands like GET. 544 | # 545 | # This option is usually useful when using Redis as an LRU or LFU cache, or to 546 | # set a hard memory limit for an instance (using the 'noeviction' policy). 
547 | # 548 | # WARNING: If you have slaves attached to an instance with maxmemory on, 549 | # the size of the output buffers needed to feed the slaves are subtracted 550 | # from the used memory count, so that network problems / resyncs will 551 | # not trigger a loop where keys are evicted, and in turn the output 552 | # buffer of slaves is full with DELs of keys evicted triggering the deletion 553 | # of more keys, and so forth until the database is completely emptied. 554 | # 555 | # In short... if you have slaves attached it is suggested that you set a lower 556 | # limit for maxmemory so that there is some free RAM on the system for slave 557 | # output buffers (but this is not needed if the policy is 'noeviction'). 558 | # 559 | # maxmemory <bytes> 560 | 561 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 562 | # is reached. You can select among five behaviors: 563 | # 564 | # volatile-lru -> Evict using approximated LRU among the keys with an expire set. 565 | # allkeys-lru -> Evict any key using approximated LRU. 566 | # volatile-lfu -> Evict using approximated LFU among the keys with an expire set. 567 | # allkeys-lfu -> Evict any key using approximated LFU. 568 | # volatile-random -> Remove a random key among the ones with an expire set. 569 | # allkeys-random -> Remove a random key, any key. 570 | # volatile-ttl -> Remove the key with the nearest expire time (minor TTL) 571 | # noeviction -> Don't evict anything, just return an error on write operations. 572 | # 573 | # LRU means Least Recently Used 574 | # LFU means Least Frequently Used 575 | # 576 | # Both LRU, LFU and volatile-ttl are implemented using approximated 577 | # randomized algorithms. 578 | # 579 | # Note: with any of the above policies, Redis will return an error on write 580 | # operations, when there are no suitable keys for eviction. 
581 | # 582 | # At the date of writing these commands are: set setnx setex append 583 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd 584 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby 585 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby 586 | # getset mset msetnx exec sort 587 | # 588 | # The default is: 589 | # 590 | # maxmemory-policy noeviction 591 | 592 | # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated 593 | # algorithms (in order to save memory), so you can tune it for speed or 594 | # accuracy. For default Redis will check five keys and pick the one that was 595 | # used less recently, you can change the sample size using the following 596 | # configuration directive. 597 | # 598 | # The default of 5 produces good enough results. 10 Approximates very closely 599 | # true LRU but costs more CPU. 3 is faster but not very accurate. 600 | # 601 | # maxmemory-samples 5 602 | 603 | ############################# LAZY FREEING #################################### 604 | 605 | # Redis has two primitives to delete keys. One is called DEL and is a blocking 606 | # deletion of the object. It means that the server stops processing new commands 607 | # in order to reclaim all the memory associated with an object in a synchronous 608 | # way. If the key deleted is associated with a small object, the time needed 609 | # in order to execute the DEL command is very small and comparable to most other 610 | # O(1) or O(log_N) commands in Redis. However if the key is associated with an 611 | # aggregated value containing millions of elements, the server can block for 612 | # a long time (even seconds) in order to complete the operation. 613 | # 614 | # For the above reasons Redis also offers non blocking deletion primitives 615 | # such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and 616 | # FLUSHDB commands, in order to reclaim memory in background. 
Those commands 617 | # are executed in constant time. Another thread will incrementally free the 618 | # object in the background as fast as possible. 619 | # 620 | # DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. 621 | # It's up to the design of the application to understand when it is a good 622 | # idea to use one or the other. However the Redis server sometimes has to 623 | # delete keys or flush the whole database as a side effect of other operations. 624 | # Specifically Redis deletes objects independently of a user call in the 625 | # following scenarios: 626 | # 627 | # 1) On eviction, because of the maxmemory and maxmemory policy configurations, 628 | # in order to make room for new data, without going over the specified 629 | # memory limit. 630 | # 2) Because of expire: when a key with an associated time to live (see the 631 | # EXPIRE command) must be deleted from memory. 632 | # 3) Because of a side effect of a command that stores data on a key that may 633 | # already exist. For example the RENAME command may delete the old key 634 | # content when it is replaced with another one. Similarly SUNIONSTORE 635 | # or SORT with STORE option may delete existing keys. The SET command 636 | # itself removes any old content of the specified key in order to replace 637 | # it with the specified string. 638 | # 4) During replication, when a slave performs a full resynchronization with 639 | # its master, the content of the whole database is removed in order to 640 | # load the RDB file just transfered. 641 | # 642 | # In all the above cases the default is to delete objects in a blocking way, 643 | # like if DEL was called. 
However you can configure each case specifically 644 | # in order to instead release memory in a non-blocking way like if UNLINK 645 | # was called, using the following configuration directives: 646 | 647 | lazyfree-lazy-eviction no 648 | lazyfree-lazy-expire no 649 | lazyfree-lazy-server-del no 650 | slave-lazy-flush no 651 | 652 | ############################## APPEND ONLY MODE ############################### 653 | 654 | # By default Redis asynchronously dumps the dataset on disk. This mode is 655 | # good enough in many applications, but an issue with the Redis process or 656 | # a power outage may result into a few minutes of writes lost (depending on 657 | # the configured save points). 658 | # 659 | # The Append Only File is an alternative persistence mode that provides 660 | # much better durability. For instance using the default data fsync policy 661 | # (see later in the config file) Redis can lose just one second of writes in a 662 | # dramatic event like a server power outage, or a single write if something 663 | # wrong with the Redis process itself happens, but the operating system is 664 | # still running correctly. 665 | # 666 | # AOF and RDB persistence can be enabled at the same time without problems. 667 | # If the AOF is enabled on startup Redis will load the AOF, that is the file 668 | # with the better durability guarantees. 669 | # 670 | # Please check http://redis.io/topics/persistence for more information. 671 | 672 | appendonly yes 673 | 674 | # The name of the append only file (default: "appendonly.aof") 675 | 676 | appendfilename "appendonly.aof" 677 | 678 | # The fsync() call tells the Operating System to actually write data on disk 679 | # instead of waiting for more data in the output buffer. Some OS will really flush 680 | # data on disk, some other OS will just try to do it ASAP. 681 | # 682 | # Redis supports three different modes: 683 | # 684 | # no: don't fsync, just let the OS flush the data when it wants. Faster. 
685 | # always: fsync after every write to the append only log. Slow, Safest. 686 | # everysec: fsync only one time every second. Compromise. 687 | # 688 | # The default is "everysec", as that's usually the right compromise between 689 | # speed and data safety. It's up to you to understand if you can relax this to 690 | # "no" that will let the operating system flush the output buffer when 691 | # it wants, for better performances (but if you can live with the idea of 692 | # some data loss consider the default persistence mode that's snapshotting), 693 | # or on the contrary, use "always" that's very slow but a bit safer than 694 | # everysec. 695 | # 696 | # More details please check the following article: 697 | # http://antirez.com/post/redis-persistence-demystified.html 698 | # 699 | # If unsure, use "everysec". 700 | 701 | # appendfsync always 702 | appendfsync everysec 703 | # appendfsync no 704 | 705 | # When the AOF fsync policy is set to always or everysec, and a background 706 | # saving process (a background save or AOF log background rewriting) is 707 | # performing a lot of I/O against the disk, in some Linux configurations 708 | # Redis may block too long on the fsync() call. Note that there is no fix for 709 | # this currently, as even performing fsync in a different thread will block 710 | # our synchronous write(2) call. 711 | # 712 | # In order to mitigate this problem it's possible to use the following option 713 | # that will prevent fsync() from being called in the main process while a 714 | # BGSAVE or BGREWRITEAOF is in progress. 715 | # 716 | # This means that while another child is saving, the durability of Redis is 717 | # the same as "appendfsync none". In practical terms, this means that it is 718 | # possible to lose up to 30 seconds of log in the worst scenario (with the 719 | # default Linux settings). 720 | # 721 | # If you have latency problems turn this to "yes". 
Otherwise leave it as 722 | # "no" that is the safest pick from the point of view of durability. 723 | 724 | no-appendfsync-on-rewrite no 725 | 726 | # Automatic rewrite of the append only file. 727 | # Redis is able to automatically rewrite the log file implicitly calling 728 | # BGREWRITEAOF when the AOF log size grows by the specified percentage. 729 | # 730 | # This is how it works: Redis remembers the size of the AOF file after the 731 | # latest rewrite (if no rewrite has happened since the restart, the size of 732 | # the AOF at startup is used). 733 | # 734 | # This base size is compared to the current size. If the current size is 735 | # bigger than the specified percentage, the rewrite is triggered. Also 736 | # you need to specify a minimal size for the AOF file to be rewritten, this 737 | # is useful to avoid rewriting the AOF file even if the percentage increase 738 | # is reached but it is still pretty small. 739 | # 740 | # Specify a percentage of zero in order to disable the automatic AOF 741 | # rewrite feature. 742 | 743 | auto-aof-rewrite-percentage 100 744 | auto-aof-rewrite-min-size 64mb 745 | 746 | # An AOF file may be found to be truncated at the end during the Redis 747 | # startup process, when the AOF data gets loaded back into memory. 748 | # This may happen when the system where Redis is running 749 | # crashes, especially when an ext4 filesystem is mounted without the 750 | # data=ordered option (however this can't happen when Redis itself 751 | # crashes or aborts but the operating system still works correctly). 752 | # 753 | # Redis can either exit with an error when this happens, or load as much 754 | # data as possible (the default now) and start if the AOF file is found 755 | # to be truncated at the end. The following option controls this behavior. 756 | # 757 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and 758 | # the Redis server starts emitting a log to inform the user of the event. 
759 | # Otherwise if the option is set to no, the server aborts with an error 760 | # and refuses to start. When the option is set to no, the user requires 761 | # to fix the AOF file using the "redis-check-aof" utility before to restart 762 | # the server. 763 | # 764 | # Note that if the AOF file will be found to be corrupted in the middle 765 | # the server will still exit with an error. This option only applies when 766 | # Redis will try to read more data from the AOF file but not enough bytes 767 | # will be found. 768 | aof-load-truncated yes 769 | 770 | # When rewriting the AOF file, Redis is able to use an RDB preamble in the 771 | # AOF file for faster rewrites and recoveries. When this option is turned 772 | # on the rewritten AOF file is composed of two different stanzas: 773 | # 774 | # [RDB file][AOF tail] 775 | # 776 | # When loading Redis recognizes that the AOF file starts with the "REDIS" 777 | # string and loads the prefixed RDB file, and continues loading the AOF 778 | # tail. 779 | # 780 | # This is currently turned off by default in order to avoid the surprise 781 | # of a format change, but will at some point be used as the default. 782 | aof-use-rdb-preamble no 783 | 784 | ################################ LUA SCRIPTING ############################### 785 | 786 | # Max execution time of a Lua script in milliseconds. 787 | # 788 | # If the maximum execution time is reached Redis will log that a script is 789 | # still in execution after the maximum allowed time and will start to 790 | # reply to queries with an error. 791 | # 792 | # When a long running script exceeds the maximum execution time only the 793 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be 794 | # used to stop a script that did not yet called write commands. 
The second 795 | # is the only way to shut down the server in the case a write command was 796 | # already issued by the script but the user doesn't want to wait for the natural 797 | # termination of the script. 798 | # 799 | # Set it to 0 or a negative value for unlimited execution without warnings. 800 | lua-time-limit 5000 801 | 802 | ################################ REDIS CLUSTER ############################### 803 | # 804 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 805 | # WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however 806 | # in order to mark it as "mature" we need to wait for a non trivial percentage 807 | # of users to deploy it in production. 808 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 809 | # 810 | # Normal Redis instances can't be part of a Redis Cluster; only nodes that are 811 | # started as cluster nodes can. In order to start a Redis instance as a 812 | # cluster node enable the cluster support uncommenting the following: 813 | # 814 | # cluster-enabled yes 815 | 816 | # Every cluster node has a cluster configuration file. This file is not 817 | # intended to be edited by hand. It is created and updated by Redis nodes. 818 | # Every Redis Cluster node requires a different cluster configuration file. 819 | # Make sure that instances running in the same system do not have 820 | # overlapping cluster configuration file names. 821 | # 822 | # cluster-config-file nodes-6379.conf 823 | 824 | # Cluster node timeout is the amount of milliseconds a node must be unreachable 825 | # for it to be considered in failure state. 826 | # Most other internal time limits are multiple of the node timeout. 827 | # 828 | # cluster-node-timeout 15000 829 | 830 | # A slave of a failing master will avoid to start a failover if its data 831 | # looks too old. 
832 | # 833 | # There is no simple way for a slave to actually have an exact measure of 834 | # its "data age", so the following two checks are performed: 835 | # 836 | # 1) If there are multiple slaves able to failover, they exchange messages 837 | # in order to try to give an advantage to the slave with the best 838 | # replication offset (more data from the master processed). 839 | # Slaves will try to get their rank by offset, and apply to the start 840 | # of the failover a delay proportional to their rank. 841 | # 842 | # 2) Every single slave computes the time of the last interaction with 843 | # its master. This can be the last ping or command received (if the master 844 | # is still in the "connected" state), or the time that elapsed since the 845 | # disconnection with the master (if the replication link is currently down). 846 | # If the last interaction is too old, the slave will not try to failover 847 | # at all. 848 | # 849 | # The point "2" can be tuned by user. Specifically a slave will not perform 850 | # the failover if, since the last interaction with the master, the time 851 | # elapsed is greater than: 852 | # 853 | # (node-timeout * slave-validity-factor) + repl-ping-slave-period 854 | # 855 | # So for example if node-timeout is 30 seconds, and the slave-validity-factor 856 | # is 10, and assuming a default repl-ping-slave-period of 10 seconds, the 857 | # slave will not try to failover if it was not able to talk with the master 858 | # for longer than 310 seconds. 859 | # 860 | # A large slave-validity-factor may allow slaves with too old data to failover 861 | # a master, while a too small value may prevent the cluster from being able to 862 | # elect a slave at all. 863 | # 864 | # For maximum availability, it is possible to set the slave-validity-factor 865 | # to a value of 0, which means, that slaves will always try to failover the 866 | # master regardless of the last time they interacted with the master. 
867 | # (However they'll always try to apply a delay proportional to their 868 | # offset rank). 869 | # 870 | # Zero is the only value able to guarantee that when all the partitions heal 871 | # the cluster will always be able to continue. 872 | # 873 | # cluster-slave-validity-factor 10 874 | 875 | # Cluster slaves are able to migrate to orphaned masters, that are masters 876 | # that are left without working slaves. This improves the cluster ability 877 | # to resist to failures as otherwise an orphaned master can't be failed over 878 | # in case of failure if it has no working slaves. 879 | # 880 | # Slaves migrate to orphaned masters only if there are still at least a 881 | # given number of other working slaves for their old master. This number 882 | # is the "migration barrier". A migration barrier of 1 means that a slave 883 | # will migrate only if there is at least 1 other working slave for its master 884 | # and so forth. It usually reflects the number of slaves you want for every 885 | # master in your cluster. 886 | # 887 | # Default is 1 (slaves migrate only if their masters remain with at least 888 | # one slave). To disable migration just set it to a very large value. 889 | # A value of 0 can be set but is useful only for debugging and dangerous 890 | # in production. 891 | # 892 | # cluster-migration-barrier 1 893 | 894 | # By default Redis Cluster nodes stop accepting queries if they detect there 895 | # is at least an hash slot uncovered (no available node is serving it). 896 | # This way if the cluster is partially down (for example a range of hash slots 897 | # are no longer covered) all the cluster becomes, eventually, unavailable. 898 | # It automatically returns available as soon as all the slots are covered again. 899 | # 900 | # However sometimes you want the subset of the cluster which is working, 901 | # to continue to accept queries for the part of the key space that is still 902 | # covered. 
In order to do so, just set the cluster-require-full-coverage 903 | # option to no. 904 | # 905 | # cluster-require-full-coverage yes 906 | 907 | # In order to setup your cluster make sure to read the documentation 908 | # available at http://redis.io web site. 909 | 910 | ########################## CLUSTER DOCKER/NAT support ######################## 911 | 912 | # In certain deployments, Redis Cluster nodes address discovery fails, because 913 | # addresses are NAT-ted or because ports are forwarded (the typical case is 914 | # Docker and other containers). 915 | # 916 | # In order to make Redis Cluster working in such environments, a static 917 | # configuration where each node knows its public address is needed. The 918 | # following two options are used for this scope, and are: 919 | # 920 | # * cluster-announce-ip 921 | # * cluster-announce-port 922 | # * cluster-announce-bus-port 923 | # 924 | # Each instruct the node about its address, client port, and cluster message 925 | # bus port. The information is then published in the header of the bus packets 926 | # so that other nodes will be able to correctly map the address of the node 927 | # publishing the information. 928 | # 929 | # If the above options are not used, the normal Redis Cluster auto-detection 930 | # will be used instead. 931 | # 932 | # Note that when remapped, the bus port may not be at the fixed offset of 933 | # clients port + 10000, so you can specify any port and bus-port depending 934 | # on how they get remapped. If the bus-port is not set, a fixed offset of 935 | # 10000 will be used as usually. 936 | # 937 | # Example: 938 | # 939 | # cluster-announce-ip 10.1.1.5 940 | # cluster-announce-port 6379 941 | # cluster-announce-bus-port 6380 942 | 943 | ################################## SLOW LOG ################################### 944 | 945 | # The Redis Slow Log is a system to log queries that exceeded a specified 946 | # execution time. 
The execution time does not include the I/O operations 947 | # like talking with the client, sending the reply and so forth, 948 | # but just the time needed to actually execute the command (this is the only 949 | # stage of command execution where the thread is blocked and can not serve 950 | # other requests in the meantime). 951 | # 952 | # You can configure the slow log with two parameters: one tells Redis 953 | # what is the execution time, in microseconds, to exceed in order for the 954 | # command to get logged, and the other parameter is the length of the 955 | # slow log. When a new command is logged the oldest one is removed from the 956 | # queue of logged commands. 957 | 958 | # The following time is expressed in microseconds, so 1000000 is equivalent 959 | # to one second. Note that a negative number disables the slow log, while 960 | # a value of zero forces the logging of every command. 961 | slowlog-log-slower-than 10000 962 | 963 | # There is no limit to this length. Just be aware that it will consume memory. 964 | # You can reclaim memory used by the slow log with SLOWLOG RESET. 965 | slowlog-max-len 128 966 | 967 | ################################ LATENCY MONITOR ############################## 968 | 969 | # The Redis latency monitoring subsystem samples different operations 970 | # at runtime in order to collect data related to possible sources of 971 | # latency of a Redis instance. 972 | # 973 | # Via the LATENCY command this information is available to the user that can 974 | # print graphs and obtain reports. 975 | # 976 | # The system only logs operations that were performed in a time equal or 977 | # greater than the amount of milliseconds specified via the 978 | # latency-monitor-threshold configuration directive. When its value is set 979 | # to zero, the latency monitor is turned off. 
980 | # 981 | # By default latency monitoring is disabled since it is mostly not needed 982 | # if you don't have latency issues, and collecting data has a performance 983 | # impact, that while very small, can be measured under big load. Latency 984 | # monitoring can easily be enabled at runtime using the command 985 | # "CONFIG SET latency-monitor-threshold <milliseconds>" if needed. 986 | latency-monitor-threshold 0 987 | 988 | ############################# EVENT NOTIFICATION ############################## 989 | 990 | # Redis can notify Pub/Sub clients about events happening in the key space. 991 | # This feature is documented at http://redis.io/topics/notifications 992 | # 993 | # For instance if keyspace events notification is enabled, and a client 994 | # performs a DEL operation on key "foo" stored in the Database 0, two 995 | # messages will be published via Pub/Sub: 996 | # 997 | # PUBLISH __keyspace@0__:foo del 998 | # PUBLISH __keyevent@0__:del foo 999 | # 1000 | # It is possible to select the events that Redis will notify among a set 1001 | # of classes. Every class is identified by a single character: 1002 | # 1003 | # K Keyspace events, published with __keyspace@<db>__ prefix. 1004 | # E Keyevent events, published with __keyevent@<db>__ prefix. 1005 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... 1006 | # $ String commands 1007 | # l List commands 1008 | # s Set commands 1009 | # h Hash commands 1010 | # z Sorted set commands 1011 | # x Expired events (events generated every time a key expires) 1012 | # e Evicted events (events generated when a key is evicted for maxmemory) 1013 | # A Alias for g$lshzxe, so that the "AKE" string means all the events. 1014 | # 1015 | # The "notify-keyspace-events" takes as argument a string that is composed 1016 | # of zero or multiple characters. The empty string means that notifications 1017 | # are disabled. 
1018 | # 1019 | # Example: to enable list and generic events, from the point of view of the 1020 | # event name, use: 1021 | # 1022 | # notify-keyspace-events Elg 1023 | # 1024 | # Example 2: to get the stream of the expired keys subscribing to channel 1025 | # name __keyevent@0__:expired use: 1026 | # 1027 | # notify-keyspace-events Ex 1028 | # 1029 | # By default all notifications are disabled because most users don't need 1030 | # this feature and the feature has some overhead. Note that if you don't 1031 | # specify at least one of K or E, no events will be delivered. 1032 | notify-keyspace-events "" 1033 | 1034 | ############################### ADVANCED CONFIG ############################### 1035 | 1036 | # Hashes are encoded using a memory efficient data structure when they have a 1037 | # small number of entries, and the biggest entry does not exceed a given 1038 | # threshold. These thresholds can be configured using the following directives. 1039 | hash-max-ziplist-entries 512 1040 | hash-max-ziplist-value 64 1041 | 1042 | # Lists are also encoded in a special way to save a lot of space. 1043 | # The number of entries allowed per internal list node can be specified 1044 | # as a fixed maximum size or a maximum number of elements. 1045 | # For a fixed maximum size, use -5 through -1, meaning: 1046 | # -5: max size: 64 Kb <-- not recommended for normal workloads 1047 | # -4: max size: 32 Kb <-- not recommended 1048 | # -3: max size: 16 Kb <-- probably not recommended 1049 | # -2: max size: 8 Kb <-- good 1050 | # -1: max size: 4 Kb <-- good 1051 | # Positive numbers mean store up to _exactly_ that number of elements 1052 | # per list node. 1053 | # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), 1054 | # but if your use case is unique, adjust the settings as necessary. 1055 | list-max-ziplist-size -2 1056 | 1057 | # Lists may also be compressed. 
1058 | # Compress depth is the number of quicklist ziplist nodes from *each* side of 1059 | # the list to *exclude* from compression. The head and tail of the list 1060 | # are always uncompressed for fast push/pop operations. Settings are: 1061 | # 0: disable all list compression 1062 | # 1: depth 1 means "don't start compressing until after 1 node into the list, 1063 | # going from either the head or tail" 1064 | # So: [head]->node->node->...->node->[tail] 1065 | # [head], [tail] will always be uncompressed; inner nodes will compress. 1066 | # 2: [head]->[next]->node->node->...->node->[prev]->[tail] 1067 | # 2 here means: don't compress head or head->next or tail->prev or tail, 1068 | # but compress all nodes between them. 1069 | # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] 1070 | # etc. 1071 | list-compress-depth 0 1072 | 1073 | # Sets have a special encoding in just one case: when a set is composed 1074 | # of just strings that happen to be integers in radix 10 in the range 1075 | # of 64 bit signed integers. 1076 | # The following configuration setting sets the limit in the size of the 1077 | # set in order to use this special memory saving encoding. 1078 | set-max-intset-entries 512 1079 | 1080 | # Similarly to hashes and lists, sorted sets are also specially encoded in 1081 | # order to save a lot of space. This encoding is only used when the length and 1082 | # elements of a sorted set are below the following limits: 1083 | zset-max-ziplist-entries 128 1084 | zset-max-ziplist-value 64 1085 | 1086 | # HyperLogLog sparse representation bytes limit. The limit includes the 1087 | # 16 bytes header. When an HyperLogLog using the sparse representation crosses 1088 | # this limit, it is converted into the dense representation. 1089 | # 1090 | # A value greater than 16000 is totally useless, since at that point the 1091 | # dense representation is more memory efficient. 
1092 | # 1093 | # The suggested value is ~ 3000 in order to have the benefits of 1094 | # the space efficient encoding without slowing down too much PFADD, 1095 | # which is O(N) with the sparse encoding. The value can be raised to 1096 | # ~ 10000 when CPU is not a concern, but space is, and the data set is 1097 | # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. 1098 | hll-sparse-max-bytes 3000 1099 | 1100 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in 1101 | # order to help rehashing the main Redis hash table (the one mapping top-level 1102 | # keys to values). The hash table implementation Redis uses (see dict.c) 1103 | # performs a lazy rehashing: the more operation you run into a hash table 1104 | # that is rehashing, the more rehashing "steps" are performed, so if the 1105 | # server is idle the rehashing is never complete and some more memory is used 1106 | # by the hash table. 1107 | # 1108 | # The default is to use this millisecond 10 times every second in order to 1109 | # actively rehash the main dictionaries, freeing memory when possible. 1110 | # 1111 | # If unsure: 1112 | # use "activerehashing no" if you have hard latency requirements and it is 1113 | # not a good thing in your environment that Redis can reply from time to time 1114 | # to queries with 2 milliseconds delay. 1115 | # 1116 | # use "activerehashing yes" if you don't have such hard requirements but 1117 | # want to free memory asap when possible. 1118 | activerehashing yes 1119 | 1120 | # The client output buffer limits can be used to force disconnection of clients 1121 | # that are not reading data from the server fast enough for some reason (a 1122 | # common reason is that a Pub/Sub client can't consume messages as fast as the 1123 | # publisher can produce them). 
1124 | # 1125 | # The limit can be set differently for the three different classes of clients: 1126 | # 1127 | # normal -> normal clients including MONITOR clients 1128 | # slave -> slave clients 1129 | # pubsub -> clients subscribed to at least one pubsub channel or pattern 1130 | # 1131 | # The syntax of every client-output-buffer-limit directive is the following: 1132 | # 1133 | # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds> 1134 | # 1135 | # A client is immediately disconnected once the hard limit is reached, or if 1136 | # the soft limit is reached and remains reached for the specified number of 1137 | # seconds (continuously). 1138 | # So for instance if the hard limit is 32 megabytes and the soft limit is 1139 | # 16 megabytes / 10 seconds, the client will get disconnected immediately 1140 | # if the size of the output buffers reach 32 megabytes, but will also get 1141 | # disconnected if the client reaches 16 megabytes and continuously overcomes 1142 | # the limit for 10 seconds. 1143 | # 1144 | # By default normal clients are not limited because they don't receive data 1145 | # without asking (in a push way), but just after a request, so only 1146 | # asynchronous clients may create a scenario where data is requested faster 1147 | # than it can read. 1148 | # 1149 | # Instead there is a default limit for pubsub and slave clients, since 1150 | # subscribers and slaves receive data in a push fashion. 1151 | # 1152 | # Both the hard or the soft limit can be disabled by setting them to zero. 1153 | client-output-buffer-limit normal 0 0 0 1154 | client-output-buffer-limit slave 256mb 64mb 60 1155 | client-output-buffer-limit pubsub 32mb 8mb 60 1156 | 1157 | # Redis calls an internal function to perform many background tasks, like 1158 | # closing connections of clients in timeout, purging expired keys that are 1159 | # never requested, and so forth. 
1160 | # 1161 | # Not all tasks are performed with the same frequency, but Redis checks for 1162 | # tasks to perform according to the specified "hz" value. 1163 | # 1164 | # By default "hz" is set to 10. Raising the value will use more CPU when 1165 | # Redis is idle, but at the same time will make Redis more responsive when 1166 | # there are many keys expiring at the same time, and timeouts may be 1167 | # handled with more precision. 1168 | # 1169 | # The range is between 1 and 500, however a value over 100 is usually not 1170 | # a good idea. Most users should use the default of 10 and raise this up to 1171 | # 100 only in environments where very low latency is required. 1172 | hz 10 1173 | 1174 | # When a child rewrites the AOF file, if the following option is enabled 1175 | # the file will be fsync-ed every 32 MB of data generated. This is useful 1176 | # in order to commit the file to the disk more incrementally and avoid 1177 | # big latency spikes. 1178 | aof-rewrite-incremental-fsync yes 1179 | 1180 | # Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good 1181 | # idea to start with the default settings and only change them after investigating 1182 | # how to improve the performances and how the keys LFU change over time, which 1183 | # is possible to inspect via the OBJECT FREQ command. 1184 | # 1185 | # There are two tunable parameters in the Redis LFU implementation: the 1186 | # counter logarithm factor and the counter decay time. It is important to 1187 | # understand what the two parameters mean before changing them. 1188 | # 1189 | # The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis 1190 | # uses a probabilistic increment with logarithmic behavior. Given the value 1191 | # of the old counter, when a key is accessed, the counter is incremented in 1192 | # this way: 1193 | # 1194 | # 1. A random number R between 0 and 1 is extracted. 1195 | # 2. 
A probability P is calculated as 1/(old_value*lfu_log_factor+1). 1196 | # 3. The counter is incremented only if R < P. 1197 | # 1198 | # The default lfu-log-factor is 10. This is a table of how the frequency 1199 | # counter changes with a different number of accesses with different 1200 | # logarithmic factors: 1201 | # 1202 | # +--------+------------+------------+------------+------------+------------+ 1203 | # | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | 1204 | # +--------+------------+------------+------------+------------+------------+ 1205 | # | 0 | 104 | 255 | 255 | 255 | 255 | 1206 | # +--------+------------+------------+------------+------------+------------+ 1207 | # | 1 | 18 | 49 | 255 | 255 | 255 | 1208 | # +--------+------------+------------+------------+------------+------------+ 1209 | # | 10 | 10 | 18 | 142 | 255 | 255 | 1210 | # +--------+------------+------------+------------+------------+------------+ 1211 | # | 100 | 8 | 11 | 49 | 143 | 255 | 1212 | # +--------+------------+------------+------------+------------+------------+ 1213 | # 1214 | # NOTE: The above table was obtained by running the following commands: 1215 | # 1216 | # redis-benchmark -n 1000000 incr foo 1217 | # redis-cli object freq foo 1218 | # 1219 | # NOTE 2: The counter initial value is 5 in order to give new objects a chance 1220 | # to accumulate hits. 1221 | # 1222 | # The counter decay time is the time, in minutes, that must elapse in order 1223 | # for the key counter to be divided by two (or decremented if it has a value 1224 | # less <= 10). 1225 | # 1226 | # The default value for the lfu-decay-time is 1. A Special value of 0 means to 1227 | # decay the counter every time it happens to be scanned. 1228 | # 1229 | # lfu-log-factor 10 1230 | # lfu-decay-time 1 1231 | 1232 | ########################### ACTIVE DEFRAGMENTATION ####################### 1233 | # 1234 | # WARNING THIS FEATURE IS EXPERIMENTAL. 
However it was stress tested 1235 | # even in production and manually tested by multiple engineers for some 1236 | # time. 1237 | # 1238 | # What is active defragmentation? 1239 | # ------------------------------- 1240 | # 1241 | # Active (online) defragmentation allows a Redis server to compact the 1242 | # spaces left between small allocations and deallocations of data in memory, 1243 | # thus allowing to reclaim back memory. 1244 | # 1245 | # Fragmentation is a natural process that happens with every allocator (but 1246 | # less so with Jemalloc, fortunately) and certain workloads. Normally a server 1247 | # restart is needed in order to lower the fragmentation, or at least to flush 1248 | # away all the data and create it again. However thanks to this feature 1249 | # implemented by Oran Agra for Redis 4.0 this process can happen at runtime 1250 | # in an "hot" way, while the server is running. 1251 | # 1252 | # Basically when the fragmentation is over a certain level (see the 1253 | # configuration options below) Redis will start to create new copies of the 1254 | # values in contiguous memory regions by exploiting certain specific Jemalloc 1255 | # features (in order to understand if an allocation is causing fragmentation 1256 | # and to allocate it in a better place), and at the same time, will release the 1257 | # old copies of the data. This process, repeated incrementally for all the keys 1258 | # will cause the fragmentation to drop back to normal values. 1259 | # 1260 | # Important things to understand: 1261 | # 1262 | # 1. This feature is disabled by default, and only works if you compiled Redis 1263 | # to use the copy of Jemalloc we ship with the source code of Redis. 1264 | # This is the default with Linux builds. 1265 | # 1266 | # 2. You never need to enable this feature if you don't have fragmentation 1267 | # issues. 1268 | # 1269 | # 3. 
Once you experience fragmentation, you can enable this feature when 1270 | # needed with the command "CONFIG SET activedefrag yes". 1271 | # 1272 | # The configuration parameters are able to fine tune the behavior of the 1273 | # defragmentation process. If you are not sure about what they mean it is 1274 | # a good idea to leave the defaults untouched. 1275 | 1276 | # Enabled active defragmentation 1277 | # activedefrag yes 1278 | 1279 | # Minimum amount of fragmentation waste to start active defrag 1280 | # active-defrag-ignore-bytes 100mb 1281 | 1282 | # Minimum percentage of fragmentation to start active defrag 1283 | # active-defrag-threshold-lower 10 1284 | 1285 | # Maximum percentage of fragmentation at which we use maximum effort 1286 | # active-defrag-threshold-upper 100 1287 | 1288 | # Minimal effort for defrag in CPU percentage 1289 | # active-defrag-cycle-min 25 1290 | 1291 | # Maximal effort for defrag in CPU percentage 1292 | # active-defrag-cycle-max 75 1293 | 1294 | --------------------------------------------------------------------------------