├── srcs ├── requirements │ ├── nginx │ │ ├── .dockerignore │ │ ├── Dockerfile │ │ ├── conf │ │ │ └── default │ │ └── tools │ │ │ └── script.py │ ├── mariadb │ │ ├── .dockerignore │ │ ├── Dockerfile │ │ ├── tools │ │ │ └── script.py │ │ └── config │ │ │ └── 50-server.cnf │ ├── wordpress │ │ ├── .dockerignore │ │ ├── Dockerfile │ │ ├── tools │ │ │ └── script.py │ │ └── conf │ │ │ └── wp-config.php │ └── bonus │ │ ├── static-website │ │ ├── conf │ │ │ └── default │ │ ├── Dockerfile │ │ └── tools │ │ │ ├── index.html │ │ │ ├── styles.css │ │ │ └── script.js │ │ ├── redis │ │ ├── Dockerfile │ │ └── config │ │ │ └── config │ │ ├── portainer │ │ ├── tools │ │ │ └── script.py │ │ └── Dockerfile │ │ ├── ftp │ │ ├── Dockerfile │ │ ├── tools │ │ │ └── script.py │ │ └── conf │ │ │ └── vsftpd.conf │ │ └── adminer │ │ └── Dockerfile ├── .env └── docker-compose.yml ├── imgs ├── Docker-Storage.jpeg ├── Docker-Workflow.jpeg ├── vm_vs_containers.jpg ├── Docker-Components.jpeg ├── Architecture-of-Docker.png ├── Docker-Execution-Environment.jpeg └── Linux-Container-Architecture-1.jpeg ├── Makefile ├── LICENSE └── README.md /srcs/requirements/nginx/.dockerignore: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /srcs/requirements/mariadb/.dockerignore: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /srcs/requirements/wordpress/.dockerignore: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /imgs/Docker-Storage.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahlyel-amine/inception/HEAD/imgs/Docker-Storage.jpeg -------------------------------------------------------------------------------- /imgs/Docker-Workflow.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahlyel-amine/inception/HEAD/imgs/Docker-Workflow.jpeg -------------------------------------------------------------------------------- /imgs/vm_vs_containers.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahlyel-amine/inception/HEAD/imgs/vm_vs_containers.jpg -------------------------------------------------------------------------------- /imgs/Docker-Components.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahlyel-amine/inception/HEAD/imgs/Docker-Components.jpeg -------------------------------------------------------------------------------- /imgs/Architecture-of-Docker.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahlyel-amine/inception/HEAD/imgs/Architecture-of-Docker.png -------------------------------------------------------------------------------- /imgs/Docker-Execution-Environment.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahlyel-amine/inception/HEAD/imgs/Docker-Execution-Environment.jpeg -------------------------------------------------------------------------------- /imgs/Linux-Container-Architecture-1.jpeg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahlyel-amine/inception/HEAD/imgs/Linux-Container-Architecture-1.jpeg -------------------------------------------------------------------------------- /srcs/requirements/bonus/static-website/conf/default: -------------------------------------------------------------------------------- 1 | server 2 | { 3 | listen 5484; 4 | root /var/www/html/; 5 | index index.html; 6 | } 7 | -------------------------------------------------------------------------------- /srcs/requirements/bonus/redis/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:bullseye-slim 2 | 3 | RUN apt update && apt install -y redis-server && apt clean && rm -rf /var/lib/apt/lists/* 4 | 5 | COPY ./config/config /etc/redis/redis.conf 6 | 7 | ENTRYPOINT [ "redis-server", "--protected-mode", "no"] 8 | -------------------------------------------------------------------------------- /srcs/requirements/bonus/static-website/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:bullseye-slim 2 | 3 | RUN apt update -y && apt install nginx ftp -y --no-install-recommends 4 | 5 | COPY conf/default etc/nginx/sites-enabled/ 6 | COPY ./tools/ /var/www/html/ 7 | EXPOSE 5484 8 | ENTRYPOINT ["nginx", "-g", "daemon off;"] 9 | -------------------------------------------------------------------------------- /srcs/requirements/bonus/portainer/tools/script.py: -------------------------------------------------------------------------------- 1 | from os import system, environ 2 | import subprocess 3 | 4 | PORTAINER_PASSWORD = environ.get('PORTAINER_PASSWORD') 5 | 6 | system("echo $PORTAINER_PASSWORD > /tmp/portianer_ps") 7 | system("(sleep 1 ; rm /tmp/portianer_ps) &") 8 | system("/portainer/portainer --bind=:2307 --admin-password-file=/tmp/portianer_ps") 9 | -------------------------------------------------------------------------------- /srcs/requirements/nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:bullseye-slim 2 | 3 | RUN apt update -y && apt install nginx openssl python3 -y --no-install-recommends && \ 4 | apt clean && rm -rf /var/lib/apt/lists/* 5 | 6 | COPY ./tools/script.py / 7 | COPY ./conf/default /etc/nginx/sites-enabled/ 8 | 9 | EXPOSE 443 10 | 11 | ENTRYPOINT ["python3", "script.py"] 12 | -------------------------------------------------------------------------------- /srcs/requirements/bonus/ftp/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:bullseye 2 | 3 | RUN apt update && apt install -y --no-install-recommends \ 4 | vsftpd openssl python3 && apt clean && rm -rf /var/lib/apt/lists/* 5 | RUN mkdir -p /etc/ssl/ 6 | 7 | COPY ./tools/script.py / 8 | COPY ./conf/vsftpd.conf /etc/vsftpd.conf 9 | 10 | EXPOSE 21 11 | 12 | ENTRYPOINT ["python3", "script.py"] 13 | -------------------------------------------------------------------------------- /srcs/requirements/mariadb/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:bullseye 2 | 3 | RUN apt-get update && apt-get install -y \ 4 | --no-install-recommends \ 5 | mariadb-server python3 \ 6 | && apt-get clean && rm -rf /var/lib/apt/lists/* 7 | 8 | COPY ./config/50-server.cnf /etc/mysql/mariadb.conf.d/ 9 | COPY ./tools/script.py / 10 | 11 | # EXPOSE 3306 12 | 13 | CMD [ "python3", 
"script.py"] 14 | -------------------------------------------------------------------------------- /srcs/requirements/bonus/adminer/Dockerfile: -------------------------------------------------------------------------------- 1 | 2 | FROM debian:bullseye 3 | 4 | RUN apt update && apt install -y php php-mysqli wget && apt clean && rm -rf /var/lib/apt/lists/* 5 | 6 | WORKDIR /var/www/ 7 | 8 | RUN wget --no-check-certificate -O adminer.php https://www.adminer.org/latest.php && \ 9 | mv adminer.php index.php && chown -R root:root /var/www/ 10 | 11 | ENTRYPOINT [ "php", "-S", "0.0.0.0:8080"] 12 | -------------------------------------------------------------------------------- /srcs/requirements/bonus/portainer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:bullseye 2 | 3 | RUN apt-get update && apt-get install -y curl python3\ 4 | && apt-get clean && rm -rf /var/lib/apt/lists/* 5 | 6 | RUN curl -L https://github.com/portainer/portainer/releases/download/2.19.5/portainer-2.19.5-linux-amd64.tar.gz -o portainer.tar.gz && \ 7 | tar -xzvf portainer.tar.gz && rm portainer.tar.gz 8 | 9 | COPY ./tools/script.py / 10 | EXPOSE 2307 11 | CMD [ "python3", "./script.py" ] 12 | -------------------------------------------------------------------------------- /srcs/.env: -------------------------------------------------------------------------------- 1 | # certificates 2 | CERTS_=./var/www/html 3 | 4 | #mysql 5 | MYSQL_DATABASE_NAME=WPdb 6 | MYSQL_USER=saba 7 | MYSQL_PASSWORD=saba 8 | 9 | #wordpress 10 | WP_URL=aahlyel.42.fr 11 | WP_TITLE=kamikazeSaba 12 | WP_ADMIN=saba 13 | WP_ADMIN_PSWD=saba 14 | WP_ADMIN_MAIL=saba@student.1337.ma 15 | WP_PORTAL=wordpress:9000 16 | 17 | #ngnix 18 | NGNIX_LISTEN_ON=443 ssl 19 | #ssl 20 | SSL_KEY=inception.key 21 | SSL_CERT=inception.crt 22 | 23 | #ftp 24 | FTP_USER=sabat 25 | FTP_PASSWORD=saba 26 | 27 | #portainer 28 | 29 | PORTAINER_PASSWORD=sabato123456 -------------------------------------------------------------------------------- /srcs/requirements/nginx/conf/default: -------------------------------------------------------------------------------- 1 | server 2 | { 3 | listen 443 ssl; 4 | server_name aahlyel.42.fr; 5 | root /var/www/html/; 6 | index index.php; 7 | 8 | ssl_certificate /etc/nginx/ssl/inception.crt; 9 | ssl_certificate_key /etc/nginx/ssl/inception.key; 10 | ssl_protocols TLSv1.2; 11 | 12 | location /adminer { 13 | proxy_pass http://adminer:8080; 14 | } 15 | 16 | location ~ [^/]\\.php(/|$) { 17 | include fastcgi_params; 18 | fastcgi_pass wordpress:9000; 19 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /srcs/requirements/nginx/tools/script.py: -------------------------------------------------------------------------------- 1 | from os import environ, system 2 | 3 | # constants 4 | SSL_PATH = "/etc/nginx/ssl" 5 | SSL_KEY= f"{SSL_PATH}/{environ['SSL_KEY']}" 6 | SSL_CERT= f"{SSL_PATH}/{environ['SSL_CERT']}" 7 | 8 | # generate ssl key 9 | system(f"mkdir -p {SSL_PATH}") 10 | system(f"""openssl req -x509 -newkey rsa:4096 -nodes \ 11 | -keyout {SSL_KEY} \ 12 | -out {SSL_CERT} \ 13 | -sha256 \ 14 | -days 365 \ 15 | -subj '/C=MA/ST=BeniMellal/L=Khouribga/O=1337/OU=io/CN=inception/'""") 16 | 17 | # launch ngnix deamon in the foreground 18 | system("nginx -g 'daemon off;'") 19 | -------------------------------------------------------------------------------- 
/srcs/requirements/wordpress/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:bullseye-slim 2 | 3 | RUN apt update && apt install -y --no-install-recommends \ 4 | wordpress ftp default-mysql-client php-redis php-fpm php-mysql curl python3 && \ 5 | apt clean && rm -rf /var/lib/apt/lists/* && \ 6 | chmod 755 /var/www/html/ && \ 7 | curl -O https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar && \ 8 | chmod +x wp-cli.phar && \ 9 | mv wp-cli.phar /usr/local/bin/wp && \ 10 | wp core download --path=/var/www/html/ --allow-root 11 | 12 | COPY ./tools/script.py / 13 | COPY ./conf/wp-config.php /var/www/html/wp-config.php 14 | EXPOSE 9000 15 | 16 | ENTRYPOINT [ "python3", "script.py"] 17 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | up: volumes_dir 3 | docker-compose -f ./srcs/docker-compose.yml up --build -d 4 | 5 | volumes_dir: 6 | @mkdir -p /home/aahlyel/data/data /home/aahlyel/data/db 7 | 8 | stop: 9 | docker-compose -f ./srcs/docker-compose.yml stop 10 | 11 | down: 12 | docker-compose -f ./srcs/docker-compose.yml down --rmi all --volumes 13 | sudo rm -rf /home/aahlyel/data/data /home/aahlyel/data/db 14 | 15 | 16 | prune: down 17 | docker system prune -af 18 | 19 | start: 20 | docker-compose -f ./srcs/docker-compose.yml start 21 | 22 | re: down up 23 | 24 | network: 25 | @cd srcs && docker network inspect inception 26 | 27 | s = wordpress 28 | exec: 29 | @cd srcs && docker-compose exec ${s} bash 30 | 31 | c = wordpress 32 | logs: 33 | @cd srcs && docker-compose logs ${c} 34 | 35 | volumes: 36 | @cd srcs && docker volume ls 37 | 38 | volumes_rm: 39 | @cd srcs && docker volume rm ${v} 40 | 41 | volume_inspect: 42 | @cd srcs && docker volume inspect ${v} 43 | 44 | .PHONY: up stop down start re network exec logs volumes volumes_rm volume_inspect prune 45 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 ahlyel-amine 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /srcs/requirements/mariadb/tools/script.py: -------------------------------------------------------------------------------- 1 | from os import environ, system, remove 2 | import subprocess 3 | from time import sleep 4 | # constants 5 | system("service mariadb start") 6 | sleep(5) 7 | 8 | MYSQL_DATABASE_NAME = environ["MYSQL_DATABASE_NAME"] 9 | MYSQL_USER = environ["MYSQL_USER"] 10 | MYSQL_PASSWORD = environ["MYSQL_PASSWORD"] 11 | 12 | SQL_QUERIES = f""" 13 | CREATE DATABASE IF NOT EXISTS {MYSQL_DATABASE_NAME}; 14 | CREATE USER IF NOT EXISTS '{MYSQL_USER}'@'%' IDENTIFIED BY '{MYSQL_PASSWORD}'; 15 | GRANT ALL PRIVILEGES ON {MYSQL_DATABASE_NAME}.* TO '{MYSQL_USER}'@'%'; 16 | FLUSH PRIVILEGES; 17 | """ 18 | 19 | SQL_QUERIES_FILE = "./db.sql" 20 | shell_script = f""" 21 | mysql_secure_installation << EOF 1>&2 22 | n 23 | {MYSQL_PASSWORD} 24 | {MYSQL_PASSWORD} 25 | y 26 | n 27 | n 28 | n 29 | n 30 | EOF 31 | """ 32 | subprocess.run(shell_script, shell=True, check=False) 33 | 34 | # create sql queries file 35 | with open(SQL_QUERIES_FILE, "w", encoding="utf-8") as file: 36 | file.write(SQL_QUERIES) 37 | 38 | # launch mariadb service 39 | # execute sql queries 40 | system(f"mariadb -u root -p{MYSQL_PASSWORD} < {SQL_QUERIES_FILE}") 41 | remove(SQL_QUERIES_FILE) 42 | 43 | # run mariadb deamon 44 | system(f"mysqladmin -u root -p{ MYSQL_PASSWORD} shutdown") 45 | system("mariadbd") 46 | -------------------------------------------------------------------------------- /srcs/requirements/bonus/ftp/tools/script.py: -------------------------------------------------------------------------------- 1 | from os import system, environ, makedirs 2 | import subprocess 3 | # system("service vsftpd start") 4 | from time import sleep 5 | 6 | sleep(15) 7 | makedirs("/var/run/vsftpd/empty", exist_ok=True) 8 | makedirs("/var/www/html/", exist_ok=True) 9 | makedirs("/etc/ssl/certs/", exist_ok=True) 10 | FTP_USER = environ.get('FTP_USER') 11 | FTP_PASSWORD = environ.get('FTP_PASSWORD') 12 | ADDUSER = f""" 13 | adduser --home /var/www/html/ {environ['FTP_USER']} --disabled-password << EOF 14 | 15 | 16 | 17 | 18 | 19 | y 20 | EOF 21 | """ 22 | if not system(f"id -u {FTP_USER} > /dev/null 2>&1"): 23 | print(f"User '{FTP_USER}' already exists.") 24 | else: 25 | subprocess.run(ADDUSER, shell=True, check=False) 26 | subprocess.run(f"echo {FTP_USER}:{FTP_PASSWORD} | /usr/sbin/chpasswd", shell=True, check=False) 27 | 28 | subprocess.run(f"echo {FTP_USER} | tee -a /etc/vsftpd.userlist &> /dev/null", shell=True, check=False) 29 | subprocess.run(f"adduser {FTP_USER} root", shell=True, check=False) 30 | 31 | system("""openssl req -x509 -newkey rsa:4096 -nodes \ 32 | -keyout /etc/ssl/certs/inception.key\ 33 | -out /etc/ssl/certs/inception.crt \ 34 | -sha256 \ 35 | -days 365 \ 36 | -subj '/C=MA/ST=BeniMellal/L=Khouribga/O=1337/OU=io/CN=inception/'""") 37 | system("vsftpd /etc/vsftpd.conf") 38 | -------------------------------------------------------------------------------- /srcs/requirements/bonus/static-website/tools/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Flip Card Memory Game 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 |
[markup stripped in this dump — index.html is the "Flip Card Memory Game" page: a "Card Matching game" header, a moves counter ("Moves: 0"), and an elapsed-time display ("Elapsed Time: 0:00"); see srcs/requirements/bonus/static-website/tools/ for the full sources]
51 | 52 | 53 | -------------------------------------------------------------------------------- /srcs/requirements/wordpress/tools/script.py: -------------------------------------------------------------------------------- 1 | from os import system, environ 2 | import subprocess 3 | from re import sub 4 | import time 5 | 6 | # waiting for mariadb 7 | time.sleep(10) 8 | 9 | #constants 10 | FPM_CONG_FILE = "/etc/php/7.4/fpm/pool.d/www.conf" 11 | WP_CONF_FILE = f"{environ['CERTS_']}/wp-config.php" 12 | MYSQL_DB_NAME = environ['MYSQL_DATABASE_NAME'] 13 | MYSQL_USER = environ['MYSQL_USER'] 14 | MYSQL_PASSWORD = environ['MYSQL_PASSWORD'] 15 | MYSQL_HOST = "mariadb" 16 | WP_CERT = environ['CERTS_'] 17 | WP_URL = environ['WP_URL'] 18 | WP_TITLE = environ['WP_TITLE'] 19 | WP_ADMIN = environ['WP_ADMIN'] 20 | WP_ADMIN_PSWD = environ['WP_ADMIN_PSWD'] 21 | WP_ADMIN_MAIL = environ['WP_ADMIN_MAIL'] 22 | 23 | def replace(targetfile:str, old_values:list[str], new_values:list[str]): 24 | """replace old values by new one's in the target file""" 25 | with open(targetfile, "r", encoding="utf-8") as f: 26 | lol = f.read() 27 | for i, value in enumerate(old_values): 28 | lol = sub(value, new_values[i], lol) 29 | with open(targetfile, "w", encoding="utf-8") as f: 30 | f.write(lol) 31 | 32 | # start fast-cgi process manager so wordpress can run the php based configuration 33 | system("service php7.4-fpm start") 34 | 35 | # configure fast-cgi process manager config file 36 | replace(FPM_CONG_FILE, ["listen = /run/php/php7.4-fpm.sock"], ["listen = 9000"]) 37 | 38 | 39 | # configure wordpress config file 40 | default_values = ["database_name_here", "username_here", "password_here", "localhost"] 41 | wp_conf_values = [MYSQL_DB_NAME, MYSQL_USER, MYSQL_PASSWORD, MYSQL_HOST] 42 | 43 | replace(WP_CONF_FILE, default_values, wp_conf_values) 44 | 45 | #install wordpress 46 | system(f"""wp core install \ 47 | --path={WP_CERT} \ 48 | --url={WP_URL} \ 49 | --title={WP_TITLE} \ 50 | --admin_user={WP_ADMIN} \ 51 | --admin_password={WP_ADMIN_PSWD} \ 52 | --admin_email={WP_ADMIN_MAIL} \ 53 | --allow-root""") 54 | system(f"wp plugin install redis-cache --activate --path={WP_CERT} --allow-root") 55 | system(f"wp plugin update --all --path={WP_CERT} --allow-root") 56 | #install redis plugin 57 | system(f"wp redis enable --path={WP_CERT} --allow-root") 58 | #install a theme 59 | system(f"wp theme install blockstarter --activate --path={WP_CERT} --allow-root") 60 | 61 | subprocess.run(f"chown -R www-data:www-data /var/www/html", shell=True, check=False) 62 | 63 | system("service php7.4-fpm stop") 64 | 65 | # stop the fpm service so we can run it as a main process in the foreground 66 | system("php-fpm7.4 -F") 67 | -------------------------------------------------------------------------------- /srcs/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | nginx: 3 | container_name: nginx 4 | image: nginx:saba 5 | build: ./requirements/nginx/ 6 | ports: 7 | - "443:443" 8 | depends_on: 9 | - wordpress 10 | restart : always 11 | volumes: 12 | - wp-data-volume:/var/www/html/ 13 | networks: 14 | - inception 15 | env_file: 16 | - ./.env 17 | privileged: true 18 | 19 | vsftpd: 20 | container_name: vsftpd 21 | image: vsftpd:saba 22 | build: ./requirements/bonus/ftp 23 | volumes: 24 | - wp-data-volume:/var/www/html/ 25 | ports: 26 | - "21:21" 27 | env_file: 28 | - ./.env 29 | restart: always 30 | networks: 31 | - inception 32 | depends_on: 33 | - wordpress 34 | privileged: true 35 | 
36 | redis: 37 | container_name: redis 38 | image: redis:saba 39 | build: ./requirements/bonus/redis/ 40 | volumes: 41 | - wp-data-volume:/var/www/html 42 | restart: always 43 | networks: 44 | - inception 45 | expose: 46 | - 6379 47 | depends_on: 48 | - wordpress 49 | 50 | mariadb: 51 | container_name: mariadb 52 | image: mariadb:saba 53 | build: ./requirements/mariadb/ 54 | expose: 55 | - 3306 56 | volumes: 57 | - wp-db-volume:/var/lib/mysql 58 | restart : always 59 | env_file: 60 | - .env 61 | networks: 62 | - inception 63 | # privileged: true 64 | 65 | wordpress: 66 | container_name: wordpress 67 | image: wordpress:saba 68 | build: ./requirements/wordpress/ 69 | expose: 70 | - 9000 71 | depends_on: 72 | - mariadb 73 | restart : always 74 | volumes: 75 | - wp-data-volume:/var/www/html/ 76 | env_file: 77 | - ./.env 78 | networks: 79 | - inception 80 | privileged: true 81 | 82 | adminer: 83 | container_name: adminer 84 | image: adminer:saba 85 | build: ./requirements/bonus/adminer/ 86 | ports: 87 | - "8080:8080" 88 | restart: always 89 | networks: 90 | - inception 91 | depends_on: 92 | - mariadb 93 | 94 | static-website: 95 | container_name: static-website 96 | image: static:saba-website 97 | build: ./requirements/bonus/static-website/ 98 | ports: 99 | - "5484:5484" 100 | restart: always 101 | 102 | portainer: 103 | container_name: portainer 104 | image: portainer:saba 105 | build: ./requirements/bonus/portainer/ 106 | ports : 107 | - "2307:2307" 108 | env_file: 109 | - ./.env 110 | volumes: 111 | - /var/run/docker.sock:/var/run/docker.sock 112 | restart: always 113 | 114 | volumes: 115 | wp-db-volume: 116 | name: wp-db-volume 117 | driver: local 118 | driver_opts: 119 | type: 'none' 120 | o: bind 121 | device: /home/aahlyel/data/db 122 | 123 | wp-data-volume: 124 | name: wp-data-volume 125 | driver: local 126 | driver_opts: 127 | type: 'none' 128 | o: bind 129 | device: /home/aahlyel/data/data 130 | 131 | 132 | networks: 133 | inception: 134 | name: inception 135 | driver: bridge 136 | -------------------------------------------------------------------------------- /srcs/requirements/wordpress/conf/wp-config.php: -------------------------------------------------------------------------------- 1 | 40 | --- 41 | ## Virtualization 42 | 43 | `Virtualization` uses software to create an abstraction layer over computer hardware, enabling the division of a single computer's hardware components—such as processors, memory and storage—into multiple virtual machines (VMs). Each VM runs its own operating system (OS) and behaves like an independent computer, even though it is running on just a portion of the actual underlying computer hardware. 44 | 45 | It follows that virtualization enables more efficient use of physical computer hardware and allows a greater return on an organization’s hardware investment. 46 | 47 | Today, virtualization is a standard practice in enterprise IT architecture. It is also the technology that drives cloud computing economics. Virtualization enables cloud providers to serve users with their existing physical computer hardware. It enables cloud users to purchase only the computing resources they need when they need it, and to scale those resources cost-effectively as their workloads grow. 48 | 49 | ## What is a Virtual Machine 50 | 51 | A `virtual machine` (VM) is a software-based emulation of a physical computer that runs an operating system and applications just like a physical computer. 
VMs are created using virtualization software and have their own virtual hardware, including CPUs, memory, storage, and network interfaces.
 52 | 
 53 | 
 54 | ## Hypervisor
 55 | 
 56 | The `hypervisor`, or virtual machine monitor (VMM), is the software layer that enables virtualization. It manages the creation, execution, and termination of VMs. There are two types of hypervisors:
 57 | - Type 1 (Bare-Metal): Runs directly on the host's hardware (e.g., VMware ESXi, Microsoft Hyper-V).
 58 | - Type 2 (Hosted): Runs on top of a host operating system (e.g., VMware Workstation, Oracle VirtualBox).
 59 | 
 60 | 
 61 | ## From chroot to container
 62 | 
 63 | During the development of Unix V7 in 1979, the chroot system call was introduced, changing the root directory of a process and its children to a new location in the filesystem. This advance was the beginning of process isolation: segregating file access for each process (a minimal chroot demo follows the list below).
 64 | 
 65 | **Milestones from chroot to modern containers:**
 66 | - chroot introduced process isolation by allowing the root directory of a process to be changed.
 67 | - FreeBSD Jails partitioned systems into isolated environments with individual IP addresses.
 68 | - Linux-VServer provided operating-system-level virtualization for resource partitioning.
 69 | - Process Containers evolved into cgroups, the Linux kernel's mechanism for resource management and isolation.
 70 | - Docker popularized containerization with a full ecosystem for container management.
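To make the first milestone concrete, here is a hand-built chroot jail — a minimal sketch, not a hardened recipe, assuming a Linux host with the `busybox-static` package installed and `/tmp/jail` as a scratch path:

```bash
# Build a tiny root filesystem around a static busybox binary
mkdir -p /tmp/jail/bin
cp /bin/busybox /tmp/jail/bin/                   # provided by busybox-static
/tmp/jail/bin/busybox --install /tmp/jail/bin    # create sh, ls, ps, ... applet links

# Enter the jail: this shell now sees /tmp/jail as '/'
sudo chroot /tmp/jail /bin/sh
ls /    # only 'bin' is visible; host paths are unreachable
```

Inside the jail, file access is segregated exactly as the 1979 mechanism intended — but the process list, the network stack, and the kernel are still shared with the host, which is precisely the gap the later milestones were invented to close.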
 71 | ## Containers
 72 | 
 75 | 
 76 | **Container overview**
 77 | 
 78 | Containers are packages of software that contain all of the necessary elements to run in any environment. In this way, containers virtualize the operating system and run anywhere, from a private data center to the public cloud or even on a developer's personal laptop.
 79 | 
 80 | **Key features:**
 81 | 
 82 | - A container is a lightweight, standalone, and executable software package that includes everything needed to run a piece of software, including the code, a runtime, libraries, environment variables, and config files.
 83 | 
 84 | - Containers provide a consistent and reproducible environment, which makes it easier to develop, test, and deploy applications across different environments and platforms.
 85 | 
 86 | - Containers are isolated from each other and from the host system. Each container runs in its own set of namespaces, which means it has its own view of the operating system: its processes, file system, network, and mounted volumes.
 87 | 
 88 | - Containers use the host system's kernel and resources, but they do not require a full operating system per application like virtual machines do. This makes them more lightweight and efficient than virtual machines.
 89 | 
 90 | - Containers can be managed by container orchestration tools like Kubernetes, which handle deployment, scaling, networking, and availability of containers.
 91 | 
 92 | - The concept of a container as an isolated running process is fundamental to container technologies like Docker, LXC, and others.
 93 | 
 94 | ### **Dive into containers**
 95 | **Namespaces**
 96 | 
 97 | In the context of containers, a namespace is a kernel mechanism that provides isolation for running processes. Each aspect of a container runs in a separate namespace, and its access is limited to that namespace.
 98 | 
 99 | Namespaces are a feature of the Linux kernel that partitions kernel resources so that one set of processes sees one set of resources while another set of processes sees a different set. Processes that share a namespace see the same resources, while identical names (a PID, an interface name, a mount point) in different namespaces refer to distinct resources.
100 | 
101 | Resources may be various aspects of a system, such as network access, process IDs, user IDs, and filesystems.
102 | 
103 | For example, the PID namespace isolates the process ID number space, meaning that processes in different PID namespaces can have the same PID. Likewise, the network namespace isolates network interfaces, so containers can have their own virtual network interfaces and IP addresses.
104 | 
105 | Namespaces provide the isolation that makes containers possible: each container can run its own isolated processes and have its own network configuration, its own users, and its own filesystem.
106 | 
107 | **Cgroups**
108 | 
109 | Control groups, or cgroups, are a critical feature of the Linux kernel that Docker uses to manage and limit the resources used by containers. Cgroups provide fine-grained control over the allocation and usage of system resources such as CPU, memory, disk I/O, and network bandwidth. Here's how Docker utilizes cgroups:
110 | 
111 | - CPU Management: Docker can limit the CPU usage of a container by setting cgroup constraints, ensuring no single container can monopolize the CPU and allowing fair distribution of CPU resources among multiple containers.
112 | - Memory Management: Cgroups allow Docker to set memory usage limits for containers.
If a container exceeds its allocated memory, it can be restricted or terminated, preventing it from affecting the stability of the host system.
113 | - Block I/O Management: Docker can control the disk I/O bandwidth available to a container, preventing I/O-intensive containers from degrading the performance of other containers or the host system.
114 | - Network Bandwidth Management: Cgroups can also limit the network bandwidth available to a container, ensuring that network resources are fairly shared among containers.
115 | 
116 | #### **Capabilities**
117 | 
118 | Linux capabilities provide fine-grained control over process privileges, allowing containers to run with only the privileges they actually need. A short demo of namespaces, cgroups, and capabilities follows the comparison below.
119 | 
120 | --- 
121 | ## Difference between hypervisors and namespaces
122 | 
123 | | Feature | Hypervisor | Namespace |
124 | |------------------------|----------------------------------------------------|--------------------------------------------------|
125 | | **Definition** | Software that creates and runs virtual machines | Linux kernel feature for isolating resources |
126 | | **Purpose** | Allows multiple operating systems to share a single hardware host | Isolates and partitions kernel resources for processes |
127 | | **Resource Isolation** | Virtualizes hardware resources (CPU, memory, etc.) | Partitions kernel resources (processes, files, etc.) |
128 | | **Kernel** | Each VM has its own guest OS and kernel | Shares the host system's kernel |
129 | | **Hardware Virtualization** | Provides virtual hardware (e.g., virtual CPUs, disks) | No hardware virtualization; operates at process level |
130 | | **Use Case** | Running multiple OS environments on a single host | Creating isolated application environments in containers |
131 | | **Examples** | VMware, Hyper-V, KVM | PID namespace, NET namespace, IPC namespace |
132 | | **Resource Management**| Allocates physical resources to virtual machines | Manages process-level resources and isolation |
133 | | **Isolation Level** | Hardware-level isolation | Kernel-level isolation |
134 | | **Overhead** | Higher (requires full OS for each VM) | Lower (shares host OS and resources) |
135 | 
136 | In summary, hypervisors provide full virtualization with separate guest operating systems, while namespaces offer process-level isolation within a single host OS, making them suitable for different use cases and providing different types of resource management and isolation.
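The three mechanisms above can be poked at directly from a shell before ever touching Docker. Below is a minimal sketch, not a hardened recipe — it assumes a Linux host with `unshare` (util-linux), cgroup v2 mounted at `/sys/fs/cgroup` with permission to create groups there, Docker installed, and outbound network access; the `demo` group name is made up:

```bash
# Namespaces: in a fresh PID namespace, ps sees only this shell and itself
sudo unshare --pid --fork --mount-proc bash -c 'ps aux'

# Cgroups (v2): cap a shell and its children at ~50 MB of memory
sudo mkdir /sys/fs/cgroup/demo
echo $((50 * 1024 * 1024)) | sudo tee /sys/fs/cgroup/demo/memory.max
echo $$ | sudo tee /sys/fs/cgroup/demo/cgroup.procs    # move the current shell in

# Capabilities: Docker grants CAP_NET_RAW by default, so ping works...
docker run --rm alpine ping -c 1 8.8.8.8
# ...but with every capability dropped, the raw socket is denied
docker run --rm --cap-drop ALL alpine ping -c 1 8.8.8.8
```

Docker drives these same knobs for you: flags like `docker run --memory 50m --cpus 0.5 --cap-drop ALL` translate directly into cgroup limits and capability sets on the container's processes.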
142 | 
143 | --- 
144 | ## Containers vs. VMs
145 | 
146 | 
147 | | Feature | Containers | Virtual Machines |
148 | |------------------------|-----------------------------------------------|-----------------------------------------------|
149 | | **Architecture** | Shares the host OS kernel | Runs a full guest OS with a hypervisor |
150 | | **Isolation** | Process-level isolation | Hardware-level isolation |
151 | | **Boot Time** | Fast (seconds) | Slower (minutes) |
152 | | **Resource Overhead** | Low (shares OS resources) | High (dedicated OS and resources) |
153 | | **Efficiency** | High (lightweight) | Lower (heavier) |
154 | | **Scalability** | Very high (more containers per host) | Lower (fewer VMs per host) |
155 | | **Portability** | Highly portable (consistent environment) | Portable but more complex (full OS migration) |
156 | | **Use Cases** | Microservices, DevOps, CI/CD, cloud-native apps| Traditional applications, legacy systems, OS diversity |
157 | | **Security** | Moderate (depends on host OS security) | Strong (hardware-level isolation) |
158 | | **Management** | Easier to manage and orchestrate | More complex management |
159 | | **Storage** | Layered filesystem (e.g., OverlayFS) | Dedicated virtual disks |
160 | | **Networking** | Uses host network or overlay networks | Virtual network interfaces |
161 | | **Examples** | Docker, Kubernetes, LXC | VMware, Hyper-V, KVM |
162 | 
163 | 
164 | --- 
165 | ## Introduction to Docker
166 | 
167 | 
168 | Docker is a platform that revolutionizes how applications are developed, deployed, and run by leveraging containerization technology. Containers encapsulate an application and its dependencies into a single, portable unit, ensuring consistent performance across different environments. Docker simplifies the process of creating, deploying, and managing containers, making it easier to develop, ship, and run applications reliably. 169 | 170 | ### What Docker Offers 171 | 172 | Docker builds upon the fundamental concepts of containerization and introduces several key innovations and improvements: 173 | 174 | 1. **User-Friendly Tools**: 175 | - Docker provides an intuitive command-line interface (CLI) and graphical tools that simplify container management tasks, from building and running containers to orchestrating multi-container applications. 176 | 177 | 2. **Efficient Image Management**: 178 | - Docker uses a layered image format that enables efficient storage and distribution of container images. Images can be versioned, shared via Docker Hub, and reused across different environments. 179 | 180 | 3. **Enhanced Orchestration**: 181 | - Docker Compose and Docker Swarm facilitate the management of multi-container applications, while Docker's integration with Kubernetes provides advanced orchestration capabilities for complex, production-grade deployments. 182 | 183 | 4. **Improved Security**: 184 | - Docker includes security features such as Docker Content Trust, security scanning, and runtime security controls to help protect containerized applications. 185 | 186 | 5. **Developer Productivity**: 187 | - Docker's automation tools, such as Dockerfile for image building and Docker Hub for image distribution, streamline development workflows and foster collaboration. 188 | 189 | 6. **Flexible Networking**: 190 | - Docker offers various networking options, including bridge, host, and overlay networks, to manage container communication and connectivity effectively. 191 | 192 | 7. **Active Ecosystem**: 193 | - Docker has a vibrant community and extensive ecosystem, providing access to a wealth of resources, tools, and integrations. 194 | --- 195 | ### Docker vs. Containerization Methods Before Docker 196 | 197 |
198 | 199 | 200 | | Feature | Docker | Pre-Docker Containerization Methods | 201 | |------------------------|----------------------------------------------------|--------------------------------------------------------| 202 | | **Ease of Use** | Intuitive CLI and GUI tools | More manual setup and management | 203 | | **Image Management** | Layered images with versioning and Docker Hub | Limited image management and sharing options | 204 | | **Orchestration** | Docker Compose, Docker Swarm, Kubernetes integration | Less integrated orchestration tools | 205 | | **Security** | Built-in security features and integrations | Less standardized security controls | 206 | | **Developer Tools** | Dockerfile for automation, rich ecosystem | More manual and disparate tooling | 207 | | **Networking** | Advanced networking options (bridge, host, overlay) | Basic networking setups | 208 | | **Community** | Large, active community and ecosystem | Smaller, less integrated communities | 209 | 210 | 211 | 212 | --- 213 | 214 | ## Docker Ecosystem 215 | 216 | 217 | Docker provides a comprehensive ecosystem to simplify container management and application development. Here’s an overview: 218 | 219 | - **Docker Desktop** offers a user-friendly interface that makes it easy to build, run, and manage containers. It's designed to simplify the development workflow with intuitive tools. 220 | 221 | - **Docker Engine** is the core runtime that delivers a lightweight and secure environment for running containers. It is included with Docker Desktop and manages the execution of containerized applications. 222 | 223 | - **Docker Scout** provides near real-time actionable insights to secure and manage the software supply chain, enhancing the oversight and protection of containerized applications. 224 | 225 | - **Docker Hub** is the largest and most widely used image repository. It serves as the primary container registry for developers to share, manage, and securely store their containerized applications. 226 | 227 | - **Docker Build Cloud** is a premium service that optimizes the image-building process, particularly in enterprise environments, to enhance efficiency and performance. 228 | --- 229 | ## Key Docker Technologies 230 | 231 |
232 | 
233 | 
234 | | **Technology** | **Description** |
235 | |----------------------|---------------------------------------------------------------------------------------------------------|
236 | | **Runtime** | Manages the execution and isolation of containers, ensuring they run as independent, lightweight units. |
237 | | **Daemon** | The core service that runs in the background, handling Docker objects like containers, images, and volumes. |
238 | | **Orchestrator** | Tools like Docker Swarm and Kubernetes manage and coordinate the deployment, scaling, and operations of containers across multiple hosts. |
239 | | **CLI (Command Line Interface)** | Provides a command-line tool to interact with the Docker daemon, allowing users to manage containers, images, networks, and volumes. |
240 | | **Builder** | Creates Docker images from Dockerfiles, packaging applications and dependencies into portable formats. |
241 | | **Registry** | A system for storing and distributing Docker images, with Docker Hub being the most well-known public registry, along with private registries. |
242 | --- 
243 | ## Docker Image
244 | 
245 | `Docker images` are essential blueprints for creating containers, encapsulating all necessary components to run an application. These images utilize a layered architecture for efficient storage and reuse, and once created, remain immutable to preserve consistency.
246 | 
247 | Docker images can be versioned with tags, ensuring easy management of different iterations. Their portable nature allows for consistent deployment across various Docker-supported environments, promoting reusability in different projects. Images are defined by **Dockerfiles**, which specify build instructions and configurations. This approach enables streamlined development and deployment processes. Docker images are typically stored in registries like Docker Hub or private repositories, facilitating easy sharing and distribution within development teams or across organizations.
248 | 
249 | 
250 | #### **Example of a Dockerfile**
251 | 
252 | Here's a simple example that demonstrates how to create a Docker image using a `Dockerfile` based on the Debian Bullseye image. The image installs necessary packages, downloads and extracts Portainer, and sets up a Python script.
253 | 
254 | ```dockerfile
255 | # Use Debian Bullseye as the base image
256 | FROM debian:bullseye
257 | 
258 | # Update the package list, install curl and python3 without prompting for
259 | # confirmation, then clean apt's caches to reduce the image size
260 | RUN apt-get update && \
261 |     apt-get install -y curl python3 && \
262 |     apt-get clean && \
263 |     rm -rf /var/lib/apt/lists/*
264 | 
265 | # Download the Portainer release, extract it, and remove the archive to save space
266 | RUN curl -L https://github.com/portainer/portainer/releases/download/2.19.5/portainer-2.19.5-linux-amd64.tar.gz -o portainer.tar.gz && \
267 |     tar -xzvf portainer.tar.gz && \
268 |     rm portainer.tar.gz
269 | 
270 | # Copy the Python script from the local 'tools' directory into the container's root directory
271 | COPY ./tools/script.py /
272 | 
273 | # Inform Docker that the container listens on port 2307
274 | EXPOSE 2307
275 | 
276 | # Run script.py with python3 as the default command when the container starts
277 | CMD [ "python3", "./script.py" ]
278 | ```
279 | 
280 | #### **How to Build and Run the Portainer Docker Image**
281 | 
282 | 1. **Build the Image**:
283 | ```bash
284 | docker build -t portainer .
285 | ```
286 | This command builds the Docker image from the `Dockerfile` in the current directory and tags it as `portainer`. The image includes Debian Bullseye, installs necessary packages, downloads and extracts Portainer, and copies a Python script into the container.
287 | 
288 | 2. **Run the Container**:
289 | ```bash
290 | docker run -p 2307:2307 portainer
291 | ```
292 | This command runs a container from the `portainer` image, mapping port 2307 in the container to port 2307 on the host. This setup ensures that the Portainer application and any associated services are accessible via port 2307 on the host machine.
293 | 
294 | --- 
295 | 
296 | This approach builds and runs the Docker image while making sure that the application and services exposed by Portainer are properly configured and accessible.
297 | 
298 | --- 
299 | ## Docker Compose
300 | 
301 | `Docker Compose` is a tool that simplifies the process of defining and managing multi-container Docker applications. It allows you to configure and run multiple Docker containers using a single YAML configuration file.
302 | 
303 | #### Key Features of Docker Compose:
304 | - Declarative Configuration: Define multi-container applications in a single docker-compose.yml file.
305 | 
306 | - Service Management: Easily manage the lifecycle of your containers, including starting, stopping, and rebuilding.
307 | 
308 | - Networking: Automatically sets up a default network for your containers, facilitating easy communication between them (see the sketch after this list).
309 | 
310 | - Volumes: Support for defining and using persistent storage volumes.
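As a quick illustration of that networking, Compose services reach each other by service name. A small sketch against this repository's stack, assuming it is already running (e.g. via `make up`; `getent` ships with the Debian base images used here):

```bash
# From inside the wordpress container, resolve the mariadb service by name
docker-compose -f ./srcs/docker-compose.yml exec wordpress getent hosts mariadb
# -> prints mariadb's IP address on the shared 'inception' bridge network

# Print the fully resolved configuration — a quick sanity check of the YAML
docker-compose -f ./srcs/docker-compose.yml config
```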
318 | 
319 | --- 
320 | ### **Example docker-compose.yml**
321 | 
322 | This Docker Compose configuration sets up a MariaDB service with a custom-built image, a persistent volume for the database files, and an environment file for credentials.
323 | 
324 | ```yaml
325 | services:
326 |   mariadb:
327 |     container_name: mariadb # Name of the container
328 |     image: mariadb:saba # Image for the MariaDB service
329 |     build: ./requirements/mariadb/ # Build context for the MariaDB service
330 |     expose:
331 |       - 3306 # Expose port 3306 for internal communication (not published to the host)
332 |     volumes:
333 |       - wp-db-volume:/var/lib/mysql # Mount volume to persist database data
334 |     restart: always # Restart policy to ensure the container is always running
335 |     env_file:
336 |       - .env # Environment variables file for configuration
337 | 
338 | volumes:
339 |   wp-db-volume:
340 |     name: wp-db-volume # Named volume for MariaDB data
341 |     driver: local # Use the local driver for volume management
342 |     driver_opts:
343 |       type: 'none' # Type of mount
344 |       o: bind # Mount option
345 |       device: /home/aahlyel/data/data # Host directory to mount as volume
346 | ```
347 | 
348 | ### **Commands**
349 | 
350 | 1. **Start Services**:
351 | ```bash
352 | docker-compose up
353 | ```
354 | This command starts up the services defined in the `docker-compose.yml` file. Use `-d` to run in detached mode (in the background).
355 | 
356 | 2. **Stop Services**:
357 | ```bash
358 | docker-compose down
359 | ```
360 | This command stops and removes the containers and networks defined in the `docker-compose.yml` file; add `--volumes` to also remove its volumes.
361 | 
362 | 3. **Rebuild Services**:
363 | ```bash
364 | docker-compose build
365 | ```
366 | This command rebuilds the services, useful when changes are made to the Dockerfile or the `docker-compose.yml` configuration.
367 | 
368 | 4. **View Logs**:
369 | ```bash
370 | docker-compose logs
371 | ```
372 | This command displays the logs of the services, helping you to debug and monitor container output.
373 | 
374 | 
375 | ### Docker Volumes
376 | 
377 | 
378 |
379 | Docker volumes are used to persist data generated and used by Docker containers. Volumes are stored outside the container filesystem, making them a better choice than using bind mounts for sharing data among multiple containers and persisting data beyond the lifecycle of a container. 380 | 381 | #### **Types of Docker Volumes** 382 | 383 | 1. **Named Volumes**: Managed by Docker and are stored in a specific location on the host. 384 | 2. **Anonymous Volumes**: Similar to named volumes but are not given a specific name, and Docker manages their lifecycle. 385 | 3. **Bind Mounts**: Maps a directory or file from the host machine into the container. Bind mounts are tightly coupled to the host directory structure. 386 | 387 | #### **Creating and Using Docker Volumes** 388 | 389 | Volumes can be created and managed using the Docker CLI or defined within Docker Compose files. 390 | 391 | **Creating and Managing Volumes with Docker CLI** 392 | 393 | - **Create a Volume**: 394 | ```bash 395 | docker volume create my-volume 396 | ``` 397 | 398 | - **List Volumes**: 399 | ```bash 400 | docker volume ls 401 | ``` 402 | 403 | - **Inspect a Volume**: 404 | ```bash 405 | docker volume inspect my-volume 406 | ``` 407 | 408 | - **Remove a Volume**: 409 | ```bash 410 | docker volume rm my-volume 411 | ``` 412 | 413 | - **Using a Volume in a Docker Run Command**: 414 | ```bash 415 | docker run -d --name my-container -v my-volume:/path/in/container my-image 416 | ``` 417 | 418 | ### **Key Benefits of Docker Volumes** 419 | 420 | 1. **Data Persistence**: Volumes allow data to persist beyond the lifecycle of a container, ensuring that important data is not lost when containers are removed. 421 | 2. **Data Sharing**: Volumes facilitate data sharing between multiple containers, making it easy to share state or data among different parts of an application. 422 | 3. **Docker Managed**: Docker volumes are managed by Docker, making them more flexible and easier to back up or migrate than bind mounts. 423 | 4. **Decoupling from Host**: Volumes decouple the container data from the host filesystem, making containers more portable and reducing dependencies on the host machine's directory structure. 424 | 425 | --- 426 | ## Docker Networks 427 | 428 | Docker networks enable communication between Docker containers, other Docker hosts, and external services. They provide a way to isolate and secure container communication and offer various options to suit different networking requirements. 429 | 430 | #### Types of Docker Networks 431 | 432 | | **Network Type** | **Description** | **Use Case** | 433 | |------------------|---------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------| 434 | | **Bridge** | Default network type. Containers on the same bridge network can communicate with each other. | Isolating containers on the same host. | 435 | | **Host** | Shares the host’s network stack. Containers use the host’s IP address and network interfaces. | Running containers that require high network performance and low latency. | 436 | | **Overlay** | Connects containers running on different Docker hosts in a Swarm or Kubernetes cluster. | Enabling multi-host container communication, useful in clustered environments. | 437 | | **Macvlan** | Assigns a MAC address to each container, making it appear as a physical device on the network. 
| Allowing containers to communicate directly with the physical network, useful for legacy applications. |
438 | | **None** | Disables all networking. | Running containers that do not require network access. |
439 | 
440 | #### **Creating and Managing Networks with Docker CLI**
441 | 
442 | - **Create a Network**:
443 | ```bash
444 | docker network create my-network
445 | ```
446 | 
447 | - **List Networks**:
448 | ```bash
449 | docker network ls
450 | ```
451 | 
452 | - **Inspect a Network**:
453 | ```bash
454 | docker network inspect my-network
455 | ```
456 | 
457 | - **Remove a Network**:
458 | ```bash
459 | docker network rm my-network
460 | ```
461 | 
462 | - **Connecting a Container to a Network**:
463 | ```bash
464 | docker run -d --name my-container --network my-network my-image
465 | ```
466 | 
467 | - **Disconnecting a Container from a Network**:
468 | ```bash
469 | docker network disconnect my-network my-container
470 | ```
471 | 
472 | #### **Defining Networks in a Docker Compose File**
473 | 
474 | Here's an example `docker-compose.yml` file that defines and uses a custom network:
475 | 
476 | ```yaml
477 | services:
478 |   nginx:
479 |     container_name: nginx
480 |     image: nginx:saba
481 |     build: ./requirements/nginx/
482 |     ports:
483 |       - "443:443"
484 |     depends_on:
485 |       - wordpress
486 |     restart: always
487 |     networks:
488 |       - inception
489 | 
490 | 
491 |   mariadb:
492 |     container_name: mariadb
493 |     image: mariadb:saba
494 |     build: ./requirements/mariadb/
495 |     expose:
496 |       - 3306
497 |     restart: always
498 |     networks:
499 |       - inception
500 | 
501 |   wordpress:
502 |     container_name: wordpress
503 |     image: wordpress:saba
504 |     build: ./requirements/wordpress/
505 |     expose:
506 |       - 9000
507 |     depends_on:
508 |       - mariadb
509 |     restart: always
510 |     networks:
511 |       - inception
512 | 
513 | 
514 | networks:
515 |   inception:
516 |     name: inception
517 |     driver: bridge
518 | 
519 | ```
520 | --- 
521 | ### **Key Benefits of Docker Networks**
522 | 
523 | 1. **Isolation and Security**: Docker networks provide a way to isolate containers from each other and from the host system. Each network type offers different levels of isolation and security.
524 | 2. **Easy Communication**: Networks facilitate communication between containers and external systems, making it easier to build complex, multi-container applications.
525 | 3. **Flexibility**: Docker supports multiple network types, allowing you to choose the best option for your specific use case.
526 | 4. **Scalability**: With Docker Swarm or Kubernetes, overlay networks let containers on different hosts communicate transparently, so applications can scale out across a whole cluster.
527 | 
528 | --- 
529 | 
530 | # Resources
531 | - [Use containers to Build, Share and Run your applications](https://www.docker.com/resources/what-container/)
532 | - [What is virtualization?](https://www.ibm.com/topics/virtualization)
533 | 
-------------------------------------------------------------------------------- /srcs/requirements/bonus/redis/config/config: -------------------------------------------------------------------------------- 1 | # Redis configuration file example.
2 | # 3 | # Note that in order to read the configuration file, Redis must be 4 | # started with the file path as first argument: 5 | # 6 | # ./redis-server /path/to/redis.conf 7 | 8 | # Note on units: when memory size is needed, it is possible to specify 9 | # it in the usual form of 1k 5GB 4M and so forth: 10 | # 11 | # 1k => 1000 bytes 12 | # 1kb => 1024 bytes 13 | # 1m => 1000000 bytes 14 | # 1mb => 1024*1024 bytes 15 | # 1g => 1000000000 bytes 16 | # 1gb => 1024*1024*1024 bytes 17 | # 18 | # units are case insensitive so 1GB 1Gb 1gB are all the same. 19 | 20 | ################################## INCLUDES ################################### 21 | 22 | # Include one or more other config files here. This is useful if you 23 | # have a standard template that goes to all Redis servers but also need 24 | # to customize a few per-server settings. Include files can include 25 | # other files, so use this wisely. 26 | # 27 | # Note that option "include" won't be rewritten by command "CONFIG REWRITE" 28 | # from admin or Redis Sentinel. Since Redis always uses the last processed 29 | # line as value of a configuration directive, you'd better put includes 30 | # at the beginning of this file to avoid overwriting config change at runtime. 31 | # 32 | # If instead you are interested in using includes to override configuration 33 | # options, it is better to use include as the last line. 34 | # 35 | # include /path/to/local.conf 36 | # include /path/to/other.conf 37 | 38 | ################################## MODULES ##################################### 39 | 40 | # Load modules at startup. If the server is not able to load modules 41 | # it will abort. It is possible to use multiple loadmodule directives. 42 | # 43 | # loadmodule /path/to/my_module.so 44 | # loadmodule /path/to/other_module.so 45 | 46 | ################################## NETWORK ##################################### 47 | 48 | # By default, if no "bind" configuration directive is specified, Redis listens 49 | # for connections from all available network interfaces on the host machine. 50 | # It is possible to listen to just one or multiple selected interfaces using 51 | # the "bind" configuration directive, followed by one or more IP addresses. 52 | # 53 | # Examples: 54 | # 55 | # bind 192.168.1.100 10.0.0.1 56 | # bind 127.0.0.1 ::1 57 | # 58 | # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the 59 | # internet, binding to all the interfaces is dangerous and will expose the 60 | # instance to everybody on the internet. So by default we uncomment the 61 | # following bind directive, that will force Redis to listen only on the 62 | # IPv4 loopback interface address (this means Redis will only be able to 63 | # accept client connections from the same host that it is running on). 64 | # 65 | # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES 66 | # JUST COMMENT OUT THE FOLLOWING LINE. 67 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 68 | #bind 127.0.0.1 ::1 69 | 70 | # Protected mode is a layer of security protection, in order to avoid that 71 | # Redis instances left open on the internet are accessed and exploited. 72 | # 73 | # When protected mode is on and if: 74 | # 75 | # 1) The server is not binding explicitly to a set of addresses using the 76 | # "bind" directive. 77 | # 2) No password is configured. 
78 | # 79 | # The server only accepts connections from clients connecting from the 80 | # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain 81 | # sockets. 82 | # 83 | # By default protected mode is enabled. You should disable it only if 84 | # you are sure you want clients from other hosts to connect to Redis 85 | # even if no authentication is configured, nor a specific set of interfaces 86 | # are explicitly listed using the "bind" directive. 87 | protected-mode yes 88 | 89 | # Accept connections on the specified port, default is 6379 (IANA #815344). 90 | # If port 0 is specified Redis will not listen on a TCP socket. 91 | port 6379 92 | 93 | # TCP listen() backlog. 94 | # 95 | # In high requests-per-second environments you need a high backlog in order 96 | # to avoid slow clients connection issues. Note that the Linux kernel 97 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so 98 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog 99 | # in order to get the desired effect. 100 | tcp-backlog 511 101 | 102 | # Unix socket. 103 | # 104 | # Specify the path for the Unix socket that will be used to listen for 105 | # incoming connections. There is no default, so Redis will not listen 106 | # on a unix socket when not specified. 107 | # 108 | # unixsocket /var/run/redis/redis-server.sock 109 | # unixsocketperm 700 110 | 111 | # Close the connection after a client is idle for N seconds (0 to disable) 112 | timeout 0 113 | 114 | # TCP keepalive. 115 | # 116 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 117 | # of communication. This is useful for two reasons: 118 | # 119 | # 1) Detect dead peers. 120 | # 2) Force network equipment in the middle to consider the connection to be 121 | # alive. 122 | # 123 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 124 | # Note that to close the connection the double of the time is needed. 125 | # On other kernels the period depends on the kernel configuration. 126 | # 127 | # A reasonable value for this option is 300 seconds, which is the new 128 | # Redis default starting with Redis 3.2.1. 129 | tcp-keepalive 300 130 | 131 | ################################# TLS/SSL ##################################### 132 | 133 | # By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration 134 | # directive can be used to define TLS-listening ports. To enable TLS on the 135 | # default port, use: 136 | # 137 | # port 0 138 | # tls-port 6379 139 | 140 | # Configure a X.509 certificate and private key to use for authenticating the 141 | # server to connected clients, masters or cluster peers. These files should be 142 | # PEM formatted. 143 | # 144 | # tls-cert-file redis.crt 145 | # tls-key-file redis.key 146 | 147 | # Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange: 148 | # 149 | # tls-dh-params-file redis.dh 150 | 151 | # Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL 152 | # clients and peers. Redis requires an explicit configuration of at least one 153 | # of these, and will not implicitly use the system wide configuration. 154 | # 155 | # tls-ca-cert-file ca.crt 156 | # tls-ca-cert-dir /etc/ssl/certs 157 | 158 | # By default, clients (including replica servers) on a TLS port are required 159 | # to authenticate using valid client side certificates. 160 | # 161 | # If "no" is specified, client certificates are not required and not accepted. 
# If "optional" is specified, client certificates are accepted and must be
# valid if provided, but are not required.
#
# tls-auth-clients no
# tls-auth-clients optional

# By default, a Redis replica does not attempt to establish a TLS connection
# with its master.
#
# Use the following directive to enable TLS on replication links.
#
# tls-replication yes

# By default, the Redis Cluster bus uses a plain TCP connection. To enable
# TLS for the bus protocol, use the following directive:
#
# tls-cluster yes

# Explicitly specify TLS versions to support. Allowed values are case insensitive
# and include "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3" (OpenSSL >= 1.1.1) or
# any combination. To enable only TLSv1.2 and TLSv1.3, use:
#
# tls-protocols "TLSv1.2 TLSv1.3"

# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information
# about the syntax of this string.
#
# Note: this configuration applies only to <= TLSv1.2.
#
# tls-ciphers DEFAULT:!MEDIUM

# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more
# information about the syntax of this string, and specifically for TLSv1.3
# ciphersuites.
#
# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256

# When choosing a cipher, use the server's preference instead of the client
# preference. By default, the server follows the client's preference.
#
# tls-prefer-server-ciphers yes

# By default, TLS session caching is enabled to allow faster and less expensive
# reconnections by clients that support it. Use the following directive to disable
# caching.
#
# tls-session-caching no

# Change the default number of TLS sessions cached. A zero value sets the cache
# to unlimited size. The default size is 20480.
#
# tls-session-cache-size 5000

# Change the default timeout of cached TLS sessions. The default timeout is 300
# seconds.
#
# tls-session-cache-timeout 60

################################# GENERAL #####################################

# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize yes

# If you run Redis from upstart or systemd, Redis can interact with your
# supervision tree. Options:
#   supervised no      - no supervision interaction
#   supervised upstart - signal upstart by putting Redis into SIGSTOP mode
#                        requires "expect stop" in your upstart job config
#   supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
#   supervised auto    - detect upstart or systemd method based on
#                        UPSTART_JOB or NOTIFY_SOCKET environment variables
# Note: these supervision methods only signal "process is ready."
#       They do not enable continuous pings back to your supervisor.
supervised no

# If a pid file is specified, Redis writes it where specified at startup
# and removes it at exit.
#
# When the server runs non daemonized, no pid file is created if none is
# specified in the configuration. When the server is daemonized, the pid file
# is used even if not specified, defaulting to "/var/run/redis.pid".
#
# Creating a pid file is best effort: if Redis is not able to create it
# nothing bad happens, the server will start and run normally.
pidfile /var/run/redis/redis-server.pid

# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice

# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /var/log/redis/redis-server.log

# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no

# Specify the syslog identity.
# syslog-ident redis

# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0

# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16

# By default Redis shows an ASCII art logo only when started to log to the
# standard output and if the standard output is a TTY. Basically this means
# that normally a logo is displayed only in interactive sessions.
#
# However it is possible to force the pre-4.0 behavior and always show an
# ASCII art logo in startup logs by setting the following option to yes.
always-show-logo yes

################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
#
#   save <seconds> <changes>
#
#   Will save the DB if both the given number of seconds and the given
#   number of write operations against the DB occurred.
#
#   In the example below the behavior will be to save:
#   after 900 sec (15 min) if at least 1 key changed
#   after 300 sec (5 min) if at least 10 keys changed
#   after 60 sec if at least 10000 keys changed
#
#   Note: you can disable saving completely by commenting out all "save" lines.
#
#   It is also possible to remove all the previously configured save
#   points by adding a save directive with a single empty string argument
#   like in the following example:
#
#   save ""

save 900 1
save 300 10
save 60 10000

# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# automatically allow writes again.
#
# However if you have setup your proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes

# Compress string objects using LZF when dumping .rdb databases?
# By default compression is enabled as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes

# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performance.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes

# The filename where to dump the DB
dbfilename dump.rdb

# Remove RDB files used by replication in instances without persistence
# enabled. By default this option is disabled, however there are environments
# where for regulations or other security concerns, RDB files persisted on
# disk by masters in order to feed replicas, or stored on disk by replicas
# in order to load them for the initial synchronization, should be deleted
# ASAP. Note that this option ONLY WORKS in instances that have both AOF
# and RDB persistence disabled, otherwise it is completely ignored.
#
# An alternative (and sometimes better) way to obtain the same effect is
# to use diskless replication on both master and replica instances. However
# in the case of replicas, diskless is not always an option.
rdb-del-sync-files no

# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir /var/lib/redis

################################# REPLICATION #################################

# Master-Replica replication. Use replicaof to make a Redis instance a copy of
# another Redis server. A few things to understand ASAP about Redis replication.
#
#   +------------------+      +---------------+
#   |      Master      | ---> |    Replica    |
#   | (receive writes) |      |  (exact copy) |
#   +------------------+      +---------------+
#
# 1) Redis replication is asynchronous, but you can configure a master to
#    stop accepting writes if it appears to be not connected with at least
#    a given number of replicas.
# 2) Redis replicas are able to perform a partial resynchronization with the
#    master if the replication link is lost for a relatively small amount of
#    time. You may want to configure the replication backlog size (see the next
#    sections of this file) with a sensible value depending on your needs.
# 3) Replication is automatic and does not need user intervention. After a
#    network partition replicas automatically try to reconnect to masters
#    and resynchronize with them.
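#
# For example (hypothetical, not part of this project's single-instance
# setup): a second Redis container on the same Docker network could be made
# a replica of this one with "replicaof redis 6379", assuming "redis" is the
# hostname of this service on that network.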
#
# replicaof <masterip> <masterport>

# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the replica to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the replica request.
#
# masterauth <master-password>
#
# However this is not enough if you are using Redis ACLs (for Redis version
# 6 or greater), and the default user is not capable of running the PSYNC
# command and/or other commands needed for replication. In this case it's
# better to configure a special user to use with replication, and specify the
# masteruser configuration as such:
#
# masteruser <username>
#
# When masteruser is specified, the replica will authenticate against its
# master using the new AUTH form: AUTH <username> <password>.

# When a replica loses its connection with the master, or when the replication
# is still in progress, the replica can act in two different ways:
#
# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
#    still reply to client requests, possibly with out of date data, or the
#    data set may just be empty if this is the first synchronization.
#
# 2) If replica-serve-stale-data is set to 'no' the replica will reply with
#    an error "SYNC with master in progress" to all commands except:
#    INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE,
#    UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST,
#    HOST and LATENCY.
#
replica-serve-stale-data yes

# You can configure a replica instance to accept writes or not. Writing against
# a replica instance may be useful to store some ephemeral data (because data
# written on a replica will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default replicas are read-only.
#
# Note: read only replicas are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only replica exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only replicas using 'rename-command' to shadow all the
# administrative / dangerous commands.
replica-read-only yes

# Replication SYNC strategy: disk or socket.
#
# New replicas and reconnecting replicas that are not able to continue the
# replication process just receiving differences, need to do what is called a
# "full synchronization". An RDB file is transmitted from the master to the
# replicas.
#
# The transmission can happen in two different ways:
#
# 1) Disk-backed: The Redis master creates a new process that writes the RDB
#    file on disk. Later the file is transferred by the parent
#    process to the replicas incrementally.
# 2) Diskless: The Redis master creates a new process that directly writes the
#    RDB file to replica sockets, without touching the disk at all.
#
# With disk-backed replication, while the RDB file is generated, more replicas
# can be queued and served with the RDB file as soon as the current child
# producing the RDB file finishes its work. With diskless replication instead
# once the transfer starts, new replicas arriving will be queued and a new
# transfer will start when the current one terminates.
#
# When diskless replication is used, the master waits a configurable amount of
# time (in seconds) before starting the transfer in the hope that multiple
# replicas will arrive and the transfer can be parallelized.
#
# With slow disks and fast (large bandwidth) networks, diskless replication
# works better.
repl-diskless-sync no

# When diskless replication is enabled, it is possible to configure the delay
# the server waits in order to spawn the child that transfers the RDB via socket
# to the replicas.
#
# This is important since once the transfer starts, it is not possible to serve
# new replicas arriving, that will be queued for the next RDB transfer, so the
# server waits a delay in order to let more replicas arrive.
#
# The delay is specified in seconds, and by default is 5 seconds. To disable
# it entirely just set it to 0 seconds and the transfer will start ASAP.
repl-diskless-sync-delay 5

# -----------------------------------------------------------------------------
# WARNING: RDB diskless load is experimental. Since in this setup the replica
# does not immediately store an RDB on disk, it may cause data loss during
# failovers. RDB diskless load + Redis modules not handling I/O reads may also
# cause Redis to abort in case of I/O errors during the initial synchronization
# stage with the master. Use only if you know what you are doing.
# -----------------------------------------------------------------------------
#
# Replica can load the RDB it reads from the replication link directly from the
# socket, or store the RDB to a file and read that file after it was completely
# received from the master.
#
# In many cases the disk is slower than the network, and storing and loading
# the RDB file may increase replication time (and even increase the master's
# Copy on Write memory and replica buffers).
# However, parsing the RDB file directly from the socket may mean that we have
# to flush the contents of the current database before the full rdb was
# received. For this reason we have the following options:
#
# "disabled"    - Don't use diskless load (store the rdb file to the disk first)
# "on-empty-db" - Use diskless load only when it is completely safe.
# "swapdb"      - Keep a copy of the current db contents in RAM while parsing
#                 the data directly from the socket. Note that this requires
#                 sufficient memory, if you don't have it, you risk an OOM kill.
repl-diskless-load disabled

# Replicas send PINGs to server in a predefined interval. It's possible to
# change this interval with the repl-ping-replica-period option. The default
# value is 10 seconds.
#
# repl-ping-replica-period 10

# The following option sets the replication timeout for:
#
# 1) Bulk transfer I/O during SYNC, from the point of view of replica.
# 2) Master timeout from the point of view of replicas (data, pings).
# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-replica-period otherwise a timeout will be detected
# every time there is low traffic between the master and the replica. The default
# value is 60 seconds.
#
# repl-timeout 60

# Disable TCP_NODELAY on the replica socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to replicas. But this can add a delay for
# the data to appear on the replica side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the replica side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and replicas are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no

# Set the replication backlog size. The backlog is a buffer that accumulates
# replica data when replicas are disconnected for some time, so that when a
# replica wants to reconnect again, often a full resync is not needed, but a
# partial resync is enough, just passing the portion of data the replica
# missed while disconnected.
#
# The bigger the replication backlog, the longer the replica can endure the
# disconnect and later be able to perform a partial resynchronization.
#
# The backlog is only allocated if there is at least one replica connected.
#
# repl-backlog-size 1mb

# After a master has no connected replicas for some time, the backlog will be
# freed. The following option configures the number of seconds that need to
# elapse, starting from the time the last replica disconnected, for the backlog
# buffer to be freed.
#
# Note that replicas never free the backlog for timeout, since they may be
# promoted to masters later, and should be able to correctly "partially
# resynchronize" with other replicas: hence they should always accumulate backlog.
#
# A value of 0 means to never release the backlog.
#
# repl-backlog-ttl 3600

# The replica priority is an integer number published by Redis in the INFO
# output. It is used by Redis Sentinel in order to select a replica to promote
# into a master if the master is no longer working correctly.
#
# A replica with a low priority number is considered better for promotion, so
# for instance if there are three replicas with priority 10, 100, 25 Sentinel
# will pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the replica as not able to perform the
# role of master, so a replica with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
replica-priority 100

# It is possible for a master to stop accepting writes if there are less than
# N replicas connected, having a lag less than or equal to M seconds.
#
# The N replicas need to be in "online" state.
#
# The lag in seconds, that must be <= the specified value, is calculated from
# the last ping received from the replica, that is usually sent every second.
#
# This option does not GUARANTEE that N replicas will accept the write, but
# will limit the window of exposure for lost writes in case not enough replicas
# are available, to the specified number of seconds.
#
# For example to require at least 3 replicas with a lag <= 10 seconds use:
#
# min-replicas-to-write 3
# min-replicas-max-lag 10
#
# Setting one or the other to 0 disables the feature.
#
# By default min-replicas-to-write is set to 0 (feature disabled) and
# min-replicas-max-lag is set to 10.

# A Redis master is able to list the address and port of the attached
# replicas in different ways. For example the "INFO replication" section
# offers this information, which is used, among other tools, by
# Redis Sentinel in order to discover replica instances.
# Another place where this info is available is in the output of the
# "ROLE" command of a master.
#
# The listed IP address and port normally reported by a replica is
# obtained in the following way:
#
#   IP: The address is auto detected by checking the peer address
#   of the socket used by the replica to connect with the master.
#
#   Port: The port is communicated by the replica during the replication
#   handshake, and is normally the port that the replica is using to
#   listen for connections.
#
# However when port forwarding or Network Address Translation (NAT) is
# used, the replica may actually be reachable via different IP and port
# pairs. The following two options can be used by a replica in order to
# report to its master a specific set of IP and port, so that both INFO
# and ROLE will report those values.
#
# There is no need to use both the options if you need to override just
# the port or the IP address.
#
# replica-announce-ip 5.5.5.5
# replica-announce-port 1234

############################### KEYS TRACKING #################################

# Redis implements server assisted support for client side caching of values.
# This is implemented using an invalidation table that remembers, using
# 16 million slots, what clients may have certain subsets of keys. In turn
# this is used in order to send invalidation messages to clients. Please
# check this page to understand more about the feature:
#
#   https://redis.io/topics/client-side-caching
#
# When tracking is enabled for a client, all the read only queries are assumed
# to be cached: this will force Redis to store information in the invalidation
# table. When keys are modified, such information is flushed away, and
# invalidation messages are sent to the clients. However if the workload is
# heavily dominated by reads, Redis could use more and more memory in order
# to track the keys fetched by many clients.
#
# For this reason it is possible to configure a maximum fill value for the
# invalidation table. By default it is set to 1M of keys, and once this limit
# is reached, Redis will start to evict keys in the invalidation table
# even if they were not modified, just to reclaim memory: this will in turn
# force the clients to invalidate the cached values. Basically the table
# maximum size is a trade off between the memory you want to spend server
# side to track information about who cached what, and the ability of clients
# to retain cached objects in memory.
#
# If you set the value to 0, it means there are no limits, and Redis will
# retain as many keys as needed in the invalidation table.
# In the "stats" INFO section, you can find information about the number of
# keys in the invalidation table at every given moment.
#
# Note: when key tracking is used in broadcasting mode, no memory is used
# in the server side so this setting is useless.
#
# tracking-table-max-keys 1000000

################################## SECURITY ###################################

# Warning: since Redis is pretty fast, an outside user can try up to
# 1 million passwords per second against a modern box. This means that you
# should use very strong passwords, otherwise they will be very easy to break.
# Note that because the password is really a shared secret between the client
# and the server, and should not be memorized by any human, the password
# can be easily a long string from /dev/urandom or whatever, so by using a
# long and unguessable password no brute force attack will be possible.

# Redis ACL users are defined in the following format:
#
#   user <username> ... acl rules ...
#
# For example:
#
#   user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
#
# The special username "default" is used for new connections. If this user
# has the "nopass" rule, then new connections will be immediately authenticated
# as the "default" user without the need of any password provided via the
# AUTH command. Otherwise if the "default" user is not flagged with "nopass"
# the connections will start in not authenticated state, and will require
# AUTH (or the HELLO command AUTH option) in order to be authenticated and
# start to work.
#
# The ACL rules that describe what a user can do are the following:
#
#  on            Enable the user: it is possible to authenticate as this user.
#  off           Disable the user: it's no longer possible to authenticate
#                with this user, however the already authenticated connections
#                will still work.
#  +<command>    Allow the execution of that command
#  -<command>    Disallow the execution of that command
#  +@<category>  Allow the execution of all the commands in such category,
#                with valid categories like @admin, @set, @sortedset, ...
#                and so forth, see the full list in the server.c file where
#                the Redis command table is described and defined.
#                The special category @all means all the commands, both those
#                currently present in the server, and those that will be loaded
#                in the future via modules.
#  +<command>|subcommand  Allow a specific subcommand of an otherwise
#                disabled command. Note that this form is not
#                allowed as negative like -DEBUG|SEGFAULT, but
#                only additive starting with "+".
#  allcommands   Alias for +@all. Note that it implies the ability to execute
#                all the future commands loaded via the modules system.
#  nocommands    Alias for -@all.
#  ~<pattern>    Add a pattern of keys that can be mentioned as part of
#                commands. For instance ~* allows all the keys. The pattern
#                is a glob-style pattern like the one of KEYS.
#                It is possible to specify multiple patterns.
#  allkeys       Alias for ~*
#  resetkeys     Flush the list of allowed keys patterns.
#  ><password>   Add this password to the list of valid passwords for the user.
#                For example >mypass will add "mypass" to the list.
#                This directive clears the "nopass" flag (see later).
#  <<password>   Remove this password from the list of valid passwords.
#  nopass        All the set passwords of the user are removed, and the user
#                is flagged as requiring no password: it means that every
#                password will work against this user. If this directive is
#                used for the default user, every new connection will be
#                immediately authenticated with the default user without
#                any explicit AUTH command required. Note that the "resetpass"
#                directive will clear this condition.
#  resetpass     Flush the list of allowed passwords. Moreover removes the
#                "nopass" status. After "resetpass" the user has no associated
#                passwords and there is no way to authenticate without adding
#                some password (or setting it as "nopass" later).
#  reset         Performs the following actions: resetpass, resetkeys, off,
#                -@all. The user returns to the same state it has immediately
#                after its creation.
#
# ACL rules can be specified in any order: for instance you can start with
# passwords, then flags, or key patterns. However note that the additive
# and subtractive rules will CHANGE MEANING depending on the ordering.
# For instance see the following example:
#
#   user alice on +@all -DEBUG ~* >somepassword
#
# This will allow "alice" to use all the commands with the exception of the
# DEBUG command, since +@all added all the commands to the set of the commands
# alice can use, and later DEBUG was removed. However if we invert the order
# of two ACL rules the result will be different:
#
#   user alice on -DEBUG +@all ~* >somepassword
#
# Now DEBUG was removed when alice still had no commands in the set of allowed
# commands, later all the commands are added, so the user will be able to
# execute everything.
#
# Basically ACL rules are processed left-to-right.
#
# For more information about ACL configuration please refer to
# the Redis web site at https://redis.io/topics/acl

# ACL LOG
#
# The ACL Log tracks failed commands and authentication events associated
# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
acllog-max-len 128

# Using an external ACL file
#
# Instead of configuring users here in this file, it is possible to use
# a stand-alone file just listing users. The two methods cannot be mixed:
# if you configure users here and at the same time you activate the external
# ACL file, the server will refuse to start.
#
# The format of the external ACL user file is exactly the same as the
# format that is used inside redis.conf to describe users.
#
# aclfile /etc/redis/users.acl

# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
# layer on top of the new ACL system. The option effect will be just setting
# the password for the default user. Clients will still authenticate using
# AUTH <password> as usual, or more explicitly with AUTH default <password>
# if they follow the new protocol: both will work.
#
# requirepass foobared

# Command renaming (DEPRECATED).
#
# ------------------------------------------------------------------------
# WARNING: avoid using this option if possible. Instead use ACLs to remove
# commands from the default user, and put them only in some admin user you
# create for administrative purposes.
# ------------------------------------------------------------------------
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to replicas may cause problems.

################################### CLIENTS ####################################

# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# IMPORTANT: When Redis Cluster is used, the max number of connections is also
# shared with the cluster bus: every node in the cluster will use two
# connections, one incoming and another outgoing. It is important to size the
# limit accordingly in case of very large clusters.
#
# maxclients 10000

############################## MEMORY MANAGEMENT ################################

# Set a memory usage limit to the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU or LFU cache, or to
# set a hard memory limit for an instance (using the 'noeviction' policy).
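#
# As a sketch (values illustrative only, not part of the stock file), a
# pure-cache instance is often capped and told to evict any key by LRU:
#
#   maxmemory 256mb
#   maxmemory-policy allkeys-lru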
#
# WARNING: If you have replicas attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the replicas are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of replicas is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have replicas attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for replica
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>

# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select one from the following behaviors:
#
# volatile-lru -> Evict using approximated LRU, only keys with an expire set.
# allkeys-lru -> Evict any key using approximated LRU.
# volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
# allkeys-lfu -> Evict any key using approximated LFU.
# volatile-random -> Remove a random key having an expire set.
# allkeys-random -> Remove a random key, any key.
# volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
# noeviction -> Don't evict anything, just return an error on write operations.
#
# LRU means Least Recently Used
# LFU means Least Frequently Used
#
# Both LRU, LFU and volatile-ttl are implemented using approximated
# randomized algorithms.
#
# Note: with any of the above policies, Redis will return an error on write
# operations, when there are no suitable keys for eviction.
#
# At the date of writing these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy noeviction

# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can tune it for speed or
# accuracy. By default Redis will check five keys and pick the one that was
# used least recently, you can change the sample size using the following
# configuration directive.
#
# The default of 5 produces good enough results. 10 approximates true LRU
# very closely but costs more CPU. 3 is faster but not very accurate.
#
# maxmemory-samples 5

# Starting from Redis 5, by default a replica will ignore its maxmemory setting
# (unless it is promoted to master after a failover or manually). It means
# that the eviction of keys will be just handled by the master, sending the
# DEL commands to the replica as keys evict on the master side.
#
# This behavior ensures that masters and replicas stay consistent, and is usually
# what you want, however if your replica is writable, or you want the replica
# to have a different memory setting, and you are sure all the writes performed
# to the replica are idempotent, then you may change this default (but be sure
# to understand what you are doing).
#
# Note that since the replica by default does not evict, it may end up using more
# memory than the one set via maxmemory (there are certain buffers that may
# be larger on the replica, or data structures may sometimes take more memory
# and so forth). So make sure you monitor your replicas and make sure they
# have enough memory to never hit a real out-of-memory condition before the
# master hits the configured maxmemory setting.
#
# replica-ignore-maxmemory yes

# Redis reclaims expired keys in two ways: upon access when those keys are
# found to be expired, and also in background, in what is called the
# "active expire cycle". The key space is slowly and incrementally scanned
# looking for expired keys to reclaim, so that it is possible to free memory
# of keys that are expired and will never be accessed again in a short time.
#
# The default effort of the expire cycle will try to avoid having more than
# ten percent of expired keys still in memory, and will try to avoid consuming
# more than 25% of total memory or adding latency to the system. However
# it is possible to increase the expire "effort" that is normally set to
# "1", to a greater value, up to the value "10". At its maximum value the
# system will use more CPU, longer cycles (and technically may introduce
# more latency), and will tolerate fewer already expired keys still present
# in the system. It's a tradeoff between memory, CPU and latency.
#
# active-expire-effort 1

############################# LAZY FREEING ####################################

# Redis has two primitives to delete keys. One is called DEL and is a blocking
# deletion of the object. It means that the server stops processing new commands
# in order to reclaim all the memory associated with an object in a synchronous
# way. If the key deleted is associated with a small object, the time needed
# in order to execute the DEL command is very small and comparable to most other
# O(1) or O(log_N) commands in Redis. However if the key is associated with an
# aggregated value containing millions of elements, the server can block for
# a long time (even seconds) in order to complete the operation.
#
# For the above reasons Redis also offers non blocking deletion primitives
# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
# FLUSHDB commands, in order to reclaim memory in background. Those commands
# are executed in constant time. Another thread will incrementally free the
# object in the background as fast as possible.
#
# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
# It's up to the design of the application to understand when it is a good
# idea to use one or the other. However the Redis server sometimes has to
# delete keys or flush the whole database as a side effect of other operations.
# Specifically Redis deletes objects independently of a user call in the
# following scenarios:
#
# 1) On eviction, because of the maxmemory and maxmemory policy configurations,
#    in order to make room for new data, without going over the specified
#    memory limit.
# 2) Because of expire: when a key with an associated time to live (see the
#    EXPIRE command) must be deleted from memory.
# 3) Because of a side effect of a command that stores data on a key that may
#    already exist. For example the RENAME command may delete the old key
#    content when it is replaced with another one. Similarly SUNIONSTORE
#    or SORT with STORE option may delete existing keys. The SET command
#    itself removes any old content of the specified key in order to replace
#    it with the specified string.
# 4) During replication, when a replica performs a full resynchronization with
#    its master, the content of the whole database is removed in order to
#    load the RDB file just transferred.
#
# In all the above cases the default is to delete objects in a blocking way,
# as if DEL was called. However you can configure each case specifically
# in order to instead release memory in a non-blocking way, as if UNLINK
# was called, using the following configuration directives.

lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no

# It is also possible, for cases where replacing the user code's DEL calls
# with UNLINK calls is not easy, to modify the default behavior of the DEL
# command to act exactly like UNLINK, using the following configuration
# directive:

lazyfree-lazy-user-del no

################################ THREADED I/O #################################

# Redis is mostly single threaded, however there are certain threaded
# operations such as UNLINK, slow I/O accesses and other things that are
# performed on side threads.
#
# Now it is also possible to handle Redis clients socket reads and writes
# in different I/O threads. Since writing in particular is slow, Redis users
# normally use pipelining in order to speed up Redis performance per
# core, and spawn multiple instances in order to scale more. Using I/O
# threads it is possible to speed up Redis by up to two times without resorting
# to pipelining nor sharding of the instance.
#
# By default threading is disabled, we suggest enabling it only in machines
# that have at least 4 or more cores, leaving at least one spare core.
# Using more than 8 threads is unlikely to help much. We also recommend using
# threaded I/O only if you actually have performance problems, with Redis
# instances being able to use a quite big percentage of CPU time, otherwise
# there is no point in using this feature.
#
# So for instance if you have a four core box, try to use 2 or 3 I/O
# threads, if you have an 8 core box, try to use 6 threads. In order to
# enable I/O threads use the following configuration directive:
#
# io-threads 4
#
# Setting io-threads to 1 will just use the main thread as usual.
# When I/O threads are enabled, we only use threads for writes, that is
# to thread the write(2) syscall and transfer the client buffers to the
# socket. However it is also possible to enable threading of reads and
# protocol parsing using the following configuration directive, by setting
# it to yes:
#
# io-threads-do-reads no
#
# Usually threading reads doesn't help much.
#
# NOTE 1: This configuration directive cannot be changed at runtime via
# CONFIG SET. Also this feature currently does not work when SSL is
# enabled.
#
# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
# sure you also run the benchmark itself in threaded mode, using the
# --threads option to match the number of Redis threads, otherwise you'll not
# be able to notice the improvements.

############################ KERNEL OOM CONTROL ##############################

# On Linux, it is possible to hint the kernel OOM killer on what processes
# should be killed first when out of memory.
#
# Enabling this feature makes Redis actively control the oom_score_adj value
# for all its processes, depending on their role. The default scores will
# attempt to have background child processes killed before all others, and
# replicas killed before masters.
#
# Redis supports three options:
#
# no:       Don't make changes to oom-score-adj (default).
# yes:      Alias to "relative", see below.
# absolute: Values in oom-score-adj-values are written as is to the kernel.
# relative: Values are used relative to the initial value of oom_score_adj when
#           the server starts and are then clamped to a range of -1000 to 1000.
#           Because typically the initial value is 0, they will often match the
#           absolute values.
oom-score-adj no

# When oom-score-adj is used, this directive controls the specific values used
# for master, replica and background child processes. Values range -2000 to
# 2000 (higher means more likely to be killed).
#
# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities)
# can freely increase their value, but not decrease it below its initial
# settings. This means that setting oom-score-adj to "relative" and setting the
# oom-score-adj-values to positive values will always succeed.
oom-score-adj-values 0 200 800

############################## APPEND ONLY MODE ###############################

# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result in a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# goes wrong with the Redis process itself, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the best durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.

appendonly no

# The name of the append only file (default: "appendonly.aof")

appendfilename "appendonly.aof"

# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# For more details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".

# appendfsync always
appendfsync everysec
# appendfsync no

# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.

no-appendfsync-on-rewrite no

# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.

auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb

# An AOF file may be found to be truncated at the end during the Redis
# startup process, when the AOF data gets loaded back into memory.
# This may happen when the system where Redis is running
# crashes, especially when an ext4 filesystem is mounted without the
# data=ordered option (however this can't happen when Redis itself
# crashes or aborts but the operating system still works correctly).
#
# Redis can either exit with an error when this happens, or load as much
# data as possible (the default now) and start if the AOF file is found
# to be truncated at the end. The following option controls this behavior.
#
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
# the Redis server starts emitting a log to inform the user of the event.
# Otherwise if the option is set to no, the server aborts with an error
# and refuses to start. When the option is set to no, the user is required
# to fix the AOF file using the "redis-check-aof" utility before restarting
# the server.
#
# Note that if the AOF file is found to be corrupted in the middle
# the server will still exit with an error. This option only applies when
# Redis will try to read more data from the AOF file but not enough bytes
# will be found.
aof-load-truncated yes

# When rewriting the AOF file, Redis is able to use an RDB preamble in the
# AOF file for faster rewrites and recoveries. When this option is turned
# on the rewritten AOF file is composed of two different stanzas:
#
#   [RDB file][AOF tail]
#
# When loading, Redis recognizes that the AOF file starts with the "REDIS"
# string and loads the prefixed RDB file, then continues loading the AOF
# tail.
aof-use-rdb-preamble yes

################################ LUA SCRIPTING ###############################

# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that did not yet call any write commands. The second
# is the only way to shut down the server in the case a write command was
# already issued by the script but the user doesn't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000

################################ REDIS CLUSTER ###############################

# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
# started as cluster nodes can. In order to start a Redis instance as a
# cluster node, enable cluster support by uncommenting the following:
#
# cluster-enabled yes

# Every cluster node has a cluster configuration file. This file is not
# intended to be edited by hand. It is created and updated by Redis nodes.
# Every Redis Cluster node requires a different cluster configuration file.
# Make sure that instances running in the same system do not have
# overlapping cluster configuration file names.
#
# cluster-config-file nodes-6379.conf

# Cluster node timeout is the amount of milliseconds a node must be unreachable
# for it to be considered in failure state.
# Most other internal time limits are a multiple of the node timeout.
#
# cluster-node-timeout 15000

# A replica of a failing master will avoid starting a failover if its data
# looks too old.
#
# There is no simple way for a replica to actually have an exact measure of
# its "data age", so the following two checks are performed:
#
# 1) If there are multiple replicas able to failover, they exchange messages
#    in order to try to give an advantage to the replica with the best
#    replication offset (more data from the master processed).
#    Replicas will try to get their rank by offset, and apply to the start
#    of the failover a delay proportional to their rank.
#
# 2) Every single replica computes the time of the last interaction with
#    its master. This can be the last ping or command received (if the master
#    is still in the "connected" state), or the time that elapsed since the
#    disconnection with the master (if the replication link is currently down).
#    If the last interaction is too old, the replica will not try to failover
#    at all.
#
# The point "2" can be tuned by the user. Specifically a replica will not perform
# the failover if, since the last interaction with the master, the time
# elapsed is greater than:
#
#   (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period
#
# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor
# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
# replica will not try to failover if it was not able to talk with the master
# for longer than 310 seconds.
#
# A large cluster-replica-validity-factor may allow replicas with too old data to failover
# a master, while a too small value may prevent the cluster from being able to
# elect a replica at all.
#
# For maximum availability, it is possible to set the cluster-replica-validity-factor
# to a value of 0, which means, that replicas will always try to failover the
# master regardless of the last time they interacted with the master.
# (However they'll always try to apply a delay proportional to their
# offset rank).
#
# Zero is the only value able to guarantee that when all the partitions heal
# the cluster will always be able to continue.
#
# cluster-replica-validity-factor 10

# Cluster replicas are able to migrate to orphaned masters, that are masters
# that are left without working replicas. This improves the cluster ability
################################ REDIS CLUSTER ###############################

# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
# started as cluster nodes can. In order to start a Redis instance as a
# cluster node, enable cluster support by uncommenting the following:
#
# cluster-enabled yes

# Every cluster node has a cluster configuration file. This file is not
# intended to be edited by hand. It is created and updated by Redis nodes.
# Every Redis Cluster node requires a different cluster configuration file.
# Make sure that instances running on the same system do not have
# overlapping cluster configuration file names.
#
# cluster-config-file nodes-6379.conf

# Cluster node timeout is the number of milliseconds a node must be unreachable
# for it to be considered in failure state.
# Most other internal time limits are multiples of the node timeout.
#
# cluster-node-timeout 15000
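# Taken together, a minimal sketch of a cluster-enabled node (the file name
# and timeout are just the illustrative defaults shown above):
#
# cluster-enabled yes
# cluster-config-file nodes-6379.conf
# cluster-node-timeout 15000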
# A replica of a failing master will avoid starting a failover if its data
# looks too old.
#
# There is no simple way for a replica to actually have an exact measure of
# its "data age", so the following two checks are performed:
#
# 1) If there are multiple replicas able to failover, they exchange messages
#    in order to try to give an advantage to the replica with the best
#    replication offset (more data from the master processed).
#    Replicas will try to get their rank by offset, and apply to the start
#    of the failover a delay proportional to their rank.
#
# 2) Every single replica computes the time of the last interaction with
#    its master. This can be the last ping or command received (if the master
#    is still in the "connected" state), or the time that elapsed since the
#    disconnection with the master (if the replication link is currently down).
#    If the last interaction is too old, the replica will not try to failover
#    at all.
#
# Point "2" can be tuned by the user. Specifically, a replica will not perform
# the failover if, since the last interaction with the master, the time
# elapsed is greater than:
#
#   (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period
#
# So for example if node-timeout is 30 seconds, the cluster-replica-validity-factor
# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
# replica will not try to failover if it was not able to talk with the master
# for longer than 310 seconds.
#
# A large cluster-replica-validity-factor may allow replicas with too old data
# to fail over a master, while too small a value may prevent the cluster from
# being able to elect a replica at all.
#
# For maximum availability, it is possible to set the cluster-replica-validity-factor
# to a value of 0, which means that replicas will always try to failover the
# master regardless of the last time they interacted with the master.
# (However they'll always try to apply a delay proportional to their
# offset rank).
#
# Zero is the only value able to guarantee that when all the partitions heal
# the cluster will always be able to continue.
#
# cluster-replica-validity-factor 10

# Cluster replicas are able to migrate to orphaned masters, that is, masters
# that are left without working replicas. This improves the cluster's ability
# to resist failures, as otherwise an orphaned master can't be failed over
# if it has no working replicas.
#
# Replicas migrate to orphaned masters only if there are still at least a
# given number of other working replicas for their old master. This number
# is the "migration barrier". A migration barrier of 1 means that a replica
# will migrate only if there is at least 1 other working replica for its
# master, and so forth. It usually reflects the number of replicas you want
# for every master in your cluster.
#
# Default is 1 (replicas migrate only if their masters remain with at least
# one replica). To disable migration just set it to a very large value.
# A value of 0 can be set but is useful only for debugging and dangerous
# in production.
#
# cluster-migration-barrier 1
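# An illustrative scenario for the barrier: with masters A and B, where A
# has two working replicas and B has lost all of its replicas, a barrier
# of 1 allows one replica of A to migrate to B (A still keeps one replica),
# while a barrier of 2 would block the migration.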
# By default Redis Cluster nodes stop accepting queries if they detect there
# is at least one hash slot uncovered (no available node is serving it).
# This way if the cluster is partially down (for example a range of hash slots
# is no longer covered) the whole cluster eventually becomes unavailable.
# It automatically becomes available again as soon as all the slots are covered.
#
# However sometimes you want the subset of the cluster which is working
# to continue to accept queries for the part of the key space that is still
# covered. In order to do so, just set the cluster-require-full-coverage
# option to no.
#
# cluster-require-full-coverage yes

# This option, when set to yes, prevents replicas from trying to failover their
# master during master failures. However the master can still perform a
# manual failover, if forced to do so.
#
# This is useful in different scenarios, especially in the case of multiple
# data center operations, where we want one side to never be promoted except
# in the case of a total DC failure.
#
# cluster-replica-no-failover no

# This option, when set to yes, allows nodes to serve read traffic while the
# cluster is in a down state, as long as it believes it owns the slots.
#
# This is useful for two cases. The first case is for when an application
# doesn't require consistency of data during node failures or network partitions.
# One example of this is a cache, where as long as the node has the data it
# should be able to serve it.
#
# The second use case is for configurations that don't meet the recommended
# three shards but want to enable cluster mode and scale later. A
# master outage in a 1 or 2 shard configuration causes a read/write outage to
# the entire cluster without this option set; with it set there is only a
# write outage. Without a quorum of masters, slot ownership will not change
# automatically.
#
# cluster-allow-reads-when-down no

# In order to set up your cluster make sure to read the documentation
# available at the http://redis.io web site.

########################## CLUSTER DOCKER/NAT support ########################

# In certain deployments, Redis Cluster nodes' address discovery fails, because
# addresses are NAT-ted or because ports are forwarded (the typical case is
# Docker and other containers).
#
# In order to make Redis Cluster work in such environments, a static
# configuration where each node knows its public address is needed. The
# following three options are used for this scope, and are:
#
# * cluster-announce-ip
# * cluster-announce-port
# * cluster-announce-bus-port
#
# Each instructs the node about its address, client port, and cluster message
# bus port. The information is then published in the header of the bus packets
# so that other nodes will be able to correctly map the address of the node
# publishing the information.
#
# If the above options are not used, the normal Redis Cluster auto-detection
# will be used instead.
#
# Note that when remapped, the bus port may not be at the fixed offset of
# client port + 10000, so you can specify any port and bus-port depending
# on how they get remapped. If the bus-port is not set, a fixed offset of
# 10000 will be used as usual.
#
# Example:
#
# cluster-announce-ip 10.1.1.5
# cluster-announce-port 6379
# cluster-announce-bus-port 6380

################################## SLOW LOG ###################################

# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis the
# execution time, in microseconds, that a command must exceed in order to
# get logged, and the other parameter is the length of the slow log. When
# a new command is logged the oldest one is removed from the queue of
# logged commands.

# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000

# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
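# A quick way to inspect the slow log at runtime (the entries returned are
# of course workload-dependent):
#
#   redis-cli slowlog get 2    (fetch the two most recent slow commands)
#   redis-cli slowlog len
#   redis-cli slowlog reset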
################################ LATENCY MONITOR ##############################

# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user, who can
# print graphs and obtain reports.
#
# The system only logs operations that were performed in a time equal to or
# greater than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact, that while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0
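# For example, to watch for events slower than 100 milliseconds at runtime
# and read them back ("command" is one of the built-in latency event names):
#
#   redis-cli config set latency-monitor-threshold 100
#   redis-cli latency latest
#   redis-cli latency history command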
############################# EVENT NOTIFICATION ##############################

# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in Database 0, two
# messages will be published via Pub/Sub:
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
#  K     Keyspace events, published with __keyspace@<db>__ prefix.
#  E     Keyevent events, published with __keyevent@<db>__ prefix.
#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
#  $     String commands
#  l     List commands
#  s     Set commands
#  h     Hash commands
#  z     Sorted set commands
#  x     Expired events (events generated every time a key expires)
#  e     Evicted events (events generated when a key is evicted for maxmemory)
#  t     Stream commands
#  m     Key-miss events (Note: It is not included in the 'A' class)
#  A     Alias for "g$lshzxet", so that the "AKE" string means all the events
#        (except key-miss events which are excluded from 'A' due to their
#        unique nature).
#
# The "notify-keyspace-events" directive takes as argument a string that is
# composed of zero or multiple characters. The empty string means that
# notifications are disabled.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
#  notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
#  notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""
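# To see it in action, enable expired-key events and subscribe to them
# (a hypothetical two-terminal session against database 0):
#
#   redis-cli config set notify-keyspace-events Ex
#   redis-cli psubscribe '__keyevent@0__:expired'    (terminal 1)
#   redis-cli set foo bar px 100                     (terminal 2)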
############################### GOPHER SERVER #################################

# Redis contains an implementation of the Gopher protocol, as specified in
# RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
#
# The Gopher protocol was very popular in the late '90s. It is an alternative
# to the web, and the implementation both server and client side is so simple
# that the Redis server has just 100 lines of code in order to implement this
# support.
#
# What do you do with Gopher nowadays? Well, Gopher never *really* died, and
# lately there is a movement to resurrect Gopher's more hierarchical content,
# composed of just plain text documents. Some want a simpler internet, others
# believe that the mainstream internet became too controlled, and it's cool
# to create an alternative space for people that want a bit of fresh air.
#
# Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol
# as a gift.
#
# --- HOW IT WORKS? ---
#
# The Redis Gopher support uses the inline protocol of Redis, and specifically
# two kinds of inline requests that were anyway illegal: an empty request
# or any request that starts with "/" (there are no Redis commands starting
# with such a slash). Normal RESP2/RESP3 requests are completely out of the
# path of the Gopher protocol implementation and are served as usual as well.
#
# If you open a connection to Redis when Gopher is enabled and send it
# a string like "/foo", if there is a key named "/foo" it is served via the
# Gopher protocol.
#
# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher
# talking), you likely need a script like the following:
#
#   https://github.com/antirez/gopher2redis
#
# --- SECURITY WARNING ---
#
# If you plan to put Redis on the internet in a publicly accessible address
# to serve Gopher pages MAKE SURE TO SET A PASSWORD to the instance.
# Once a password is set:
#
#   1. The Gopher server (when enabled, not by default) will still serve
#      content via Gopher.
#   2. However other commands cannot be called before the client
#      authenticates.
#
# So use the 'requirepass' option to protect your instance.
#
# Note that Gopher is not currently supported when 'io-threads-do-reads'
# is enabled.
#
# To enable Gopher support, uncomment the following line and set the option
# from no (the default) to yes.
#
# gopher-enabled no

############################### ADVANCED CONFIG ###############################

# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64

# Lists are also encoded in a special way to save a lot of space.
# The number of entries allowed per internal list node can be specified
# as a fixed maximum size or a maximum number of elements.
# For a fixed maximum size, use -5 through -1, meaning:
# -5: max size: 64 Kb  <-- not recommended for normal workloads
# -4: max size: 32 Kb  <-- not recommended
# -3: max size: 16 Kb  <-- probably not recommended
# -2: max size: 8 Kb   <-- good
# -1: max size: 4 Kb   <-- good
# Positive numbers mean store up to _exactly_ that number of elements
# per list node.
# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
# but if your use case is unique, adjust the settings as necessary.
list-max-ziplist-size -2
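# The effect of the hash thresholds above can be observed with OBJECT
# ENCODING (key names are hypothetical; "ziplist" is the compact encoding
# in the Redis 6 generation packaged on bullseye):
#
#   redis-cli hset h field v               object encoding h -> "ziplist"
#   redis-cli hset h field <value over 64 bytes>
#                                          object encoding h -> "hashtable"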
# Lists may also be compressed.
# Compress depth is the number of quicklist ziplist nodes from *each* side of
# the list to *exclude* from compression. The head and tail of the list
# are always uncompressed for fast push/pop operations. Settings are:
# 0: disable all list compression
# 1: depth 1 means "don't start compressing until after 1 node into the list,
#    going from either the head or tail"
#    So: [head]->node->node->...->node->[tail]
#    [head], [tail] will always be uncompressed; inner nodes will compress.
# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
#    2 here means: don't compress head or head->next or tail->prev or tail,
#    but compress all nodes between them.
# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
#    etc.
list-compress-depth 0

# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512

# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64

# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 byte header. When a HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000

# Streams macro node max size / items. The stream data structure is a radix
# tree of big nodes that encode multiple items inside. Using this configuration
# it is possible to configure how big a single node can be in bytes, and the
# maximum number of items it may contain before switching to a new node when
# appending new stream entries. If any of the following settings are set to
# zero, the limit is ignored, so for instance it is possible to set just a
# max entries limit by setting max-bytes to 0 and max-entries to the desired
# value.
stream-node-max-bytes 4096
stream-node-max-entries 100

# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run on a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with a 2 millisecond delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes

# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# replica -> replica clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and replica clients, since
# subscribers and replicas receive data in a push fashion.
#
# Both the hard and the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
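# These limits can also be changed at runtime; for example, to give pubsub
# clients more headroom (the values here are hypothetical):
#
#   redis-cli config set client-output-buffer-limit "pubsub 64mb 16mb 90"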
# Client query buffers accumulate new commands. They are limited to a fixed
# amount by default in order to avoid that a protocol desynchronization (for
# instance due to a bug in the client) will lead to unbound memory usage in
# the query buffer. However you can configure it here if you have very special
# needs, such as huge multi/exec requests or alike.
#
# client-query-buffer-limit 1gb

# In the Redis protocol, bulk requests, that is, elements representing single
# strings, are normally limited to 512 mb. However you can change this limit
# here; it must be 1mb or greater.
#
# proto-max-bulk-len 512mb

# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10

# Normally it is useful to have an HZ value which is proportional to the
# number of clients connected. This is useful in order, for instance, to
# avoid that too many clients are processed for each background task
# invocation, in order to avoid latency spikes.
#
# Since the default HZ value is conservatively set to 10, Redis offers,
# and enables by default, the ability to use an adaptive HZ value which
# will temporarily raise when there are many connected clients.
#
# When dynamic HZ is enabled, the actual configured HZ will be used
# as a baseline, but multiples of the configured HZ value will be actually
# used as needed once more clients are connected. In this way an idle
# instance will use very little CPU time while a busy instance will be
# more responsive.
dynamic-hz yes

# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes

# When Redis saves an RDB file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
rdb-save-incremental-fsync yes

# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
# idea to start with the default settings and only change them after investigating
# how to improve the performances and how the keys' LFU changes over time, which
# is possible to inspect via the OBJECT FREQ command.
#
# There are two tunable parameters in the Redis LFU implementation: the
# counter logarithm factor and the counter decay time. It is important to
# understand what the two parameters mean before changing them.
#
# The LFU counter is just 8 bits per key; its maximum value is 255, so Redis
# uses a probabilistic increment with logarithmic behavior. Given the value
# of the old counter, when a key is accessed, the counter is incremented in
# this way:
#
# 1. A random number R between 0 and 1 is extracted.
# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
# 3. The counter is incremented only if R < P.
#
# The default lfu-log-factor is 10. This is a table of how the frequency
# counter changes with a different number of accesses with different
# logarithmic factors:
#
# +--------+------------+------------+------------+------------+------------+
# | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
# +--------+------------+------------+------------+------------+------------+
# | 0      | 104        | 255        | 255        | 255        | 255        |
# +--------+------------+------------+------------+------------+------------+
# | 1      | 18         | 49         | 255        | 255        | 255        |
# +--------+------------+------------+------------+------------+------------+
# | 10     | 10         | 18         | 142        | 255        | 255        |
# +--------+------------+------------+------------+------------+------------+
# | 100    | 8          | 11         | 49         | 143        | 255        |
# +--------+------------+------------+------------+------------+------------+
#
# NOTE: The above table was obtained by running the following commands:
#
#   redis-benchmark -n 1000000 incr foo
#   redis-cli object freq foo
#
# NOTE 2: The counter initial value is 5 in order to give new objects a chance
# to accumulate hits.
#
# The counter decay time is the time, in minutes, that must elapse in order
# for the key counter to be halved (or decremented if its value is <= 10).
#
# The default value for lfu-decay-time is 1. A special value of 0 means to
# decay the counter every time it happens to be scanned.
#
# lfu-log-factor 10
# lfu-decay-time 1
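# Note that OBJECT FREQ only works while an LFU maxmemory policy is active;
# this instance uses allkeys-lru (see the end of this file), so to inspect
# LFU counters one would first switch the policy (a hypothetical session):
#
#   redis-cli config set maxmemory-policy allkeys-lfu
#   redis-cli object freq somekey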
########################### ACTIVE DEFRAGMENTATION #######################
#
# What is active defragmentation?
# -------------------------------
#
# Active (online) defragmentation allows a Redis server to compact the
# spaces left between small allocations and deallocations of data in memory,
# thus allowing to reclaim back memory.
#
# Fragmentation is a natural process that happens with every allocator (but
# less so with Jemalloc, fortunately) and certain workloads. Normally a server
# restart is needed in order to lower the fragmentation, or at least to flush
# away all the data and create it again. However thanks to this feature,
# implemented by Oran Agra for Redis 4.0, this process can happen at runtime
# in a "hot" way, while the server is running.
#
# Basically when the fragmentation is over a certain level (see the
# configuration options below) Redis will start to create new copies of the
# values in contiguous memory regions by exploiting certain specific Jemalloc
# features (in order to understand if an allocation is causing fragmentation
# and to allocate it in a better place), and at the same time, will release the
# old copies of the data. This process, repeated incrementally for all the
# keys, will cause the fragmentation to drop back to normal values.
#
# Important things to understand:
#
# 1. This feature is disabled by default, and only works if you compiled Redis
#    to use the copy of Jemalloc we ship with the source code of Redis.
#    This is the default with Linux builds.
#
# 2. You never need to enable this feature if you don't have fragmentation
#    issues.
#
# 3. Once you experience fragmentation, you can enable this feature when
#    needed with the command "CONFIG SET activedefrag yes".
#
# The configuration parameters are able to fine tune the behavior of the
# defragmentation process. If you are not sure about what they mean it is
# a good idea to leave the defaults untouched.

# Enable active defragmentation
# NOTE: This feature is not available in the stock Debian packages as they use
# the distribution-wide jemalloc allocator that does not have support for active
# defragmentation. See #967970 for more information.
# activedefrag no

# Minimum amount of fragmentation waste to start active defrag
# active-defrag-ignore-bytes 100mb

# Minimum percentage of fragmentation to start active defrag
# active-defrag-threshold-lower 10

# Maximum percentage of fragmentation at which we use maximum effort
# active-defrag-threshold-upper 100

# Minimal effort for defrag in CPU percentage, to be used when the lower
# threshold is reached
# active-defrag-cycle-min 1

# Maximal effort for defrag in CPU percentage, to be used when the upper
# threshold is reached
# active-defrag-cycle-max 25

# Maximum number of set/hash/zset/list fields that will be processed from
# the main dictionary scan
# active-defrag-max-scan-fields 1000

# Jemalloc background thread for purging will be enabled by default
jemalloc-bg-thread yes

# It is possible to pin different threads and processes of Redis to specific
# CPUs in your system, in order to maximize the performance of the server.
# This is useful both in order to pin different Redis threads to different
# CPUs, and also in order to make sure that multiple Redis instances running
# on the same host will be pinned to different CPUs.
#
# Normally you can do this using the "taskset" command, however it is also
# possible to do this via the Redis configuration directly, both in Linux
# and FreeBSD.
#
# You can pin the server/IO threads, bio threads, aof rewrite child process, and
# the bgsave child process. The syntax to specify the cpu list is the same as
# the taskset command:
#
# Set redis server/io threads to cpu affinity 0,2,4,6:
# server_cpulist 0-7:2
#
# Set bio threads to cpu affinity 1,3:
# bio_cpulist 1,3
#
# Set aof rewrite child process to cpu affinity 8,9,10,11:
# aof_rewrite_cpulist 8-11
#
# Set bgsave child process to cpu affinity 1,10,11:
# bgsave_cpulist 1,10-11

# In some cases Redis will emit warnings and even refuse to start if it detects
# that the system is in a bad state. It is possible to suppress these warnings
# by setting the following config, which takes a space delimited list of
# warnings to suppress.
#
# ignore-warnings ARM64-COW-BUG

maxmemory 10mb

maxmemory-policy allkeys-lru
--------------------------------------------------------------------------------