├── cluster ├── README.md ├── meta │ └── main.yml ├── defaults │ └── main.yml ├── tasks │ └── main.yml └── files │ └── redis-trib ├── core ├── README.md ├── vars │ └── main.yml ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── .yamllint └── tasks │ └── main.yml ├── defaults ├── README.md ├── defaults │ └── main.yml ├── main.yml └── filter_plugins │ └── collections.py ├── node ├── README.md ├── meta │ └── main.yml ├── handlers │ └── main.yml ├── templates │ ├── redis.conf.j2 │ └── redis.service.j2 └── tasks │ └── main.yml ├── config ├── defaults │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml └── templates │ └── redis.conf.j2 ├── tasks └── main.yml ├── requirements.txt ├── .gitignore ├── files └── redis-3.3.3.gem ├── handlers └── main.yml ├── molecule ├── default │ ├── playbook.yml │ ├── prepare.yml │ ├── molecule.yml │ ├── INSTALL.rst │ ├── destroy.yml │ ├── tests │ │ └── test_default.py │ └── create.yml ├── stop-service │ ├── playbook.yml │ ├── prepare.yml │ ├── molecule.yml │ ├── INSTALL.rst │ ├── destroy.yml │ ├── tests │ │ └── test_default.py │ └── create.yml ├── single-node │ ├── molecule.yml │ ├── prepare.yml │ ├── playbook.yml │ ├── INSTALL.rst │ ├── destroy.yml │ ├── tests │ │ └── test_default.py │ └── create.yml └── multi-node │ ├── playbook.yml │ ├── prepare.yml │ ├── molecule.yml │ ├── INSTALL.rst │ ├── destroy.yml │ ├── tests │ └── test_default.py │ └── create.yml ├── meta └── main.yml ├── filter_plugins └── collections.py ├── DEVELEOPMENT.md └── README.md /cluster/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /core/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /defaults/README.md: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /node/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /config/defaults/main.yml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # No tasks here. 3 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | molecule 2 | python-vagrant 3 | -------------------------------------------------------------------------------- /core/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for core 3 | -------------------------------------------------------------------------------- /core/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for core 3 | -------------------------------------------------------------------------------- /core/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for core 3 | -------------------------------------------------------------------------------- /config/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: 'redis' } 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.log 
3 | __pycache__ 4 | .molecule 5 | .vagrant 6 | .cache 7 | -------------------------------------------------------------------------------- /cluster/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependencies: 4 | - { role: 'redis/defaults' } 5 | -------------------------------------------------------------------------------- /core/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependencies: 4 | - { role: 'redis/defaults' } 5 | -------------------------------------------------------------------------------- /node/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependencies: 4 | - { role: 'redis/defaults' } 5 | -------------------------------------------------------------------------------- /files/redis-3.3.3.gem: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alainchiasson/redis/HEAD/files/redis-3.3.3.gem -------------------------------------------------------------------------------- /handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for redis 3 | - name: restart redis_master 4 | debug: msg="restarting redis master" 5 | 6 | - name: restart redis_slave 7 | debug: msg="restarting redis slave" 8 | -------------------------------------------------------------------------------- /molecule/default/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: true 4 | roles: 5 | - redis/core 6 | - redis/node 7 | 8 | - hosts: redis_mgt 9 | become: true 10 | roles: 11 | - redis/cluster 12 | -------------------------------------------------------------------------------- /cluster/defaults/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | redis_cluster_replicas: 0 4 | 5 | redis_node_list: "{{ groups['all'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | arraypermute( [':'] ) | arraypermute( [ 6379 ] ) }}" 6 | -------------------------------------------------------------------------------- /core/.yamllint: -------------------------------------------------------------------------------- 1 | extends: default 2 | 3 | rules: 4 | braces: 5 | max-spaces-inside: 1 6 | level: error 7 | brackets: 8 | max-spaces-inside: 1 9 | level: error 10 | line-length: disable 11 | truthy: disable 12 | -------------------------------------------------------------------------------- /config/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for redis 3 | # Prepare the nodes 4 | 5 | - name: Create the cluster node redis.conf 6 | template: 7 | src: "redis.conf.j2" 8 | dest: "{{ redis_conf_dir }}/redis_{{ redis_port }}.conf" 9 | 10 | -------------------------------------------------------------------------------- /meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Alain Chiasson 4 | description: Installation of redis cluster 5 | company: your company (optional) 6 | license: license (GPLv2, CC-BY, etc) 7 | 8 | min_ansible_version: 1.2 9 | galaxy_tags: [] 10 | dependencies: [] 11 | -------------------------------------------------------------------------------- /defaults/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # User Vars 4 | redis_data_dir: /var/lib/redis 5 | redis_log_dir: /var/log/redis 6 | redis_run_dir: /var/run/redis 7 | redis_conf_dir: /etc 8 | 9 | # Note: default for node deployments. 
10 | redis_port: 6379 11 | redis_packages: 12 | - redis 13 | -------------------------------------------------------------------------------- /molecule/stop-service/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: true 4 | roles: 5 | - redis/core 6 | - redis/node 7 | 8 | - hosts: redis_mgt 9 | become: true 10 | roles: 11 | - redis/cluster 12 | 13 | - hosts: all 14 | become: true 15 | roles: 16 | - { role: redis/node, redis_state: stop } 17 | -------------------------------------------------------------------------------- /defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # User Vars 3 | redis_data_dir: /var/lib/redis 4 | redis_log_dir: /var/log/redis 5 | redis_run_dir: /var/run/redis 6 | redis_conf_dir: /etc 7 | 8 | # Note: default for node deployments. 9 | redis_port: 6379 10 | 11 | # Role vars - private to role 12 | redis_packages: 13 | - redis 14 | # defaults file for redis 15 | redis_state: "present" 16 | -------------------------------------------------------------------------------- /node/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # file: roles/common/handlers/main.yml 3 | - name: restart redis node 4 | service: 5 | name: redis_{{ node_port }} 6 | state: restarted 7 | 8 | - name: stop redis node 9 | service: 10 | name: redis_{{ node_port }} 11 | state: stopped 12 | 13 | - name: start redis node 14 | service: 15 | name: redis_{{ node_port }} 16 | state: started 17 | -------------------------------------------------------------------------------- /node/templates/redis.conf.j2: -------------------------------------------------------------------------------- 1 | port {{ redis_port }} 2 | cluster-enabled yes 3 | cluster-config-file {{ redis_data_dir }}/{{ redis_port }}/nodes.conf 4 | dir {{ redis_data_dir }}/{{ redis_port }} 5 | pidfile {{ redis_run_dir 
}}/node_{{ redis_port }}.pid 6 | logfile {{ redis_log_dir }}/node_{{ redis_port }}.log 7 | cluster-node-timeout 5000 8 | daemonize no 9 | appendonly yes 10 | 11 | # MAKE INTERFACE UNSECURE !! 12 | protected-mode no 13 | -------------------------------------------------------------------------------- /molecule/single-node/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: vagrant 6 | provider: 7 | name: virtualbox 8 | lint: 9 | name: yamllint 10 | enabled: false 11 | platforms: 12 | - name: redis 13 | box: centos/7 14 | memory: 4096 15 | provisioner: 16 | name: ansible 17 | scenario: 18 | name: single-node 19 | verifier: 20 | name: testinfra 21 | options: 22 | v: 1 23 | lint: 24 | name: flake8 25 | -------------------------------------------------------------------------------- /filter_plugins/collections.py: -------------------------------------------------------------------------------- 1 | 2 | def arraypermute(collection, key): 3 | 4 | ''' 5 | Returns all combinations of collection and key 6 | ''' 7 | 8 | return [ str(i) + str(j) for i in collection for j in key ] 9 | 10 | 11 | class FilterModule(object): 12 | ''' 13 | custom jinja2 filters for working with collections 14 | ''' 15 | 16 | def filters(self): 17 | return { 18 | 'arraypermute': arraypermute 19 | } 20 | -------------------------------------------------------------------------------- /defaults/filter_plugins/collections.py: -------------------------------------------------------------------------------- 1 | 2 | def arraypermute(collection, key): 3 | 4 | ''' 5 | Returns all combinations of collection and key 6 | ''' 7 | 8 | return [ str(i) + str(j) for i in collection for j in key ] 9 | 10 | 11 | class FilterModule(object): 12 | ''' 13 | custom jinja2 filters for working with collections 14 | ''' 15 | 16 | def filters(self): 17 | return { 18 | 'arraypermute': arraypermute 19 | } 20 | 
-------------------------------------------------------------------------------- /molecule/multi-node/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: redis_nodes 3 | become: true 4 | roles: 5 | - redis/core 6 | - { role: redis/node, redis_port: 7000 } 7 | - { role: redis/node, redis_port: 7001 } 8 | 9 | - hosts: redis_mgt 10 | become: true 11 | roles: 12 | - { role: redis/cluster, redis_cluster_replicas: 1, redis_node_list: "{{ groups['redis_nodes'] | map('extract', hostvars, ['ansible_eth1', 'ipv4', 'address']) | arraypermute( [':'] ) | arraypermute( [7000,7001] ) | list }}" } 13 | -------------------------------------------------------------------------------- /node/templates/redis.service.j2: -------------------------------------------------------------------------------- 1 | # Genereated by Ansible - Do not edit. 2 | # Source is redis.serices from rpm redis-3.2.3-1.el7 3 | # NOTE: service name is used by redis-shutdown to derive config file name 4 | 5 | [Unit] 6 | Description=Redis persistent key-value database - port {{ redis_port }} 7 | After=network.target 8 | 9 | [Service] 10 | ExecStart=/usr/bin/redis-server {{ redis_conf_dir }}/redis_{{ redis_port }}.conf --daemonize no 11 | ExecStop=/usr/bin/redis-shutdown redis_{{ redis_port }} 12 | User=redis 13 | Group=redis 14 | Restart=always 15 | RestartSec=180 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /molecule/default/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | gather_facts: False 5 | become: true 6 | tasks: 7 | - name: Install python for Ansible 8 | raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal) 9 | become: True 10 | changed_when: False 11 | - name: Install required EPEL access for redis packages on CentOS / RedHat. 
12 | yum: 13 | name: epel-release 14 | # Required to initialise the cluster - redis-trib is a ruby app 15 | - name: Install ruby 16 | package: 17 | name: ruby 18 | - name: Install net-tools for testinfra socket testing. 19 | package: 20 | name: net-tools 21 | -------------------------------------------------------------------------------- /molecule/multi-node/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | gather_facts: False 5 | become: true 6 | tasks: 7 | - name: Install python for Ansible 8 | raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal) 9 | become: True 10 | changed_when: False 11 | - name: Install required EPEL access for redis packages on CentOS / RedHat. 12 | yum: 13 | name: epel-release 14 | # Required to initialise the cluster - redis-trib is a ruby app 15 | - name: Install ruby 16 | package: 17 | name: ruby 18 | - name: Install net-tools for testinfra socket testing. 19 | package: 20 | name: net-tools 21 | -------------------------------------------------------------------------------- /molecule/stop-service/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | gather_facts: False 5 | become: true 6 | tasks: 7 | - name: Install python for Ansible 8 | raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal) 9 | become: True 10 | changed_when: False 11 | - name: Install required EPEL access for redis packages on CentOS / RedHat. 12 | yum: 13 | name: epel-release 14 | # Required to initialise the cluster - redis-trib is a ruby app 15 | - name: Install ruby 16 | package: 17 | name: ruby 18 | - name: Install net-tools for testinfra socket testing. 
19 | package: 20 | name: net-tools 21 | -------------------------------------------------------------------------------- /cluster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # The following tasks are used to install redis cluster management software 3 | # 4 | - name: Install redis gem 5 | package: 6 | name: rubygem-redis 7 | 8 | - name: Install redis-trib from localfile. 9 | copy: 10 | src: redis-trib 11 | dest: /usr/bin/redis-trib 12 | mode: 0755 13 | 14 | - name: print node list 15 | debug: 16 | msg: "{{ redis_node_list }}" 17 | 18 | 19 | # link the cluster together. 20 | - name: Create cluster with all nodes 21 | shell: "redis-trib create --yes --replicas {{ redis_cluster_replicas }} {{ redis_node_list | join(' ') }} && touch {{ redis_conf_dir }}/redis-cluster-created" 22 | args: 23 | creates: "{{ redis_conf_dir }}/redis-cluster-created" 24 | -------------------------------------------------------------------------------- /molecule/single-node/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | gather_facts: False 5 | become: true 6 | tasks: 7 | - name: Install python for Ansible 8 | raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal) 9 | become: True 10 | changed_when: False 11 | - name: place SELinux in permissive mode. 12 | selinux: 13 | policy: targeted 14 | state: permissive 15 | - name: Install required EPEL access for redis packages on CentOS / RedHat. 16 | yum: 17 | name: epel-release 18 | # Required to initialise the cluster - redis-trib is a ruby app 19 | - name: Install ruby 20 | package: 21 | name: ruby 22 | - name: Install net-tools for testinfra socket testing. 
23 | package: 24 | name: net-tools 25 | -------------------------------------------------------------------------------- /molecule/multi-node/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: vagrant 6 | provider: 7 | name: virtualbox 8 | lint: 9 | name: yamllint 10 | enabled: false 11 | platforms: 12 | - name: redis-1 13 | groups: 14 | - redis_nodes 15 | - redis_mgt 16 | box: centos/7 17 | memory: 2048 18 | interfaces: 19 | - auto_config: true 20 | network_name: private_network 21 | type: dhcp 22 | - name: redis-2 23 | groups: 24 | - redis_nodes 25 | box: centos/7 26 | memory: 2048 27 | interfaces: 28 | - auto_config: true 29 | network_name: private_network 30 | type: dhcp 31 | - name: redis-3 32 | groups: 33 | - redis_nodes 34 | box: centos/7 35 | memory: 2048 36 | interfaces: 37 | - auto_config: true 38 | network_name: private_network 39 | type: dhcp 40 | provisioner: 41 | name: ansible 42 | scenario: 43 | name: multi-node 44 | verifier: 45 | name: testinfra 46 | options: 47 | v: 1 48 | lint: 49 | name: flake8 50 | -------------------------------------------------------------------------------- /molecule/stop-service/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: vagrant 6 | provider: 7 | name: virtualbox 8 | lint: 9 | name: yamllint 10 | enabled: false 11 | platforms: 12 | - name: redis-1 13 | groups: 14 | - redis_nodes 15 | - redis_mgt 16 | box: centos/7 17 | memory: 2048 18 | interfaces: 19 | - auto_config: true 20 | network_name: private_network 21 | type: dhcp 22 | - name: redis-2 23 | groups: 24 | - redis_nodes 25 | box: centos/7 26 | memory: 2048 27 | interfaces: 28 | - auto_config: true 29 | network_name: private_network 30 | type: dhcp 31 | - name: redis-3 32 | groups: 33 | - redis_nodes 34 | box: centos/7 35 | memory: 2048 36 | 
interfaces: 37 | - auto_config: true 38 | network_name: private_network 39 | type: dhcp 40 | provisioner: 41 | name: ansible 42 | scenario: 43 | name: stop-service 44 | verifier: 45 | name: testinfra 46 | enabled: false 47 | options: 48 | v: 1 49 | lint: 50 | name: flake8 51 | -------------------------------------------------------------------------------- /molecule/default/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: vagrant 6 | provider: 7 | name: virtualbox 8 | lint: 9 | name: yamllint 10 | enabled: false 11 | platforms: 12 | - name: redis-1 13 | groups: 14 | - redis_nodes 15 | - redis_mgt 16 | box: centos/7 17 | memory: 2048 18 | interfaces: 19 | - auto_config: true 20 | network_name: private_network 21 | type: dhcp 22 | - name: redis-2 23 | groups: 24 | - redis_nodes 25 | box: centos/7 26 | memory: 2048 27 | interfaces: 28 | - auto_config: true 29 | network_name: private_network 30 | type: dhcp 31 | - name: redis-3 32 | groups: 33 | - redis_nodes 34 | box: centos/7 35 | memory: 2048 36 | interfaces: 37 | - auto_config: true 38 | network_name: private_network 39 | type: dhcp 40 | provisioner: 41 | name: ansible 42 | lint: 43 | name: ansible-lint 44 | scenario: 45 | name: default 46 | verifier: 47 | name: testinfra 48 | options: 49 | v: 1 50 | lint: 51 | name: flake8 52 | -------------------------------------------------------------------------------- /molecule/stop-service/INSTALL.rst: -------------------------------------------------------------------------------- 1 | ******* 2 | Install 3 | ******* 4 | 5 | This set of playbooks have specific dependencies on Ansible due to the modules 6 | being used. 7 | 8 | Requirements 9 | ============ 10 | 11 | * Ansible 2.2 12 | * Docker Engine 13 | * docker-py 14 | 15 | Install OS dependencies on CentOS 7 16 | 17 | .. 
code-block:: bash 18 | 19 | $ sudo yum install -y epel-release 20 | $ sudo yum install -y gcc python-pip python-devel openssl-devel 21 | # If installing Molecule from source. 22 | $ sudo yum install libffi-devel git 23 | 24 | Install OS dependencies on Ubuntu 16.x 25 | 26 | .. code-block:: bash 27 | 28 | $ sudo apt-get update 29 | $ sudo apt-get install -y python-pip libssl-dev docker-engine 30 | # If installing Molecule from source. 31 | $ sudo apt-get install -y libffi-dev git 32 | 33 | Install OS dependencies on Mac OS 34 | 35 | .. code-block:: bash 36 | 37 | $ brew install python 38 | $ brew install git 39 | 40 | Install using pip: 41 | 42 | .. code-block:: bash 43 | 44 | $ sudo pip install ansible 45 | $ sudo pip install docker-py 46 | $ sudo pip install molecule --pre 47 | -------------------------------------------------------------------------------- /molecule/single-node/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: true 4 | 5 | roles: 6 | - { role: redis/core, redis_data_dir: "/opt/data/redis", redis_log_dir: "/opt/log/redis", redis_run_dir: "/opt/run/redis" } 7 | - { role: redis/node, redis_port: 7000, redis_data_dir: "/opt/data/redis", redis_log_dir: "/opt/log/redis", redis_run_dir: "/opt/run/redis" } 8 | - { role: redis/node, redis_port: 7001, redis_data_dir: "/opt/data/redis", redis_log_dir: "/opt/log/redis", redis_run_dir: "/opt/run/redis" } 9 | - { role: redis/node, redis_port: 7002, redis_data_dir: "/opt/data/redis", redis_log_dir: "/opt/log/redis", redis_run_dir: "/opt/run/redis" } 10 | - { role: redis/cluster, redis_cluster_replicas: 0, redis_node_list: "{{ groups['all'] | map('extract', hostvars, ['ansible_eth0', 'ipv4', 'address']) | arraypermute( [':'] ) | arraypermute( [7000,7001,7002] ) }}" } 11 | 12 | # NOTE: directory redirect fails - need to fix 13 | # redis_data_dir: "/opt/data/redis", redis_log_dir: "/opt/log/redis", redis_run_dir: "/opt/run/redis" } 
14 | # redis_data_dir: "/var/lib/redis", redis_log_dir: "/var/log/redis", redis_run_dir: "/var/run/redis" } 15 | -------------------------------------------------------------------------------- /molecule/default/INSTALL.rst: -------------------------------------------------------------------------------- 1 | ******* 2 | Install 3 | ******* 4 | 5 | This set of playbooks have specific dependencies on Ansible due to the modules 6 | being used. 7 | 8 | Requirements 9 | ============ 10 | 11 | * Ansible 2.2 12 | * Vagrant 13 | * Virtualbox, Parallels, VMware Fusion, VMware Workstation or VMware Desktop 14 | * python-vagrant 15 | 16 | Install OS dependencies on CentOS 7 17 | 18 | .. code-block:: bash 19 | 20 | $ sudo yum install -y epel-release 21 | $ sudo yum install -y gcc python-pip python-devel openssl-devel 22 | # If installing Molecule from source. 23 | $ sudo yum install libffi-devel git 24 | 25 | Install OS dependencies on Ubuntu 16.x 26 | 27 | .. code-block:: bash 28 | 29 | $ sudo apt-get update 30 | $ sudo apt-get install -y python-pip libssl-dev vagrant virtualbox 31 | # If installing Molecule from source. 32 | $ sudo apt-get install -y libffi-dev git 33 | 34 | Install OS dependencies on Mac OS 35 | 36 | .. code-block:: bash 37 | 38 | $ brew install python 39 | $ brew install git 40 | 41 | Install using pip: 42 | 43 | .. code-block:: bash 44 | 45 | $ sudo pip install ansible 46 | $ sudo pip install python-vagrant 47 | $ sudo pip install molecule --pre 48 | -------------------------------------------------------------------------------- /molecule/multi-node/INSTALL.rst: -------------------------------------------------------------------------------- 1 | ******* 2 | Install 3 | ******* 4 | 5 | This set of playbooks have specific dependencies on Ansible due to the modules 6 | being used. 
7 | 8 | Requirements 9 | ============ 10 | 11 | * Ansible 2.2 12 | * Vagrant 13 | * Virtualbox, Parallels, VMware Fusion, VMware Workstation or VMware Desktop 14 | * python-vagrant 15 | 16 | Install OS dependencies on CentOS 7 17 | 18 | .. code-block:: bash 19 | 20 | $ sudo yum install -y epel-release 21 | $ sudo yum install -y gcc python-pip python-devel openssl-devel 22 | # If installing Molecule from source. 23 | $ sudo yum install libffi-devel git 24 | 25 | Install OS dependencies on Ubuntu 16.x 26 | 27 | .. code-block:: bash 28 | 29 | $ sudo apt-get update 30 | $ sudo apt-get install -y python-pip libssl-dev vagrant virtualbox 31 | # If installing Molecule from source. 32 | $ sudo apt-get install -y libffi-dev git 33 | 34 | Install OS dependencies on Mac OS 35 | 36 | .. code-block:: bash 37 | 38 | $ brew install python 39 | $ brew install git 40 | 41 | Install using pip: 42 | 43 | .. code-block:: bash 44 | 45 | $ sudo pip install ansible 46 | $ sudo pip install python-vagrant 47 | $ sudo pip install molecule --pre 48 | -------------------------------------------------------------------------------- /molecule/single-node/INSTALL.rst: -------------------------------------------------------------------------------- 1 | ******* 2 | Install 3 | ******* 4 | 5 | This set of playbooks have specific dependencies on Ansible due to the modules 6 | being used. 7 | 8 | Requirements 9 | ============ 10 | 11 | * Ansible 2.2 12 | * Vagrant 13 | * Virtualbox, Parallels, VMware Fusion, VMware Workstation or VMware Desktop 14 | * python-vagrant 15 | 16 | Install OS dependencies on CentOS 7 17 | 18 | .. code-block:: bash 19 | 20 | $ sudo yum install -y epel-release 21 | $ sudo yum install -y gcc python-pip python-devel openssl-devel 22 | # If installing Molecule from source. 23 | $ sudo yum install libffi-devel git 24 | 25 | Install OS dependencies on Ubuntu 16.x 26 | 27 | .. 
code-block:: bash 28 | 29 | $ sudo apt-get update 30 | $ sudo apt-get install -y python-pip libssl-dev vagrant virtualbox 31 | # If installing Molecule from source. 32 | $ sudo apt-get install -y libffi-dev git 33 | 34 | Install OS dependencies on Mac OS 35 | 36 | .. code-block:: bash 37 | 38 | $ brew install python 39 | $ brew install git 40 | 41 | Install using pip: 42 | 43 | .. code-block:: bash 44 | 45 | $ sudo pip install ansible 46 | $ sudo pip install python-vagrant 47 | $ sudo pip install molecule --pre 48 | -------------------------------------------------------------------------------- /node/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for redis 3 | # Prepare the nodes 4 | - name: Create the node data sub-directory 5 | file: 6 | path: "{{ redis_data_dir }}/{{ redis_port }}" 7 | state: directory 8 | owner: redis 9 | group: redis 10 | 11 | - name: Create the cluster node redis.conf 12 | template: 13 | src: "redis.conf.j2" 14 | dest: "{{ redis_conf_dir }}/redis_{{ redis_port }}.conf" 15 | 16 | - name: Create the node redis service file 17 | template: 18 | src: "redis.service.j2" 19 | dest: "/usr/lib/systemd/system/redis_{{ redis_port }}.service" 20 | 21 | - name: Debug ansible stuff 22 | debug: 23 | var: ansible_selinux 24 | 25 | 26 | # Add the ports to SELINUX context redis_t (port + cluster port) 27 | - name: Add ports to SELinux cotext redis_t ( port, cluster port ) 28 | seport: 29 | ports: "{{ redis_port | int }},{{ redis_port | int + 10000 }}" 30 | proto: tcp 31 | setype: redis_port_t 32 | state: present 33 | when: 34 | - ansible_selinux is defined 35 | - ansible_os_family == "RedHat" 36 | - ansible_selinux.status != "disabled" 37 | 38 | - name: start and enable the redis node. 39 | service: 40 | name: redis_{{ redis_port }} 41 | state: started 42 | enabled: yes 43 | 44 | # Test that all nodes are up 45 | # Cluster them. 
46 | -------------------------------------------------------------------------------- /molecule/default/destroy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Destroy 4 | hosts: localhost 5 | connection: local 6 | gather_facts: False 7 | no_log: "{{ not lookup('env', 'MOLECULE_DEBUG') | bool }}" 8 | vars: 9 | molecule_file: "{{ lookup('env', 'MOLECULE_FILE') }}" 10 | molecule_instance_config: "{{ lookup('env',' MOLECULE_INSTANCE_CONFIG') }}" 11 | molecule_yml: "{{ lookup('file', molecule_file) | from_yaml }}" 12 | tasks: 13 | - name: Destroy molecule instance(s) 14 | molecule_vagrant: 15 | instance_name: "{{ item.name }}" 16 | platform_box: "{{ item.box }}" 17 | provider_name: "{{ molecule_yml.driver.provider.name }}" 18 | force_stop: "{{ item.force_stop | default(True) }}" 19 | 20 | state: destroy 21 | register: server 22 | with_items: "{{ molecule_yml.platforms }}" 23 | 24 | # Mandatory configuration for Molecule to function. 25 | 26 | - name: Populate instance config 27 | set_fact: 28 | instance_conf: {} 29 | 30 | - name: Dump instance config 31 | copy: 32 | # NOTE(retr0h): Workaround for Ansible 2.2. 
33 | # https://github.com/ansible/ansible/issues/20885 34 | content: "{{ instance_conf | to_json | from_json | molecule_to_yaml | molecule_header }}" 35 | dest: "{{ molecule_instance_config }}" 36 | when: server.changed | bool 37 | -------------------------------------------------------------------------------- /molecule/multi-node/destroy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Destroy 4 | hosts: localhost 5 | connection: local 6 | gather_facts: False 7 | no_log: "{{ not lookup('env', 'MOLECULE_DEBUG') | bool }}" 8 | vars: 9 | molecule_file: "{{ lookup('env', 'MOLECULE_FILE') }}" 10 | molecule_instance_config: "{{ lookup('env',' MOLECULE_INSTANCE_CONFIG') }}" 11 | molecule_yml: "{{ lookup('file', molecule_file) | from_yaml }}" 12 | tasks: 13 | - name: Destroy molecule instance(s) 14 | molecule_vagrant: 15 | instance_name: "{{ item.name }}" 16 | platform_box: "{{ item.box }}" 17 | provider_name: "{{ molecule_yml.driver.provider.name }}" 18 | force_stop: "{{ item.force_stop | default(True) }}" 19 | 20 | state: destroy 21 | register: server 22 | with_items: "{{ molecule_yml.platforms }}" 23 | 24 | # Mandatory configuration for Molecule to function. 25 | 26 | - name: Populate instance config 27 | set_fact: 28 | instance_conf: {} 29 | 30 | - name: Dump instance config 31 | copy: 32 | # NOTE(retr0h): Workaround for Ansible 2.2. 
33 | # https://github.com/ansible/ansible/issues/20885 34 | content: "{{ instance_conf | to_json | from_json | molecule_to_yaml | molecule_header }}" 35 | dest: "{{ molecule_instance_config }}" 36 | when: server.changed | bool 37 | -------------------------------------------------------------------------------- /molecule/single-node/destroy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Destroy 4 | hosts: localhost 5 | connection: local 6 | gather_facts: False 7 | no_log: "{{ not lookup('env', 'MOLECULE_DEBUG') | bool }}" 8 | vars: 9 | molecule_file: "{{ lookup('env', 'MOLECULE_FILE') }}" 10 | molecule_instance_config: "{{ lookup('env',' MOLECULE_INSTANCE_CONFIG') }}" 11 | molecule_yml: "{{ lookup('file', molecule_file) | from_yaml }}" 12 | tasks: 13 | - name: Destroy molecule instance(s) 14 | molecule_vagrant: 15 | instance_name: "{{ item.name }}" 16 | platform_box: "{{ item.box }}" 17 | provider_name: "{{ molecule_yml.driver.provider.name }}" 18 | force_stop: "{{ item.force_stop | default(True) }}" 19 | 20 | state: destroy 21 | register: server 22 | with_items: "{{ molecule_yml.platforms }}" 23 | 24 | # Mandatory configuration for Molecule to function. 25 | 26 | - name: Populate instance config 27 | set_fact: 28 | instance_conf: {} 29 | 30 | - name: Dump instance config 31 | copy: 32 | # NOTE(retr0h): Workaround for Ansible 2.2. 
33 | # https://github.com/ansible/ansible/issues/20885 34 | content: "{{ instance_conf | to_json | from_json | molecule_to_yaml | molecule_header }}" 35 | dest: "{{ molecule_instance_config }}" 36 | when: server.changed | bool 37 | -------------------------------------------------------------------------------- /molecule/stop-service/destroy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Destroy 4 | hosts: localhost 5 | connection: local 6 | gather_facts: False 7 | no_log: "{{ not lookup('env', 'MOLECULE_DEBUG') | bool }}" 8 | vars: 9 | molecule_file: "{{ lookup('env', 'MOLECULE_FILE') }}" 10 | molecule_instance_config: "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}" 11 | molecule_yml: "{{ lookup('file', molecule_file) | from_yaml }}" 12 | tasks: 13 | - name: Destroy molecule instance(s) 14 | molecule_vagrant: 15 | instance_name: "{{ item.name }}" 16 | platform_box: "{{ item.box }}" 17 | provider_name: "{{ molecule_yml.driver.provider.name }}" 18 | force_stop: "{{ item.force_stop | default(True) }}" 19 | 20 | state: destroy 21 | register: server 22 | with_items: "{{ molecule_yml.platforms }}" 23 | 24 | # Mandatory configuration for Molecule to function. 25 | 26 | - name: Populate instance config 27 | set_fact: 28 | instance_conf: {} 29 | 30 | - name: Dump instance config 31 | copy: 32 | # NOTE(retr0h): Workaround for Ansible 2.2. 33 | # https://github.com/ansible/ansible/issues/20885 34 | content: "{{ instance_conf | to_json | from_json | molecule_to_yaml | molecule_header }}" 35 | dest: "{{ molecule_instance_config }}" 36 | when: server.changed | bool 37 | -------------------------------------------------------------------------------- /core/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # The core package will install everything required on a server to setup one 4 | # or multiple redis nodes.
5 | # 6 | # tasks file for redis 7 | - name: Install redis packages 8 | package: 9 | name: "{{ item }}" 10 | state: latest 11 | with_items: 12 | "{{ redis_packages }}" 13 | 14 | # Required for selinux changes 15 | # Add the ports to SELINUX context redis_t (port + cluster port) 16 | - name: Install selinux policy support packages 17 | package: 18 | name: policycoreutils-python 19 | when: ansible_os_family == "RedHat" 20 | 21 | - name: Create data dir 22 | file: 23 | path: "{{ redis_data_dir }}" 24 | state: directory 25 | owner: redis 26 | group: redis 27 | mode: 0755 28 | 29 | - name: Create conf dir 30 | file: 31 | path: "{{ redis_conf_dir }}" 32 | state: directory 33 | owner: redis 34 | group: redis 35 | mode: 0755 36 | 37 | - name: Create logs dir 38 | file: 39 | path: "{{ redis_log_dir }}" 40 | state: directory 41 | owner: redis 42 | group: redis 43 | mode: 0755 44 | 45 | - name: Create run directory 46 | file: 47 | path: "{{ redis_run_dir }}" 48 | state: directory 49 | owner: redis 50 | group: redis 51 | mode: 0755 52 | 53 | #Workaround the config dir to use is hardcoded in the script 54 | - name: Fix conf dir in redis-shutdown 55 | replace: 56 | dest: /usr/libexec/redis-shutdown 57 | regexp: '^CONFIG_FILE=.*$' 58 | replace: 'CONFIG_FILE="{{ redis_conf_dir }}/$SERVICE_NAME.conf"' 59 | -------------------------------------------------------------------------------- /molecule/default/tests/test_default.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import testinfra.utils.ansible_runner 4 | import pytest 5 | 6 | # define the array of ports - base the configs on. 
7 | 8 | ports = [ 9 | ("6379") 10 | ] 11 | 12 | testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( 13 | os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') 14 | 15 | 16 | # Verify that package was installed 17 | def test_redis_installed(host): 18 | redis = host.package("redis") 19 | 20 | assert redis.is_installed 21 | 22 | 23 | # Verify the datapath 24 | def test_redis_data_path(host): 25 | d = host.file("/var/lib/redis") 26 | 27 | assert d.is_directory 28 | 29 | 30 | @pytest.mark.parametrize("port", ports) 31 | def test_redis_data_subdirs(host, port): 32 | 33 | f = host.file("/var/lib/redis/" + port) 34 | 35 | assert f.exists 36 | assert f.is_directory 37 | 38 | 39 | # Verify that the configs are correct 40 | @pytest.mark.parametrize("port", ports) 41 | def test_redis_conf_files(host, port): 42 | 43 | f = host.file("/etc/redis_" + port + ".conf") 44 | 45 | assert f.exists 46 | assert f.is_file 47 | # The following does not work as expected. Need to find out why 48 | assert f.contains("port " + port) 49 | 50 | 51 | @pytest.mark.parametrize("port", ports) 52 | def test_redis_systemd_conf(host, port): 53 | f = host.file("/usr/lib/systemd/system/redis_" + port + ".service") 54 | 55 | assert f.exists 56 | assert f.is_file 57 | 58 | 59 | @pytest.mark.parametrize("port", ports) 60 | def test_redis_service_ports(host, port): 61 | p = host.socket("tcp://127.0.0.1:" + port) 62 | 63 | assert p.is_listening 64 | 65 | 66 | @pytest.mark.parametrize("port", ports) 67 | def test_redis_service(host, port): 68 | p = host.service("redis_" + port) 69 | 70 | assert p.is_running 71 | assert p.is_enabled 72 | -------------------------------------------------------------------------------- /molecule/stop-service/tests/test_default.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import testinfra.utils.ansible_runner 4 | import pytest 5 | 6 | # define the array of ports - base the configs on. 
7 | 8 | ports = [ 9 | ("6379") 10 | ] 11 | 12 | testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( 13 | os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') 14 | 15 | 16 | # Verify that package was installed 17 | def test_redis_installed(host): 18 | redis = host.package("redis") 19 | 20 | assert redis.is_installed 21 | 22 | 23 | # Verify the datapath 24 | def test_redis_data_path(host): 25 | d = host.file("/var/lib/redis") 26 | 27 | assert d.is_directory 28 | 29 | 30 | @pytest.mark.parametrize("port", ports) 31 | def test_redis_data_subdirs(host, port): 32 | 33 | f = host.file("/var/lib/redis/" + port) 34 | 35 | assert f.exists 36 | assert f.is_directory 37 | 38 | 39 | # Verify that the configs are correct 40 | @pytest.mark.parametrize("port", ports) 41 | def test_redis_conf_files(host, port): 42 | 43 | f = host.file("/etc/redis_" + port + ".conf") 44 | 45 | assert f.exists 46 | assert f.is_file 47 | # The following does not work as expected. Need to find out why 48 | assert f.contains("port " + port) 49 | 50 | 51 | @pytest.mark.parametrize("port", ports) 52 | def test_redis_systemd_conf(host, port): 53 | f = host.file("/usr/lib/systemd/system/redis_" + port + ".service") 54 | 55 | assert f.exists 56 | assert f.is_file 57 | 58 | 59 | @pytest.mark.parametrize("port", ports) 60 | def test_redis_service_ports(host, port): 61 | p = host.socket("tcp://127.0.0.1:" + port) 62 | 63 | assert p.is_listening 64 | 65 | 66 | @pytest.mark.parametrize("port", ports) 67 | def test_redis_service(host, port): 68 | p = host.service("redis_" + port) 69 | 70 | assert p.is_running 71 | assert p.is_enabled 72 | -------------------------------------------------------------------------------- /molecule/multi-node/tests/test_default.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import testinfra.utils.ansible_runner 4 | import pytest 5 | 6 | # define the array of ports - base the configs on. 
7 | 8 | ports = [ 9 | ("7000"), 10 | ("7001"), 11 | ] 12 | 13 | testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( 14 | os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') 15 | 16 | 17 | # Verify that package was installed 18 | def test_redis_installed(host): 19 | redis = host.package("redis") 20 | 21 | assert redis.is_installed 22 | 23 | 24 | # Verify the datapath 25 | def test_redis_data_path(host): 26 | d = host.file("/var/lib/redis") 27 | 28 | assert d.is_directory 29 | 30 | 31 | @pytest.mark.parametrize("port", ports) 32 | def test_redis_data_subdirs(host, port): 33 | 34 | f = host.file("/var/lib/redis/" + port) 35 | 36 | assert f.exists 37 | assert f.is_directory 38 | 39 | 40 | # Verify that the configs are correct 41 | @pytest.mark.parametrize("port", ports) 42 | def test_redis_conf_files(host, port): 43 | 44 | f = host.file("/etc/redis_" + port + ".conf") 45 | 46 | assert f.exists 47 | assert f.is_file 48 | # The following does not work as expected. Need to find out why 49 | assert f.contains("port " + port) 50 | 51 | 52 | @pytest.mark.parametrize("port", ports) 53 | def test_redis_systemd_conf(host, port): 54 | f = host.file("/usr/lib/systemd/system/redis_" + port + ".service") 55 | 56 | assert f.exists 57 | assert f.is_file 58 | 59 | 60 | @pytest.mark.parametrize("port", ports) 61 | def test_redis_service_ports(host, port): 62 | p = host.socket("tcp://127.0.0.1:" + port) 63 | 64 | assert p.is_listening 65 | 66 | 67 | @pytest.mark.parametrize("port", ports) 68 | def test_redis_service(host, port): 69 | p = host.service("redis_" + port) 70 | 71 | assert p.is_running 72 | assert p.is_enabled 73 | -------------------------------------------------------------------------------- /molecule/single-node/tests/test_default.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import testinfra.utils.ansible_runner 4 | import pytest 5 | 6 | # define the array of ports - base the configs on. 
7 | 8 | ports = [ 9 | ("7000"), 10 | ("7001"), 11 | ("7002"), 12 | ] 13 | 14 | redis_data_root = "/opt/data/redis/" 15 | redis_log_root = "/opt/log/redis/" 16 | redis_run_root = "/opt/run/redis/" 17 | 18 | testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( 19 | os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') 20 | 21 | 22 | # Verify that package was installed 23 | def test_redis_installed(host): 24 | redis = host.package("redis") 25 | 26 | assert redis.is_installed 27 | 28 | 29 | # Verify the datapath 30 | def test_redis_data_path(host): 31 | d = host.file(redis_data_root) 32 | 33 | assert d.is_directory 34 | 35 | 36 | @pytest.mark.parametrize("port", ports) 37 | def test_redis_data_subdirs(host, port): 38 | 39 | f = host.file(redis_data_root + port) 40 | 41 | assert f.exists 42 | assert f.is_directory 43 | 44 | 45 | # Verify that the configs are correct 46 | @pytest.mark.parametrize("port", ports) 47 | def test_redis_conf_files(host, port): 48 | # We are testing for this, but the shutdown script hardcodes /etc location. 49 | 50 | f = host.file("/etc/redis_" + port + ".conf") 51 | 52 | assert f.exists 53 | assert f.is_file 54 | # The following does not work as expected. 
Need to find out why 55 | assert f.contains("port " + port) 56 | 57 | 58 | @pytest.mark.parametrize("port", ports) 59 | def test_redis_systemd_conf(host, port): 60 | f = host.file("/usr/lib/systemd/system/redis_" + port + ".service") 61 | 62 | assert f.exists 63 | assert f.is_file 64 | 65 | 66 | @pytest.mark.parametrize("port", ports) 67 | def test_redis_service_ports(host, port): 68 | p = host.socket("tcp://127.0.0.1:" + port) 69 | 70 | assert p.is_listening 71 | 72 | 73 | @pytest.mark.parametrize("port", ports) 74 | def test_redis_service(host, port): 75 | p = host.service("redis_" + port) 76 | 77 | assert p.is_running 78 | assert p.is_enabled 79 | -------------------------------------------------------------------------------- /molecule/default/create.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create 3 | hosts: localhost 4 | connection: local 5 | gather_facts: False 6 | no_log: "{{ not lookup('env', 'MOLECULE_DEBUG') | bool }}" 7 | vars: 8 | molecule_file: "{{ lookup('env', 'MOLECULE_FILE') }}" 9 | molecule_instance_config: "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}" 10 | molecule_yml: "{{ lookup('file', molecule_file) | from_yaml }}" 11 | tasks: 12 | - name: Create molecule instance(s) 13 | molecule_vagrant: 14 | instance_name: "{{ item.name }}" 15 | instance_interfaces: "{{ item.interfaces | default(omit) }}" 16 | instance_raw_config_args: "{{ item.instance_raw_config_args | default(omit) }}" 17 | 18 | platform_box: "{{ item.box }}" 19 | platform_box_version: "{{ item.box_version | default(omit) }}" 20 | platform_box_url: "{{ item.box_url | default(omit) }}" 21 | 22 | provider_name: "{{ molecule_yml.driver.provider.name }}" 23 | provider_memory: "{{ item.memory | default(omit) }}" 24 | provider_cpus: "{{ item.cpus | default(omit) }}" 25 | provider_raw_config_args: "{{ item.raw_config_args | default(omit) }}" 26 | 27 | state: up 28 | register: server 29 | with_items: "{{ molecule_yml.platforms 
}}" 30 | 31 | # Mandatory configuration for Molecule to function. 32 | 33 | - name: Populate instance config dict 34 | set_fact: 35 | instance_conf_dict: { 36 | 'instance': "{{ item.Host }}", 37 | 'address': "{{ item.HostName }}", 38 | 'user': "{{ item.User }}", 39 | 'port': "{{ item.Port }}", 40 | 'identity_file': "{{ item.IdentityFile }}", } 41 | with_items: "{{ server.results }}" 42 | register: instance_config_dict 43 | when: server.changed | bool 44 | 45 | - name: Convert instance config dict to a list 46 | set_fact: 47 | instance_conf: "{{ instance_config_dict.results | map(attribute='ansible_facts.instance_conf_dict') | list }}" 48 | when: server.changed | bool 49 | 50 | - name: Dump instance config 51 | copy: 52 | # NOTE(retr0h): Workaround for Ansible 2.2. 53 | # https://github.com/ansible/ansible/issues/20885 54 | content: "{{ instance_conf | to_json | from_json | molecule_to_yaml | molecule_header }}" 55 | dest: "{{ molecule_instance_config }}" 56 | when: server.changed | bool 57 | -------------------------------------------------------------------------------- /molecule/multi-node/create.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create 3 | hosts: localhost 4 | connection: local 5 | gather_facts: False 6 | no_log: "{{ not lookup('env', 'MOLECULE_DEBUG') | bool }}" 7 | vars: 8 | molecule_file: "{{ lookup('env', 'MOLECULE_FILE') }}" 9 | molecule_instance_config: "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}" 10 | molecule_yml: "{{ lookup('file', molecule_file) | from_yaml }}" 11 | tasks: 12 | - name: Create molecule instance(s) 13 | molecule_vagrant: 14 | instance_name: "{{ item.name }}" 15 | instance_interfaces: "{{ item.interfaces | default(omit) }}" 16 | instance_raw_config_args: "{{ item.instance_raw_config_args | default(omit) }}" 17 | 18 | platform_box: "{{ item.box }}" 19 | platform_box_version: "{{ item.box_version | default(omit) }}" 20 | platform_box_url: "{{ item.box_url | 
default(omit) }}" 21 | 22 | provider_name: "{{ molecule_yml.driver.provider.name }}" 23 | provider_memory: "{{ item.memory | default(omit) }}" 24 | provider_cpus: "{{ item.cpus | default(omit) }}" 25 | provider_raw_config_args: "{{ item.raw_config_args | default(omit) }}" 26 | 27 | state: up 28 | register: server 29 | with_items: "{{ molecule_yml.platforms }}" 30 | 31 | # Mandatory configuration for Molecule to function. 32 | 33 | - name: Populate instance config dict 34 | set_fact: 35 | instance_conf_dict: { 36 | 'instance': "{{ item.Host }}", 37 | 'address': "{{ item.HostName }}", 38 | 'user': "{{ item.User }}", 39 | 'port': "{{ item.Port }}", 40 | 'identity_file': "{{ item.IdentityFile }}", } 41 | with_items: "{{ server.results }}" 42 | register: instance_config_dict 43 | when: server.changed | bool 44 | 45 | - name: Convert instance config dict to a list 46 | set_fact: 47 | instance_conf: "{{ instance_config_dict.results | map(attribute='ansible_facts.instance_conf_dict') | list }}" 48 | when: server.changed | bool 49 | 50 | - name: Dump instance config 51 | copy: 52 | # NOTE(retr0h): Workaround for Ansible 2.2. 
53 | # https://github.com/ansible/ansible/issues/20885 54 | content: "{{ instance_conf | to_json | from_json | molecule_to_yaml | molecule_header }}" 55 | dest: "{{ molecule_instance_config }}" 56 | when: server.changed | bool 57 | -------------------------------------------------------------------------------- /molecule/single-node/create.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create 3 | hosts: localhost 4 | connection: local 5 | gather_facts: False 6 | no_log: "{{ not lookup('env', 'MOLECULE_DEBUG') | bool }}" 7 | vars: 8 | molecule_file: "{{ lookup('env', 'MOLECULE_FILE') }}" 9 | molecule_instance_config: "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}" 10 | molecule_yml: "{{ lookup('file', molecule_file) | from_yaml }}" 11 | tasks: 12 | - name: Create molecule instance(s) 13 | molecule_vagrant: 14 | instance_name: "{{ item.name }}" 15 | instance_interfaces: "{{ item.interfaces | default(omit) }}" 16 | instance_raw_config_args: "{{ item.instance_raw_config_args | default(omit) }}" 17 | 18 | platform_box: "{{ item.box }}" 19 | platform_box_version: "{{ item.box_version | default(omit) }}" 20 | platform_box_url: "{{ item.box_url | default(omit) }}" 21 | 22 | provider_name: "{{ molecule_yml.driver.provider.name }}" 23 | provider_memory: "{{ item.memory | default(omit) }}" 24 | provider_cpus: "{{ item.cpus | default(omit) }}" 25 | provider_raw_config_args: "{{ item.raw_config_args | default(omit) }}" 26 | 27 | state: up 28 | register: server 29 | with_items: "{{ molecule_yml.platforms }}" 30 | 31 | # Mandatory configuration for Molecule to function. 
32 | 33 | - name: Populate instance config dict 34 | set_fact: 35 | instance_conf_dict: { 36 | 'instance': "{{ item.Host }}", 37 | 'address': "{{ item.HostName }}", 38 | 'user': "{{ item.User }}", 39 | 'port': "{{ item.Port }}", 40 | 'identity_file': "{{ item.IdentityFile }}", } 41 | with_items: "{{ server.results }}" 42 | register: instance_config_dict 43 | when: server.changed | bool 44 | 45 | - name: Convert instance config dict to a list 46 | set_fact: 47 | instance_conf: "{{ instance_config_dict.results | map(attribute='ansible_facts.instance_conf_dict') | list }}" 48 | when: server.changed | bool 49 | 50 | - name: Dump instance config 51 | copy: 52 | # NOTE(retr0h): Workaround for Ansible 2.2. 53 | # https://github.com/ansible/ansible/issues/20885 54 | content: "{{ instance_conf | to_json | from_json | molecule_to_yaml | molecule_header }}" 55 | dest: "{{ molecule_instance_config }}" 56 | when: server.changed | bool 57 | -------------------------------------------------------------------------------- /molecule/stop-service/create.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create 3 | hosts: localhost 4 | connection: local 5 | gather_facts: False 6 | no_log: "{{ not lookup('env', 'MOLECULE_DEBUG') | bool }}" 7 | vars: 8 | molecule_file: "{{ lookup('env', 'MOLECULE_FILE') }}" 9 | molecule_instance_config: "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}" 10 | molecule_yml: "{{ lookup('file', molecule_file) | from_yaml }}" 11 | tasks: 12 | - name: Create molecule instance(s) 13 | molecule_vagrant: 14 | instance_name: "{{ item.name }}" 15 | instance_interfaces: "{{ item.interfaces | default(omit) }}" 16 | instance_raw_config_args: "{{ item.instance_raw_config_args | default(omit) }}" 17 | 18 | platform_box: "{{ item.box }}" 19 | platform_box_version: "{{ item.box_version | default(omit) }}" 20 | platform_box_url: "{{ item.box_url | default(omit) }}" 21 | 22 | provider_name: "{{ 
molecule_yml.driver.provider.name }}" 23 | provider_memory: "{{ item.memory | default(omit) }}" 24 | provider_cpus: "{{ item.cpus | default(omit) }}" 25 | provider_raw_config_args: "{{ item.raw_config_args | default(omit) }}" 26 | 27 | state: up 28 | register: server 29 | with_items: "{{ molecule_yml.platforms }}" 30 | 31 | # Mandatory configuration for Molecule to function. 32 | 33 | - name: Populate instance config dict 34 | set_fact: 35 | instance_conf_dict: { 36 | 'instance': "{{ item.Host }}", 37 | 'address': "{{ item.HostName }}", 38 | 'user': "{{ item.User }}", 39 | 'port': "{{ item.Port }}", 40 | 'identity_file': "{{ item.IdentityFile }}", } 41 | with_items: "{{ server.results }}" 42 | register: instance_config_dict 43 | when: server.changed | bool 44 | 45 | - name: Convert instance config dict to a list 46 | set_fact: 47 | instance_conf: "{{ instance_config_dict.results | map(attribute='ansible_facts.instance_conf_dict') | list }}" 48 | when: server.changed | bool 49 | 50 | - name: Dump instance config 51 | copy: 52 | # NOTE(retr0h): Workaround for Ansible 2.2. 53 | # https://github.com/ansible/ansible/issues/20885 54 | content: "{{ instance_conf | to_json | from_json | molecule_to_yaml | molecule_header }}" 55 | dest: "{{ molecule_instance_config }}" 56 | when: server.changed | bool 57 | -------------------------------------------------------------------------------- /DEVELEOPMENT.md: -------------------------------------------------------------------------------- 1 | Redis role development 2 | ====================== 3 | 4 | These are quick notes for folowup development. 5 | 6 | Development Environment 7 | ----------------------- 8 | 9 | This role was developed using the molecule test framework that allows automation 10 | of infrastructure testing. See the various ```INSTALL.rst``` files for additional 11 | information. 
12 | 13 | Scenarios 14 | --------- 15 | 16 | The current scenario validate proper deployment for the following: 17 | 18 | - Defaults - where nothing is custom. Currently the role ONLY deploys in cluster 19 | mode, so 3 nodes are required. 20 | - Single-node - where we deploy 3 nodes on a single host. 21 | - multi-node : Where a 3 master + 3 Slave infrastructure is deployed accross 22 | hosts. This is closer to a typical deployment. 23 | 24 | Other notes: 25 | ------------ 26 | 27 | In testing, the server names are not defiend, so we had to derive them from the 28 | IP's. To get the node endpoints we had to combine the IP's with the node ports. 29 | We created a custom jinja2 filter for this : arraypermute(array) 30 | 31 | Usage: 32 | 33 | {{ array1 | arraypermute( array2 ) | list }} 34 | 35 | If Array1 = [ a,b,c,d ] 36 | and Array2 = [ 1,2,3 ] 37 | 38 | the resulting array will be every combination, namely : 39 | 40 | [ a1, a2, a3, b1, b2, b3, c1, c2, c3, d1, d2, d3 ] 41 | 42 | Notes on usage: 43 | --------------- 44 | A sane way to use this role, is to label hosts [ redis-nodes ] and create a 45 | [ redis-ports ] array. The ansible roles should be run on all redis-nodes, and 46 | redis/node should loop over the ports array, redefining the ```redis_ports``` 47 | variable. NOTE: there is currently no way to loop over a role. 48 | 49 | ``` 50 | - hosts: redis-nodes 51 | role: 52 | - redis/core 53 | - { role: redis/node, redis_ports: redis_ports[0] } 54 | - { role: redis/node, redis_ports: redis_ports[1] } 55 | - { role: redis/cluster, redis_cluster_replicas: 1, redis_node_list: {{ group['redis-nodes'] | arraypermute( [:] ) | arraypermute( redis_ports ) | list }}} 56 | ``` 57 | 58 | 59 | TODO (quick thoughts) 60 | ------ 61 | - More error checking 62 | - Test on ubunutu ( if we push to galaxy ) 63 | - Increase management tasks - eg: add a layer of slaves, add a master etc.) 
64 | - Review ansible redis module (may have done work for nothing) 65 | - Create a "redis/config" role for more hands on redis.conf values. 66 | - add security - how to propagate config to clients ? 67 | - add logrotate - should be easy - base is already there. 68 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Redis 2 | ========= 3 | 4 | The role is to deploy a redis cluster across multiple machines. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Prior to running the roles, the following must be in place : 10 | 11 | - Access to an RPM with the EPEL redis RPM (In testing we add the EPEL repo) 12 | - A version of Ruby installed (In testing we use the Centos packages) 13 | 14 | Optionaly: 15 | - An Ansible group where the nodes are to be installed. 16 | - An Ansible group where the management node can be installed 17 | 18 | Role Usage 19 | ---------- 20 | 21 | The role is currently broken into 3 parts: 22 | - redis/core : to install the required components for redis 23 | - redis/node : which will configure everything for a node, and get it running 24 | - redis/cluster : Use to create the cluster from the individual nodes 25 | 26 | There is also the ```redis``` role which only contains the common default, of which 27 | all other roles are dependant (see meta/main.yml) 28 | 29 | Example 1: A single host with 3 master-nodes, no replication. 
30 | 31 | ``` 32 | - hosts: all 33 | become: true 34 | roles: 35 | - redis/core 36 | - { role: redis/node, redis_port: 7000 } 37 | - { role: redis/node, redis_port: 7001 } 38 | - { role: redis/node, redis_port: 7002 } 39 | - { role: redis/cluster, redis_cluster_replicas: 0, redis_node_list: "{{ groups['all'] | map('extract', hostvars, ['ansible_eth0', 'ipv4', 'address']) | arraypermute( [':'] ) | arraypermute( [7000,7001,7002] ) }}" } 40 | ``` 41 | 42 | Example 2: 3 host cluster (defined in redis-nodes), with 3 master nodes with 1 replicas (6 nodes total). Notice that we define a node as management to install the management application. 43 | 44 | ``` 45 | - hosts: redis-nodes 46 | become: true 47 | roles: 48 | - redis/core 49 | - { role: redis/node, redis_port: 7000 } 50 | - { role: redis/node, redis_port: 7001 } 51 | 52 | - hosts: redis-mgt 53 | become: true 54 | roles: 55 | - { role: redis/cluster, redis_cluster_replicas: 1, redis_node_list: "{{ groups['redis-nodes'] | map('extract', hostvars, ['ansible_eth1', 'ipv4', 'address']) | arraypermute( [':'] ) | arraypermute( [7000,7001] ) | list }}" } 56 | 57 | ``` 58 | 59 | Role Variables 60 | -------------- 61 | This role has been developed with the goal to simplify the deployment of a redis cluster 62 | across multiple nodes. 63 | 64 | ## Redis/Core 65 | 66 | The following user variable can be redefined for the redis/core. 67 | 68 | | Variable | Description | Default | 69 | |----------|-------------|---------| 70 | | redis_data_dir | The root of the data directory | /var/lib/redis | 71 | | redis_log_dir | The root direcotry for redis logs | /var/log/redis | 72 | | redis_run_dir | The root direcotry for redis runtime information | /var/run/redis | 73 | | redis_conf_dir | The root directry for redis node configuraitons | /etc/ | 74 | 75 | NOTE: The role currently only supports Centos / rpm based installs. 
The role will install the redis package - which creates some default configs and 76 | directories - but will be ignored 77 | 78 | The following variables are also defined, but not usualy redefined in this role.: 79 | 80 | | Variable | Description | Default | 81 | |----------|-------------|---------| 82 | | redis_packages | A list of packages required to install redis | [ redis ] | 83 | | redis_port | The default port that redis will listen to. | 6379 | 84 | 85 | 86 | NOTE: redis_port can be overridden in the redis/node deployment. 87 | ## Redis/Node 88 | 89 | The redis node role will configure and start a redis node on a specific port. Tasks 90 | performed by the redis/node role include: 91 | 92 | - Creating the data sub-direcotries ( under redis_data_dir as redis_data_dir/) 93 | - Configure a custom redis.conf file ( under redis_conf_dir as redis_conf_dir/redis_.conf ) 94 | - Configure a custom systemd service definition ( In system standard location as redis_.service ) 95 | - Add the listening ports to the SElinux security contexts ( for RPM based systems ) 96 | - Start the configured redis node. 97 | 98 | The following variables can be redefined by users of this role: 99 | 100 | | Variable | Description | Default | 101 | |----------|-------------|---------| 102 | | redis_port | The port that redis will listen to. | 6379 | 103 | 104 | This port number is used to configure all above items. This is a sane default 105 | as the host cannot have more than one redis instance listening to a single port. 106 | The default redis shutdown script uses this information to derive the proper config. 107 | 108 | ## Redis/cluster 109 | 110 | Redis cluster will take a series of cluster nodes, and configure them in a single 111 | cluster based on configurable parameters. This has been done to simplefy the 112 | deployment of the cluster. 
113 | 114 | The follwing are commonly overridden when creating the cluster: 115 | 116 | | Variables | Description | Default | 117 | |-----------|-------------|---------| 118 | | redis_cluster_replicas | The number of replicas each master requires | 0 | 119 | | redis_node_list | a list of 'IP:PORT' endpoint combinations | see defaults | 120 | 121 | 122 | Other variables used that should not be overridden: 123 | 124 | | Variables | Description | Default | 125 | |-----------|-------------|---------| 126 | | redis_conf_dir | to create a "cluster defined" flag for idempotency | see redis/core | 127 | 128 | Other notes: 129 | - In the redis.io ```redis-trib``` ruby application, we added a ```--yes``` flag to accept the 130 | proposed configuration without waiting for user input. 131 | - redis-trib does an inteligent distribution of the nodes based on number of 132 | replicas, number of nodes and hosts. One assumption is that different IP's are 133 | different hosts. It will rotate through the servers to increase availability in 134 | case of a host failure. 135 | 136 | Dependencies 137 | ------------ 138 | 139 | No external role or Ansible module dependancies. 140 | 141 | License 142 | ------- 143 | 144 | BSD 145 | 146 | Author Information 147 | ------------------ 148 | 149 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 150 | -------------------------------------------------------------------------------- /config/templates/redis.conf.j2: -------------------------------------------------------------------------------- 1 | # Redis configuration file example. 
2 | # 3 | # Note that in order to read the configuration file, Redis must be 4 | # started with the file path as first argument: 5 | # 6 | # ./redis-server /path/to/redis.conf 7 | 8 | # Note on units: when memory size is needed, it is possible to specify 9 | # it in the usual form of 1k 5GB 4M and so forth: 10 | # 11 | # 1k => 1000 bytes 12 | # 1kb => 1024 bytes 13 | # 1m => 1000000 bytes 14 | # 1mb => 1024*1024 bytes 15 | # 1g => 1000000000 bytes 16 | # 1gb => 1024*1024*1024 bytes 17 | # 18 | # units are case insensitive so 1GB 1Gb 1gB are all the same. 19 | 20 | ################################## INCLUDES ################################### 21 | 22 | # Include one or more other config files here. This is useful if you 23 | # have a standard template that goes to all Redis servers but also need 24 | # to customize a few per-server settings. Include files can include 25 | # other files, so use this wisely. 26 | # 27 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE" 28 | # from admin or Redis Sentinel. Since Redis always uses the last processed 29 | # line as value of a configuration directive, you'd better put includes 30 | # at the beginning of this file to avoid overwriting config change at runtime. 31 | # 32 | # If instead you are interested in using includes to override configuration 33 | # options, it is better to use include as the last line. 34 | # 35 | # include /path/to/local.conf 36 | # include /path/to/other.conf 37 | 38 | ################################## NETWORK ##################################### 39 | 40 | # By default, if no "bind" configuration directive is specified, Redis listens 41 | # for connections from all the network interfaces available on the server. 42 | # It is possible to listen to just one or multiple selected interfaces using 43 | # the "bind" configuration directive, followed by one or more IP addresses. 
44 | # 45 | # Examples: 46 | # 47 | # bind 192.168.1.100 10.0.0.1 48 | # bind 127.0.0.1 ::1 49 | # 50 | # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the 51 | # internet, binding to all the interfaces is dangerous and will expose the 52 | # instance to everybody on the internet. So by default we uncomment the 53 | # following bind directive, that will force Redis to listen only into 54 | # the IPv4 lookback interface address (this means Redis will be able to 55 | # accept connections only from clients running into the same computer it 56 | # is running). 57 | # 58 | # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES 59 | # JUST COMMENT THE FOLLOWING LINE. 60 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | # bind 127.0.0.1 62 | 63 | # Protected mode is a layer of security protection, in order to avoid that 64 | # Redis instances left open on the internet are accessed and exploited. 65 | # 66 | # When protected mode is on and if: 67 | # 68 | # 1) The server is not binding explicitly to a set of addresses using the 69 | # "bind" directive. 70 | # 2) No password is configured. 71 | # 72 | # The server only accepts connections from clients connecting from the 73 | # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain 74 | # sockets. 75 | # 76 | # By default protected mode is enabled. You should disable it only if 77 | # you are sure you want clients from other hosts to connect to Redis 78 | # even if no authentication is configured, nor a specific set of interfaces 79 | # are explicitly listed using the "bind" directive. 80 | protected-mode no 81 | 82 | # Accept connections on the specified port, default is 6379 (IANA #815344). 83 | # If port 0 is specified Redis will not listen on a TCP socket. 84 | port {{ redis_port }} 85 | 86 | # TCP listen() backlog. 
87 | # 88 | # In high requests-per-second environments you need an high backlog in order 89 | # to avoid slow clients connections issues. Note that the Linux kernel 90 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so 91 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog 92 | # in order to get the desired effect. 93 | tcp-backlog 511 94 | 95 | # Unix socket. 96 | # 97 | # Specify the path for the Unix socket that will be used to listen for 98 | # incoming connections. There is no default, so Redis will not listen 99 | # on a unix socket when not specified. 100 | # 101 | # unixsocket /tmp/redis.sock 102 | # unixsocketperm 700 103 | 104 | # Close the connection after a client is idle for N seconds (0 to disable) 105 | timeout 0 106 | 107 | # TCP keepalive. 108 | # 109 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 110 | # of communication. This is useful for two reasons: 111 | # 112 | # 1) Detect dead peers. 113 | # 2) Take the connection alive from the point of view of network 114 | # equipment in the middle. 115 | # 116 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 117 | # Note that to close the connection the double of the time is needed. 118 | # On other kernels the period depends on the kernel configuration. 119 | # 120 | # A reasonable value for this option is 300 seconds, which is the new 121 | # Redis default starting with Redis 3.2.1. 122 | tcp-keepalive 300 123 | 124 | ################################# GENERAL ##################################### 125 | 126 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 127 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 128 | daemonize no 129 | 130 | # If you run Redis from upstart or systemd, Redis can interact with your 131 | # supervision tree. 
Options: 132 | # supervised no - no supervision interaction 133 | # supervised upstart - signal upstart by putting Redis into SIGSTOP mode 134 | # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET 135 | # supervised auto - detect upstart or systemd method based on 136 | # UPSTART_JOB or NOTIFY_SOCKET environment variables 137 | # Note: these supervision methods only signal "process is ready." 138 | # They do not enable continuous liveness pings back to your supervisor. 139 | supervised no 140 | 141 | # If a pid file is specified, Redis writes it where specified at startup 142 | # and removes it at exit. 143 | # 144 | # When the server runs non daemonized, no pid file is created if none is 145 | # specified in the configuration. When the server is daemonized, the pid file 146 | # is used even if not specified, defaulting to "/var/run/redis.pid". 147 | # 148 | # Creating a pid file is best effort: if Redis is not able to create it 149 | # nothing bad happens, the server will start and run normally. 150 | pidfile {{ redis_run_dir }}/node_{{ redis_port }}.pid 151 | 152 | # Specify the server verbosity level. 153 | # This can be one of: 154 | # debug (a lot of information, useful for development/testing) 155 | # verbose (many rarely useful info, but not a mess like the debug level) 156 | # notice (moderately verbose, what you want in production probably) 157 | # warning (only very important / critical messages are logged) 158 | loglevel notice 159 | 160 | # Specify the log file name. Also the empty string can be used to force 161 | # Redis to log on the standard output. Note that if you use standard 162 | # output for logging but daemonize, logs will be sent to /dev/null 163 | logfile {{ redis_log_dir }}/node_{{ redis_port }}.log 164 | 165 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 166 | # and optionally update the other syslog parameters to suit your needs. 
167 | # syslog-enabled no 168 | 169 | # Specify the syslog identity. 170 | # syslog-ident redis 171 | 172 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 173 | # syslog-facility local0 174 | 175 | # Set the number of databases. The default database is DB 0, you can select 176 | # a different one on a per-connection basis using SELECT where 177 | # dbid is a number between 0 and 'databases'-1 178 | databases 16 179 | 180 | ################################ SNAPSHOTTING ################################ 181 | # 182 | # Save the DB on disk: 183 | # 184 | # save 185 | # 186 | # Will save the DB if both the given number of seconds and the given 187 | # number of write operations against the DB occurred. 188 | # 189 | # In the example below the behaviour will be to save: 190 | # after 900 sec (15 min) if at least 1 key changed 191 | # after 300 sec (5 min) if at least 10 keys changed 192 | # after 60 sec if at least 10000 keys changed 193 | # 194 | # Note: you can disable saving completely by commenting out all "save" lines. 195 | # 196 | # It is also possible to remove all the previously configured save 197 | # points by adding a save directive with a single empty string argument 198 | # like in the following example: 199 | # 200 | # save "" 201 | 202 | save 900 1 203 | save 300 10 204 | save 60 10000 205 | 206 | # By default Redis will stop accepting writes if RDB snapshots are enabled 207 | # (at least one save point) and the latest background save failed. 208 | # This will make the user aware (in a hard way) that data is not persisting 209 | # on disk properly, otherwise chances are that no one will notice and some 210 | # disaster will happen. 211 | # 212 | # If the background saving process will start working again Redis will 213 | # automatically allow writes again. 
214 | # 215 | # However if you have setup your proper monitoring of the Redis server 216 | # and persistence, you may want to disable this feature so that Redis will 217 | # continue to work as usual even if there are problems with disk, 218 | # permissions, and so forth. 219 | stop-writes-on-bgsave-error yes 220 | 221 | # Compress string objects using LZF when dump .rdb databases? 222 | # For default that's set to 'yes' as it's almost always a win. 223 | # If you want to save some CPU in the saving child set it to 'no' but 224 | # the dataset will likely be bigger if you have compressible values or keys. 225 | rdbcompression yes 226 | 227 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 228 | # This makes the format more resistant to corruption but there is a performance 229 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it 230 | # for maximum performances. 231 | # 232 | # RDB files created with checksum disabled have a checksum of zero that will 233 | # tell the loading code to skip the check. 234 | rdbchecksum yes 235 | 236 | # The filename where to dump the DB 237 | dbfilename dump.rdb 238 | 239 | # The working directory. 240 | # 241 | # The DB will be written inside this directory, with the filename specified 242 | # above using the 'dbfilename' configuration directive. 243 | # 244 | # The Append Only File will also be created inside this directory. 245 | # 246 | # Note that you must specify a directory here, not a file name. 247 | dir {{ redis_data_dir }}/{{ redis_port }} 248 | 249 | ################################# REPLICATION ################################# 250 | 251 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of 252 | # another Redis server. A few things to understand ASAP about Redis replication. 
253 | # 254 | # 1) Redis replication is asynchronous, but you can configure a master to 255 | # stop accepting writes if it appears to be not connected with at least 256 | # a given number of slaves. 257 | # 2) Redis slaves are able to perform a partial resynchronization with the 258 | # master if the replication link is lost for a relatively small amount of 259 | # time. You may want to configure the replication backlog size (see the next 260 | # sections of this file) with a sensible value depending on your needs. 261 | # 3) Replication is automatic and does not need user intervention. After a 262 | # network partition slaves automatically try to reconnect to masters 263 | # and resynchronize with them. 264 | # 265 | # slaveof 266 | 267 | # If the master is password protected (using the "requirepass" configuration 268 | # directive below) it is possible to tell the slave to authenticate before 269 | # starting the replication synchronization process, otherwise the master will 270 | # refuse the slave request. 271 | # 272 | # masterauth 273 | 274 | # When a slave loses its connection with the master, or when the replication 275 | # is still in progress, the slave can act in two different ways: 276 | # 277 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will 278 | # still reply to client requests, possibly with out of date data, or the 279 | # data set may just be empty if this is the first synchronization. 280 | # 281 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with 282 | # an error "SYNC with master in progress" to all the kind of commands 283 | # but to INFO and SLAVEOF. 284 | # 285 | slave-serve-stale-data yes 286 | 287 | # You can configure a slave instance to accept writes or not. 
Writing against 288 | # a slave instance may be useful to store some ephemeral data (because data 289 | # written on a slave will be easily deleted after resync with the master) but 290 | # may also cause problems if clients are writing to it because of a 291 | # misconfiguration. 292 | # 293 | # Since Redis 2.6 by default slaves are read-only. 294 | # 295 | # Note: read only slaves are not designed to be exposed to untrusted clients 296 | # on the internet. It's just a protection layer against misuse of the instance. 297 | # Still a read only slave exports by default all the administrative commands 298 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve 299 | # security of read only slaves using 'rename-command' to shadow all the 300 | # administrative / dangerous commands. 301 | slave-read-only yes 302 | 303 | # Replication SYNC strategy: disk or socket. 304 | # 305 | # ------------------------------------------------------- 306 | # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY 307 | # ------------------------------------------------------- 308 | # 309 | # New slaves and reconnecting slaves that are not able to continue the replication 310 | # process just receiving differences, need to do what is called a "full 311 | # synchronization". An RDB file is transmitted from the master to the slaves. 312 | # The transmission can happen in two different ways: 313 | # 314 | # 1) Disk-backed: The Redis master creates a new process that writes the RDB 315 | # file on disk. Later the file is transferred by the parent 316 | # process to the slaves incrementally. 317 | # 2) Diskless: The Redis master creates a new process that directly writes the 318 | # RDB file to slave sockets, without touching the disk at all. 319 | # 320 | # With disk-backed replication, while the RDB file is generated, more slaves 321 | # can be queued and served with the RDB file as soon as the current child producing 322 | # the RDB file finishes its work. 
With diskless replication instead once 323 | # the transfer starts, new slaves arriving will be queued and a new transfer 324 | # will start when the current one terminates. 325 | # 326 | # When diskless replication is used, the master waits a configurable amount of 327 | # time (in seconds) before starting the transfer in the hope that multiple slaves 328 | # will arrive and the transfer can be parallelized. 329 | # 330 | # With slow disks and fast (large bandwidth) networks, diskless replication 331 | # works better. 332 | repl-diskless-sync no 333 | 334 | # When diskless replication is enabled, it is possible to configure the delay 335 | # the server waits in order to spawn the child that transfers the RDB via socket 336 | # to the slaves. 337 | # 338 | # This is important since once the transfer starts, it is not possible to serve 339 | # new slaves arriving, that will be queued for the next RDB transfer, so the server 340 | # waits a delay in order to let more slaves arrive. 341 | # 342 | # The delay is specified in seconds, and by default is 5 seconds. To disable 343 | # it entirely just set it to 0 seconds and the transfer will start ASAP. 344 | repl-diskless-sync-delay 5 345 | 346 | # Slaves send PINGs to server in a predefined interval. It's possible to change 347 | # this interval with the repl_ping_slave_period option. The default value is 10 348 | # seconds. 349 | # 350 | # repl-ping-slave-period 10 351 | 352 | # The following option sets the replication timeout for: 353 | # 354 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave. 355 | # 2) Master timeout from the point of view of slaves (data, pings). 356 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). 357 | # 358 | # It is important to make sure that this value is greater than the value 359 | # specified for repl-ping-slave-period otherwise a timeout will be detected 360 | # every time there is low traffic between the master and the slave. 
361 | # 362 | # repl-timeout 60 363 | 364 | # Disable TCP_NODELAY on the slave socket after SYNC? 365 | # 366 | # If you select "yes" Redis will use a smaller number of TCP packets and 367 | # less bandwidth to send data to slaves. But this can add a delay for 368 | # the data to appear on the slave side, up to 40 milliseconds with 369 | # Linux kernels using a default configuration. 370 | # 371 | # If you select "no" the delay for data to appear on the slave side will 372 | # be reduced but more bandwidth will be used for replication. 373 | # 374 | # By default we optimize for low latency, but in very high traffic conditions 375 | # or when the master and slaves are many hops away, turning this to "yes" may 376 | # be a good idea. 377 | repl-disable-tcp-nodelay no 378 | 379 | # Set the replication backlog size. The backlog is a buffer that accumulates 380 | # slave data when slaves are disconnected for some time, so that when a slave 381 | # wants to reconnect again, often a full resync is not needed, but a partial 382 | # resync is enough, just passing the portion of data the slave missed while 383 | # disconnected. 384 | # 385 | # The bigger the replication backlog, the longer the time the slave can be 386 | # disconnected and later be able to perform a partial resynchronization. 387 | # 388 | # The backlog is only allocated once there is at least a slave connected. 389 | # 390 | # repl-backlog-size 1mb 391 | 392 | # After a master has no longer connected slaves for some time, the backlog 393 | # will be freed. The following option configures the amount of seconds that 394 | # need to elapse, starting from the time the last slave disconnected, for 395 | # the backlog buffer to be freed. 396 | # 397 | # A value of 0 means to never release the backlog. 398 | # 399 | # repl-backlog-ttl 3600 400 | 401 | # The slave priority is an integer number published by Redis in the INFO output. 
402 | # It is used by Redis Sentinel in order to select a slave to promote into a 403 | # master if the master is no longer working correctly. 404 | # 405 | # A slave with a low priority number is considered better for promotion, so 406 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will 407 | # pick the one with priority 10, that is the lowest. 408 | # 409 | # However a special priority of 0 marks the slave as not able to perform the 410 | # role of master, so a slave with priority of 0 will never be selected by 411 | # Redis Sentinel for promotion. 412 | # 413 | # By default the priority is 100. 414 | slave-priority 100 415 | 416 | # It is possible for a master to stop accepting writes if there are less than 417 | # N slaves connected, having a lag less or equal than M seconds. 418 | # 419 | # The N slaves need to be in "online" state. 420 | # 421 | # The lag in seconds, that must be <= the specified value, is calculated from 422 | # the last ping received from the slave, that is usually sent every second. 423 | # 424 | # This option does not GUARANTEE that N replicas will accept the write, but 425 | # will limit the window of exposure for lost writes in case not enough slaves 426 | # are available, to the specified number of seconds. 427 | # 428 | # For example to require at least 3 slaves with a lag <= 10 seconds use: 429 | # 430 | # min-slaves-to-write 3 431 | # min-slaves-max-lag 10 432 | # 433 | # Setting one or the other to 0 disables the feature. 434 | # 435 | # By default min-slaves-to-write is set to 0 (feature disabled) and 436 | # min-slaves-max-lag is set to 10. 437 | 438 | # A Redis master is able to list the address and port of the attached 439 | # slaves in different ways. For example the "INFO replication" section 440 | # offers this information, which is used, among other tools, by 441 | # Redis Sentinel in order to discover slave instances. 
442 | # Another place where this info is available is in the output of the 443 | # "ROLE" command of a master. 444 | # 445 | # The listed IP and address normally reported by a slave is obtained 446 | # in the following way: 447 | # 448 | # IP: The address is auto detected by checking the peer address 449 | # of the socket used by the slave to connect with the master. 450 | # 451 | # Port: The port is communicated by the slave during the replication 452 | # handshake, and is normally the port that the slave is using to 453 | # listen for connections. 454 | # 455 | # However when port forwarding or Network Address Translation (NAT) is 456 | # used, the slave may be actually reachable via different IP and port 457 | # pairs. The following two options can be used by a slave in order to 458 | # report to its master a specific set of IP and port, so that both INFO 459 | # and ROLE will report those values. 460 | # 461 | # There is no need to use both the options if you need to override just 462 | # the port or the IP address. 463 | # 464 | # slave-announce-ip 5.5.5.5 465 | # slave-announce-port 1234 466 | 467 | ################################## SECURITY ################################### 468 | 469 | # Require clients to issue AUTH before processing any other 470 | # commands. This might be useful in environments in which you do not trust 471 | # others with access to the host running redis-server. 472 | # 473 | # This should stay commented out for backward compatibility and because most 474 | # people do not need auth (e.g. they run their own servers). 475 | # 476 | # Warning: since Redis is pretty fast an outside user can try up to 477 | # 150k passwords per second against a good box. This means that you should 478 | # use a very strong password otherwise it will be very easy to break. 479 | # 480 | # requirepass foobared 481 | 482 | # Command renaming. 483 | # 484 | # It is possible to change the name of dangerous commands in a shared 485 | # environment.
For instance the CONFIG command may be renamed into something 486 | # hard to guess so that it will still be available for internal-use tools 487 | # but not available for general clients. 488 | # 489 | # Example: 490 | # 491 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 492 | # 493 | # It is also possible to completely kill a command by renaming it into 494 | # an empty string: 495 | # 496 | # rename-command CONFIG "" 497 | # 498 | # Please note that changing the name of commands that are logged into the 499 | # AOF file or transmitted to slaves may cause problems. 500 | 501 | ################################### LIMITS #################################### 502 | 503 | # Set the max number of connected clients at the same time. By default 504 | # this limit is set to 10000 clients, however if the Redis server is not 505 | # able to configure the process file limit to allow for the specified limit 506 | # the max number of allowed clients is set to the current file limit 507 | # minus 32 (as Redis reserves a few file descriptors for internal uses). 508 | # 509 | # Once the limit is reached Redis will close all the new connections sending 510 | # an error 'max number of clients reached'. 511 | # 512 | # maxclients 10000 513 | 514 | # Don't use more memory than the specified amount of bytes. 515 | # When the memory limit is reached Redis will try to remove keys 516 | # according to the eviction policy selected (see maxmemory-policy). 517 | # 518 | # If Redis can't remove keys according to the policy, or if the policy is 519 | # set to 'noeviction', Redis will start to reply with errors to commands 520 | # that would use more memory, like SET, LPUSH, and so on, and will continue 521 | # to reply to read-only commands like GET. 522 | # 523 | # This option is usually useful when using Redis as an LRU cache, or to set 524 | # a hard memory limit for an instance (using the 'noeviction' policy). 
525 | # 526 | # WARNING: If you have slaves attached to an instance with maxmemory on, 527 | # the size of the output buffers needed to feed the slaves are subtracted 528 | # from the used memory count, so that network problems / resyncs will 529 | # not trigger a loop where keys are evicted, and in turn the output 530 | # buffer of slaves is full with DELs of keys evicted triggering the deletion 531 | # of more keys, and so forth until the database is completely emptied. 532 | # 533 | # In short... if you have slaves attached it is suggested that you set a lower 534 | # limit for maxmemory so that there is some free RAM on the system for slave 535 | # output buffers (but this is not needed if the policy is 'noeviction'). 536 | # 537 | # maxmemory 538 | 539 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 540 | # is reached. You can select among five behaviors: 541 | # 542 | # volatile-lru -> remove the key with an expire set using an LRU algorithm 543 | # allkeys-lru -> remove any key according to the LRU algorithm 544 | # volatile-random -> remove a random key with an expire set 545 | # allkeys-random -> remove a random key, any key 546 | # volatile-ttl -> remove the key with the nearest expire time (minor TTL) 547 | # noeviction -> don't expire at all, just return an error on write operations 548 | # 549 | # Note: with any of the above policies, Redis will return an error on write 550 | # operations, when there are no suitable keys for eviction. 
551 | # 552 | # At the date of writing these commands are: set setnx setex append 553 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd 554 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby 555 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby 556 | # getset mset msetnx exec sort 557 | # 558 | # The default is: 559 | # 560 | # maxmemory-policy noeviction 561 | 562 | # LRU and minimal TTL algorithms are not precise algorithms but approximated 563 | # algorithms (in order to save memory), so you can tune it for speed or 564 | # accuracy. For default Redis will check five keys and pick the one that was 565 | # used less recently, you can change the sample size using the following 566 | # configuration directive. 567 | # 568 | # The default of 5 produces good enough results. 10 Approximates very closely 569 | # true LRU but costs a bit more CPU. 3 is very fast but not very accurate. 570 | # 571 | # maxmemory-samples 5 572 | 573 | ############################## APPEND ONLY MODE ############################### 574 | 575 | # By default Redis asynchronously dumps the dataset on disk. This mode is 576 | # good enough in many applications, but an issue with the Redis process or 577 | # a power outage may result into a few minutes of writes lost (depending on 578 | # the configured save points). 579 | # 580 | # The Append Only File is an alternative persistence mode that provides 581 | # much better durability. For instance using the default data fsync policy 582 | # (see later in the config file) Redis can lose just one second of writes in a 583 | # dramatic event like a server power outage, or a single write if something 584 | # wrong with the Redis process itself happens, but the operating system is 585 | # still running correctly. 586 | # 587 | # AOF and RDB persistence can be enabled at the same time without problems. 
588 | # If the AOF is enabled on startup Redis will load the AOF, that is the file 589 | # with the better durability guarantees. 590 | # 591 | # Please check http://redis.io/topics/persistence for more information. 592 | 593 | appendonly yes 594 | 595 | # The name of the append only file (default: "appendonly.aof") 596 | 597 | appendfilename "appendonly.aof" 598 | 599 | # The fsync() call tells the Operating System to actually write data on disk 600 | # instead of waiting for more data in the output buffer. Some OS will really flush 601 | # data on disk, some other OS will just try to do it ASAP. 602 | # 603 | # Redis supports three different modes: 604 | # 605 | # no: don't fsync, just let the OS flush the data when it wants. Faster. 606 | # always: fsync after every write to the append only log. Slow, Safest. 607 | # everysec: fsync only one time every second. Compromise. 608 | # 609 | # The default is "everysec", as that's usually the right compromise between 610 | # speed and data safety. It's up to you to understand if you can relax this to 611 | # "no" that will let the operating system flush the output buffer when 612 | # it wants, for better performances (but if you can live with the idea of 613 | # some data loss consider the default persistence mode that's snapshotting), 614 | # or on the contrary, use "always" that's very slow but a bit safer than 615 | # everysec. 616 | # 617 | # More details please check the following article: 618 | # http://antirez.com/post/redis-persistence-demystified.html 619 | # 620 | # If unsure, use "everysec". 621 | 622 | # appendfsync always 623 | appendfsync everysec 624 | # appendfsync no 625 | 626 | # When the AOF fsync policy is set to always or everysec, and a background 627 | # saving process (a background save or AOF log background rewriting) is 628 | # performing a lot of I/O against the disk, in some Linux configurations 629 | # Redis may block too long on the fsync() call. 
Note that there is no fix for 630 | # this currently, as even performing fsync in a different thread will block 631 | # our synchronous write(2) call. 632 | # 633 | # In order to mitigate this problem it's possible to use the following option 634 | # that will prevent fsync() from being called in the main process while a 635 | # BGSAVE or BGREWRITEAOF is in progress. 636 | # 637 | # This means that while another child is saving, the durability of Redis is 638 | # the same as "appendfsync none". In practical terms, this means that it is 639 | # possible to lose up to 30 seconds of log in the worst scenario (with the 640 | # default Linux settings). 641 | # 642 | # If you have latency problems turn this to "yes". Otherwise leave it as 643 | # "no" that is the safest pick from the point of view of durability. 644 | 645 | no-appendfsync-on-rewrite no 646 | 647 | # Automatic rewrite of the append only file. 648 | # Redis is able to automatically rewrite the log file implicitly calling 649 | # BGREWRITEAOF when the AOF log size grows by the specified percentage. 650 | # 651 | # This is how it works: Redis remembers the size of the AOF file after the 652 | # latest rewrite (if no rewrite has happened since the restart, the size of 653 | # the AOF at startup is used). 654 | # 655 | # This base size is compared to the current size. If the current size is 656 | # bigger than the specified percentage, the rewrite is triggered. Also 657 | # you need to specify a minimal size for the AOF file to be rewritten, this 658 | # is useful to avoid rewriting the AOF file even if the percentage increase 659 | # is reached but it is still pretty small. 660 | # 661 | # Specify a percentage of zero in order to disable the automatic AOF 662 | # rewrite feature. 
663 | 664 | auto-aof-rewrite-percentage 100 665 | auto-aof-rewrite-min-size 64mb 666 | 667 | # An AOF file may be found to be truncated at the end during the Redis 668 | # startup process, when the AOF data gets loaded back into memory. 669 | # This may happen when the system where Redis is running 670 | # crashes, especially when an ext4 filesystem is mounted without the 671 | # data=ordered option (however this can't happen when Redis itself 672 | # crashes or aborts but the operating system still works correctly). 673 | # 674 | # Redis can either exit with an error when this happens, or load as much 675 | # data as possible (the default now) and start if the AOF file is found 676 | # to be truncated at the end. The following option controls this behavior. 677 | # 678 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and 679 | # the Redis server starts emitting a log to inform the user of the event. 680 | # Otherwise if the option is set to no, the server aborts with an error 681 | # and refuses to start. When the option is set to no, the user requires 682 | # to fix the AOF file using the "redis-check-aof" utility before to restart 683 | # the server. 684 | # 685 | # Note that if the AOF file will be found to be corrupted in the middle 686 | # the server will still exit with an error. This option only applies when 687 | # Redis will try to read more data from the AOF file but not enough bytes 688 | # will be found. 689 | aof-load-truncated yes 690 | 691 | ################################ LUA SCRIPTING ############################### 692 | 693 | # Max execution time of a Lua script in milliseconds. 694 | # 695 | # If the maximum execution time is reached Redis will log that a script is 696 | # still in execution after the maximum allowed time and will start to 697 | # reply to queries with an error. 
698 | # 699 | # When a long running script exceeds the maximum execution time only the 700 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be 701 | # used to stop a script that did not yet called write commands. The second 702 | # is the only way to shut down the server in the case a write command was 703 | # already issued by the script but the user doesn't want to wait for the natural 704 | # termination of the script. 705 | # 706 | # Set it to 0 or a negative value for unlimited execution without warnings. 707 | lua-time-limit 5000 708 | 709 | ################################ REDIS CLUSTER ############################### 710 | # 711 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 712 | # WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however 713 | # in order to mark it as "mature" we need to wait for a non trivial percentage 714 | # of users to deploy it in production. 715 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 716 | # 717 | # Normal Redis instances can't be part of a Redis Cluster; only nodes that are 718 | # started as cluster nodes can. In order to start a Redis instance as a 719 | # cluster node enable the cluster support uncommenting the following: 720 | # 721 | cluster-enabled yes 722 | 723 | # Every cluster node has a cluster configuration file. This file is not 724 | # intended to be edited by hand. It is created and updated by Redis nodes. 725 | # Every Redis Cluster node requires a different cluster configuration file. 726 | # Make sure that instances running in the same system do not have 727 | # overlapping cluster configuration file names. 728 | # 729 | cluster-config-file {{ redis_data_dir }}/{{ redis_port }}/nodes.conf 730 | 731 | # Cluster node timeout is the amount of milliseconds a node must be unreachable 732 | # for it to be considered in failure state. 
733 | # Most other internal time limits are multiple of the node timeout. 734 | # 735 | cluster-node-timeout 5000 736 | 737 | # A slave of a failing master will avoid starting a failover if its data 738 | # looks too old. 739 | # 740 | # There is no simple way for a slave to actually have an exact measure of 741 | # its "data age", so the following two checks are performed: 742 | # 743 | # 1) If there are multiple slaves able to failover, they exchange messages 744 | # in order to try to give an advantage to the slave with the best 745 | # replication offset (more data from the master processed). 746 | # Slaves will try to get their rank by offset, and apply to the start 747 | # of the failover a delay proportional to their rank. 748 | # 749 | # 2) Every single slave computes the time of the last interaction with 750 | # its master. This can be the last ping or command received (if the master 751 | # is still in the "connected" state), or the time that elapsed since the 752 | # disconnection with the master (if the replication link is currently down). 753 | # If the last interaction is too old, the slave will not try to failover 754 | # at all. 755 | # 756 | # The point "2" can be tuned by user. Specifically a slave will not perform 757 | # the failover if, since the last interaction with the master, the time 758 | # elapsed is greater than: 759 | # 760 | # (node-timeout * slave-validity-factor) + repl-ping-slave-period 761 | # 762 | # So for example if node-timeout is 30 seconds, and the slave-validity-factor 763 | # is 10, and assuming a default repl-ping-slave-period of 10 seconds, the 764 | # slave will not try to failover if it was not able to talk with the master 765 | # for longer than 310 seconds. 766 | # 767 | # A large slave-validity-factor may allow slaves with too old data to failover 768 | # a master, while a too small value may prevent the cluster from being able to 769 | # elect a slave at all.
770 | # 771 | # For maximum availability, it is possible to set the slave-validity-factor 772 | # to a value of 0, which means, that slaves will always try to failover the 773 | # master regardless of the last time they interacted with the master. 774 | # (However they'll always try to apply a delay proportional to their 775 | # offset rank). 776 | # 777 | # Zero is the only value able to guarantee that when all the partitions heal 778 | # the cluster will always be able to continue. 779 | # 780 | # cluster-slave-validity-factor 10 781 | 782 | # Cluster slaves are able to migrate to orphaned masters, that are masters 783 | # that are left without working slaves. This improves the cluster ability 784 | # to resist to failures as otherwise an orphaned master can't be failed over 785 | # in case of failure if it has no working slaves. 786 | # 787 | # Slaves migrate to orphaned masters only if there are still at least a 788 | # given number of other working slaves for their old master. This number 789 | # is the "migration barrier". A migration barrier of 1 means that a slave 790 | # will migrate only if there is at least 1 other working slave for its master 791 | # and so forth. It usually reflects the number of slaves you want for every 792 | # master in your cluster. 793 | # 794 | # Default is 1 (slaves migrate only if their masters remain with at least 795 | # one slave). To disable migration just set it to a very large value. 796 | # A value of 0 can be set but is useful only for debugging and dangerous 797 | # in production. 798 | # 799 | # cluster-migration-barrier 1 800 | 801 | # By default Redis Cluster nodes stop accepting queries if they detect there 802 | # is at least an hash slot uncovered (no available node is serving it). 803 | # This way if the cluster is partially down (for example a range of hash slots 804 | # are no longer covered) all the cluster becomes, eventually, unavailable. 
805 | # It automatically returns available as soon as all the slots are covered again. 806 | # 807 | # However sometimes you want the subset of the cluster which is working, 808 | # to continue to accept queries for the part of the key space that is still 809 | # covered. In order to do so, just set the cluster-require-full-coverage 810 | # option to no. 811 | # 812 | # cluster-require-full-coverage yes 813 | 814 | # In order to setup your cluster make sure to read the documentation 815 | # available at http://redis.io web site. 816 | 817 | ################################## SLOW LOG ################################### 818 | 819 | # The Redis Slow Log is a system to log queries that exceeded a specified 820 | # execution time. The execution time does not include the I/O operations 821 | # like talking with the client, sending the reply and so forth, 822 | # but just the time needed to actually execute the command (this is the only 823 | # stage of command execution where the thread is blocked and can not serve 824 | # other requests in the meantime). 825 | # 826 | # You can configure the slow log with two parameters: one tells Redis 827 | # what is the execution time, in microseconds, to exceed in order for the 828 | # command to get logged, and the other parameter is the length of the 829 | # slow log. When a new command is logged the oldest one is removed from the 830 | # queue of logged commands. 831 | 832 | # The following time is expressed in microseconds, so 1000000 is equivalent 833 | # to one second. Note that a negative number disables the slow log, while 834 | # a value of zero forces the logging of every command. 835 | slowlog-log-slower-than 10000 836 | 837 | # There is no limit to this length. Just be aware that it will consume memory. 838 | # You can reclaim memory used by the slow log with SLOWLOG RESET. 
839 | slowlog-max-len 128 840 | 841 | ################################ LATENCY MONITOR ############################## 842 | 843 | # The Redis latency monitoring subsystem samples different operations 844 | # at runtime in order to collect data related to possible sources of 845 | # latency of a Redis instance. 846 | # 847 | # Via the LATENCY command this information is available to the user that can 848 | # print graphs and obtain reports. 849 | # 850 | # The system only logs operations that were performed in a time equal or 851 | # greater than the amount of milliseconds specified via the 852 | # latency-monitor-threshold configuration directive. When its value is set 853 | # to zero, the latency monitor is turned off. 854 | # 855 | # By default latency monitoring is disabled since it is mostly not needed 856 | # if you don't have latency issues, and collecting data has a performance 857 | # impact, that while very small, can be measured under big load. Latency 858 | # monitoring can easily be enabled at runtime using the command 859 | # "CONFIG SET latency-monitor-threshold " if needed. 860 | latency-monitor-threshold 0 861 | 862 | ############################# EVENT NOTIFICATION ############################## 863 | 864 | # Redis can notify Pub/Sub clients about events happening in the key space. 865 | # This feature is documented at http://redis.io/topics/notifications 866 | # 867 | # For instance if keyspace events notification is enabled, and a client 868 | # performs a DEL operation on key "foo" stored in the Database 0, two 869 | # messages will be published via Pub/Sub: 870 | # 871 | # PUBLISH __keyspace@0__:foo del 872 | # PUBLISH __keyevent@0__:del foo 873 | # 874 | # It is possible to select the events that Redis will notify among a set 875 | # of classes. Every class is identified by a single character: 876 | # 877 | # K Keyspace events, published with __keyspace@__ prefix. 878 | # E Keyevent events, published with __keyevent@__ prefix. 
879 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... 880 | # $ String commands 881 | # l List commands 882 | # s Set commands 883 | # h Hash commands 884 | # z Sorted set commands 885 | # x Expired events (events generated every time a key expires) 886 | # e Evicted events (events generated when a key is evicted for maxmemory) 887 | # A Alias for g$lshzxe, so that the "AKE" string means all the events. 888 | # 889 | # The "notify-keyspace-events" takes as argument a string that is composed 890 | # of zero or multiple characters. The empty string means that notifications 891 | # are disabled. 892 | # 893 | # Example: to enable list and generic events, from the point of view of the 894 | # event name, use: 895 | # 896 | # notify-keyspace-events Elg 897 | # 898 | # Example 2: to get the stream of the expired keys subscribing to channel 899 | # name __keyevent@0__:expired use: 900 | # 901 | # notify-keyspace-events Ex 902 | # 903 | # By default all notifications are disabled because most users don't need 904 | # this feature and the feature has some overhead. Note that if you don't 905 | # specify at least one of K or E, no events will be delivered. 906 | notify-keyspace-events "" 907 | 908 | ############################### ADVANCED CONFIG ############################### 909 | 910 | # Hashes are encoded using a memory efficient data structure when they have a 911 | # small number of entries, and the biggest entry does not exceed a given 912 | # threshold. These thresholds can be configured using the following directives. 913 | hash-max-ziplist-entries 512 914 | hash-max-ziplist-value 64 915 | 916 | # Lists are also encoded in a special way to save a lot of space. 917 | # The number of entries allowed per internal list node can be specified 918 | # as a fixed maximum size or a maximum number of elements. 
919 | # For a fixed maximum size, use -5 through -1, meaning: 920 | # -5: max size: 64 Kb <-- not recommended for normal workloads 921 | # -4: max size: 32 Kb <-- not recommended 922 | # -3: max size: 16 Kb <-- probably not recommended 923 | # -2: max size: 8 Kb <-- good 924 | # -1: max size: 4 Kb <-- good 925 | # Positive numbers mean store up to _exactly_ that number of elements 926 | # per list node. 927 | # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), 928 | # but if your use case is unique, adjust the settings as necessary. 929 | list-max-ziplist-size -2 930 | 931 | # Lists may also be compressed. 932 | # Compress depth is the number of quicklist ziplist nodes from *each* side of 933 | # the list to *exclude* from compression. The head and tail of the list 934 | # are always uncompressed for fast push/pop operations. Settings are: 935 | # 0: disable all list compression 936 | # 1: depth 1 means "don't start compressing until after 1 node into the list, 937 | # going from either the head or tail" 938 | # So: [head]->node->node->...->node->[tail] 939 | # [head], [tail] will always be uncompressed; inner nodes will compress. 940 | # 2: [head]->[next]->node->node->...->node->[prev]->[tail] 941 | # 2 here means: don't compress head or head->next or tail->prev or tail, 942 | # but compress all nodes between them. 943 | # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] 944 | # etc. 945 | list-compress-depth 0 946 | 947 | # Sets have a special encoding in just one case: when a set is composed 948 | # of just strings that happen to be integers in radix 10 in the range 949 | # of 64 bit signed integers. 950 | # The following configuration setting sets the limit in the size of the 951 | # set in order to use this special memory saving encoding. 952 | set-max-intset-entries 512 953 | 954 | # Similarly to hashes and lists, sorted sets are also specially encoded in 955 | # order to save a lot of space. 
This encoding is only used when the length and 956 | # elements of a sorted set are below the following limits: 957 | zset-max-ziplist-entries 128 958 | zset-max-ziplist-value 64 959 | 960 | # HyperLogLog sparse representation bytes limit. The limit includes the 961 | # 16 bytes header. When an HyperLogLog using the sparse representation crosses 962 | # this limit, it is converted into the dense representation. 963 | # 964 | # A value greater than 16000 is totally useless, since at that point the 965 | # dense representation is more memory efficient. 966 | # 967 | # The suggested value is ~ 3000 in order to have the benefits of 968 | # the space efficient encoding without slowing down too much PFADD, 969 | # which is O(N) with the sparse encoding. The value can be raised to 970 | # ~ 10000 when CPU is not a concern, but space is, and the data set is 971 | # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. 972 | hll-sparse-max-bytes 3000 973 | 974 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in 975 | # order to help rehashing the main Redis hash table (the one mapping top-level 976 | # keys to values). The hash table implementation Redis uses (see dict.c) 977 | # performs a lazy rehashing: the more operation you run into a hash table 978 | # that is rehashing, the more rehashing "steps" are performed, so if the 979 | # server is idle the rehashing is never complete and some more memory is used 980 | # by the hash table. 981 | # 982 | # The default is to use this millisecond 10 times every second in order to 983 | # actively rehash the main dictionaries, freeing memory when possible. 984 | # 985 | # If unsure: 986 | # use "activerehashing no" if you have hard latency requirements and it is 987 | # not a good thing in your environment that Redis can reply from time to time 988 | # to queries with 2 milliseconds delay. 
989 | # 990 | # use "activerehashing yes" if you don't have such hard requirements but 991 | # want to free memory asap when possible. 992 | activerehashing yes 993 | 994 | # The client output buffer limits can be used to force disconnection of clients 995 | # that are not reading data from the server fast enough for some reason (a 996 | # common reason is that a Pub/Sub client can't consume messages as fast as the 997 | # publisher can produce them). 998 | # 999 | # The limit can be set differently for the three different classes of clients: 1000 | # 1001 | # normal -> normal clients including MONITOR clients 1002 | # slave -> slave clients 1003 | # pubsub -> clients subscribed to at least one pubsub channel or pattern 1004 | # 1005 | # The syntax of every client-output-buffer-limit directive is the following: 1006 | # 1007 | # client-output-buffer-limit 1008 | # 1009 | # A client is immediately disconnected once the hard limit is reached, or if 1010 | # the soft limit is reached and remains reached for the specified number of 1011 | # seconds (continuously). 1012 | # So for instance if the hard limit is 32 megabytes and the soft limit is 1013 | # 16 megabytes / 10 seconds, the client will get disconnected immediately 1014 | # if the size of the output buffers reach 32 megabytes, but will also get 1015 | # disconnected if the client reaches 16 megabytes and continuously overcomes 1016 | # the limit for 10 seconds. 1017 | # 1018 | # By default normal clients are not limited because they don't receive data 1019 | # without asking (in a push way), but just after a request, so only 1020 | # asynchronous clients may create a scenario where data is requested faster 1021 | # than it can read. 1022 | # 1023 | # Instead there is a default limit for pubsub and slave clients, since 1024 | # subscribers and slaves receive data in a push fashion. 1025 | # 1026 | # Both the hard or the soft limit can be disabled by setting them to zero. 
1027 | client-output-buffer-limit normal 0 0 0 1028 | client-output-buffer-limit slave 256mb 64mb 60 1029 | client-output-buffer-limit pubsub 32mb 8mb 60 1030 | 1031 | # Redis calls an internal function to perform many background tasks, like 1032 | # closing connections of clients in timeout, purging expired keys that are 1033 | # never requested, and so forth. 1034 | # 1035 | # Not all tasks are performed with the same frequency, but Redis checks for 1036 | # tasks to perform according to the specified "hz" value. 1037 | # 1038 | # By default "hz" is set to 10. Raising the value will use more CPU when 1039 | # Redis is idle, but at the same time will make Redis more responsive when 1040 | # there are many keys expiring at the same time, and timeouts may be 1041 | # handled with more precision. 1042 | # 1043 | # The range is between 1 and 500, however a value over 100 is usually not 1044 | # a good idea. Most users should use the default of 10 and raise this up to 1045 | # 100 only in environments where very low latency is required. 1046 | hz 10 1047 | 1048 | # When a child rewrites the AOF file, if the following option is enabled 1049 | # the file will be fsync-ed every 32 MB of data generated. This is useful 1050 | # in order to commit the file to the disk more incrementally and avoid 1051 | # big latency spikes. 1052 | aof-rewrite-incremental-fsync yes 1053 | -------------------------------------------------------------------------------- /cluster/files/redis-trib: -------------------------------------------------------------------------------- 1 | #!/usr/bin/ruby 2 | 3 | # NOTE: this file 4 | # was downloaded from : http://download.redis.io/redis-stable/src/redis-trib.rb 5 | # With this note and the she-bang comment above (from /usr/bin/env ruby) 6 | # 7 | 8 | # TODO (temporary here, we'll move this into the Github issues once 9 | # redis-trib initial implementation is completed). 
10 | # 11 | # - Make sure that if the rehashing fails in the middle redis-trib will try 12 | # to recover. 13 | # - When redis-trib performs a cluster check, if it detects a slot move in 14 | # progress it should prompt the user to continue the move from where it 15 | # stopped. 16 | # - Gracefully handle Ctrl+C in move_slot to ask the user whether to really stop 17 | # while rehashing, and perform the best cleanup possible if the user 18 | # forces the quit. 19 | # - When doing "fix" set a global Fix to true, and prompt the user to 20 | # fix the problem if automatically fixable every time there is something 21 | # to fix. For instance: 22 | # 1) If there is a node that pretends to receive a slot, or to migrate a 23 | # slot, but has no entries in that slot, fix it. 24 | # 2) If there is a node having keys in slots that are not owned by it 25 | # fix this condition by moving the entries into the same node. 26 | # 3) Perform more possibly slow tests about the state of the cluster. 27 | # 4) When aborted slot migration is detected, fix it.
28 | 29 | require 'rubygems' 30 | require 'redis' 31 | 32 | ClusterHashSlots = 16384 33 | MigrateDefaultTimeout = 60000 34 | MigrateDefaultPipeline = 10 35 | RebalanceDefaultThreshold = 2 36 | 37 | $verbose = false 38 | 39 | def xputs(s) 40 | case s[0..2] 41 | when ">>>" 42 | color="29;1" 43 | when "[ER" 44 | color="31;1" 45 | when "[WA" 46 | color="31;1" 47 | when "[OK" 48 | color="32" 49 | when "[FA","***" 50 | color="33" 51 | else 52 | color=nil 53 | end 54 | 55 | color = nil if ENV['TERM'] != "xterm" 56 | print "\033[#{color}m" if color 57 | print s 58 | print "\033[0m" if color 59 | print "\n" 60 | end 61 | 62 | class ClusterNode 63 | def initialize(addr) 64 | s = addr.split(":") 65 | if s.length < 2 66 | puts "Invalid IP or Port (given as #{addr}) - use IP:Port format" 67 | exit 1 68 | end 69 | port = s.pop # removes port from split array 70 | ip = s.join(":") # if s.length > 1 here, it's IPv6, so restore address 71 | @r = nil 72 | @info = {} 73 | @info[:host] = ip 74 | @info[:port] = port 75 | @info[:slots] = {} 76 | @info[:migrating] = {} 77 | @info[:importing] = {} 78 | @info[:replicate] = false 79 | @dirty = false # True if we need to flush slots info into node. 
80 | @friends = [] 81 | end 82 | 83 | def friends 84 | @friends 85 | end 86 | 87 | def slots 88 | @info[:slots] 89 | end 90 | 91 | def has_flag?(flag) 92 | @info[:flags].index(flag) 93 | end 94 | 95 | def to_s 96 | "#{@info[:host]}:#{@info[:port]}" 97 | end 98 | 99 | def connect(o={}) 100 | return if @r 101 | print "Connecting to node #{self}: " if $verbose 102 | STDOUT.flush 103 | begin 104 | @r = Redis.new(:host => @info[:host], :port => @info[:port], :timeout => 60) 105 | @r.ping 106 | rescue 107 | xputs "[ERR] Sorry, can't connect to node #{self}" 108 | exit 1 if o[:abort] 109 | @r = nil 110 | end 111 | xputs "OK" if $verbose 112 | end 113 | 114 | def assert_cluster 115 | info = @r.info 116 | if !info["cluster_enabled"] || info["cluster_enabled"].to_i == 0 117 | xputs "[ERR] Node #{self} is not configured as a cluster node." 118 | exit 1 119 | end 120 | end 121 | 122 | def assert_empty 123 | if !(@r.cluster("info").split("\r\n").index("cluster_known_nodes:1")) || 124 | (@r.info['db0']) 125 | xputs "[ERR] Node #{self} is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0." 
126 | exit 1 127 | end 128 | end 129 | 130 | def load_info(o={}) 131 | self.connect 132 | nodes = @r.cluster("nodes").split("\n") 133 | nodes.each{|n| 134 | # name addr flags role ping_sent ping_recv link_status slots 135 | split = n.split 136 | name,addr,flags,master_id,ping_sent,ping_recv,config_epoch,link_status = split[0..6] 137 | slots = split[8..-1] 138 | info = { 139 | :name => name, 140 | :addr => addr, 141 | :flags => flags.split(","), 142 | :replicate => master_id, 143 | :ping_sent => ping_sent.to_i, 144 | :ping_recv => ping_recv.to_i, 145 | :link_status => link_status 146 | } 147 | info[:replicate] = false if master_id == "-" 148 | 149 | if info[:flags].index("myself") 150 | @info = @info.merge(info) 151 | @info[:slots] = {} 152 | slots.each{|s| 153 | if s[0..0] == '[' 154 | if s.index("->-") # Migrating 155 | slot,dst = s[1..-1].split("->-") 156 | @info[:migrating][slot.to_i] = dst 157 | elsif s.index("-<-") # Importing 158 | slot,src = s[1..-1].split("-<-") 159 | @info[:importing][slot.to_i] = src 160 | end 161 | elsif s.index("-") 162 | start,stop = s.split("-") 163 | self.add_slots((start.to_i)..(stop.to_i)) 164 | else 165 | self.add_slots((s.to_i)..(s.to_i)) 166 | end 167 | } if slots 168 | @dirty = false 169 | @r.cluster("info").split("\n").each{|e| 170 | k,v=e.split(":") 171 | k = k.to_sym 172 | v.chop! 
173 | if k != :cluster_state 174 | @info[k] = v.to_i 175 | else 176 | @info[k] = v 177 | end 178 | } 179 | elsif o[:getfriends] 180 | @friends << info 181 | end 182 | } 183 | end 184 | 185 | def add_slots(slots) 186 | slots.each{|s| 187 | @info[:slots][s] = :new 188 | } 189 | @dirty = true 190 | end 191 | 192 | def set_as_replica(node_id) 193 | @info[:replicate] = node_id 194 | @dirty = true 195 | end 196 | 197 | def flush_node_config 198 | return if !@dirty 199 | if @info[:replicate] 200 | begin 201 | @r.cluster("replicate",@info[:replicate]) 202 | rescue 203 | # If the cluster did not already joined it is possible that 204 | # the slave does not know the master node yet. So on errors 205 | # we return ASAP leaving the dirty flag set, to flush the 206 | # config later. 207 | return 208 | end 209 | else 210 | new = [] 211 | @info[:slots].each{|s,val| 212 | if val == :new 213 | new << s 214 | @info[:slots][s] = true 215 | end 216 | } 217 | @r.cluster("addslots",*new) 218 | end 219 | @dirty = false 220 | end 221 | 222 | def info_string 223 | # We want to display the hash slots assigned to this node 224 | # as ranges, like in: "1-5,8-9,20-25,30" 225 | # 226 | # Note: this could be easily written without side effects, 227 | # we use 'slots' just to split the computation into steps. 228 | 229 | # First step: we want an increasing array of integers 230 | # for instance: [1,2,3,4,5,8,9,20,21,22,23,24,25,30] 231 | slots = @info[:slots].keys.sort 232 | 233 | # As we want to aggregate adjacent slots we convert all the 234 | # slot integers into ranges (with just one element) 235 | # So we have something like [1..1,2..2, ... and so forth. 236 | slots.map!{|x| x..x} 237 | 238 | # Finally we group ranges with adjacent elements. 239 | slots = slots.reduce([]) {|a,b| 240 | if !a.empty? 
&& b.first == (a[-1].last)+1 241 | a[0..-2] + [(a[-1].first)..(b.last)] 242 | else 243 | a + [b] 244 | end 245 | } 246 | 247 | # Now our task is easy, we just convert ranges with just one 248 | # element into a number, and a real range into a start-end format. 249 | # Finally we join the array using the comma as separator. 250 | slots = slots.map{|x| 251 | x.count == 1 ? x.first.to_s : "#{x.first}-#{x.last}" 252 | }.join(",") 253 | 254 | role = self.has_flag?("master") ? "M" : "S" 255 | 256 | if self.info[:replicate] and @dirty 257 | is = "S: #{self.info[:name]} #{self.to_s}" 258 | else 259 | is = "#{role}: #{self.info[:name]} #{self.to_s}\n"+ 260 | " slots:#{slots} (#{self.slots.length} slots) "+ 261 | "#{(self.info[:flags]-["myself"]).join(",")}" 262 | end 263 | if self.info[:replicate] 264 | is += "\n replicates #{info[:replicate]}" 265 | elsif self.has_flag?("master") && self.info[:replicas] 266 | is += "\n #{info[:replicas].length} additional replica(s)" 267 | end 268 | is 269 | end 270 | 271 | # Return a single string representing nodes and associated slots. 272 | # TODO: remove slaves from config when slaves will be handled 273 | # by Redis Cluster. 274 | def get_config_signature 275 | config = [] 276 | @r.cluster("nodes").each_line{|l| 277 | s = l.split 278 | slots = s[8..-1].select {|x| x[0..0] != "["} 279 | next if slots.length == 0 280 | config << s[0]+":"+(slots.sort.join(",")) 281 | } 282 | config.sort.join("|") 283 | end 284 | 285 | def info 286 | @info 287 | end 288 | 289 | def is_dirty? 
290 | @dirty 291 | end 292 | 293 | def r 294 | @r 295 | end 296 | end 297 | 298 | class RedisTrib 299 | def initialize 300 | @nodes = [] 301 | @fix = false 302 | @errors = [] 303 | @timeout = MigrateDefaultTimeout 304 | end 305 | 306 | def check_arity(req_args, num_args) 307 | if ((req_args > 0 and num_args != req_args) || 308 | (req_args < 0 and num_args < req_args.abs)) 309 | xputs "[ERR] Wrong number of arguments for specified sub command" 310 | exit 1 311 | end 312 | end 313 | 314 | def add_node(node) 315 | @nodes << node 316 | end 317 | 318 | def reset_nodes 319 | @nodes = [] 320 | end 321 | 322 | def cluster_error(msg) 323 | @errors << msg 324 | xputs msg 325 | end 326 | 327 | # Return the node with the specified ID or Nil. 328 | def get_node_by_name(name) 329 | @nodes.each{|n| 330 | return n if n.info[:name] == name.downcase 331 | } 332 | return nil 333 | end 334 | 335 | # Like get_node_by_name but the specified name can be just the first 336 | # part of the node ID as long as the prefix in unique across the 337 | # cluster. 338 | def get_node_by_abbreviated_name(name) 339 | l = name.length 340 | candidates = [] 341 | @nodes.each{|n| 342 | if n.info[:name][0...l] == name.downcase 343 | candidates << n 344 | end 345 | } 346 | return nil if candidates.length != 1 347 | candidates[0] 348 | end 349 | 350 | # This function returns the master that has the least number of replicas 351 | # in the cluster. If there are multiple masters with the same smaller 352 | # number of replicas, one at random is returned. 353 | def get_master_with_least_replicas 354 | masters = @nodes.select{|n| n.has_flag? 
"master"} 355 | sorted = masters.sort{|a,b| 356 | a.info[:replicas].length <=> b.info[:replicas].length 357 | } 358 | sorted[0] 359 | end 360 | 361 | def check_cluster(opt={}) 362 | xputs ">>> Performing Cluster Check (using node #{@nodes[0]})" 363 | show_nodes if !opt[:quiet] 364 | check_config_consistency 365 | check_open_slots 366 | check_slots_coverage 367 | end 368 | 369 | def show_cluster_info 370 | masters = 0 371 | keys = 0 372 | @nodes.each{|n| 373 | if n.has_flag?("master") 374 | puts "#{n} (#{n.info[:name][0...8]}...) -> #{n.r.dbsize} keys | #{n.slots.length} slots | "+ 375 | "#{n.info[:replicas].length} slaves." 376 | masters += 1 377 | keys += n.r.dbsize 378 | end 379 | } 380 | xputs "[OK] #{keys} keys in #{masters} masters." 381 | keys_per_slot = sprintf("%.2f",keys/16384.0) 382 | puts "#{keys_per_slot} keys per slot on average." 383 | end 384 | 385 | # Merge slots of every known node. If the resulting slots are equal 386 | # to ClusterHashSlots, then all slots are served. 387 | def covered_slots 388 | slots = {} 389 | @nodes.each{|n| 390 | slots = slots.merge(n.slots) 391 | } 392 | slots 393 | end 394 | 395 | def check_slots_coverage 396 | xputs ">>> Check slots coverage..." 397 | slots = covered_slots 398 | if slots.length == ClusterHashSlots 399 | xputs "[OK] All #{ClusterHashSlots} slots covered." 400 | else 401 | cluster_error \ 402 | "[ERR] Not all #{ClusterHashSlots} slots are covered by nodes." 403 | fix_slots_coverage if @fix 404 | end 405 | end 406 | 407 | def check_open_slots 408 | xputs ">>> Check for open slots..." 409 | open_slots = [] 410 | @nodes.each{|n| 411 | if n.info[:migrating].size > 0 412 | cluster_error \ 413 | "[WARNING] Node #{n} has slots in migrating state (#{n.info[:migrating].keys.join(",")})." 414 | open_slots += n.info[:migrating].keys 415 | end 416 | if n.info[:importing].size > 0 417 | cluster_error \ 418 | "[WARNING] Node #{n} has slots in importing state (#{n.info[:importing].keys.join(",")})." 
419 | open_slots += n.info[:importing].keys 420 | end 421 | } 422 | open_slots.uniq! 423 | if open_slots.length > 0 424 | xputs "[WARNING] The following slots are open: #{open_slots.join(",")}" 425 | end 426 | if @fix 427 | open_slots.each{|slot| fix_open_slot slot} 428 | end 429 | end 430 | 431 | def nodes_with_keys_in_slot(slot) 432 | nodes = [] 433 | @nodes.each{|n| 434 | next if n.has_flag?("slave") 435 | nodes << n if n.r.cluster("getkeysinslot",slot,1).length > 0 436 | } 437 | nodes 438 | end 439 | 440 | def fix_slots_coverage 441 | not_covered = (0...ClusterHashSlots).to_a - covered_slots.keys 442 | xputs ">>> Fixing slots coverage..." 443 | xputs "List of not covered slots: " + not_covered.join(",") 444 | 445 | # For every slot, take action depending on the actual condition: 446 | # 1) No node has keys for this slot. 447 | # 2) A single node has keys for this slot. 448 | # 3) Multiple nodes have keys for this slot. 449 | slots = {} 450 | not_covered.each{|slot| 451 | nodes = nodes_with_keys_in_slot(slot) 452 | slots[slot] = nodes 453 | xputs "Slot #{slot} has keys in #{nodes.length} nodes: #{nodes.join(", ")}" 454 | } 455 | 456 | none = slots.select {|k,v| v.length == 0} 457 | single = slots.select {|k,v| v.length == 1} 458 | multi = slots.select {|k,v| v.length > 1} 459 | 460 | # Handle case "1": keys in no node. 461 | if none.length > 0 462 | xputs "The folowing uncovered slots have no keys across the cluster:" 463 | xputs none.keys.join(",") 464 | yes_or_die "Fix these slots by covering with a random node?" 465 | none.each{|slot,nodes| 466 | node = @nodes.sample 467 | xputs ">>> Covering slot #{slot} with #{node}" 468 | node.r.cluster("addslots",slot) 469 | } 470 | end 471 | 472 | # Handle case "2": keys only in one node. 473 | if single.length > 0 474 | xputs "The folowing uncovered slots have keys in just one node:" 475 | puts single.keys.join(",") 476 | yes_or_die "Fix these slots by covering with those nodes?" 
477 | single.each{|slot,nodes| 478 | xputs ">>> Covering slot #{slot} with #{nodes[0]}" 479 | nodes[0].r.cluster("addslots",slot) 480 | } 481 | end 482 | 483 | # Handle case "3": keys in multiple nodes. 484 | if multi.length > 0 485 | xputs "The folowing uncovered slots have keys in multiple nodes:" 486 | xputs multi.keys.join(",") 487 | yes_or_die "Fix these slots by moving keys into a single node?" 488 | multi.each{|slot,nodes| 489 | target = get_node_with_most_keys_in_slot(nodes,slot) 490 | xputs ">>> Covering slot #{slot} moving keys to #{target}" 491 | 492 | target.r.cluster('addslots',slot) 493 | target.r.cluster('setslot',slot,'stable') 494 | nodes.each{|src| 495 | next if src == target 496 | # Set the source node in 'importing' state (even if we will 497 | # actually migrate keys away) in order to avoid receiving 498 | # redirections for MIGRATE. 499 | src.r.cluster('setslot',slot,'importing',target.info[:name]) 500 | move_slot(src,target,slot,:dots=>true,:fix=>true,:cold=>true) 501 | src.r.cluster('setslot',slot,'stable') 502 | } 503 | } 504 | end 505 | end 506 | 507 | # Return the owner of the specified slot 508 | def get_slot_owners(slot) 509 | owners = [] 510 | @nodes.each{|n| 511 | next if n.has_flag?("slave") 512 | n.slots.each{|s,_| 513 | owners << n if s == slot 514 | } 515 | } 516 | owners 517 | end 518 | 519 | # Return the node, among 'nodes' with the greatest number of keys 520 | # in the specified slot. 521 | def get_node_with_most_keys_in_slot(nodes,slot) 522 | best = nil 523 | best_numkeys = 0 524 | @nodes.each{|n| 525 | next if n.has_flag?("slave") 526 | numkeys = n.r.cluster("countkeysinslot",slot) 527 | if numkeys > best_numkeys || best == nil 528 | best = n 529 | best_numkeys = numkeys 530 | end 531 | } 532 | return best 533 | end 534 | 535 | # Slot 'slot' was found to be in importing or migrating state in one or 536 | # more nodes. This function fixes this condition by migrating keys where 537 | # it seems more sensible. 
    def fix_open_slot(slot)
        puts ">>> Fixing open slot #{slot}"

        # Try to obtain the current slot owner, according to the current
        # nodes configuration.
        owners = get_slot_owners(slot)
        owner = owners[0] if owners.length == 1

        # Partition the masters involved with this slot into the ones
        # flagging it as migrating and the ones flagging it as importing.
        migrating = []
        importing = []
        @nodes.each{|n|
            next if n.has_flag? "slave"
            if n.info[:migrating][slot]
                migrating << n
            elsif n.info[:importing][slot]
                importing << n
            elsif n.r.cluster("countkeysinslot",slot) > 0 && n != owner
                # A node holding keys for a slot it neither owns nor flags is
                # treated as importing, so its keys get moved to the owner.
                xputs "*** Found keys about slot #{slot} in node #{n}!"
                importing << n
            end
        }
        puts "Set as migrating in: #{migrating.join(",")}"
        puts "Set as importing in: #{importing.join(",")}"

        # If there is no slot owner, set as owner the slot with the biggest
        # number of keys, among the set of migrating / importing nodes.
        if !owner
            xputs ">>> Nobody claims ownership, selecting an owner..."
            owner = get_node_with_most_keys_in_slot(@nodes,slot)

            # If we still don't have an owner, we can't fix it.
            if !owner
                xputs "[ERR] Can't select a slot owner. Impossible to fix."
                exit 1
            end

            # Use ADDSLOTS to assign the slot.
            puts "*** Configuring #{owner} as the slot owner"
            owner.r.cluster("setslot",slot,"stable")
            owner.r.cluster("addslots",slot)
            # Make sure this information will propagate. Not strictly needed
            # since there is no past owner, so all the other nodes will accept
            # whatever epoch this node will claim the slot with.
            owner.r.cluster("bumpepoch")

            # Remove the owner from the list of migrating/importing
            # nodes.
            migrating.delete(owner)
            importing.delete(owner)
        end

        # If there are multiple owners of the slot, we need to fix it
        # so that a single node is the owner and all the other nodes
        # are in importing state. Later the fix can be handled by one
        # of the base cases above.
        #
        # Note that this case also covers multiple nodes having the slot
        # in migrating state, since migrating is a valid state only for
        # slot owners.
        if owners.length > 1
            owner = get_node_with_most_keys_in_slot(owners,slot)
            owners.each{|n|
                next if n == owner
                n.r.cluster('delslots',slot)
                n.r.cluster('setslot',slot,'importing',owner.info[:name])
                importing.delete(n) # Avoid duplicates
                importing << n
            }
            owner.r.cluster('bumpepoch')
        end

        # Case 1: The slot is in migrating state in one node, and in
        # importing state in one node. That's trivial to address.
        if migrating.length == 1 && importing.length == 1
            move_slot(migrating[0],importing[0],slot,:dots=>true,:fix=>true)
        # Case 2: There are multiple nodes that claim the slot as importing,
        # they probably got keys about the slot after a restart so opened
        # the slot. In this case we just move all the keys to the owner
        # according to the configuration.
        elsif migrating.length == 0 && importing.length > 0
            xputs ">>> Moving all the #{slot} slot keys to its owner #{owner}"
            importing.each {|node|
                next if node == owner
                move_slot(node,owner,slot,:dots=>true,:fix=>true,:cold=>true)
                xputs ">>> Setting #{slot} as STABLE in #{node}"
                node.r.cluster("setslot",slot,"stable")
            }
        # Case 3: There are no slots claiming to be in importing state, but
        # there is a migrating node that actually don't have any key. We
        # can just close the slot, probably a reshard interrupted in the middle.
        elsif importing.length == 0 && migrating.length == 1 &&
              migrating[0].r.cluster("getkeysinslot",slot,10).length == 0
            migrating[0].r.cluster("setslot",slot,"stable")
        else
            xputs "[ERR] Sorry, Redis-trib can't fix this slot yet (work in progress). Slot is set as migrating in #{migrating.join(",")}, as importing in #{importing.join(",")}, owner is #{owner}"
        end
    end

    # Check if all the nodes agree about the cluster configuration
    def check_config_consistency
        if !is_config_consistent?
            cluster_error "[ERR] Nodes don't agree about configuration!"
        else
            xputs "[OK] All nodes agree about slots configuration."
        end
    end

    # True if every known node reports the same config signature, that is,
    # the same view of the slots -> nodes mapping.
    def is_config_consistent?
        signatures=[]
        @nodes.each{|n|
            signatures << n.get_config_signature
        }
        return signatures.uniq.length == 1
    end

    # Block, printing a progress dot every second, until all the nodes
    # converge to the same cluster configuration.
    def wait_cluster_join
        print "Waiting for the cluster to join"
        while !is_config_consistent?
            print "."
            STDOUT.flush
            sleep 1
        end
        print "\n"
    end

    # Distribute hash slots and replication roles across the nodes of a
    # cluster being created (used by create_cluster_cmd). Reads @nodes and
    # @replicas; mutates each node's local slots/replica configuration.
    def alloc_slots
        nodes_count = @nodes.length
        masters_count = @nodes.length / (@replicas+1)
        masters = []

        # The first step is to split instances by IP. This is useful as
        # we'll try to allocate master nodes in different physical machines
        # (as much as possible) and to allocate slaves of a given master in
        # different physical machines as well.
        #
        # This code assumes just that if the IP is different, than it is more
        # likely that the instance is running in a different physical host
        # or at least a different virtual machine.
        ips = {}
        @nodes.each{|n|
            ips[n.info[:host]] = [] if !ips[n.info[:host]]
            ips[n.info[:host]] << n
        }

        # Select master instances
        puts "Using #{masters_count} masters:"
        interleaved = []
        stop = false
        while not stop do
            # Take one node from each IP until we run out of nodes
            # across every IP.
            ips.each do |ip,nodes|
                if nodes.empty?
                    # if this IP has no remaining nodes, check for termination
                    if interleaved.length == nodes_count
                        # stop when 'interleaved' has accumulated all nodes
                        stop = true
                        next
                    end
                else
                    # else, move one node from this IP to 'interleaved'
                    interleaved.push nodes.shift
                end
            end
        end

        masters = interleaved.slice!(0, masters_count)
        nodes_count -= masters.length

        ## fix/redis_cluster_issue
        # NOTE(review): this rotation of the remaining candidates is a local
        # patch (see branch name above), not part of the standard allocation
        # flow -- presumably it shifts which leftover node is picked first as
        # a replica; confirm intent against the referenced issue before
        # changing it.
        interleaved.push interleaved.shift

        masters.each{|m| puts m}

        # Alloc slots on masters
        slots_per_node = ClusterHashSlots.to_f / masters_count
        first = 0
        cursor = 0.0
        masters.each_with_index{|n,masternum|
            last = (cursor+slots_per_node-1).round
            if last > ClusterHashSlots || masternum == masters.length-1
                last = ClusterHashSlots-1
            end
            last = first if last < first # Min step is 1.
            n.add_slots first..last
            first = last+1
            cursor += slots_per_node
        }

        # Select N replicas for every master.
        # We try to split the replicas among all the IPs with spare nodes
        # trying to avoid the host where the master is running, if possible.
        #
        # Note we loop two times. The first loop assigns the requested
        # number of replicas to each master. The second loop assigns any
        # remaining instances as extra replicas to masters. Some masters
        # may end up with more than their requested number of replicas, but
        # all nodes will be used.
        assignment_verbose = false

        [:requested,:unused].each do |assign|
            masters.each do |m|
                assigned_replicas = 0
                while assigned_replicas < @replicas
                    break if nodes_count == 0
                    if assignment_verbose
                        if assign == :requested
                            puts "Requesting total of #{@replicas} replicas " \
                                 "(#{assigned_replicas} replicas assigned " \
                                 "so far with #{nodes_count} total remaining)."
                        elsif assign == :unused
                            puts "Assigning extra instance to replication " \
                                 "role too (#{nodes_count} remaining)."
                        end
                    end

                    # Return the first node not matching our current master
                    node = interleaved.find{|n| n.info[:host] != m.info[:host]}

                    # If we found a node, use it as a best-first match.
                    # Otherwise, we didn't find a node on a different IP, so we
                    # go ahead and use a same-IP replica.
                    if node
                        slave = node
                        interleaved.delete node
                    else
                        slave = interleaved.shift
                    end
                    slave.set_as_replica(m.info[:name])
                    nodes_count -= 1
                    assigned_replicas += 1
                    puts "Adding replica #{slave} to #{m}"

                    # If we are in the "assign extra nodes" loop,
                    # we want to assign one extra replica to each
                    # master before repeating masters.
                    # This break lets us assign extra replicas to masters
                    # in a round-robin way.
                    break if assign == :unused
                end
            end
        end
    end

    # Ask every known node to persist its current cluster configuration.
    def flush_nodes_config
        @nodes.each{|n|
            n.flush_node_config
        }
    end

    # Print a one-line summary string for every known node.
    def show_nodes
        @nodes.each{|n|
            xputs n.info_string
        }
    end

    # Redis Cluster config epoch collision resolution code is able to eventually
    # set a different epoch to each node after a new cluster is created, but
    # it is slow compared to assign a progressive config epoch to each node
    # before joining the cluster. However we do just a best-effort try here
    # since if we fail is not a problem.
    def assign_config_epoch
        config_epoch = 1
        @nodes.each{|n|
            begin
                n.r.cluster("set-config-epoch",config_epoch)
            rescue
                # Best effort (see comment above): a failure here is fine,
                # the cluster's own collision resolution will handle it.
            end
            config_epoch += 1
        }
    end

    # Make all the nodes meet the first node, so that gossip merges them
    # into a single cluster.
    def join_cluster
        # We use a brute force approach to make sure the node will meet
        # each other, that is, sending CLUSTER MEET messages to all the nodes
        # about the very same node.
        # Thanks to gossip this information should propagate across all the
        # cluster in a matter of seconds.
        first = false
        @nodes.each{|n|
            if !first then first = n.info; next; end # Skip the first node
            n.r.cluster("meet",first[:host],first[:port])
        }
    end

    # Print 'msg' as a prompt and exit(1) unless the user types exactly 'yes'.
    def yes_or_die(msg)
        print "#{msg} (type 'yes' to accept): "
        STDOUT.flush
        if !(STDIN.gets.chomp.downcase == "yes")
            xputs "*** Aborting..."
            exit 1
        end
    end

    # Populate @nodes by contacting the node at 'nodeaddr' and walking the
    # list of "friends" (other cluster members) it reports.
    def load_cluster_info_from_node(nodeaddr)
        node = ClusterNode.new(nodeaddr)
        node.connect(:abort => true)
        node.assert_cluster
        node.load_info(:getfriends => true)
        add_node(node)
        node.friends.each{|f|
            # Skip friends we cannot address or that are failing.
            next if f[:flags].index("noaddr") ||
                    f[:flags].index("disconnected") ||
                    f[:flags].index("fail")
            fnode = ClusterNode.new(f[:addr])
            fnode.connect()
            next if !fnode.r
            begin
                fnode.load_info()
                add_node(fnode)
            rescue => e
                xputs "[ERR] Unable to load info for node #{fnode}"
            end
        }
        populate_nodes_replicas_info
    end

    # This function is called by load_cluster_info_from_node in order to
    # add additional information to every node as a list of replicas.
    def populate_nodes_replicas_info
        # Start adding the new field to every node.
        @nodes.each{|n|
            n.info[:replicas] = []
        }

        # Populate the replicas field using the replicate field of slave
        # nodes.
        @nodes.each{|n|
            if n.info[:replicate]
                master = get_node_by_name(n.info[:replicate])
                if !master
                    xputs "*** WARNING: #{n} claims to be slave of unknown node ID #{n.info[:replicate]}."
                else
                    master.info[:replicas] << n
                end
            end
        }
    end

    # Given a list of source nodes return a "resharding plan"
    # with what slots to move in order to move "numslots" slots to another
    # instance.
    def compute_reshard_table(sources,numslots)
        moved = []
        # Sort from bigger to smaller instance, for two reasons:
        # 1) If we take less slots than instances it is better to start
        #    getting from the biggest instances.
        # 2) We take one slot more from the first instance in the case of not
        #    perfect divisibility. Like we have 3 nodes and need to get 10
        #    slots, we take 4 from the first, and 3 from the rest. So the
        #    biggest is always the first.
        sources = sources.sort{|a,b| b.slots.length <=> a.slots.length}
        source_tot_slots = sources.inject(0) {|sum,source|
            sum+source.slots.length
        }
        sources.each_with_index{|s,i|
            # Every node will provide a number of slots proportional to the
            # slots it has assigned.
            n = (numslots.to_f/source_tot_slots*s.slots.length)
            if i == 0
                n = n.ceil
            else
                n = n.floor
            end
            s.slots.keys.sort[(0...n)].each{|slot|
                if moved.length < numslots
                    moved << {:source => s, :slot => slot}
                end
            }
        }
        return moved
    end

    # Print a human readable version of a plan built by compute_reshard_table.
    def show_reshard_table(table)
        table.each{|e|
            puts " Moving slot #{e[:slot]} from #{e[:source].info[:name]}"
        }
    end

    # Move slots between source and target nodes using MIGRATE.
    #
    # Options:
    # :verbose -- Print a dot for every moved key.
    # :fix -- We are moving in the context of a fix. Use REPLACE.
    # :cold -- Move keys without opening slots / reconfiguring the nodes.
    # :update -- Update nodes.info[:slots] for source/target nodes.
    # :quiet -- Don't print info messages.
    def move_slot(source,target,slot,o={})
        o = {:pipeline => MigrateDefaultPipeline}.merge(o)

        # We start marking the slot as importing in the destination node,
        # and the slot as migrating in the source node. Note that the order of
        # the operations is important, as otherwise a client may be redirected
        # to the target node that does not yet know it is importing this slot.
        if !o[:quiet]
            print "Moving slot #{slot} from #{source} to #{target}: "
            STDOUT.flush
        end

        if !o[:cold]
            target.r.cluster("setslot",slot,"importing",source.info[:name])
            source.r.cluster("setslot",slot,"migrating",target.info[:name])
        end
        # Migrate all the keys from source to target using the MIGRATE command
        while true
            keys = source.r.cluster("getkeysinslot",slot,o[:pipeline])
            break if keys.length == 0
            begin
                source.r.client.call(["migrate",target.info[:host],target.info[:port],"",0,@timeout,:keys,*keys])
            rescue => e
                # A key already exists on the target: when fixing, retry the
                # same batch with the REPLACE modifier to overwrite it.
                if o[:fix] && e.to_s =~ /BUSYKEY/
                    xputs "*** Target key exists. Replacing it for FIX."
                    source.r.client.call(["migrate",target.info[:host],target.info[:port],"",0,@timeout,:replace,:keys,*keys])
                else
                    puts ""
                    xputs "[ERR] Calling MIGRATE: #{e}"
                    exit 1
                end
            end
            print "."*keys.length if o[:dots]
            STDOUT.flush
        end

        puts if !o[:quiet]
        # Set the new node as the owner of the slot in all the known nodes.
        if !o[:cold]
            @nodes.each{|n|
                next if n.has_flag?("slave")
                n.r.cluster("setslot",slot,"node",target.info[:name])
            }
        end

        # Update the node logical config
        if o[:update] then
            source.info[:slots].delete(slot)
            target.info[:slots][slot] = true
        end
    end

    # redis-trib subcommands implementations.

    # CHECK subcommand: load the cluster from argv[0] and run sanity checks.
    def check_cluster_cmd(argv,opt)
        load_cluster_info_from_node(argv[0])
        check_cluster
    end

    # INFO subcommand: load the cluster from argv[0] and print a summary.
    def info_cluster_cmd(argv,opt)
        load_cluster_info_from_node(argv[0])
        show_cluster_info
    end

    # REBALANCE subcommand: move slots among masters so every master ends up
    # with a number of slots proportional to its weight (--weight, default 1).
    def rebalance_cluster_cmd(argv,opt)
        opt = {
            'pipeline' => MigrateDefaultPipeline,
            'threshold' => RebalanceDefaultThreshold
        }.merge(opt)

        # Load nodes info before parsing options, otherwise we can't
        # handle --weight.
        load_cluster_info_from_node(argv[0])

        # Options parsing
        threshold = opt['threshold'].to_i
        autoweights = opt['auto-weights']
        weights = {}
        opt['weight'].each{|w|
            fields = w.split("=")
            node = get_node_by_abbreviated_name(fields[0])
            if !node || !node.has_flag?("master")
                puts "*** No such master node #{fields[0]}"
                exit 1
            end
            weights[node.info[:name]] = fields[1].to_f
        } if opt['weight']
        useempty = opt['use-empty-masters']

        # Assign a weight to each node, and compute the total cluster weight.
        total_weight = 0
        nodes_involved = 0
        @nodes.each{|n|
            if n.has_flag?("master")
                next if !useempty && n.slots.length == 0
                n.info[:w] = weights[n.info[:name]] ? weights[n.info[:name]] : 1
                total_weight += n.info[:w]
                nodes_involved += 1
            end
        }

        # Check cluster, only proceed if it looks sane.
1028 | check_cluster(:quiet => true) 1029 | if @errors.length != 0 1030 | puts "*** Please fix your cluster problems before rebalancing" 1031 | exit 1 1032 | end 1033 | 1034 | # Calculate the slots balance for each node. It's the number of 1035 | # slots the node should lose (if positive) or gain (if negative) 1036 | # in order to be balanced. 1037 | threshold = opt['threshold'].to_f 1038 | threshold_reached = false 1039 | @nodes.each{|n| 1040 | if n.has_flag?("master") 1041 | next if !n.info[:w] 1042 | expected = ((ClusterHashSlots.to_f / total_weight) * 1043 | n.info[:w]).to_i 1044 | n.info[:balance] = n.slots.length - expected 1045 | # Compute the percentage of difference between the 1046 | # expected number of slots and the real one, to see 1047 | # if it's over the threshold specified by the user. 1048 | over_threshold = false 1049 | if threshold > 0 1050 | if n.slots.length > 0 1051 | err_perc = (100-(100.0*expected/n.slots.length)).abs 1052 | over_threshold = true if err_perc > threshold 1053 | elsif expected > 0 1054 | over_threshold = true 1055 | end 1056 | end 1057 | threshold_reached = true if over_threshold 1058 | end 1059 | } 1060 | if !threshold_reached 1061 | xputs "*** No rebalancing needed! All nodes are within the #{threshold}% threshold." 1062 | return 1063 | end 1064 | 1065 | # Only consider nodes we want to change 1066 | sn = @nodes.select{|n| 1067 | n.has_flag?("master") && n.info[:w] 1068 | } 1069 | 1070 | # Because of rounding, it is possible that the balance of all nodes 1071 | # summed does not give 0. Make sure that nodes that have to provide 1072 | # slots are always matched by nodes receiving slots. 1073 | total_balance = sn.map{|x| x.info[:balance]}.reduce{|a,b| a+b} 1074 | while total_balance > 0 1075 | sn.each{|n| 1076 | if n.info[:balance] < 0 && total_balance > 0 1077 | n.info[:balance] -= 1 1078 | total_balance -= 1 1079 | end 1080 | } 1081 | end 1082 | 1083 | # Sort nodes by their slots balance. 
1084 | sn = sn.sort{|a,b| 1085 | a.info[:balance] <=> b.info[:balance] 1086 | } 1087 | 1088 | xputs ">>> Rebalancing across #{nodes_involved} nodes. Total weight = #{total_weight}" 1089 | 1090 | if $verbose 1091 | sn.each{|n| 1092 | puts "#{n} balance is #{n.info[:balance]} slots" 1093 | } 1094 | end 1095 | 1096 | # Now we have at the start of the 'sn' array nodes that should get 1097 | # slots, at the end nodes that must give slots. 1098 | # We take two indexes, one at the start, and one at the end, 1099 | # incrementing or decrementing the indexes accordingly til we 1100 | # find nodes that need to get/provide slots. 1101 | dst_idx = 0 1102 | src_idx = sn.length - 1 1103 | 1104 | while dst_idx < src_idx 1105 | dst = sn[dst_idx] 1106 | src = sn[src_idx] 1107 | numslots = [dst.info[:balance],src.info[:balance]].map{|n| 1108 | n.abs 1109 | }.min 1110 | 1111 | if numslots > 0 1112 | puts "Moving #{numslots} slots from #{src} to #{dst}" 1113 | 1114 | # Actaully move the slots. 1115 | reshard_table = compute_reshard_table([src],numslots) 1116 | if reshard_table.length != numslots 1117 | xputs "*** Assertio failed: Reshard table != number of slots" 1118 | exit 1 1119 | end 1120 | if opt['simulate'] 1121 | print "#"*reshard_table.length 1122 | else 1123 | reshard_table.each{|e| 1124 | move_slot(e[:source],dst,e[:slot], 1125 | :quiet=>true, 1126 | :dots=>false, 1127 | :update=>true, 1128 | :pipeline=>opt['pipeline']) 1129 | print "#" 1130 | STDOUT.flush 1131 | } 1132 | end 1133 | puts 1134 | end 1135 | 1136 | # Update nodes balance. 
1137 | dst.info[:balance] += numslots 1138 | src.info[:balance] -= numslots 1139 | dst_idx += 1 if dst.info[:balance] == 0 1140 | src_idx -= 1 if src.info[:balance] == 0 1141 | end 1142 | end 1143 | 1144 | def fix_cluster_cmd(argv,opt) 1145 | @fix = true 1146 | @timeout = opt['timeout'].to_i if opt['timeout'] 1147 | 1148 | load_cluster_info_from_node(argv[0]) 1149 | check_cluster 1150 | end 1151 | 1152 | def reshard_cluster_cmd(argv,opt) 1153 | opt = {'pipeline' => MigrateDefaultPipeline}.merge(opt) 1154 | 1155 | load_cluster_info_from_node(argv[0]) 1156 | check_cluster 1157 | if @errors.length != 0 1158 | puts "*** Please fix your cluster problems before resharding" 1159 | exit 1 1160 | end 1161 | 1162 | @timeout = opt['timeout'].to_i if opt['timeout'].to_i 1163 | 1164 | # Get number of slots 1165 | if opt['slots'] 1166 | numslots = opt['slots'].to_i 1167 | else 1168 | numslots = 0 1169 | while numslots <= 0 or numslots > ClusterHashSlots 1170 | print "How many slots do you want to move (from 1 to #{ClusterHashSlots})? " 1171 | numslots = STDIN.gets.to_i 1172 | end 1173 | end 1174 | 1175 | # Get the target instance 1176 | if opt['to'] 1177 | target = get_node_by_name(opt['to']) 1178 | if !target || target.has_flag?("slave") 1179 | xputs "*** The specified node is not known or not a master, please retry." 1180 | exit 1 1181 | end 1182 | else 1183 | target = nil 1184 | while not target 1185 | print "What is the receiving node ID? " 1186 | target = get_node_by_name(STDIN.gets.chop) 1187 | if !target || target.has_flag?("slave") 1188 | xputs "*** The specified node is not known or not a master, please retry." 
1189 | target = nil 1190 | end 1191 | end 1192 | end 1193 | 1194 | # Get the source instances 1195 | sources = [] 1196 | if opt['from'] 1197 | opt['from'].split(',').each{|node_id| 1198 | if node_id == "all" 1199 | sources = "all" 1200 | break 1201 | end 1202 | src = get_node_by_name(node_id) 1203 | if !src || src.has_flag?("slave") 1204 | xputs "*** The specified node is not known or is not a master, please retry." 1205 | exit 1 1206 | end 1207 | sources << src 1208 | } 1209 | else 1210 | xputs "Please enter all the source node IDs." 1211 | xputs " Type 'all' to use all the nodes as source nodes for the hash slots." 1212 | xputs " Type 'done' once you entered all the source nodes IDs." 1213 | while true 1214 | print "Source node ##{sources.length+1}:" 1215 | line = STDIN.gets.chop 1216 | src = get_node_by_name(line) 1217 | if line == "done" 1218 | break 1219 | elsif line == "all" 1220 | sources = "all" 1221 | break 1222 | elsif !src || src.has_flag?("slave") 1223 | xputs "*** The specified node is not known or is not a master, please retry." 1224 | elsif src.info[:name] == target.info[:name] 1225 | xputs "*** It is not possible to use the target node as source node." 1226 | else 1227 | sources << src 1228 | end 1229 | end 1230 | end 1231 | 1232 | if sources.length == 0 1233 | puts "*** No source nodes given, operation aborted" 1234 | exit 1 1235 | end 1236 | 1237 | # Handle soures == all. 1238 | if sources == "all" 1239 | sources = [] 1240 | @nodes.each{|n| 1241 | next if n.info[:name] == target.info[:name] 1242 | next if n.has_flag?("slave") 1243 | sources << n 1244 | } 1245 | end 1246 | 1247 | # Check if the destination node is the same of any source nodes. 1248 | if sources.index(target) 1249 | xputs "*** Target node is also listed among the source nodes!" 1250 | exit 1 1251 | end 1252 | 1253 | puts "\nReady to move #{numslots} slots." 
1254 | puts " Source nodes:" 1255 | sources.each{|s| puts " "+s.info_string} 1256 | puts " Destination node:" 1257 | puts " #{target.info_string}" 1258 | reshard_table = compute_reshard_table(sources,numslots) 1259 | puts " Resharding plan:" 1260 | show_reshard_table(reshard_table) 1261 | if !opt['yes'] 1262 | print "Do you want to proceed with the proposed reshard plan (yes/no)? " 1263 | yesno = STDIN.gets.chop 1264 | exit(1) if (yesno != "yes") 1265 | end 1266 | reshard_table.each{|e| 1267 | move_slot(e[:source],target,e[:slot], 1268 | :dots=>true, 1269 | :pipeline=>opt['pipeline']) 1270 | } 1271 | end 1272 | 1273 | # This is an helper function for create_cluster_cmd that verifies if 1274 | # the number of nodes and the specified replicas have a valid configuration 1275 | # where there are at least three master nodes and enough replicas per node. 1276 | def check_create_parameters 1277 | masters = @nodes.length/(@replicas+1) 1278 | if masters < 3 1279 | puts "*** ERROR: Invalid configuration for cluster creation." 1280 | puts "*** Redis Cluster requires at least 3 master nodes." 1281 | puts "*** This is not possible with #{@nodes.length} nodes and #{@replicas} replicas per node." 1282 | puts "*** At least #{3*(@replicas+1)} nodes are required." 1283 | exit 1 1284 | end 1285 | end 1286 | 1287 | def create_cluster_cmd(argv,opt) 1288 | opt = {'replicas' => 0}.merge(opt) 1289 | @replicas = opt['replicas'].to_i 1290 | 1291 | xputs ">>> Creating cluster" 1292 | argv[0..-1].each{|n| 1293 | node = ClusterNode.new(n) 1294 | node.connect(:abort => true) 1295 | node.assert_cluster 1296 | node.load_info 1297 | node.assert_empty 1298 | add_node(node) 1299 | } 1300 | check_create_parameters 1301 | xputs ">>> Performing hash slots allocation on #{@nodes.length} nodes..." 1302 | alloc_slots 1303 | show_nodes 1304 | 1305 | if !opt['yes'] 1306 | yes_or_die "Can I set the above configuration?" 
1307 | end 1308 | 1309 | flush_nodes_config 1310 | xputs ">>> Nodes configuration updated" 1311 | xputs ">>> Assign a different config epoch to each node" 1312 | assign_config_epoch 1313 | xputs ">>> Sending CLUSTER MEET messages to join the cluster" 1314 | join_cluster 1315 | # Give one second for the join to start, in order to avoid that 1316 | # wait_cluster_join will find all the nodes agree about the config as 1317 | # they are still empty with unassigned slots. 1318 | sleep 1 1319 | wait_cluster_join 1320 | flush_nodes_config # Useful for the replicas 1321 | # Reset the node information, so that when the 1322 | # final summary is listed in check_cluster about the newly created cluster 1323 | # all the nodes would get properly listed as slaves or masters 1324 | reset_nodes 1325 | load_cluster_info_from_node(argv[0]) 1326 | check_cluster 1327 | end 1328 | 1329 | def addnode_cluster_cmd(argv,opt) 1330 | xputs ">>> Adding node #{argv[0]} to cluster #{argv[1]}" 1331 | 1332 | # Check the existing cluster 1333 | load_cluster_info_from_node(argv[1]) 1334 | check_cluster 1335 | 1336 | # If --master-id was specified, try to resolve it now so that we 1337 | # abort before starting with the node configuration. 1338 | if opt['slave'] 1339 | if opt['master-id'] 1340 | master = get_node_by_name(opt['master-id']) 1341 | if !master 1342 | xputs "[ERR] No such master ID #{opt['master-id']}" 1343 | end 1344 | else 1345 | master = get_master_with_least_replicas 1346 | xputs "Automatically selected master #{master}" 1347 | end 1348 | end 1349 | 1350 | # Add the new node 1351 | new = ClusterNode.new(argv[0]) 1352 | new.connect(:abort => true) 1353 | new.assert_cluster 1354 | new.load_info 1355 | new.assert_empty 1356 | first = @nodes.first.info 1357 | add_node(new) 1358 | 1359 | # Send CLUSTER MEET command to the new node 1360 | xputs ">>> Send CLUSTER MEET to node #{new} to make it join the cluster." 
1361 | new.r.cluster("meet",first[:host],first[:port]) 1362 | 1363 | # Additional configuration is needed if the node is added as 1364 | # a slave. 1365 | if opt['slave'] 1366 | wait_cluster_join 1367 | xputs ">>> Configure node as replica of #{master}." 1368 | new.r.cluster("replicate",master.info[:name]) 1369 | end 1370 | xputs "[OK] New node added correctly." 1371 | end 1372 | 1373 | def delnode_cluster_cmd(argv,opt) 1374 | id = argv[1].downcase 1375 | xputs ">>> Removing node #{id} from cluster #{argv[0]}" 1376 | 1377 | # Load cluster information 1378 | load_cluster_info_from_node(argv[0]) 1379 | 1380 | # Check if the node exists and is not empty 1381 | node = get_node_by_name(id) 1382 | 1383 | if !node 1384 | xputs "[ERR] No such node ID #{id}" 1385 | exit 1 1386 | end 1387 | 1388 | if node.slots.length != 0 1389 | xputs "[ERR] Node #{node} is not empty! Reshard data away and try again." 1390 | exit 1 1391 | end 1392 | 1393 | # Send CLUSTER FORGET to all the nodes but the node to remove 1394 | xputs ">>> Sending CLUSTER FORGET messages to the cluster..." 1395 | @nodes.each{|n| 1396 | next if n == node 1397 | if n.info[:replicate] && n.info[:replicate].downcase == id 1398 | # Reconfigure the slave to replicate with some other node 1399 | master = get_master_with_least_replicas 1400 | xputs ">>> #{n} as replica of #{master}" 1401 | n.r.cluster("replicate",master.info[:name]) 1402 | end 1403 | n.r.cluster("forget",argv[1]) 1404 | } 1405 | 1406 | # Finally shutdown the node 1407 | xputs ">>> SHUTDOWN the node." 1408 | node.r.shutdown 1409 | end 1410 | 1411 | def set_timeout_cluster_cmd(argv,opt) 1412 | timeout = argv[1].to_i 1413 | if timeout < 100 1414 | puts "Setting a node timeout of less than 100 milliseconds is a bad idea." 
1415 | exit 1 1416 | end 1417 | 1418 | # Load cluster information 1419 | load_cluster_info_from_node(argv[0]) 1420 | ok_count = 0 1421 | err_count = 0 1422 | 1423 | # Send CLUSTER FORGET to all the nodes but the node to remove 1424 | xputs ">>> Reconfiguring node timeout in every cluster node..." 1425 | @nodes.each{|n| 1426 | begin 1427 | n.r.config("set","cluster-node-timeout",timeout) 1428 | n.r.config("rewrite") 1429 | ok_count += 1 1430 | xputs "*** New timeout set for #{n}" 1431 | rescue => e 1432 | puts "ERR setting node-timeot for #{n}: #{e}" 1433 | err_count += 1 1434 | end 1435 | } 1436 | xputs ">>> New node timeout set. #{ok_count} OK, #{err_count} ERR." 1437 | end 1438 | 1439 | def call_cluster_cmd(argv,opt) 1440 | cmd = argv[1..-1] 1441 | cmd[0] = cmd[0].upcase 1442 | 1443 | # Load cluster information 1444 | load_cluster_info_from_node(argv[0]) 1445 | xputs ">>> Calling #{cmd.join(" ")}" 1446 | @nodes.each{|n| 1447 | begin 1448 | res = n.r.send(*cmd) 1449 | puts "#{n}: #{res}" 1450 | rescue => e 1451 | puts "#{n}: #{e}" 1452 | end 1453 | } 1454 | end 1455 | 1456 | def import_cluster_cmd(argv,opt) 1457 | source_addr = opt['from'] 1458 | xputs ">>> Importing data from #{source_addr} to cluster #{argv[1]}" 1459 | use_copy = opt['copy'] 1460 | use_replace = opt['replace'] 1461 | 1462 | # Check the existing cluster. 1463 | load_cluster_info_from_node(argv[0]) 1464 | check_cluster 1465 | 1466 | # Connect to the source node. 1467 | xputs ">>> Connecting to the source Redis instance" 1468 | src_host,src_port = source_addr.split(":") 1469 | source = Redis.new(:host =>src_host, :port =>src_port) 1470 | if source.info['cluster_enabled'].to_i == 1 1471 | xputs "[ERR] The source node should not be a cluster node." 
1472 | end 1473 | xputs "*** Importing #{source.dbsize} keys from DB 0" 1474 | 1475 | # Build a slot -> node map 1476 | slots = {} 1477 | @nodes.each{|n| 1478 | n.slots.each{|s,_| 1479 | slots[s] = n 1480 | } 1481 | } 1482 | 1483 | # Use SCAN to iterate over the keys, migrating to the 1484 | # right node as needed. 1485 | cursor = nil 1486 | while cursor != 0 1487 | cursor,keys = source.scan(cursor, :count => 1000) 1488 | cursor = cursor.to_i 1489 | keys.each{|k| 1490 | # Migrate keys using the MIGRATE command. 1491 | slot = key_to_slot(k) 1492 | target = slots[slot] 1493 | print "Migrating #{k} to #{target}: " 1494 | STDOUT.flush 1495 | begin 1496 | cmd = ["migrate",target.info[:host],target.info[:port],k,0,@timeout] 1497 | cmd << :copy if use_copy 1498 | cmd << :replace if use_replace 1499 | source.client.call(cmd) 1500 | rescue => e 1501 | puts e 1502 | else 1503 | puts "OK" 1504 | end 1505 | } 1506 | end 1507 | end 1508 | 1509 | def help_cluster_cmd(argv,opt) 1510 | show_help 1511 | exit 0 1512 | end 1513 | 1514 | # Parse the options for the specific command "cmd". 1515 | # Returns an hash populate with option => value pairs, and the index of 1516 | # the first non-option argument in ARGV. 1517 | def parse_options(cmd) 1518 | idx = 1 ; # Current index into ARGV 1519 | options={} 1520 | while idx < ARGV.length && ARGV[idx][0..1] == '--' 1521 | if ARGV[idx][0..1] == "--" 1522 | option = ARGV[idx][2..-1] 1523 | idx += 1 1524 | 1525 | # --verbose is a global option 1526 | if option == "verbose" 1527 | $verbose = true 1528 | next 1529 | end 1530 | 1531 | if ALLOWED_OPTIONS[cmd] == nil || ALLOWED_OPTIONS[cmd][option] == nil 1532 | puts "Unknown option '#{option}' for command '#{cmd}'" 1533 | exit 1 1534 | end 1535 | if ALLOWED_OPTIONS[cmd][option] != false 1536 | value = ARGV[idx] 1537 | idx += 1 1538 | else 1539 | value = true 1540 | end 1541 | 1542 | # If the option is set to [], it's a multiple arguments 1543 | # option. 
We just queue every new value into an array. 1544 | if ALLOWED_OPTIONS[cmd][option] == [] 1545 | options[option] = [] if !options[option] 1546 | options[option] << value 1547 | else 1548 | options[option] = value 1549 | end 1550 | else 1551 | # Remaining arguments are not options. 1552 | break 1553 | end 1554 | end 1555 | 1556 | # Enforce mandatory options 1557 | if ALLOWED_OPTIONS[cmd] 1558 | ALLOWED_OPTIONS[cmd].each {|option,val| 1559 | if !options[option] && val == :required 1560 | puts "Option '--#{option}' is required "+ \ 1561 | "for subcommand '#{cmd}'" 1562 | exit 1 1563 | end 1564 | } 1565 | end 1566 | return options,idx 1567 | end 1568 | end 1569 | 1570 | ################################################################################# 1571 | # Libraries 1572 | # 1573 | # We try to don't depend on external libs since this is a critical part 1574 | # of Redis Cluster. 1575 | ################################################################################# 1576 | 1577 | # This is the CRC16 algorithm used by Redis Cluster to hash keys. 1578 | # Implementation according to CCITT standards. 
#
# This is actually the XMODEM CRC 16 algorithm, using the
# following parameters:
#
# Name                       : "XMODEM", also known as "ZMODEM", "CRC-16/ACORN"
# Width                      : 16 bit
# Poly                       : 1021 (That is actually x^16 + x^12 + x^5 + 1)
# Initialization             : 0000
# Reflect Input byte         : False
# Reflect Output CRC         : False
# Xor constant to output CRC : 0000
# Output for "123456789"     : 31C3

module RedisClusterCRC16
    # Table-driven CRC16/XMODEM over the bytes of the 'bytes' String.
    # Returns an Integer in 0..0xffff.
    def RedisClusterCRC16.crc16(bytes)
        crc = 0
        bytes.each_byte{|b|
            crc = ((crc<<8) & 0xffff) ^ XMODEMCRC16Lookup[((crc>>8)^b) & 0xff]
        }
        crc
    end

    private
    # Precomputed table for the polynomial 0x1021, one entry per byte value.
    XMODEMCRC16Lookup = [
        0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7,
        0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef,
        0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6,
        0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de,
        0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485,
        0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d,
        0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4,
        0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc,
        0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823,
        0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b,
        0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12,
        0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a,
        0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41,
        0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49,
        0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70,
        0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78,
        0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f,
        0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067,
        0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e,
        0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256,
        0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d,
        0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405,
        0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c,
        0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634,
        0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab,
        0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3,
        0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a,
        0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92,
        0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9,
        0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1,
        0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8,
        0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0
    ]
end

# Turn a key name into the corresponding Redis Cluster slot (0..16383).
def key_to_slot(key)
    # Only hash what is inside {...} if there is such a pattern in the key.
    # Note that the specification requires the content that is between
    # the first { and the first } after the first {. If we find {} with
    # nothing in the middle, the whole key is hashed as usual.
    s = key.index "{"
    if s
        e = key.index "}",s+1
        if e && e != s+1
            key = key[s+1..e-1]
        end
    end
    RedisClusterCRC16.crc16(key) % 16384
end

#################################################################################
# Definition of commands
#################################################################################

# subcommand => [handler method, arity (negative = "at least"), usage text]
COMMANDS={
    "create"  => ["create_cluster_cmd", -2, "host1:port1 ... hostN:portN"],
    "check"   => ["check_cluster_cmd", 2, "host:port"],
    "info"    => ["info_cluster_cmd", 2, "host:port"],
    "fix"     => ["fix_cluster_cmd", 2, "host:port"],
    "reshard" => ["reshard_cluster_cmd", 2, "host:port"],
    "rebalance" => ["rebalance_cluster_cmd", -2, "host:port"],
    "add-node" => ["addnode_cluster_cmd", 3, "new_host:new_port existing_host:existing_port"],
    "del-node" => ["delnode_cluster_cmd", 3, "host:port node_id"],
    "set-timeout" => ["set_timeout_cluster_cmd", 3, "host:port milliseconds"],
    "call" =>    ["call_cluster_cmd", -3, "host:port command arg arg .. arg"],
    "import" => ["import_cluster_cmd", 2, "host:port"],
    "help"   => ["help_cluster_cmd", 1, "(show this help)"]
}

# Per-subcommand option table: true = takes an argument, false = flag,
# :required = mandatory argument, [] = repeatable argument, any other
# value = default.
ALLOWED_OPTIONS={
    "create" => {"replicas" => true, "yes" => false },
    "add-node" => {"slave" => false, "master-id" => true},
    "import" => {"from" => :required, "copy" => false, "replace" => false},
    "reshard" => {"from" => true, "to" => true, "slots" => true, "yes" => false, "timeout" => true, "pipeline" => true},
    "rebalance" => {"weight" => [], "auto-weights" => false, "use-empty-masters" => false, "timeout" => true, "simulate" => false, "pipeline" => true, "threshold" => true},
    "fix" => {"timeout" => MigrateDefaultTimeout},
}

# Print the list of subcommands with their arguments and options.
# NOTE: the usage strings below were mangled in this copy (the angle
# bracketed placeholders had been stripped); restored per upstream.
def show_help
    puts "Usage: redis-trib <command> <options> <arguments ...>\n\n"
    COMMANDS.each{|k,v|
        puts "  #{k.ljust(15)} #{v[2]}"
        if ALLOWED_OPTIONS[k]
            ALLOWED_OPTIONS[k].each{|optname,has_arg|
                puts "                  --#{optname}" + (has_arg ? " <arg>" : "")
            }
        end
    }
    puts "\nFor check, fix, reshard, del-node, set-timeout you can specify the host and port of any working node in the cluster.\n"
end

# Sanity check
if ARGV.length == 0
    show_help
    exit 1
end

rt = RedisTrib.new
cmd_spec = COMMANDS[ARGV[0].downcase]
if !cmd_spec
    puts "Unknown redis-trib subcommand '#{ARGV[0]}'"
    exit 1
end

# Parse options
cmd_options,first_non_option = rt.parse_options(ARGV[0].downcase)
rt.check_arity(cmd_spec[1],ARGV.length-(first_non_option-1))

# Dispatch
rt.send(cmd_spec[0],ARGV[first_non_option..-1],cmd_options)