├── .ansible-lint ├── .gitattributes ├── .github └── workflows │ ├── linter.yaml │ └── molecule.yaml ├── .gitignore ├── .markdownlint.json ├── .prettierrc.yaml ├── .yamllint.yaml ├── LICENSE ├── README.md ├── defaults └── main.yaml ├── handlers └── main.yaml ├── meta └── main.yml ├── molecule └── default │ ├── Dockerfile.j2 │ ├── INSTALL.rst │ ├── converge.yml │ ├── molecule.yml │ ├── prepare.yml │ ├── requirements.yml │ └── verify.yml ├── tasks ├── firewalld.yml └── main.yaml ├── templates ├── default.j2 ├── myid.j2 ├── zoo.cfg.j2 └── zookeeper.service.j2 └── vars ├── Debian.yml ├── RedHat.yml └── Rocky.yml /.ansible-lint: -------------------------------------------------------------------------------- 1 | exclude_paths: 2 | - ./molecule-venv/ 3 | - ./tests/roles/ 4 | 5 | skip_list: 6 | - '106' # Role name does not match ``^[a-z][a-z0-9_]+$`` pattern -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | *.yml linguist-detectable 2 | *.yaml linguist-detectable 3 | -------------------------------------------------------------------------------- /.github/workflows/linter.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Lint Code Base 3 | 4 | # 5 | # Documentation: 6 | # https://help.github.com/en/articles/workflow-syntax-for-github-actions 7 | # 8 | on: 9 | push: 10 | branches: [master] 11 | pull_request: 12 | branches: [master] 13 | workflow_dispatch: 14 | 15 | jobs: 16 | build: 17 | name: Linter 18 | runs-on: ubuntu-latest 19 | 20 | steps: 21 | - name: Checkout Code 22 | uses: actions/checkout@v4 23 | with: 24 | fetch-depth: 0 25 | 26 | - name: Lint Code Base 27 | uses: github/super-linter/slim@v4 28 | env: 29 | DEFAULT_BRANCH: master 30 | FILTER_REGEX_EXCLUDE: .*(tests/|Dockerfile.j2).* 31 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 32 | LINTER_RULES_PATH: / 33 | 
MARKDOWN_CONFIG_FILE: .markdownlint.json 34 | 35 | - name: Ansible Lint 36 | uses: ansible/ansible-lint@v6 37 | -------------------------------------------------------------------------------- /.github/workflows/molecule.yaml: -------------------------------------------------------------------------------- 1 | name: Molecule 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | - release/v* 8 | pull_request: 9 | branches: 10 | - master 11 | workflow_dispatch: 12 | 13 | defaults: 14 | run: 15 | working-directory: 'sleighzy.zookeeper' 16 | 17 | jobs: 18 | molecule: 19 | name: Molecule 20 | runs-on: ubuntu-latest 21 | 22 | steps: 23 | - name: Check out the codebase. 24 | uses: actions/checkout@v4 25 | with: 26 | path: 'sleighzy.zookeeper' 27 | 28 | - name: Set up Python 3. 29 | uses: actions/setup-python@v4 30 | with: 31 | python-version: '3.x' 32 | 33 | - name: Install test dependencies. 34 | run: pip3 install ansible docker "molecule-plugins[docker]" 35 | 36 | - name: Run Molecule tests. 37 | run: molecule test 38 | env: 39 | PY_COLORS: '1' 40 | ANSIBLE_FORCE_COLOR: '1' 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # General file ignores 2 | .DS_Store 3 | 4 | # Symlink to current repository to enable Ansible to find the role 5 | # using its expected full name for Molecule tests 6 | .cache/ 7 | 8 | # Python virtual env for Molecule testing 9 | molecule-venv/ 10 | 11 | super-linter.log 12 | .vscode 13 | 14 | *.iml -------------------------------------------------------------------------------- /.markdownlint.json: -------------------------------------------------------------------------------- 1 | { 2 | "line-length": { 3 | "code_blocks": false, 4 | "tables": false 5 | }, 6 | "list-marker-space": { 7 | "ol_single": 2, 8 | "ol_multi": 2 9 | }, 10 | "no-inline-html": { 11 | "allowed_elements": ["br"] 12 | } 13 | } 14 | 
-------------------------------------------------------------------------------- /.prettierrc.yaml: -------------------------------------------------------------------------------- 1 | proseWrap: 'always' 2 | singleQuote: true 3 | overrides: 4 | - files: 5 | - '*.yml' 6 | - '*.yaml' 7 | options: 8 | proseWrap: 'never' 9 | -------------------------------------------------------------------------------- /.yamllint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | yaml-files: 6 | - '*.yaml' 7 | - '*.yml' 8 | 9 | ignore: | 10 | molecule-venv/ 11 | tests/roles/ 12 | 13 | rules: 14 | braces: 15 | max-spaces-inside: 1 16 | level: error 17 | brackets: 18 | max-spaces-inside: 1 19 | level: error 20 | colons: 21 | max-spaces-after: -1 22 | level: error 23 | commas: 24 | max-spaces-after: -1 25 | level: error 26 | comments: disable 27 | comments-indentation: disable 28 | document-start: disable 29 | empty-lines: 30 | max: 3 31 | level: error 32 | hyphens: 33 | level: error 34 | indentation: disable 35 | key-duplicates: enable 36 | line-length: disable 37 | new-line-at-end-of-file: disable 38 | new-lines: 39 | type: unix 40 | trailing-spaces: disable 41 | truthy: disable 42 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Simon Leigh 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The 
above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Apache ZooKeeper 2 | 3 | ![Lint Code Base] ![Molecule] 4 | 5 | Ansible role for installing and configuring Apache ZooKeeper 6 | 7 | This role can be used to install and cluster multiple ZooKeeper nodes, this uses 8 | all hosts defined for the "zookeeper-nodes" group in the inventory file by 9 | default. All servers are added to the zoo.cfg file along with the leader and 10 | election ports. Firewall ports could be opened after setting `true` to 11 | `zookeeper_firewalld` variable 12 | 13 | ## Supported Platforms 14 | 15 | - Debian 10.x 16 | - RedHat 7 17 | - RedHat 8 18 | - Ubuntu 18.04.x 19 | - Ubuntu 20.04.x 20 | 21 | ## Requirements 22 | 23 | Java: Java 8 / 11 24 | 25 | Ansible 2.9.16 or 2.10.4 are the minimum required versions to workaround an 26 | issue with certain kernels that have broken the `systemd` status check. The 27 | error message "`Service is in unknown state`" will be output when attempting to 28 | start the service via the Ansible role and the task will fail. The service will 29 | start as expected if the `systemctl start` command is run on the physical host. 30 | See for more information. 
31 | 32 | ## Role Variables 33 | 34 | | Variable | Default | Comment | 35 | | ---------------------------------------- | ----------------------------------------------------------------- | -------------------------------------------------------------- | 36 | | zookeeper_mirror | | | 37 | | zookeeper_version | 3.9.3 | | 38 | | zookeeper_package | apache-zookeeper-{{ zookeeper_version }}-bin.tar.gz | | 39 | | zookeeper_group | zookeeper | | 40 | | zookeeper_user | zookeeper | | 41 | | zookeeper_root_dir | /usr/share | | 42 | | zookeeper_install_dir | '{{ zookeeper_root_dir}}/apache-zookeeper-{{zookeeper_version}}' | | 43 | | zookeeper_dir | '{{ zookeeper_root_dir }}/zookeeper' | | 44 | | zookeeper_log_dir | /var/log/zookeeper | | 45 | | zookeeper_data_dir | /var/lib/zookeeper | | 46 | | zookeeper_data_log_dir | /var/lib/zookeeper | | 47 | | zookeeper_client_port | 2181 | | 48 | | zookeeper_id | 1 | Unique per server and should be declared in the inventory file | 49 | | zookeeper_leader_port | 2888 | | 50 | | zookeeper_election_port | 3888 | | 51 | | zookeeper_servers | zookeeper-nodes | See below | 52 | | zookeeper_servers_use_inventory_hostname | false | See below | 53 | | zookeeper_environment | "JVMFLAGS": "-javaagent:/opt/jolokia/jolokia-jvm-1.6.0-agent.jar" | | 54 | | zookeeper_config_params | | A key-value dictionary that will be templated into zoo.cfg | 55 | | zookeeper_firewalld | false | | 56 | 57 | ## Inventory and zookeeper_servers variable 58 | 59 | zookeeper_servers variable above accepts a list of inventory hostnames. These 60 | will be used in the `zoo.cfg` to configure a multi-server cluster so the hosts 61 | can find each other. By default, the hostname used in the `zoo.cfg` will be the 62 | hostname reported by the `hostname` command on the server(provided by the 63 | `ansible_nodename` variable). See the example below. 
64 | 65 | Assuming the below inventory file, and that the `hostname` command returns only 66 | the hostname and does not include the domain name. 67 | 68 | ```ini 69 | [zookeeper-nodes] 70 | zoo1.foo.com zookeeper_id=1 #hostname command returns "zoo1" 71 | zoo2.foo.com zookeeper_id=2 #hostname command returns "zoo2" 72 | zoo3.foo.com zookeeper_id=3 #hostname command returns "zoo3" 73 | ``` 74 | 75 | And assuming the following role variables: 76 | 77 | ```yaml 78 | --- 79 | - role: sleighzy.zookeeper 80 | zookeeper_servers: 81 | - zoo1.foo.com 82 | - zoo2.foo.com 83 | - zoo3.foo.com 84 | ``` 85 | 86 | The templated `zoo.cfg` file will contain the below entries: 87 | 88 | ```ini 89 | server.1=zoo1:2888:3888 90 | server.2=zoo2:2888:3888 91 | server.3=zoo3:2888:3888 92 | ``` 93 | 94 | If you DO NOT want this behaviour and would like the `zoo.cfg` to template the 95 | inventory_hostname then set `zookeeper_servers_use_inventory_hostname` to `true` 96 | 97 | ### Default Ports 98 | 99 | | Port | Description | 100 | | ---- | ----------------------------------- | 101 | | 2181 | Client connection port | 102 | | 2888 | Quorum port for clustering | 103 | | 3888 | Leader election port for clustering | 104 | 105 | ### Default Directories and Files 106 | 107 | | Description | Directory / File | 108 | | ------------------------------------------ | ------------------------------------------- | 109 | | Installation directory | `/usr/share/apache-zookeeper-` | 110 | | Symlink to install directory | `/usr/share/zookeeper` | 111 | | Symlink to configuration | `/etc/zookeeper/zoo.cfg` | 112 | | Log files | `/var/log/zookeeper` | 113 | | Data directory for snapshots and myid file | `/var/lib/zookeeper` | 114 | | Data directory for transaction log files | `/var/lib/zookeeper` | 115 | | Systemd service | `/usr/lib/systemd/system/zookeeper.service` | 116 | | System Defaults | `/etc/default/zookeeper` | 117 | 118 | ## Starting and Stopping ZooKeeper services 119 | 120 | - The ZooKeeper 
service can be started via: `systemctl start zookeeper` 121 | - The ZooKeeper service can be stopped via: `systemctl stop zookeeper` 122 | 123 | ## Four Letter Word Commands 124 | 125 | ZooKeeper can use commands based on four letter words, see 126 | 127 | 128 | The below example uses the stat command to find out which instance is the leader 129 | : 130 | 131 | ```bash 132 | for i in 1 2 3 ; do 133 | echo "zookeeper0$i is a "$(echo stat | nc zookeeper0$i 2181 | grep ^Mode | awk '{print $2}'); 134 | done 135 | ``` 136 | 137 | ## Dependencies 138 | 139 | No dependencies 140 | 141 | ## Example Playbook 142 | 143 | ```yaml 144 | - hosts: zookeeper-nodes 145 | roles: 146 | - sleighzy.zookeeper 147 | ``` 148 | 149 | ## Linting 150 | 151 | Linting should be done using [ansible-lint] 152 | 153 | ```sh 154 | pip3 install ansible-lint --user 155 | 156 | ansible-lint -c ./.ansible-lint . 157 | ``` 158 | 159 | ## Testing 160 | 161 | This module uses the [Ansible Molecule] testing framework. This test suite 162 | creates a ZooKeeper cluster consisting of three nodes running within Docker 163 | containers. Each container runs a different OS to test the supported platforms 164 | for this Ansible role. 165 | 166 | As per the [Molecule Installation guide] this should be done using a virtual 167 | environment. The commands below will create a Python virtual environment and 168 | install Molecule including the Docker driver. 169 | 170 | ```sh 171 | $ python3 -m venv molecule-venv 172 | $ source molecule-venv/bin/activate 173 | (molecule-venv) $ pip3 install ansible docker "molecule-plugins[docker]" 174 | ``` 175 | 176 | Run playbook and tests. Linting errors need to be corrected before Molecule will 177 | execute any tests. This will run all tests and then destroy the Docker 178 | containers. 179 | 180 | ```sh 181 | molecule test 182 | ``` 183 | 184 | The below command can be used to run the playbook without the tests. 
This can be 185 | run multiple times when making changes to the role, and ensuring that operations 186 | are idempotent. 187 | 188 | ```sh 189 | molecule converge 190 | ``` 191 | 192 | The below commands can be used to just run the tests without tearing everything 193 | down. The command `molecule verify` can be repeated for each test run. 194 | 195 | ```sh 196 | molecule create 197 | molecule converge 198 | molecule verify 199 | ``` 200 | 201 | Tear down Molecule tests and Docker containers. 202 | 203 | ```sh 204 | molecule destroy 205 | ``` 206 | 207 | ## License 208 | 209 | ![MIT license] 210 | 211 | [ansible-lint]: https://docs.ansible.com/ansible-lint/ 212 | [ansible molecule]: https://molecule.readthedocs.io/en/latest/ 213 | [lint code base]: 214 | https://github.com/sleighzy/ansible-zookeeper/workflows/Lint%20Code%20Base/badge.svg 215 | [mit license]: https://img.shields.io/badge/License-MIT-blue.svg 216 | [molecule]: 217 | https://github.com/sleighzy/ansible-zookeeper/workflows/Molecule/badge.svg 218 | -------------------------------------------------------------------------------- /defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Installation variables 3 | 4 | zookeeper_version: 3.9.3 5 | zookeeper_mirror: https://dlcdn.apache.org/zookeeper 6 | zookeeper_package: apache-zookeeper-{{ zookeeper_version }}-bin.tar.gz 7 | 8 | zookeeper_create_user_group: true 9 | zookeeper_group: zookeeper 10 | zookeeper_user: zookeeper 11 | 12 | zookeeper_root_dir: /usr/share 13 | zookeeper_install_dir: '{{ zookeeper_root_dir }}/apache-zookeeper-{{ zookeeper_version }}' 14 | # The zookeeper_dir is created as a symlik to the actual installation directory. 15 | # All other configuration and variables use the symlinked directory. 
16 | zookeeper_dir: '{{ zookeeper_root_dir }}/zookeeper' 17 | zookeeper_log_dir: /var/log/zookeeper 18 | 19 | # Start zookeeper after installation 20 | zookeeper_start: yes 21 | # Restart zookeeper on configuration change 22 | zookeeper_restart: yes 23 | 24 | # setup firewalld rules 25 | zookeeper_firewalld: false 26 | 27 | # Configuration variables 28 | 29 | # The unit of time for ZooKeeper translated to milliseconds. 30 | # This governs all ZooKeeper time dependent operations. It is used for heartbeats and timeouts especially. 31 | # Note that the minimum session timeout will be two ticks. 32 | zookeeper_ticktime: 3000 33 | 34 | # Amount of time, in ticks (see tickTime), to allow followers to connect and sync to a leader. 35 | # Increase this value as needed, if the amount of data managed by ZooKeeper is large. 36 | zookeeper_init_limit: 10 37 | 38 | # Amount of time, in ticks (see tickTime), to allow followers to sync with ZooKeeper. 39 | # If followers fall too far behind a leader, they will be dropped. 40 | zookeeper_sync_limit: 5 41 | 42 | # The directory where ZooKeeper stores in-memory database snapshots and, unless specified in dataLogDir, the transaction log of updates to the database. 43 | # This location should be a dedicated disk that is ideally an SSD. 44 | # For more information, see the ZooKeeper Administration Guide (https://zookeeper.apache.org/doc/current/zookeeperAdmin.html). 45 | zookeeper_data_dir: /var/lib/zookeeper 46 | # The location where the transaction log is written to. If you don’t specify this option, the log is written to dataDir. 47 | # By specifying this option, you can use a dedicated log device, and help avoid competition between logging and snapshots. 48 | # For more information, see the ZooKeeper Administration Guide (https://zookeeper.apache.org/doc/current/zookeeperAdmin.html). 49 | zookeeper_data_log_dir: /var/lib/zookeeper 50 | 51 | # This is the port that ZooKeeper listens on for client connections.
This is where the Brokers will connect to ZooKeeper. Typically this is set to 2181. 52 | zookeeper_client_port: 2181 53 | 54 | # The maximum allowed number of client connections for a ZooKeeper server. To avoid running out of allowed connections set this to 0 (unlimited). 55 | zookeeper_max_client_cnxns: 60 56 | 57 | # When enabled, ZooKeeper auto purge feature retains the autopurge.snapRetainCount most recent snapshots and the corresponding transaction logs 58 | # in the dataDir and dataLogDir respectively and deletes the rest. 59 | zookeeper_autopurge_snap_retain_count: 3 60 | # The time interval in hours for which the purge task has to be triggered. Set to a positive integer (1 and above) to enable the auto purging. 61 | zookeeper_purge_interval: 0 62 | 63 | # Uniquely identifies the ZooKeeper instance when clustering ZooKeeper nodes. 64 | # This value is placed in the /var/lib/zookeeper/myid file. 65 | zookeeper_id: 1 66 | zookeeper_leader_port: 2888 67 | zookeeper_election_port: 3888 68 | zookeeper_servers: "{{ groups['zookeeper-nodes'] }}" 69 | zookeeper_servers_use_inventory_hostname: false 70 | zookeeper_environment: {} 71 | 72 | # Set to "false" to disable the AdminServer. By default the AdminServer is enabled. 73 | zookeeper_enable_server: yes 74 | # The address the embedded Jetty server listens on. Defaults to 0.0.0.0. 75 | zookeeper_server_address: 0.0.0.0 76 | # The port the embedded Jetty server listens on. Defaults to 8080. 77 | zookeeper_server_port: 8080 78 | # Set the maximum idle time in milliseconds that a connection can wait before sending or receiving data. Defaults to 30000 ms. 79 | zookeeper_idle_timeout: 30000 80 | # The URL for listing and issuing commands relative to the root URL. Defaults to "/commands". 81 | zookeeper_command_url: /commands 82 | # See https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw. Use * to allow all commands. 
83 | zookeeper_command_whitelist: stat, ruok, conf, isro 84 | 85 | # Set to "org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider" to enable Prometheus.io exporter. 86 | zookeeper_metricsprovider_classname: org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider 87 | # Prometheus.io exporter will start a Jetty server and bind to this port, it default to 7000. Prometheus end point will be http://hostname:httPort/metrics. 88 | zookeeper_metricsprovider_httpport: 7000 89 | # If this property is set to true Prometheus.io will export useful metrics about the JVM. The default is true. 90 | zookeeper_metricsprovider_exportjvminfo: yes 91 | -------------------------------------------------------------------------------- /handlers/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart ZooKeeper service 3 | ansible.builtin.systemd: 4 | name: zookeeper.service 5 | state: restarted 6 | daemon_reload: yes 7 | when: zookeeper_restart 8 | 9 | - name: Reload firewalld 10 | ansible.builtin.systemd: 11 | name: firewalld 12 | state: reloaded 13 | -------------------------------------------------------------------------------- /meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | namespace: sleighzy 4 | role_name: zookeeper 5 | author: Simon Leigh 6 | description: Apache ZooKeeper installation for RHEL/CentOS and Debian/Ubuntu 7 | license: MIT 8 | min_ansible_version: 2.10.4 9 | platforms: 10 | - name: EL 11 | versions: 12 | - '7' 13 | - '8' 14 | - name: Debian 15 | versions: 16 | - buster 17 | - name: Ubuntu 18 | versions: 19 | - bionic 20 | - focal 21 | galaxy_tags: 22 | - zookeeper 23 | - clustering 24 | 25 | dependencies: [] 26 | -------------------------------------------------------------------------------- /molecule/default/Dockerfile.j2: -------------------------------------------------------------------------------- 1 
| # Molecule managed 2 | 3 | {% if item.registry is defined %} 4 | FROM {{ item.registry.url }}/{{ item.image }} 5 | {% else %} 6 | FROM {{ item.image }} 7 | {% endif %} 8 | 9 | {% if item.env is defined %} 10 | {% for var, value in item.env.items() %} 11 | {% if value %} 12 | ENV {{ var }} {{ value }} 13 | {% endif %} 14 | {% endfor %} 15 | {% endif %} 16 | 17 | RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates iproute2 && apt-get clean; \ 18 | elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python*-dnf bash iproute && dnf clean all; \ 19 | elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python sudo yum-plugin-ovl bash iproute && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ 20 | elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml iproute2 && zypper clean -a; \ 21 | elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \ 22 | elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates iproute2 && xbps-remove -O; fi 23 | -------------------------------------------------------------------------------- /molecule/default/INSTALL.rst: -------------------------------------------------------------------------------- 1 | ******* 2 | Docker driver installation guide 3 | ******* 4 | 5 | Requirements 6 | ============ 7 | 8 | * Docker Engine 9 | 10 | Install 11 | ======= 12 | 13 | Please refer to the `Virtual environment`_ documentation for installation best 14 | practices. If not using a virtual environment, please consider passing the 15 | widely recommended `'--user' flag`_ when invoking ``pip``. 16 | 17 | .. _Virtual environment: https://virtualenv.pypa.io/en/latest/ 18 | .. 
_'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site 19 | 20 | .. code-block:: bash 21 | 22 | $ pip install 'molecule-plugins[docker]' 23 | -------------------------------------------------------------------------------- /molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | roles: 5 | - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" 6 | -------------------------------------------------------------------------------- /molecule/default/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | options: 5 | ignore-certs: True 6 | ignore-errors: True 7 | requirements-file: molecule/default/requirements.yml 8 | driver: 9 | name: docker 10 | platforms: 11 | - name: zookeeper-1 12 | image: geerlingguy/docker-debian10-ansible:latest 13 | docker_networks: 14 | - name: zookeeper 15 | ipam_config: 16 | - subnet: '172.26.0.0/16' 17 | networks: 18 | - name: zookeeper 19 | ipv4_address: '172.26.10.1' 20 | etc_hosts: "{'zookeeper-2': '172.26.10.2', 'zookeeper-3': '172.26.10.3'}" 21 | pre_build_image: true 22 | privileged: true 23 | tmpfs: 24 | - /run 25 | - /tmp 26 | volumes: 27 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 28 | cgroupns_mode: host 29 | capabilities: 30 | - SYS_ADMIN 31 | command: /lib/systemd/systemd 32 | groups: 33 | - zookeeper-nodes 34 | - name: zookeeper-2 35 | image: redhat/ubi9:latest 36 | networks: 37 | - name: zookeeper 38 | ipv4_address: '172.26.10.2' 39 | etc_hosts: "{'zookeeper-1': '172.26.10.1', 'zookeeper-3': '172.26.10.3'}" 40 | tmpfs: 41 | - /run 42 | - /tmp 43 | volumes: 44 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 45 | cgroupns_mode: host 46 | command: '/usr/lib/systemd/systemd' 47 | pre_build_image: true 48 | capabilities: 49 | - SYS_ADMIN 50 | groups: 51 | - zookeeper-nodes 52 | - name: zookeeper-3 53 
| image: redhat/ubi9:latest 54 | networks: 55 | - name: zookeeper 56 | ipv4_address: '172.26.10.3' 57 | etc_hosts: "{'zookeeper-1': '172.26.10.1', 'zookeeper-2': '172.26.10.2'}" 58 | privileged: true 59 | tmpfs: 60 | - /run 61 | - /tmp 62 | volumes: 63 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 64 | cgroupns_mode: host 65 | command: '/usr/lib/systemd/systemd' 66 | pre_build_image: true 67 | capabilities: 68 | - SYS_ADMIN 69 | groups: 70 | - zookeeper-nodes 71 | provisioner: 72 | name: ansible 73 | inventory: 74 | host_vars: 75 | # The zookeeper_id is not provided for zookeeper-1 as this 76 | # should use the value of 1 from the defaults. 77 | zookeeper-1: 78 | zookeeper-2: 79 | zookeeper_id: 2 80 | zookeeper-3: 81 | zookeeper_id: 3 82 | verifier: 83 | name: ansible 84 | scenario: 85 | create_sequence: 86 | - dependency 87 | - create 88 | - prepare 89 | check_sequence: 90 | - dependency 91 | - cleanup 92 | - destroy 93 | - create 94 | - prepare 95 | - converge 96 | - check 97 | - destroy 98 | converge_sequence: 99 | - dependency 100 | - create 101 | - prepare 102 | - converge 103 | test_sequence: 104 | - destroy 105 | - dependency 106 | - syntax 107 | - create 108 | - prepare 109 | - converge 110 | - idempotence 111 | - verify 112 | - destroy 113 | -------------------------------------------------------------------------------- /molecule/default/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | gather_facts: true 5 | 6 | pre_tasks: 7 | - name: Install python-firewall 8 | ansible.builtin.yum: 9 | name: 10 | - python3-firewall 11 | state: installed 12 | when: ansible_os_family == "RedHat" 13 | 14 | - name: Install Java 8 (OpenJDK) on RedHat/CentOS 15 | ansible.builtin.yum: 16 | name: java-1.8.0-openjdk 17 | state: installed 18 | when: ansible_os_family == "RedHat" 19 | 20 | - name: Install Java 11 (OpenJDK) on Debian 21 | ansible.builtin.apt: 22 | name: openjdk-11-jdk 23 | state: 
present 24 | update_cache: yes 25 | when: ansible_os_family == "Debian" 26 | 27 | # The installation of this package into the container means 28 | # that the "ps" command is available for viewing the running process. 29 | # Installing this package however also prevents an issue whereby the 30 | # ZooKeeper service is constantly restarted by systemd which causes 31 | # the Molecule tests to fail as the service is not started correctly. 32 | - name: Install ps on Debian 33 | ansible.builtin.apt: 34 | name: procps 35 | state: present 36 | when: ansible_os_family == "Debian" 37 | 38 | - name: Install ps on RedHat/CentOS 39 | ansible.builtin.yum: 40 | name: procps 41 | state: present 42 | when: ansible_os_family == "RedHat" 43 | -------------------------------------------------------------------------------- /molecule/default/requirements.yml: -------------------------------------------------------------------------------- 1 | collections: 2 | - community.docker 3 | -------------------------------------------------------------------------------- /molecule/default/verify.yml: -------------------------------------------------------------------------------- 1 | - name: Verify 2 | hosts: all 3 | 4 | tasks: 5 | - name: Get users 6 | ansible.builtin.getent: 7 | database: passwd 8 | 9 | - name: Get groups 10 | ansible.builtin.getent: 11 | database: group 12 | 13 | - name: Assert that user and group 'zookeeper' exists 14 | ansible.builtin.assert: 15 | that: 16 | - "'zookeeper' in getent_passwd" 17 | - "'zookeeper' in getent_group" 18 | 19 | - name: Register '/usr/share/apache-zookeeper-3.9.3' installation directory status 20 | ansible.builtin.stat: 21 | path: '/usr/share/apache-zookeeper-3.9.3' 22 | register: install_dir 23 | 24 | - name: Assert that '/usr/share/apache-zookeeper-3.9.3' directory is created 25 | ansible.builtin.assert: 26 | that: 27 | - install_dir.stat.exists 28 | - install_dir.stat.isdir 29 | - install_dir.stat.pw_name == 'zookeeper' 30 | - 
install_dir.stat.gr_name == 'zookeeper' 31 | 32 | - name: Register '/usr/share/zookeeper' symlink directory status 33 | ansible.builtin.stat: 34 | path: '/usr/share/zookeeper' 35 | register: zookeeper_dir 36 | 37 | - name: Assert that '/usr/share/zookeeper' symlink is created 38 | ansible.builtin.assert: 39 | that: 40 | - zookeeper_dir.stat.exists 41 | - zookeeper_dir.stat.islnk 42 | - zookeeper_dir.stat.lnk_target == '/usr/share/apache-zookeeper-3.9.3' 43 | 44 | - name: Register '/etc/zookeeper' directory status 45 | ansible.builtin.stat: 46 | path: '/etc/zookeeper' 47 | register: config_dir 48 | 49 | - name: Assert that '/etc/zookeeper' directory is created 50 | ansible.builtin.assert: 51 | that: 52 | - config_dir.stat.exists 53 | - config_dir.stat.isdir 54 | - config_dir.stat.pw_name == 'zookeeper' 55 | - config_dir.stat.gr_name == 'zookeeper' 56 | 57 | - name: Populate service facts 58 | ansible.builtin.service_facts: 59 | 60 | - name: Assert that the ZooKeeper service is installed, running, and enabled 61 | ansible.builtin.assert: 62 | that: 63 | - "'zookeeper.service' in ansible_facts.services" 64 | - ansible_facts.services['zookeeper.service'].state == 'running' 65 | - ansible_facts.services['zookeeper.service'].status == 'enabled' 66 | 67 | - name: Create Znode and assert visible by all servers 68 | block: 69 | - name: Create a Znode entry in ZooKeeper 70 | ansible.builtin.command: /usr/share/zookeeper/bin/zkCli.sh create /TestZnode1 "test-node-1" 71 | when: ansible_hostname == "zookeeper-1" 72 | register: command_result 73 | changed_when: false 74 | 75 | - name: Output Znode creation command result 76 | ansible.builtin.debug: 77 | msg: '{{ command_result }}' 78 | when: ansible_hostname == "zookeeper-1" 79 | 80 | - name: Assert that the Znode creation command succeeded 81 | ansible.builtin.assert: 82 | that: 83 | - '"Created /TestZnode1" in command_result.stderr' 84 | when: ansible_hostname == "zookeeper-1" 85 | 86 | - name: Read the created Znode 87 | 
ansible.builtin.command: /usr/share/zookeeper/bin/zkCli.sh get /TestZnode1 88 | register: command_result 89 | changed_when: false 90 | 91 | - name: Assert that ZooKeeper servers can read the created Znode 92 | ansible.builtin.assert: 93 | that: 94 | - '"test-node-1" in command_result.stdout' 95 | 96 | - name: Delete Znode 97 | when: ansible_hostname == "zookeeper-1" 98 | block: 99 | - name: Get Znode 100 | ansible.builtin.command: /usr/share/zookeeper/bin/zkCli.sh get /TestZnode1 101 | when: ansible_hostname == "zookeeper-1" 102 | register: command_result 103 | failed_when: command_result.rc != 0 104 | changed_when: false 105 | 106 | - name: Delete Znode if it already exists 107 | ansible.builtin.command: /usr/share/zookeeper/bin/zkCli.sh delete /TestZnode1 108 | register: command_result 109 | failed_when: command_result.rc != 0 110 | when: 111 | - ansible_hostname == "zookeeper-1" 112 | - '"test-node-1" in command_result.stdout' 113 | changed_when: false 114 | -------------------------------------------------------------------------------- /tasks/firewalld.yml: -------------------------------------------------------------------------------- 1 | - name: Add firewalld rules 2 | firewalld: 3 | port: "{{ item }}" 4 | permanent: yes 5 | state: enabled 6 | loop: 7 | - 2181/tcp 8 | - 2888/tcp 9 | - 3888/tcp 10 | notify: Reload firewalld 11 | -------------------------------------------------------------------------------- /tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set zookeeper version variables 3 | ansible.builtin.set_fact: 4 | zookeeper_version_major: "{{ zookeeper_version.split('.')[0] }}" 5 | zookeeper_version_minor: "{{ zookeeper_version.split('.')[1] }}" 6 | 7 | - name: Load OS-specific variables 8 | ansible.builtin.include_vars: '{{ item }}' 9 | with_first_found: 10 | - ../vars/{{ ansible_os_family }}.yml 11 | - ../vars/{{ ansible_distribution_release }}.yml 12 | - ../vars/empty.yml 13 | 
tags: 14 | - always 15 | 16 | - name: Create zookeeper group 17 | ansible.builtin.group: 18 | name: '{{ zookeeper_group }}' 19 | state: present 20 | system: yes 21 | when: zookeeper_create_user_group | bool 22 | tags: 23 | - zookeeper_group 24 | 25 | - name: Create zookeeper user 26 | ansible.builtin.user: 27 | name: '{{ zookeeper_user }}' 28 | group: '{{ zookeeper_group }}' 29 | state: present 30 | createhome: no 31 | system: yes 32 | when: zookeeper_create_user_group | bool 33 | tags: 34 | - zookeeper_user 35 | 36 | - name: Check if ZooKeeper has already been downloaded and unpacked 37 | ansible.builtin.stat: 38 | path: '{{ zookeeper_install_dir }}' 39 | register: dir 40 | 41 | - name: Download Apache ZooKeeper 42 | ansible.builtin.get_url: 43 | url: '{{ zookeeper_mirror }}/zookeeper-{{ zookeeper_version }}/{{ zookeeper_package }}' 44 | dest: /tmp 45 | mode: 0644 46 | when: not dir.stat.exists 47 | tags: 48 | - zookeeper_download 49 | 50 | - name: Create ZooKeeper installation dir {{ zookeeper_install_dir }} 51 | ansible.builtin.file: 52 | path: '{{ zookeeper_install_dir }}' 53 | state: directory 54 | group: '{{ zookeeper_group }}' 55 | owner: '{{ zookeeper_user }}' 56 | mode: 0755 57 | when: not dir.stat.exists 58 | tags: 59 | - zookeeper_dirs 60 | 61 | - name: Unpack Apache ZooKeeper 62 | ansible.builtin.unarchive: 63 | src: /tmp/{{ zookeeper_package }} 64 | dest: '{{ zookeeper_install_dir }}' 65 | copy: no 66 | extra_opts: [--strip-components=1] 67 | group: '{{ zookeeper_group }}' 68 | owner: '{{ zookeeper_user }}' 69 | when: not dir.stat.exists 70 | tags: 71 | - zookeeper_unpack 72 | 73 | - name: Create symlink to ZooKeeper installation 74 | ansible.builtin.file: 75 | src: '{{ zookeeper_install_dir }}' 76 | dest: '{{ zookeeper_dir }}' 77 | state: link 78 | group: '{{ zookeeper_group }}' 79 | owner: '{{ zookeeper_user }}' 80 | tags: 81 | - zookeeper_dirs 82 | 83 | - name: Create directory for snapshot files and myid file 84 | ansible.builtin.file: 85 | path: 
'{{ zookeeper_data_dir }}' 86 | state: directory 87 | group: '{{ zookeeper_group }}' 88 | owner: '{{ zookeeper_user }}' 89 | mode: 0755 90 | tags: 91 | - zookeeper_dirs 92 | 93 | - name: Create directory for transaction log files 94 | ansible.builtin.file: 95 | path: '{{ zookeeper_data_log_dir }}' 96 | state: directory 97 | group: '{{ zookeeper_group }}' 98 | owner: '{{ zookeeper_user }}' 99 | mode: 0755 100 | tags: 101 | - zookeeper_dirs 102 | 103 | - name: Create zookeeper log directory 104 | ansible.builtin.file: 105 | path: '{{ zookeeper_log_dir }}' 106 | state: directory 107 | group: '{{ zookeeper_group }}' 108 | owner: '{{ zookeeper_user }}' 109 | mode: 0755 110 | tags: 111 | - zookeeper_dirs 112 | 113 | # /usr/share/zookeeper/conf/zoo.cfg is the default file ZooKeeper will use when starting 114 | - name: Template configuration file to zoo.cfg 115 | ansible.builtin.template: 116 | src: zoo.cfg.j2 117 | dest: '{{ zookeeper_dir }}/conf/zoo.cfg' 118 | group: '{{ zookeeper_group }}' 119 | owner: '{{ zookeeper_user }}' 120 | mode: 0644 121 | notify: 122 | - Restart ZooKeeper service 123 | tags: 124 | - zookeeper_config 125 | 126 | - name: Create directory for symlink to ZooKeeper configuration file 127 | ansible.builtin.file: 128 | path: /etc/zookeeper 129 | state: directory 130 | group: '{{ zookeeper_group }}' 131 | owner: '{{ zookeeper_user }}' 132 | mode: 0755 133 | tags: 134 | - zookeeper_config 135 | 136 | - name: Create symlink to ZooKeeper configuration file 137 | ansible.builtin.file: 138 | src: '{{ zookeeper_dir }}/conf/zoo.cfg' 139 | dest: /etc/zookeeper/zoo.cfg 140 | state: link 141 | group: '{{ zookeeper_group }}' 142 | owner: '{{ zookeeper_user }}' 143 | tags: 144 | - zookeeper_config 145 | 146 | - name: Template myid file in {{ zookeeper_data_dir }} 147 | ansible.builtin.template: 148 | src: myid.j2 149 | dest: '{{ zookeeper_data_dir }}/myid' 150 | group: '{{ zookeeper_group }}' 151 | owner: '{{ zookeeper_user }}' 152 | mode: 0644 153 | notify: 154 | 
- Restart ZooKeeper service 155 | tags: 156 | - zookeeper_config 157 | 158 | - name: Template /etc/default 159 | ansible.builtin.template: 160 | src: default.j2 161 | dest: '/etc/default/zookeeper' 162 | group: '{{ zookeeper_group }}' 163 | owner: '{{ zookeeper_user }}' 164 | mode: 0644 165 | notify: 166 | - Restart ZooKeeper service 167 | tags: 168 | - zookeeper_config 169 | 170 | # Uncomment the log4j.properties line for setting the maximum number of logs to rollover and keep 171 | - name: Set maximum log rollover history 172 | ansible.builtin.replace: 173 | dest: '{{ zookeeper_dir }}/conf/log4j.properties' 174 | regexp: '^#log4j.appender.ROLLINGFILE.MaxBackupIndex' 175 | replace: 'log4j.appender.ROLLINGFILE.MaxBackupIndex' 176 | notify: 177 | - Restart ZooKeeper service 178 | tags: 179 | - zookeeper_config 180 | when: zookeeper_version_major|int == 3 and zookeeper_version_minor|int < 8 181 | 182 | - name: Template ZooKeeper systemd service file 183 | ansible.builtin.template: 184 | src: zookeeper.service.j2 185 | dest: '{{ zookeeper_unit_path }}' 186 | group: '{{ zookeeper_group }}' 187 | owner: '{{ zookeeper_user }}' 188 | mode: 0644 189 | tags: 190 | - zookeeper_service 191 | 192 | - name: Start the ZooKeeper service 193 | ansible.builtin.systemd: 194 | name: zookeeper.service 195 | state: started 196 | enabled: yes 197 | when: zookeeper_start 198 | tags: 199 | - zookeeper_service 200 | 201 | # Cleanup install by deleting downloaded ZooKeeper archive 202 | - name: Delete file /tmp/{{ zookeeper_package }} 203 | ansible.builtin.file: 204 | path: /tmp/{{ zookeeper_package }} 205 | state: absent 206 | tags: 207 | - zookeeper_cleanup 208 | 209 | # Set Ansible fact that ZooKeeper has completed installation. This is so that this role can be used in 210 | # other dependent roles. Those roles can check for this fact to determine whether or not this role 211 | # should be run. Failing to do so will mean that this role is executed even if it has already been run. 
212 | - name: Set fact zookeeper_installed 213 | ansible.builtin.set_fact: 214 | zookeeper_installed: true 215 | tags: 216 | - zookeeper_install_fact 217 | 218 | # (Rhel/Rocky) 8 firewall rules 219 | - name: Install firewall rules 220 | ansible.builtin.include_tasks: firewalld.yml 221 | when: (ansible_os_family == "RedHat" or ansible_os_family == "Rocky") and ansible_distribution_major_version == "8" and zookeeper_firewalld 222 | -------------------------------------------------------------------------------- /templates/default.j2: -------------------------------------------------------------------------------- 1 | ZOO_LOG_DIR={{ zookeeper_log_dir }} 2 | ZOO_LOG4J_PROP=INFO,ROLLINGFILE 3 | 4 | {% for key, value in zookeeper_environment.items() %} 5 | {{key}}={{value}} 6 | {% endfor %} 7 | -------------------------------------------------------------------------------- /templates/myid.j2: -------------------------------------------------------------------------------- 1 | {{ zookeeper_id }} 2 | -------------------------------------------------------------------------------- /templates/zoo.cfg.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | # The number of milliseconds of each tick 4 | tickTime={{ zookeeper_ticktime }} 5 | 6 | # The number of ticks that the initial 7 | # synchronization phase can take 8 | initLimit={{ zookeeper_init_limit }} 9 | 10 | # The number of ticks that can pass between 11 | # sending a request and getting an acknowledgement 12 | syncLimit={{ zookeeper_sync_limit }} 13 | 14 | # the directory where the snapshot is stored. 15 | # do not use /tmp for storage, /tmp here is just 16 | # example sakes. 17 | dataDir={{ zookeeper_data_dir }} 18 | 19 | # Directory to write the transaction log to the dataLogDir rather than the dataDir. 20 | # This allows a dedicated log device to be used, and helps avoid competition between logging and snaphots. 
21 | dataLogDir={{ zookeeper_data_log_dir }} 22 | 23 | # the port at which the clients will connect 24 | clientPort={{ zookeeper_client_port }} 25 | 26 | # the maximum number of client connections. 27 | # increase this if you need to handle more clients 28 | maxClientCnxns={{ zookeeper_max_client_cnxns }} 29 | 30 | # 31 | # Be sure to read the maintenance section of the 32 | # administrator guide before turning on autopurge. 33 | # 34 | # http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance 35 | # 36 | # The number of snapshots to retain in dataDir 37 | autopurge.snapRetainCount={{ zookeeper_autopurge_snap_retain_count }} 38 | # Purge task interval in hours 39 | # Set to "0" to disable auto purge feature 40 | autopurge.purgeInterval={{ zookeeper_purge_interval }} 41 | 42 | # List of clustered ZooKeeper nodes. 43 | # Port 2888 is the default quorum port and 3888 is the default 44 | # leader election port. These must be unique per server. 45 | # Each server also needs a /var/lib/zookeeper/myid file created containing 46 | # a single unique number, e.g. 1, 2, etc. 
47 | {% for host in zookeeper_servers %} 48 | server.{{ hostvars[host].zookeeper_id | default(zookeeper_id) }}={{ 49 | host if zookeeper_servers_use_inventory_hostname else hostvars[host].ansible_nodename 50 | }}:{{ zookeeper_leader_port }}:{{ zookeeper_election_port }} 51 | {% endfor %} 52 | 53 | {% if zookeeper_version is version('3.5', '>=') %} 54 | # AdminServer 55 | admin.enableServer={{ zookeeper_enable_server | ternary('true', 'false') }} 56 | admin.serverAddress={{ zookeeper_server_address }} 57 | admin.serverPort={{ zookeeper_server_port }} 58 | admin.idleTimeout={{ zookeeper_idle_timeout }} 59 | admin.commandURL={{ zookeeper_command_url }} 60 | 4lw.commands.whitelist={{ zookeeper_command_whitelist }} 61 | {% endif %} 62 | 63 | {% if zookeeper_version is version('3.6', '>=') %} 64 | # Metrics Providers 65 | metricsProvider.className={{ zookeeper_metricsprovider_classname }} 66 | metricsProvider.httpPort={{ zookeeper_metricsprovider_httpport }} 67 | metricsProvider.exportJvmInfo={{ zookeeper_metricsprovider_exportjvminfo | ternary('true', 'false') }} 68 | {% endif %} 69 | 70 | {% for key, value in (zookeeper_config_params | default({})).items() %} 71 | {{key}}={{value}} 72 | {% endfor %} -------------------------------------------------------------------------------- /templates/zookeeper.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Apache Zookeeper 3 | Documentation=http://zookeeper.apache.org 4 | Requires=network.target 5 | After=network.target 6 | 7 | [Service] 8 | Type=forking 9 | EnvironmentFile=/etc/default/zookeeper 10 | ExecStart={{ zookeeper_dir }}/bin/zkServer.sh start 11 | ExecStop={{ zookeeper_dir }}/bin/zkServer.sh stop 12 | SuccessExitStatus=143 13 | Restart=on-failure 14 | RestartSec=5 15 | User={{ zookeeper_user }} 16 | Group={{ zookeeper_group }} 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | # NOTE(review): removed 'Alias=zookeeper.service' — an Alias identical to the unit's own name is redundant, and newer systemd warns about / refuses self-referential aliases on 'systemctl enable' (see systemd.unit(5), [Install] Alias=) 21 | 
-------------------------------------------------------------------------------- /vars/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | zookeeper_unit_path: /lib/systemd/system/zookeeper.service # systemd unit file location on Debian-family hosts 3 | -------------------------------------------------------------------------------- /vars/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | zookeeper_unit_path: /usr/lib/systemd/system/zookeeper.service # systemd unit file location on RedHat-family hosts 3 | -------------------------------------------------------------------------------- /vars/Rocky.yml: -------------------------------------------------------------------------------- 1 | --- 2 | zookeeper_unit_path: /usr/lib/systemd/system/zookeeper.service # NOTE(review): Rocky Linux reports ansible_os_family 'RedHat', so the role's with_first_found lookup (os_family, then distribution_release) appears never to select this file — verify before relying on it 3 | --------------------------------------------------------------------------------