├── templates
│   ├── agent-version.txt.j2
│   ├── broker.properties.kraft.j2
│   ├── controller.properties.kraft.j2
│   ├── server.properties.kraft.j2
│   ├── kafka.service.j2
│   ├── log4j2.xml.j2
│   ├── kafka-acls-command.j2
│   ├── log4j.properties.j2
│   ├── log4j2.properties.j2
│   ├── log4j2.yaml.j2
│   └── server.properties.j2
├── logo.gif
├── .markdownlint.yaml
├── molecule
│   ├── agents
│   │   ├── converge.yml
│   │   ├── group_vars
│   │   │   ├── zookeeper.yml
│   │   │   └── kafka.yml
│   │   ├── tests
│   │   │   └── test_kafka.yml
│   │   ├── Dockerfile.j2
│   │   ├── molecule.yml
│   │   ├── verify.yml
│   │   └── templates
│   │       └── agents
│   │           └── newrelic
│   │               └── newrelic.yml.j2
│   ├── zookeeper
│   │   ├── converge.yml
│   │   ├── group_vars
│   │   │   ├── zookeeper.yml
│   │   │   └── kafka.yml
│   │   ├── tests
│   │   │   ├── test_kafka_user_group.yml
│   │   │   ├── test_kafka_zookeepers.yml
│   │   │   ├── test_kafka_service.yml
│   │   │   ├── test_kafka_files.yml
│   │   │   ├── 2.0
│   │   │   │   └── test_kafka_topics.yml
│   │   │   └── 3.0
│   │   │       └── test_kafka_topics.yml
│   │   ├── Dockerfile.j2
│   │   ├── molecule.yml
│   │   └── verify.yml
│   └── default
│       ├── converge.yml
│       ├── tests
│       │   ├── test_kafka_user_group.yml
│       │   ├── test_kafka_service.yml
│       │   ├── test_kafka_files.yml
│       │   └── 3.0
│       │       └── test_kafka_topics.yml
│       ├── host_vars
│       │   ├── kafka1.yml
│       │   ├── kafka2.yml
│       │   └── kafka3.yml
│       ├── Dockerfile.j2
│       ├── verify.yml
│       ├── molecule.yml
│       └── group_vars
│           └── kafka.yml
├── .gitattributes
├── test-requirements.txt
├── .ansible-lint
├── .gitignore
├── Pipfile
├── meta
│   └── main.yml
├── vars
│   └── main.yml
├── handlers
│   └── main.yml
├── .travis.yml
├── .yamllint
├── filter_plugins
│   ├── collection.py
│   ├── uuid.py
│   └── list.py
├── tasks
│   ├── service.yml
│   ├── main.yml
│   ├── 3.0
│   │   ├── topic_config.yml
│   │   └── topics.yml
│   ├── 2.0
│   │   ├── topic_config.yml
│   │   └── topics.yml
│   ├── install.yml
│   ├── acls.yml
│   ├── config.yml
│   └── agent.yml
├── .github
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── ISSUE_TEMPLATE.md
│   ├── CODE_OF_CONDUCT.md
│   └── CONTRIBUTING.md
├── defaults
│   └── main
│       ├── kafka-cfg.yml
│       └── main.yml
├── README.md
├── CHANGELOG.md
└── LICENSE
/templates/agent-version.txt.j2:
--------------------------------------------------------------------------------
1 | {{ item.version }}
--------------------------------------------------------------------------------
/logo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/idealista/kafka_role/HEAD/logo.gif
--------------------------------------------------------------------------------
/.markdownlint.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | MD013: false
3 | MD024: false
4 | MD041: false
5 |
--------------------------------------------------------------------------------
/molecule/agents/converge.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Converge
4 |   hosts: kafka
5 |   roles:
6 |     - kafka_role
7 |
--------------------------------------------------------------------------------
/molecule/zookeeper/converge.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Converge
4 |   hosts: kafka
5 |   roles:
6 |     - kafka_role
7 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.yml linguist-detectable=true
2 | *.yaml linguist-detectable=true
3 | *.html linguist-detectable=false
4 |
--------------------------------------------------------------------------------
/molecule/agents/group_vars/zookeeper.yml:
--------------------------------------------------------------------------------
1 | ---
2 | zookeeper_hosts:
3 |   - host: zookeeper  # the machine running ZooKeeper
4 |     id: 1
5 |
--------------------------------------------------------------------------------
/molecule/default/converge.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Converge
4 |   hosts: kafka
5 |   serial: 3
6 |   roles:
7 |     - kafka_role
8 |
--------------------------------------------------------------------------------
/molecule/zookeeper/group_vars/zookeeper.yml:
--------------------------------------------------------------------------------
1 | ---
2 | zookeeper_hosts:
3 |   - host: zookeeper  # the machine running ZooKeeper
4 |     id: 1
5 |
--------------------------------------------------------------------------------
/molecule/agents/tests/test_kafka.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | file:
4 |   /opt/kafka/newrelic/newrelic.jar:
5 |     owner: kafka
6 |     group: kafka
7 |     exists: true
8 |
--------------------------------------------------------------------------------
/test-requirements.txt:
--------------------------------------------------------------------------------
1 | ansible==11.9.0
2 | ansible-lint==25.8.2
3 | molecule==25.7.0
4 | molecule-plugins[docker]==25.8.12
5 | docker==7.1.0
6 | yamllint==1.37.1
7 |
--------------------------------------------------------------------------------
/.ansible-lint:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | exclude_paths:
4 |   - ./molecule
5 |   - ./.github
6 | parseable: true
7 | skip_list:
8 | - '204'
9 | - schema[moves]
10 | use_default_rules: true
11 | verbosity: 1
12 |
--------------------------------------------------------------------------------
/molecule/default/tests/test_kafka_user_group.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | group:
4 |   {{ kafka_group }}:
5 |     exists: true
6 |
7 | user:
8 |   {{ kafka_user }}:
9 |     exists: true
10 |     groups:
11 |       - {{ kafka_group }}
12 |
--------------------------------------------------------------------------------
/molecule/zookeeper/tests/test_kafka_user_group.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | group:
4 |   {{ kafka_group }}:
5 |     exists: true
6 |
7 | user:
8 |   {{ kafka_user }}:
9 |     exists: true
10 |     groups:
11 |       - {{ kafka_group }}
12 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | tests/playbook.retry
2 | tests/.cache
3 | tests/__pycache__
4 | .pytest_cache
5 | .molecule
6 | .vagrant
7 | .cache
8 |
9 | *.iml
10 | .idea
11 | .project
12 |
13 | *.pyc
14 | Pipfile
15 | Pipfile.lock
16 | molecule/**/tmp/
17 |
--------------------------------------------------------------------------------
/templates/broker.properties.kraft.j2:
--------------------------------------------------------------------------------
1 | {% for cfg in broker_cfg %}
2 | ############################# {{ cfg.section }} #############################
3 | {% for key, value in cfg.properties.items() %}
4 | {{ key }}={{ value }}
5 | {% endfor %}
6 | {% endfor %}
7 |
--------------------------------------------------------------------------------
/templates/controller.properties.kraft.j2:
--------------------------------------------------------------------------------
1 | {% for cfg in controller_cfg %}
2 | ############################# {{ cfg.section }} #############################
3 | {% for key, value in cfg.properties.items() %}
4 | {{ key }}={{ value }}
5 | {% endfor %}
6 | {% endfor %}
7 |
--------------------------------------------------------------------------------
/templates/server.properties.kraft.j2:
--------------------------------------------------------------------------------
1 | {% for cfg in kafka_cfg %}
2 | ############################# {{ cfg.section | capitalize }} #############################
3 | {% for key, value in cfg.properties.items() %}
4 | {{ key }}={{ value }}
5 | {% endfor %}
6 | {% endfor %}
7 |
--------------------------------------------------------------------------------
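
The three KRaft templates above share one shape: a list of `{section, properties}` dicts rendered into a Java-properties file. A minimal rendering sketch using the jinja2 library directly; the sample `kafka_cfg` values are hypothetical, not the role's defaults:

```python
# Minimal sketch of how server.properties.kraft.j2 renders (hypothetical values).
from jinja2 import Template

src = (
    "{% for cfg in kafka_cfg %}"
    "############# {{ cfg.section | capitalize }} #############\n"
    "{% for key, value in cfg.properties.items() %}{{ key }}={{ value }}\n{% endfor %}"
    "{% endfor %}"
)

kafka_cfg = [{"section": "broker",
              "properties": {"node.id": 1, "process.roles": "broker,controller"}}]

print(Template(src).render(kafka_cfg=kafka_cfg))
# ############# Broker #############
# node.id=1
# process.roles=broker,controller
```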
/molecule/default/host_vars/kafka1.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | kafka_node_id: 1
4 | kafka_node_uuid: "{{ kafka_node_id | to_uuid | uuid_to_base64 }}"
5 | kafka_controller_uri: "{{ kafka_node_id }}@{{ ansible_hostname }}:{{ kafka_controller_port }}"
6 | kafka_initial_controller: "{{ kafka_controller_uri }}:{{ kafka_node_uuid }}"
7 |
--------------------------------------------------------------------------------
/molecule/default/host_vars/kafka2.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | kafka_node_id: 2
4 | kafka_node_uuid: "{{ kafka_node_id | to_uuid | uuid_to_base64 }}"
5 | kafka_controller_uri: "{{ kafka_node_id }}@{{ ansible_hostname }}:{{ kafka_controller_port }}"
6 | kafka_initial_controller: "{{ kafka_controller_uri }}:{{ kafka_node_uuid }}"
7 |
--------------------------------------------------------------------------------
/molecule/default/host_vars/kafka3.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | kafka_node_id: 3
4 | kafka_node_uuid: "{{ kafka_node_id | to_uuid | uuid_to_base64 }}"
5 | kafka_controller_uri: "{{ kafka_node_id }}@{{ ansible_hostname }}:{{ kafka_controller_port }}"
6 | kafka_initial_controller: "{{ kafka_controller_uri }}:{{ kafka_node_uuid }}"
7 |
--------------------------------------------------------------------------------
/Pipfile:
--------------------------------------------------------------------------------
1 | [[source]]
2 | url = "https://pypi.org/simple"
3 | verify_ssl = true
4 | name = "pypi"
5 |
6 | [packages]
7 | ansible = "==11.9.0"
8 | ansible-lint = "==25.8.2"
9 | molecule = "==25.7.0"
10 | docker = "==7.1.0"
11 | yamllint = "==1.37.1"
12 | molecule-plugins = {extras = ["docker"], version = "==25.8.12"}
13 |
14 | [dev-packages]
15 |
16 | [requires]
17 | python_version = "3.12"
18 |
--------------------------------------------------------------------------------
/molecule/zookeeper/tests/test_kafka_zookeepers.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | command:
4 |   {{ kafka_install_path }}/bin/zookeeper-shell.sh zookeeper:2181/brokers/ids ls -R /:
5 |     exit-status: 0
6 |     stdout: |
7 |       Connecting to zookeeper:2181/brokers/ids
8 |
9 |       WATCHER::
10 |
11 |       WatchedEvent state:SyncConnected type:None path:null
12 |       /
13 |       /1
14 |       /2
15 |       /3
16 |
--------------------------------------------------------------------------------
/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | galaxy_info:
4 |   role_name: kafka
5 |   namespace: idealista
6 |   author: idealista
7 |   company: Idealista S.A.U.
8 |   description: Ansible role for Kafka distributed streaming platform
9 |   min_ansible_version: 2.9.9
10 |   license: Apache 2.0
11 |   platforms:
12 |     - name: Debian
13 |       versions:
14 |         - bookworm
15 |         - bullseye
16 |   galaxy_tags:
17 |     - clustering
18 |     - queue
19 |     - kafka
20 |     - events
21 |
--------------------------------------------------------------------------------
/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | kafka_required_libs:
3 |   - unzip
4 |
5 | kafka_removeable_folders:
6 |   - bin
7 |   - config
8 |   - libs
9 |   - site-docs
10 |
11 | kafka_filter_function_not_removable_topic: "lambda k: not k.startswith('__'){{ kafka_not_removable_topics | map('regex_replace', '^(.*)$', ' and \"\\1\" not in k') | list | join('') }}"
12 |
13 | kafka_supported_agents_extensions:
14 |   - '.zip'
15 |   - '.tar'
16 |   - '.tar.gz'
17 |   - '.tar.bz2'
18 |   - '.tar.xz'
19 |
--------------------------------------------------------------------------------
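
For context, `kafka_filter_function_not_removable_topic` templates out the source of a Python lambda that `filter_plugins/list.py` later applies via `eval()`. A sketch of the rendered result, assuming a hypothetical `kafka_not_removable_topics` of `['important-topic']`:

```python
# With kafka_not_removable_topics = ['important-topic'] (hypothetical), the
# Jinja expression above renders to this predicate source string:
rendered = "lambda k: not k.startswith('__') and \"important-topic\" not in k"

# filter_evaluated (filter_plugins/list.py) then does eval() + filter():
topics = ["__consumer_offsets", "important-topic", "obsolete-topic"]
print(list(filter(eval(rendered), topics)))  # ['obsolete-topic']
```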
/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Restart kafka
4 |   ansible.builtin.systemd:
5 |     name: kafka
6 |     state: restarted
7 |   when: kafka_service_state != 'stopped'
8 |
9 | - name: Kafka | Notify about upgrading to 4.x
10 |   ansible.builtin.debug:
11 |     msg: "⚠️ Kafka is being upgraded to version 4.x; consider following the upgrade directions at https://kafka.apache.org/documentation/#upgrade_servers_4_0_0 and don't forget to update your configuration to use KRaft mode."
12 |   listen: message about upgrading
13 |
--------------------------------------------------------------------------------
/molecule/agents/Dockerfile.j2:
--------------------------------------------------------------------------------
1 | # Molecule managed
2 |
3 | {% if item.registry is defined %}
4 | FROM {{ item.registry.url }}/{{ item.image }}
5 | {% else %}
6 | FROM {{ item.image }}
7 | {% endif %}
8 |
9 | {% if 'bullseye' in item.image %}
10 | RUN apt-get update && \
11 |     apt-get install -y python3 sudo bash ca-certificates iproute2 systemd systemd-sysv python3-pip && \
12 |     apt-get clean
13 |
14 | STOPSIGNAL SIGRTMIN+3
15 | RUN systemctl set-default multi-user.target
16 | # TIP 2 solution 2
17 | # RUN systemctl mask getty.target
18 | {% endif %}
19 |
20 |
21 | RUN mkdir -p /usr/share/man/man1
22 |
--------------------------------------------------------------------------------
/molecule/default/Dockerfile.j2:
--------------------------------------------------------------------------------
1 | # Molecule managed
2 |
3 | {% if item.registry is defined %}
4 | FROM {{ item.registry.url }}/{{ item.image }}
5 | {% else %}
6 | FROM {{ item.image }}
7 | {% endif %}
8 |
9 | {% if 'bullseye' in item.image %}
10 | RUN apt-get update && \
11 |     apt-get install -y python3 sudo bash ca-certificates iproute2 systemd systemd-sysv python3-pip && \
12 |     apt-get clean
13 |
14 | STOPSIGNAL SIGRTMIN+3
15 | RUN systemctl set-default multi-user.target
16 | # TIP 2 solution 2
17 | # RUN systemctl mask getty.target
18 | {% endif %}
19 |
20 |
21 | RUN mkdir -p /usr/share/man/man1
22 |
--------------------------------------------------------------------------------
/molecule/zookeeper/Dockerfile.j2:
--------------------------------------------------------------------------------
1 | # Molecule managed
2 |
3 | {% if item.registry is defined %}
4 | FROM {{ item.registry.url }}/{{ item.image }}
5 | {% else %}
6 | FROM {{ item.image }}
7 | {% endif %}
8 |
9 | {% if 'bullseye' in item.image %}
10 | RUN apt-get update && \
11 |     apt-get install -y python3 sudo bash ca-certificates iproute2 systemd systemd-sysv python3-pip && \
12 |     apt-get clean
13 |
14 | STOPSIGNAL SIGRTMIN+3
15 | RUN systemctl set-default multi-user.target
16 | # TIP 2 solution 2
17 | # RUN systemctl mask getty.target
18 | {% endif %}
19 |
20 |
21 | RUN mkdir -p /usr/share/man/man1
22 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dist: focal
3 | language: python
4 | python: "3.12"
5 | os: linux
6 | sudo: required
7 | services:
8 |   - docker
9 | install:
10 |   - pip install -I pipenv
11 |   - pipenv sync
12 | env:
13 |   global:
14 |     - PIPENV_IGNORE_VIRTUALENVS=1
15 |   jobs:
16 |     - MOLECULE_DISTRO=idealista/jdk:17.0.8-bullseye-temurin-jdk
17 | script:
18 |   - pipenv run molecule test
19 |
20 | notifications:
21 |   webhooks: https://galaxy.ansible.com/api/v1/notifications/
22 |   email:
23 |     if: branch = main
24 |     on_success: change
25 |     on_failure: always
26 |     recipients:
27 |       - desarrollo.benders@idealista.com
28 |
--------------------------------------------------------------------------------
/.yamllint:
--------------------------------------------------------------------------------
1 | ---
2 | extends: default
3 |
4 | rules:
5 |   braces:
6 |     max-spaces-inside: 1
7 |     level: error
8 |   brackets:
9 |     max-spaces-inside: 1
10 |     level: error
11 |   colons:
12 |     max-spaces-after: -1
13 |     level: error
14 |   commas:
15 |     max-spaces-after: -1
16 |     level: error
17 |   comments: disable
18 |   empty-lines:
19 |     max: 3
20 |     level: error
21 |   hyphens:
22 |     level: error
23 |   key-duplicates: enable
24 |   line-length: disable
25 |   new-lines:
26 |     type: unix
27 |   truthy: disable
28 |
29 | ignore: |
30 |   .molecule/
31 |   *vault*
32 |   test_*yml
33 |   roles/
34 |   .ansible-lint
35 |
--------------------------------------------------------------------------------
/molecule/default/tests/test_kafka_service.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | service:
4 |   kafka:
5 |     enabled: true
6 |     running: true
7 |
8 | file:
9 |   {{ kafka_service_file_path }}:
10 |     exists: true
11 |
12 | # port:
13 | #   # https://github.com/goss-org/goss/blob/master/docs/gossfile.md#port
14 | #   # https://github.com/goss-org/goss/issues/149
15 | #   tcp6:{{ kafka_port }}:
16 | #     listening: true
17 | #     ip:
18 | #       - {{ ansible_default_ipv4.address }}
19 | # Alternative port check, because of tcp/tcp6 inconsistencies
20 | command:
21 |   ss -tl | grep {{ kafka_port }}:
22 |     exit-status: 0
23 |     stdout:
24 |       match-regexp: '^LISTEN(.*)(\:{{ kafka_port }})(.*)'
25 |
--------------------------------------------------------------------------------
/molecule/zookeeper/tests/test_kafka_service.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | service:
4 |   kafka:
5 |     enabled: true
6 |     running: true
7 |
8 | file:
9 |   {{ kafka_service_file_path }}:
10 |     exists: true
11 |
12 | # port:
13 | #   # https://github.com/goss-org/goss/blob/master/docs/gossfile.md#port
14 | #   # https://github.com/goss-org/goss/issues/149
15 | #   tcp6:{{ kafka_port }}:
16 | #     listening: true
17 | #     ip:
18 | #       - {{ ansible_default_ipv4.address }}
19 | # Alternative port check, because of tcp/tcp6 inconsistencies
20 | command:
21 |   ss -tl | grep {{ kafka_port }}:
22 |     exit-status: 0
23 |     stdout:
24 |       match-regexp: '^LISTEN(.*)(\:{{ kafka_port }})(.*)'
25 |
--------------------------------------------------------------------------------
/filter_plugins/collection.py:
--------------------------------------------------------------------------------
1 | def in_list(collection, key, values):
2 |     '''
3 |     extract every item x in the collection whose x[key] is in values
4 |     '''
5 |
6 |     return [x for x in collection if x[key] in values]
7 |
8 |
9 | def not_in_list(collection, key, values):
10 |     '''
11 |     extract every item x in the collection whose x[key] is not in values
12 |     '''
13 |
14 |     return [x for x in collection if x[key] not in values]
15 |
16 |
17 | class FilterModule(object):
18 |     '''
19 |     custom jinja2 filters for working with collections
20 |     '''
21 |
22 |     def filters(self):
23 |         return {
24 |             'in_list': in_list,
25 |             'not_in_list': not_in_list,
26 |         }
--------------------------------------------------------------------------------
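
Illustrative usage of the two filters above; the host values are made up for the example:

```python
# Assumes in_list/not_in_list from filter_plugins/collection.py are in scope.
hosts = [{"host": "kafka1", "id": 1},
         {"host": "kafka2", "id": 2},
         {"host": "kafka3", "id": 3}]

print(in_list(hosts, "id", [1, 3]))      # entries whose id is 1 or 3
print(not_in_list(hosts, "id", [1, 3]))  # [{'host': 'kafka2', 'id': 2}]
```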
/molecule/zookeeper/tests/test_kafka_files.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | file:
4 |   {{ kafka_data_path }}:
5 |     owner: {{ kafka_user }}
6 |     group: {{ kafka_group }}
7 |     exists: true
8 |     filetype: directory
9 |
10 |   {{ kafka_install_path }}:
11 |     owner: {{ kafka_user }}
12 |     group: {{ kafka_group }}
13 |     exists: true
14 |     filetype: directory
15 |
16 |   {{ kafka_log_path }}:
17 |     owner: {{ kafka_user }}
18 |     group: {{ kafka_group }}
19 |     exists: true
20 |     filetype: directory
21 |
22 |   {{ kafka_conf_path }}:
23 |     owner: {{ kafka_user }}
24 |     group: {{ kafka_group }}
25 |     exists: true
26 |     filetype: directory
27 |
28 |   {{ kafka_conf_path }}/server.properties:
29 |     owner: {{ kafka_user }}
30 |     group: {{ kafka_group }}
31 |     exists: true
32 |     contains:
33 |       - message.max.bytes=409715200
34 |
--------------------------------------------------------------------------------
/molecule/default/tests/test_kafka_files.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | file:
4 |   {{ kafka_data_path }}:
5 |     owner: {{ kafka_user }}
6 |     group: {{ kafka_group }}
7 |     exists: true
8 |     filetype: directory
9 |
10 |   {{ kafka_install_path }}:
11 |     owner: {{ kafka_user }}
12 |     group: {{ kafka_group }}
13 |     exists: true
14 |     filetype: directory
15 |
16 |   {{ kafka_log_path }}:
17 |     owner: {{ kafka_user }}
18 |     group: {{ kafka_group }}
19 |     exists: true
20 |     filetype: directory
21 |
22 |   {{ kafka_conf_path }}:
23 |     owner: {{ kafka_user }}
24 |     group: {{ kafka_group }}
25 |     exists: true
26 |     filetype: directory
27 |
28 |   "{{ kafka_conf_path }}/server.properties":
29 |     owner: {{ kafka_user }}
30 |     group: {{ kafka_group }}
31 |     exists: true
32 |     contains:
33 |       - log.dirs={{ kafka_data_path }}
34 |
--------------------------------------------------------------------------------
/tasks/service.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: KAFKA | Copy Daemon script
3 |   ansible.builtin.template:
4 |     src: "{{ kafka_service_template_path }}"
5 |     dest: "{{ kafka_service_file_path }}"
6 |     mode: "0644"
7 |   notify: Restart kafka
8 |
9 | - name: KAFKA | Configuring service
10 |   ansible.builtin.systemd:
11 |     name: kafka
12 |     state: "{{ kafka_service_state }}"
13 |     enabled: "{{ kafka_service_enabled }}"
14 |     daemon_reload: true
15 |
16 | - name: KAFKA | Restart if necessary
17 |   ansible.builtin.meta: flush_handlers
18 |
19 | - name: KAFKA | Wait for service listening
20 |   ansible.builtin.wait_for:
21 |     host: "{{ kafka_host_name }}"
22 |     port: "{{ kafka_port }}"
23 |     state: "{{ kafka_service_state }}"
24 |     delay: 5
25 |     timeout: "{{ kafka_service_state_timeout }}"
26 |   when:
27 |     - kafka_service_enabled
28 |     - kafka_service_state == "started"
29 |
--------------------------------------------------------------------------------
/molecule/zookeeper/tests/2.0/test_kafka_topics.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | command:
4 |   {{ kafka_install_path }}/bin/kafka-topics.sh --zookeeper {{ kafka_zookeeper_hosts | join(',') }} --list:
5 |     exit-status: 0
6 |     stdout:
7 |       - "test"
8 |       - "test2"
9 |
10 |   {{ kafka_install_path }}/bin/kafka-configs.sh --zookeeper {{ kafka_zookeeper_hosts | join(',') }} --entity-type topics --entity-name test --describe:
11 |     exit-status: 0
12 |     stdout:
13 |       - "Configs for topic 'test' are delete.retention.ms=100000,max.message.bytes=1024"
14 |     timeout: 20000
15 |
16 |   {{ kafka_install_path }}/bin/kafka-configs.sh --zookeeper {{ kafka_zookeeper_hosts | join(',') }} --entity-type topics --entity-name test2 --describe:
17 |     exit-status: 0
18 |     stdout:
19 |       - "Configs for topic 'test2' are delete.retention.ms=100000,max.message.bytes=2048"
20 |     timeout: 20000
23 |
--------------------------------------------------------------------------------
/filter_plugins/uuid.py:
--------------------------------------------------------------------------------
1 | import uuid
2 | import base64
3 |
4 |
5 | def uuid_to_base64(value: str):
6 |     """
7 |     Convert a standard UUID string to url-safe base64 without padding, as Kafka does.
8 |     """
9 |     u = uuid.UUID(value)
10 |     return base64.urlsafe_b64encode(u.bytes).decode('ascii').rstrip("=")
11 |
12 |
13 | def base64_to_uuid(value: str):
14 |     """
15 |     Convert url-safe base64 without padding back to a standard UUID string, as Kafka does.
16 |     """
17 |     # Add padding if it is missing
18 |     padded = value + '=' * (-len(value) % 4)
19 |     return str(uuid.UUID(bytes=base64.urlsafe_b64decode(padded)))
20 |
21 |
22 | class FilterModule(object):
23 |     '''
24 |     custom jinja2 filters for working with Kafka-style UUIDs
25 |     '''
26 |     def filters(self):
27 |         return {
28 |             'uuid_to_base64': uuid_to_base64,
29 |             'base64_to_uuid': base64_to_uuid
30 |         }
31 |
--------------------------------------------------------------------------------
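
These filters back the `kafka_node_uuid` pipeline in molecule/default/host_vars (`{{ kafka_node_id | to_uuid | uuid_to_base64 }}`). A round-trip sketch; the namespace constant is an assumption about Ansible's `to_uuid` filter, not something taken from this repo:

```python
import base64
import uuid

# Assumed to match the namespace Ansible's to_uuid filter uses (verify
# against your Ansible version before relying on it).
ANSIBLE_NAMESPACE = uuid.UUID("361E6D51-FAEC-444A-9079-341386DA8E2E")

node_uuid = str(uuid.uuid5(ANSIBLE_NAMESPACE, "1"))  # "1" | to_uuid
node_b64 = base64.urlsafe_b64encode(uuid.UUID(node_uuid).bytes).decode("ascii").rstrip("=")
assert base64_to_uuid(node_b64) == node_uuid  # base64_to_uuid (above) inverts uuid_to_base64
print(node_uuid, "->", node_b64)
```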
/molecule/agents/group_vars/kafka.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | kafka_private_tmp: false
4 | kafka_use_kraft: false
5 | kafka_zookeeper_hosts:
6 |   - zookeeper:2181
7 |
8 | kafka_jvm_performance_opts: "-XX:MetaspaceSize=96m -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:G1HeapRegionSize=16M -XX:MinMetaspaceFreeRatio=50 -XX:MaxMetaspaceFreeRatio=80"
9 |
10 | kafka_hosts:
11 |   - host: kafka1
12 |     id: 1
13 |   - host: kafka2
14 |     id: 2
15 |   - host: kafka3
16 |     id: 3
17 |
18 | kafka_agents_config:
19 |   - name: "newrelic"
20 |     download_url: "http://download.newrelic.com/newrelic/java-agent/newrelic-agent/current/newrelic-java.zip"
21 |     version: "5.11.0"
22 |     java_opts:
23 |       - '-javaagent:{{ kafka_install_path }}/newrelic/newrelic.jar'
24 |     configuration_files:
25 |       - "newrelic.yml"
26 |     params: {
27 |       application_name: "application_sample_name",
28 |       license_key: "sddsasd"
29 |     }
30 |
31 | kafka_cfg: []
32 | broker_cfg: []
33 | controller_cfg: []
34 |
--------------------------------------------------------------------------------
/filter_plugins/list.py:
--------------------------------------------------------------------------------
1 | def zip_dict(values, keys):
2 |     '''
3 |     return a dictionary of zipped lists
4 |     '''
5 |
6 |     return dict(zip(keys, values))
7 |
8 |
9 | def flatten(nested):
10 |     '''
11 |     flatten a list of lists
12 |     '''
13 |
14 |     return [item for sublist in nested for item in sublist]
15 |
16 |
17 | def filter_list(values, cond=None):
18 |     '''
19 |     filter the input list by condition, returning a list
20 |     '''
21 |
22 |     return list(filter(cond, values))
23 |
24 |
25 | def filter_evaluated_list(values, cond):
26 |     '''
27 |     filter the input list by evaluating the condition from a string
28 |     '''
29 |
30 |     return list(filter(eval(cond), values))
31 |
32 |
33 | class FilterModule(object):
34 |     '''
35 |     custom jinja2 filters for working with collections
36 |     '''
37 |
38 |     def filters(self):
39 |         return {
40 |             'zip_dict': zip_dict,
41 |             'flatten_list': flatten,
42 |             'filter': filter_list,
43 |             'filter_evaluated': filter_evaluated_list
44 |         }
--------------------------------------------------------------------------------
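
Quick usage examples for the filters above, with made-up inputs:

```python
# Assumes the functions from filter_plugins/list.py are in scope.
print(zip_dict([1, "kafka1"], ["id", "host"]))              # {'id': 1, 'host': 'kafka1'}
print(flatten([[1, 2], [3]]))                               # [1, 2, 3]
print(filter_list([0, 1, 2], lambda x: x > 0))              # [1, 2]
print(filter_evaluated_list([0, 1, 2], "lambda x: x > 0"))  # [1, 2]
```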
/templates/kafka.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kafka distributed streaming platform
3 | After=network.target
4 |
5 | [Service]
6 | PrivateTmp={{ kafka_private_tmp }}
7 | Restart=on-failure
8 | {% if kafka_version is version('4.0.0', '>=') %}
9 | Environment="KAFKA_LOG4J_OPTS=-Dlog4j.configurationFile={{ kafka_conf_path }}/{{ kafka_log4j_file_name }}"
10 | {% else %}
11 | Environment="KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:{{ kafka_conf_path }}/{{ kafka_log4j_file_name }}"
12 | {% endif %}
13 | Environment="KAFKA_HEAP_OPTS=-Xmx{{ kafka_xmx }} -Xms{{ kafka_xms }}"
14 | Environment="JMX_PORT={{ kafka_jmx_port }}"
15 | Environment="KAFKA_OPTS={{ kafka_opts }} {{ kafka_agent_java_opts|default([])|join(' ') }}"
16 | Environment="KAFKA_JVM_PERFORMANCE_OPTS={{ kafka_jvm_performance_opts }}"
17 | ExecStart={{ kafka_install_path }}/bin/kafka-server-start.sh {{ kafka_conf_path }}/server.properties
18 | ExecStop={{ kafka_install_path }}/bin/kafka-server-stop.sh
19 | User={{ kafka_user }}
20 |
21 | [Install]
22 | WantedBy=multi-user.target
23 |
--------------------------------------------------------------------------------
/molecule/zookeeper/group_vars/kafka.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | kafka_version: 3.9.1
4 |
5 | kafka_private_tmp: false
6 | kafka_use_kraft: false
7 | kafka_zookeeper_hosts:
8 |   - zookeeper:2181
9 |
10 | kafka_jvm_performance_opts: "-XX:MetaspaceSize=96m -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:G1HeapRegionSize=16M -XX:MinMetaspaceFreeRatio=50 -XX:MaxMetaspaceFreeRatio=80"
11 |
12 | kafka_hosts:
13 |   - host: kafka1
14 |     id: 1
15 |   - host: kafka2
16 |     id: 2
17 |   - host: kafka3
18 |     id: 3
19 |
20 | kafka_xmx: "768m"
21 | kafka_xms: "512m"
22 |
23 | kafka_topics:
24 |   - name: 'test'
25 |     partitions: '3'
26 |     replicas: '3'
27 |   - name: 'test2'
28 |     partitions: '5'
29 |     replicas: '1'
30 |
31 | kafka_topics_config:
32 |   - name: 'test'
33 |     delete.retention.ms: 100000
34 |     max.message.bytes: 1024
35 |   - name: 'test2'
36 |     delete.retention.ms: 100000
37 |     max.message.bytes: 2048
38 |
39 | kafka_extra_properties:
40 |   - key: message.max.bytes
41 |     value: 409715200
42 |
43 | kafka_cfg: []
44 | broker_cfg: []
45 | controller_cfg: []
46 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ### Requirements
2 |
3 | * Filling out the template is required. Any pull request that does not include enough information to be reviewed in a timely manner may be closed at the maintainers' discretion.
4 | * All new code requires tests to guard against regressions.
5 | * Remember to set **idealista:develop** as the base branch.
6 |
7 | ### Description of the Change
8 |
9 |
14 |
15 |
16 | ### Benefits
17 |
18 |
19 |
20 | ### Possible Drawbacks
21 |
22 |
23 |
24 | ### Applicable Issues
25 |
26 |
27 |
--------------------------------------------------------------------------------
/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: KAFKA | Install
4 |   ansible.builtin.import_tasks: install.yml
5 |   tags:
6 |     - kafka_install
7 |
8 | - name: KAFKA | Agent config
9 |   ansible.builtin.import_tasks: agent.yml
10 |   tags:
11 |     - kafka_agents_config
12 |   when: kafka_agents_config is defined
13 |
14 | - name: KAFKA | Configure
15 |   ansible.builtin.import_tasks: config.yml
16 |   tags:
17 |     - kafka_configure
18 |
19 | - name: KAFKA | Service
20 |   ansible.builtin.import_tasks: service.yml
21 |   tags:
22 |     - kafka_service
23 |
24 | - name: KAFKA | Topics (2.X Version)
25 |   ansible.builtin.import_tasks: 2.0/topics.yml
26 |   run_once: true
27 |   when: kafka_version is version('3.0.0', "<")
28 |   tags:
29 |     - kafka_topics
30 |
31 | - name: KAFKA | Topics (>= 3.X Version)
32 |   ansible.builtin.import_tasks: 3.0/topics.yml
33 |   run_once: true
34 |   when: kafka_version is version('3.0.0', ">=")
35 |   tags:
36 |     - kafka_topics
37 |
38 | - name: KAFKA | ACLs
39 |   ansible.builtin.import_tasks: acls.yml
40 |   tags:
41 |     - kafka_acls
42 |   when: kafka_acls is defined
43 |   run_once: true
44 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
7 |
8 | ### Prerequisites
9 |
10 | * [ ] Put an X between the brackets on this line if you have done all of the following:
11 |   * Checked that your issue isn't already filed: https://github.com/issues?utf8=✓&q=is%3Aissue+user%3Aidealista
12 |   * Checked that the described functionality isn't already provided
13 |
14 | ### Description
15 |
16 | [Description of the issue]
17 |
18 | ### Steps to Reproduce
19 |
20 | 1. [First Step]
21 | 2. [Second Step]
22 | 3. [and so on...]
23 |
24 | **Expected behavior:** [What you expect to happen]
25 |
26 | **Actual behavior:** [What actually happens]
27 |
28 | **Reproduces how often:** [What percentage of the time does it reproduce?]
29 |
30 | ### Versions
31 |
32 | The version(s) in which you noticed the behavior.
33 |
34 | ### Additional Information
35 |
36 | Any additional information, configuration or data that might be necessary to reproduce the issue.
37 |
--------------------------------------------------------------------------------
/molecule/zookeeper/tests/3.0/test_kafka_topics.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | command:
4 |   {{ kafka_install_path }}/bin/kafka-topics.sh --bootstrap-server {{ ansible_default_ipv4.address }}:{{ kafka_port }} --list:
5 |     exit-status: 0
6 |     stdout:
7 |       - "test"
8 |       - "test2"
9 |
10 |   {{ kafka_install_path }}/bin/kafka-configs.sh --bootstrap-server {{ ansible_default_ipv4.address }}:{{ kafka_port }} --entity-type topics --entity-name test --describe:
11 |     exit-status: 0
12 |     stdout:
13 |       - "Dynamic configs for topic test are:"
14 |       - "delete.retention.ms=100000 sensitive=false synonyms={DYNAMIC_TOPIC_CONFIG:delete.retention.ms=100000, DEFAULT_CONFIG:log.cleaner.delete.retention.ms=86400000}"
15 |       - "max.message.bytes=1024 sensitive=false synonyms={DYNAMIC_TOPIC_CONFIG:max.message.bytes=1024, STATIC_BROKER_CONFIG:message.max.bytes=409715200, DEFAULT_CONFIG:message.max.bytes=1048588}"
16 |     timeout: 20000
17 |
18 |   {{ kafka_install_path }}/bin/kafka-configs.sh --bootstrap-server {{ ansible_default_ipv4.address }}:{{ kafka_port }} --entity-type topics --entity-name test2 --describe:
19 |     exit-status: 0
20 |     stdout:
21 |       - "Dynamic configs for topic test2 are:"
22 |       - "delete.retention.ms=100000 sensitive=false synonyms={DYNAMIC_TOPIC_CONFIG:delete.retention.ms=100000, DEFAULT_CONFIG:log.cleaner.delete.retention.ms=86400000}"
23 |       - "max.message.bytes=2048 sensitive=false synonyms={DYNAMIC_TOPIC_CONFIG:max.message.bytes=2048, STATIC_BROKER_CONFIG:message.max.bytes=409715200, DEFAULT_CONFIG:message.max.bytes=1048588}"
24 |     timeout: 20000
25 |
--------------------------------------------------------------------------------
/defaults/main/kafka-cfg.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # General / Server config
4 | kafka_cfg: "{{ [kafka_cfg_default, kafka_cfg_base | default([]), kafka_cfg_extra | default([])] | community.general.lists_mergeby('section', recursive=true) }}"
5 |
6 | kafka_cfg_default:
7 |   - section: 'broker'  # Section as in Kafka docs https://kafka.apache.org/documentation/#configuration
8 |     properties:
9 |       process.roles: "{{ kafka_process_roles | default('broker,controller') }}"
10 |       node.id: "{{ kafka_node_id }}"
11 |       controller.quorum.voters: "{{ kafka_controller_quorum_voters }}"
12 |       listeners: "{{ kafka_listeners }}"
13 |       inter.broker.listener.name: "{{ kafka_inter_broker_listener_name }}"
14 |       advertised.listeners: "{{ kafka_advertised_listeners }}"
15 |       controller.listener.names: "{{ kafka_controller_listener_names }}"
16 |       listener.security.protocol.map: "{{ kafka_security_protocol_map }}"
17 |       log.dirs: "{{ kafka_data_path }}"
18 |       controller.quorum.bootstrap.servers: "{{ kafka_controller_quorum_bootstrap_servers }}"
19 | # kafka_cfg_base: []
20 | # kafka_cfg_extra: []
21 |
22 | # Brokers
23 | broker_cfg: "{{ [broker_cfg_default, broker_cfg_base | default([]), broker_cfg_extra | default([])] | community.general.lists_mergeby('section', recursive=true) }}"
24 | # broker_cfg_default: []
25 | # broker_cfg_base: []
26 | # broker_cfg_extra: []
27 |
28 | # Controllers
29 | controller_cfg: "{{ [controller_cfg_default, controller_cfg_base | default([]), controller_cfg_extra | default([])] | community.general.lists_mergeby('section', recursive=true) }}"
30 | # controller_cfg_default: []
31 | # controller_cfg_base: []
32 | # controller_cfg_extra: []
33 |
--------------------------------------------------------------------------------
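
To make the merge semantics concrete, here is a rough Python sketch of what `community.general.lists_mergeby('section', recursive=true)` does to these three lists (sample values are hypothetical; see the collection's documentation for the authoritative behavior):

```python
# Later lists win; entries are matched by their 'section' key and their
# nested 'properties' dicts merge recursively.
def merge_by_section(*cfg_lists):
    merged = {}
    for cfg_list in cfg_lists:
        for entry in cfg_list:
            slot = merged.setdefault(entry["section"],
                                     {"section": entry["section"], "properties": {}})
            slot["properties"].update(entry.get("properties", {}))
    return list(merged.values())

default = [{"section": "broker", "properties": {"node.id": 1, "log.dirs": "/var/lib/kafka"}}]
extra = [{"section": "broker", "properties": {"log.dirs": "/data/kafka"}}]
print(merge_by_section(default, extra))
# [{'section': 'broker', 'properties': {'node.id': 1, 'log.dirs': '/data/kafka'}}]
```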
/templates/log4j2.xml.j2:
--------------------------------------------------------------------------------
[Log4j2 XML template: the markup was lost in extraction; only blank lines remain.]
--------------------------------------------------------------------------------
/tasks/3.0/topic_config.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: KAFKA | Init temp vars config (1/3) for {{ item.name }}
4 |   ansible.builtin.set_fact:
5 |     string_properties: ""
6 |     properties: {}
7 |
8 | - name: KAFKA | Init temp vars config (2/3) for {{ item.name }}
9 |   ansible.builtin.set_fact:
10 |     properties: "{{ properties | combine({property.key: property.value}) }}"
11 |   when: property.key not in ['name']
12 |   with_dict: "{{ item }}"
13 |   loop_control:
14 |     loop_var: property
15 |
16 | - name: KAFKA | Init temp vars config (3/3) for {{ item.name }}
17 |   ansible.builtin.set_fact:
18 |     string_properties: "{{ string_properties }}{{ (id > 0) | ternary(',', '') }}{{ conf.key }}={{ conf.value }}"
19 |   loop: "{{ properties | dict2items }}"
20 |   loop_control:
21 |     index_var: id
22 |     loop_var: conf
23 |
24 | - name: KAFKA | Obtain config for topic
25 |   ansible.builtin.command: >
26 |     {{ kafka_install_path }}/bin/kafka-configs.sh
27 |     --entity-type topics
28 |     --entity-name {{ item.name }}
29 |     --describe
30 |     --bootstrap-server 0.0.0.0:{{ kafka_port }}
31 |   register: kafka_topics_config_described_full
32 |   changed_when: false
33 |
34 | - name: KAFKA | Topics to alter
35 |   ansible.builtin.set_fact:
36 |     kafka_topics_config_to_alter: |
37 |       {{ kafka_topics_config_described_full.stdout |
38 |          replace("Configs for topic '" + item.name + "' are ", '') }}
39 |
40 | - name: KAFKA | Configure kafka topics
41 |   ansible.builtin.command: >
42 |     {{ kafka_install_path }}/bin/kafka-configs.sh
43 |     --entity-type topics
44 |     --entity-name {{ item.name }}
45 |     --add-config {{ string_properties }}
46 |     --alter
47 |     --force
48 |     --bootstrap-server 0.0.0.0:{{ kafka_port }}
49 |   when: kafka_topics_config_to_alter | trim != string_properties
50 |   changed_when: false
51 |   tags:
52 |     - skip_ansible_lint
54 |
--------------------------------------------------------------------------------
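
The three "Init temp vars" steps above reduce to a small transformation; an equivalent Python sketch, using one item shaped like the kafka_topics_config entries from molecule/zookeeper/group_vars/kafka.yml:

```python
# One kafka_topics_config item: everything except 'name' is a topic property.
item = {"name": "test", "delete.retention.ms": 100000, "max.message.bytes": 1024}

properties = {k: v for k, v in item.items() if k != "name"}              # step 2/3
string_properties = ",".join(f"{k}={v}" for k, v in properties.items())  # step 3/3
print(string_properties)  # delete.retention.ms=100000,max.message.bytes=1024
```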
/tasks/2.0/topic_config.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: KAFKA | Init temp vars config (1/3) for {{ item.name }}
4 |   ansible.builtin.set_fact:
5 |     string_properties: ""
6 |     properties: {}
7 |
8 | - name: KAFKA | Init temp vars config (2/3) for {{ item.name }}
9 |   ansible.builtin.set_fact:
10 |     properties: "{{ properties | combine({property.key: property.value}) }}"
11 |   when: property.key not in ['name']
12 |   with_dict: "{{ item }}"
13 |   loop_control:
14 |     loop_var: property
15 |
16 | - name: KAFKA | Init temp vars config (3/3) for {{ item.name }}
17 |   ansible.builtin.set_fact:
18 |     string_properties: "{{ string_properties }}{{ (id > 0) | ternary(',', '') }}{{ conf.key }}={{ conf.value }}"
19 |   loop: "{{ properties | dict2items }}"
20 |   loop_control:
21 |     index_var: id
22 |     loop_var: conf
23 |
24 | - name: KAFKA | Obtain config for topic
25 |   ansible.builtin.command: >
26 |     {{ kafka_install_path }}/bin/kafka-configs.sh
27 |     --zookeeper {{ kafka_zookeeper_hosts | join(',') }}
28 |     --entity-type topics
29 |     --entity-name {{ item.name }}
30 |     --describe
31 |   register: kafka_topics_config_described_full
32 |   changed_when: false
33 |
34 | - name: KAFKA | Topics to alter
35 |   ansible.builtin.set_fact:
36 |     kafka_topics_config_to_alter: |
37 |       {{ kafka_topics_config_described_full.stdout |
38 |          replace("Configs for topic '" + item.name + "' are ", '') }}
39 |
40 | - name: KAFKA | Configure kafka topics
41 |   ansible.builtin.command: >
42 |     {{ kafka_install_path }}/bin/kafka-configs.sh
43 |     --zookeeper {{ kafka_zookeeper_hosts | join(',') }}
44 |     --entity-type topics
45 |     --entity-name {{ item.name }}
46 |     --add-config {{ string_properties }}
47 |     --alter
48 |     --force
49 |   when: kafka_topics_config_to_alter | trim != string_properties
50 |   changed_when: false
51 |   tags:
52 |     - skip_ansible_lint
54 |
--------------------------------------------------------------------------------
/templates/kafka-acls-command.j2:
--------------------------------------------------------------------------------
1 | {# Command construction #}
2 | {{ kafka_install_path }}/bin/kafka-acls.sh --bootstrap-server {{ kafka_acl_bootstrap_server }}:{{ kafka_acl_port }} --{{ acl_action }}
3 | {# Whether to use the force #}
4 | {%- if acl_action|lower == 'remove' %} --force{% endif %}
5 | {# Resource type #}
6 | {%- if acl.resource_type|lower == 'topic' %}
7 | {%- set resource = 'topic' %}
8 | {%- elif acl.resource_type|lower == 'group' %}
9 | {%- set resource = 'group' %}
10 | {%- elif acl.resource_type|lower == 'cluster' %}
11 | {%- set resource = 'cluster' %}
12 | {%- elif acl.resource_type|lower == 'transactionalid' %}
13 | {%- set resource = 'transactional-id' %}
14 | {%- elif acl.resource_type|lower == 'delegationtoken' %}
15 | {%- set resource = 'delegation-token' %}
16 | {% endif %}
17 | {%- if acl.names is string %}
18 | --{{ resource }} '{{ acl.names }}'
19 | {%- else %}
20 | {%- for name in acl.names %} --{{ resource }} '{{ name }}'{% endfor %}
21 | {%- endif %}
22 | {# Pattern type #}
23 | {%- if acl.pattern_type is defined %} --resource-pattern-type {{ acl.pattern_type }}{%- endif %}
24 | {# Principals #}
25 | {%- if acl.principals is string %}
26 | --{{ acl.permission_type|lower }}-principal '{{ acl.principals }}'
27 | {%- else %}
28 | {%- for principal in acl.principals %} --{{ acl.permission_type|lower }}-principal '{{ principal }}'{% endfor %}
29 | {%- endif %}
30 | {# Hosts #}
31 | {%- if acl.hosts is defined %}
32 | {%- if acl.hosts is string %}
33 | --{{ acl.permission_type|lower }}-host '{{ acl.hosts }}'
34 | {%- else %}
35 | {%- for host in acl.hosts %} --{{ acl.permission_type|lower }}-host '{{ host }}'{% endfor %}
36 | {%- endif %}
37 | {%- endif %}
38 | {# Operations #}
39 | {%- if acl.operations is string %}
40 | --operation '{{ acl.operations }}'
41 | {%- else %}
42 | {%- for operation in acl.operations %} --operation '{{ operation }}'{% endfor %}
43 | {%- endif %}
44 |
--------------------------------------------------------------------------------
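
For reference, an ACL entry such as `resource_type: topic`, `names: 'events'`, `principals: 'User:app'`, `permission_type: allow`, `operations: read` (hypothetical values) rendered with `acl_action: add` would produce a command along the lines of `kafka-acls.sh --bootstrap-server <host>:<port> --add --topic 'events' --allow-principal 'User:app' --operation 'read'`.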
/molecule/default/tests/3.0/test_kafka_topics.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | command:
4 |   {{ kafka_install_path }}/bin/kafka-topics.sh --bootstrap-server {{ ansible_default_ipv4.address }}:{{ kafka_port }} --list:
5 |     exit-status: 0
6 |     stdout:
7 |       - "test"
8 |       - "test2"
9 |
10 |   {{ kafka_install_path }}/bin/kafka-configs.sh --bootstrap-server {{ ansible_default_ipv4.address }}:{{ kafka_port }} --entity-type topics --entity-name test --describe:
11 |     exit-status: 0
12 |     stdout:
13 |       - "Dynamic configs for topic test are:"
14 |       - "delete.retention.ms=100000 sensitive=false synonyms={DYNAMIC_TOPIC_CONFIG:delete.retention.ms=100000, DEFAULT_CONFIG:log.cleaner.delete.retention.ms=86400000}"
15 |       - "max.message.bytes=1024 sensitive=false synonyms={DYNAMIC_TOPIC_CONFIG:max.message.bytes=1024, DEFAULT_CONFIG:message.max.bytes=1048588}"
16 |     timeout: 20000
17 |
18 |   {{ kafka_install_path }}/bin/kafka-configs.sh --bootstrap-server {{ ansible_default_ipv4.address }}:{{ kafka_port }} --entity-type topics --entity-name test2 --describe:
19 |     exit-status: 0
20 |     stdout:
21 |       - "Dynamic configs for topic test2 are:"
22 |       - "delete.retention.ms=100000 sensitive=false synonyms={DYNAMIC_TOPIC_CONFIG:delete.retention.ms=100000, DEFAULT_CONFIG:log.cleaner.delete.retention.ms=86400000}"
23 |       - "max.message.bytes=2048 sensitive=false synonyms={DYNAMIC_TOPIC_CONFIG:max.message.bytes=2048, DEFAULT_CONFIG:message.max.bytes=1048588}"
24 |     timeout: 20000
25 |
26 |   {{ kafka_install_path }}/bin/kafka-configs.sh --bootstrap-server {{ ansible_default_ipv4.address }}:{{ kafka_port }} --entity-type topics --entity-name compacted-topic --describe:
27 |     exit-status: 0
28 |     stdout:
29 |       - "Dynamic configs for topic compacted-topic are:"
30 |       - "cleanup.policy=compact sensitive=false synonyms={DYNAMIC_TOPIC_CONFIG:cleanup.policy=compact, DEFAULT_CONFIG:log.cleanup.policy=delete}"
31 |       - "delete.retention.ms=60000 sensitive=false synonyms={DYNAMIC_TOPIC_CONFIG:delete.retention.ms=60000, DEFAULT_CONFIG:log.cleaner.delete.retention.ms=86400000}"
32 |       - "max.compaction.lag.ms=120000 sensitive=false synonyms={DYNAMIC_TOPIC_CONFIG:max.compaction.lag.ms=120000, DEFAULT_CONFIG:log.cleaner.max.compaction.lag.ms=9223372036854775807}"
33 |       - "min.compaction.lag.ms=30000 sensitive=false synonyms={DYNAMIC_TOPIC_CONFIG:min.compaction.lag.ms=30000, DEFAULT_CONFIG:log.cleaner.min.compaction.lag.ms=0}"
34 |       - "segment.ms=10000 sensitive=false synonyms={DYNAMIC_TOPIC_CONFIG:segment.ms=10000}"
35 |     timeout: 20000
36 |
--------------------------------------------------------------------------------
/molecule/agents/molecule.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependency:
3 |   name: galaxy
4 |
5 | driver:
6 |   name: docker
7 |
8 | lint: |
9 |   yamllint .
10 |   ansible-lint .
11 |
12 | platforms:
13 |   - name: zookeeper
14 |     image: confluentinc/cp-zookeeper:7.5.0
15 |     hostname: zookeeper
16 |     exposed_ports:
17 |       - 2181/tcp
18 |     published_ports:
19 |       - 2181:2181
20 |     command: "sh /etc/confluent/docker/run"
21 |     env:
22 |       ZOOKEEPER_CLIENT_PORT: "2181"
23 |       ZOOKEEPER_TICK_TIME: "2000"
24 |     volumes:
25 |       - '/sys/fs/cgroup:/sys/fs/cgroup:rw'
26 |     groups:
27 |       - zookeeper
28 |     networks:
29 |       - name: kafka-network
30 |     cgroupns_mode: host
31 |
32 |   - name: kafka1
33 |     hostname: kafka1
34 |     image: ${MOLECULE_DISTRO:-idealista/jdk:17.0.8-bullseye-temurin-jdk}
35 |     privileged: false
36 |     capabilities:
37 |       - SYS_ADMIN
38 |     tmpfs:
39 |       - /tmp
40 |       - /run
41 |       - /run/lock
42 |     volumes:
43 |       - '/sys/fs/cgroup:/sys/fs/cgroup:rw'
44 |     groups:
45 |       - kafka
46 |     command: '/lib/systemd/systemd'
47 |     stop_signal: 'RTMIN+3'
48 |     networks:
49 |       - name: kafka-network
50 |     published_ports:
51 |       - 0.0.0.0:9092:9092/tcp
52 |     cgroupns_mode: host
53 |
54 |   - name: kafka2
55 |     hostname: kafka2
56 |     image: ${MOLECULE_DISTRO:-idealista/jdk:17.0.8-bullseye-temurin-jdk}
57 |     privileged: false
58 |     capabilities:
59 |       - SYS_ADMIN
60 |     tmpfs:
61 |       - /tmp
62 |       - /run
63 |       - /run/lock
64 |     volumes:
65 |       - '/sys/fs/cgroup:/sys/fs/cgroup:rw'
66 |     groups:
67 |       - kafka
68 |     command: '/lib/systemd/systemd'
69 |     stop_signal: 'RTMIN+3'
70 |     networks:
71 |       - name: kafka-network
72 |     published_ports:
73 |       - 0.0.0.0:9093:9092/tcp
74 |     cgroupns_mode: host
75 |
76 |   - name: kafka3
77 |     hostname: kafka3
78 |     image: ${MOLECULE_DISTRO:-idealista/jdk:17.0.8-bullseye-temurin-jdk}
79 |     privileged: false
80 |     capabilities:
81 |       - SYS_ADMIN
82 |     tmpfs:
83 |       - /tmp
84 |       - /run
85 |       - /run/lock
86 |     volumes:
87 |       - '/sys/fs/cgroup:/sys/fs/cgroup:rw'
88 |     groups:
89 |       - kafka
90 |     command: '/lib/systemd/systemd'
91 |     stop_signal: 'RTMIN+3'
92 |     networks:
93 |       - name: kafka-network
94 |     published_ports:
95 |       - 0.0.0.0:9094:9092/tcp
96 |     cgroupns_mode: host
97 |
98 | provisioner:
99 |   name: ansible
100 |   lint:
101 |     - name: ansible-lint
102 |     - name: yamllint
103 |
104 | scenario:
105 |   name: agents
106 |
107 | verifier:
108 |   name: ansible
109 |
--------------------------------------------------------------------------------
/molecule/zookeeper/molecule.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependency:
3 |   name: galaxy
4 |
5 | driver:
6 |   name: docker
7 |
8 | lint: |
9 |   yamllint .
10 |   ansible-lint .
11 |
12 | platforms:
13 |   - name: zookeeper
14 |     image: confluentinc/cp-zookeeper:7.5.0
15 |     hostname: zookeeper
16 |     exposed_ports:
17 |       - 2181/tcp
18 |     published_ports:
19 |       - 2181:2181
20 |     command: "sh /etc/confluent/docker/run"
21 |     env:
22 |       ZOOKEEPER_CLIENT_PORT: "2181"
23 |       ZOOKEEPER_TICK_TIME: "2000"
24 |     volumes:
25 |       - '/sys/fs/cgroup:/sys/fs/cgroup:rw'
26 |     groups:
27 |       - zookeeper
28 |     networks:
29 |       - name: kafka-network
30 |     cgroupns_mode: host
31 |
32 |   - name: kafka1
33 |     hostname: kafka1
34 |     image: ${MOLECULE_DISTRO:-idealista/jdk:8u302-bullseye-corretto-headless}
35 |     privileged: false
36 |     capabilities:
37 |       - SYS_ADMIN
38 |     tmpfs:
39 |       - /tmp
40 |       - /run
41 |       - /run/lock
42 |     volumes:
43 |       - '/sys/fs/cgroup:/sys/fs/cgroup:rw'
44 |     groups:
45 |       - kafka
46 |     command: '/lib/systemd/systemd'
47 |     stop_signal: 'RTMIN+3'
48 |     networks:
49 |       - name: kafka-network
50 |     published_ports:
51 |       - 0.0.0.0:9092:9092/tcp
52 |     cgroupns_mode: host
53 |
54 |   - name: kafka2
55 |     hostname: kafka2
56 |     image: ${MOLECULE_DISTRO:-idealista/jdk:8u302-bullseye-corretto-headless}
57 |     privileged: false
58 |     capabilities:
59 |       - SYS_ADMIN
60 |     tmpfs:
61 |       - /tmp
62 |       - /run
63 |       - /run/lock
64 |     volumes:
65 |       - '/sys/fs/cgroup:/sys/fs/cgroup:rw'
66 |     groups:
67 |       - kafka
68 |     command: '/lib/systemd/systemd'
69 |     stop_signal: 'RTMIN+3'
70 |     networks:
71 |       - name: kafka-network
72 |     published_ports:
73 |       - 0.0.0.0:9093:9092/tcp
74 |     cgroupns_mode: host
75 |
76 |   - name: kafka3
77 |     hostname: kafka3
78 |     image: ${MOLECULE_DISTRO:-idealista/jdk:8u302-bullseye-corretto-headless}
79 |     privileged: false
80 |     capabilities:
81 |       - SYS_ADMIN
82 |     tmpfs:
83 |       - /tmp
84 |       - /run
85 |       - /run/lock
86 |     volumes:
87 |       - '/sys/fs/cgroup:/sys/fs/cgroup:rw'
88 |     groups:
89 |       - kafka
90 |     command: '/lib/systemd/systemd'
91 |     stop_signal: 'RTMIN+3'
92 |     networks:
93 |       - name: kafka-network
94 |     published_ports:
95 |       - 0.0.0.0:9094:9092/tcp
96 |     cgroupns_mode: host
97 |
98 | provisioner:
99 |   name: ansible
100 |   lint:
101 |     - name: ansible-lint
102 |     - name: yamllint
103 |   env:
104 |     ANSIBLE_ROLES_PATH: ${MOLECULE_PROJECT_DIRECTORY}/..
105 |
106 | scenario:
107 |   name: zookeeper
108 |
109 | verifier:
110 |   name: ansible
111 |
--------------------------------------------------------------------------------
/molecule/agents/verify.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This is an example playbook to execute goss tests.
3 | # Tests need distributed to the appropriate ansible host/groups
4 | # prior to execution by `goss validate`.
5 | #
6 | # The goss ansible module is installed with molecule. The ANSIBLE_LIBRARY
7 | # path is updated appropriately on `molecule verify`.
8 |
9 | # Details about ansible module:
10 | # - https://github.com/indusbox/goss-ansible
11 |
12 | - name: Verify
13 | hosts: kafka1
14 | become: true
15 | gather_facts: true
16 | vars:
17 | goss_version: v0.4.9
18 | goss_arch: amd64
19 | goss_dst: /usr/local/bin/goss
20 | goss_sha256sum: 87dd36cfa1b8b50554e6e2ca29168272e26755b19ba5438341f7c66b36decc19
21 | goss_url: "https://github.com/goss-org/goss/releases/download/{{ goss_version }}/goss-linux-{{ goss_arch }}"
22 | goss_test_directory: /tmp
23 | goss_format: documentation
24 |
25 | vars_files:
26 | - ../../defaults/main/main.yml
27 | - ../../defaults/main/kafka-cfg.yml
28 | - group_vars/kafka.yml
29 |
30 | tasks:
31 | - name: Download and install Goss
32 | ansible.builtin.get_url:
33 | url: "{{ goss_url }}"
34 | dest: "{{ goss_dst }}"
35 | checksum: "sha256:{{ goss_sha256sum }}"
36 | mode: 0755
37 | register: download_goss
38 | until: download_goss is succeeded
39 | retries: 3
40 |
41 | - name: Copy Goss tests to remote version 3.x and above
42 | ansible.builtin.template:
43 | src: "{{ item }}"
44 | dest: "{{ goss_test_directory }}/{{ item | basename }}"
45 | mode: "0755"
46 | when: kafka_version is version('3.0.0', ">=")
47 | with_fileglob:
48 | - "tests/test_*.yml"
49 | - "tests/3.0/test_*.yml"
50 |
51 | - name: Copy Goss tests to remote version 2.x and below
52 | ansible.builtin.template:
53 | src: "{{ item }}"
54 | dest: "{{ goss_test_directory }}/{{ item | basename }}"
55 | mode: "0755"
56 | when: kafka_version is version('3.0.0', "<")
57 | with_fileglob:
58 | - "tests/test_*.yml"
59 | - "tests/2.0/test_*.yml"
60 |
61 | - name: Register test files
62 | ansible.builtin.shell: "ls {{ goss_test_directory }}/test_*.yml"
63 | register: test_files
64 | changed_when: test_files.rc != 0
65 |
66 | - name: Execute Goss tests
67 | ansible.builtin.command: "{{ goss_dst }} -g {{ item }} validate --format {{ goss_format }}"
68 | register: test_results
69 | changed_when: test_results.rc != 0
70 | with_items: "{{ test_files.stdout_lines }}"
71 |
72 | - name: Display details about the Goss results
73 | ansible.builtin.debug:
74 | msg: "{{ item.stdout_lines }}"
75 | with_items: "{{ test_results.results }}"
76 |
77 | - name: Fail when tests fail
78 | ansible.builtin.fail:
79 | msg: "Goss failed to validate"
80 | when: item.rc != 0
81 | with_items: "{{ test_results.results }}"
82 |
--------------------------------------------------------------------------------
/molecule/default/verify.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This is an example playbook to execute goss tests.
3 | # Tests need distributed to the appropriate ansible host/groups
4 | # prior to execution by `goss validate`.
5 | #
6 | # The goss ansible module is installed with molecule. The ANSIBLE_LIBRARY
7 | # path is updated appropriately on `molecule verify`.
8 |
9 | # Details about ansible module:
10 | # - https://github.com/indusbox/goss-ansible
11 |
12 | - name: Verify
13 | hosts: kafka1
14 | become: true
15 | gather_facts: true
16 | vars:
17 | goss_version: v0.4.9
18 | goss_arch: amd64
19 | goss_dst: /usr/local/bin/goss
20 | goss_sha256sum: 87dd36cfa1b8b50554e6e2ca29168272e26755b19ba5438341f7c66b36decc19
21 | goss_url: "https://github.com/goss-org/goss/releases/download/{{ goss_version }}/goss-linux-{{ goss_arch }}"
22 | goss_test_directory: /tmp
23 | goss_format: documentation
24 |
25 | vars_files:
26 | - ../../defaults/main/main.yml
27 | - ../../defaults/main/kafka-cfg.yml
28 | - group_vars/kafka.yml
29 |
30 | tasks:
31 | - name: Download and install Goss
32 | ansible.builtin.get_url:
33 | url: "{{ goss_url }}"
34 | dest: "{{ goss_dst }}"
35 | checksum: "sha256:{{ goss_sha256sum }}"
36 | mode: 0755
37 | register: download_goss
38 | until: download_goss is succeeded
39 | retries: 3
40 |
41 | - name: Copy Goss tests to remote version 3.x and above
42 | ansible.builtin.template:
43 | src: "{{ item }}"
44 | dest: "{{ goss_test_directory }}/{{ item | basename }}"
45 | mode: "0755"
46 | when: kafka_version is version('3.0.0', ">=")
47 | with_fileglob:
48 | - "tests/test_*.yml"
49 | - "tests/3.0/test_*.yml"
50 |
51 | - name: Copy Goss tests to remote version 2.x and below
52 | ansible.builtin.template:
53 | src: "{{ item }}"
54 | dest: "{{ goss_test_directory }}/{{ item | basename }}"
55 | mode: "0755"
56 | when: kafka_version is version('3.0.0', "<")
57 | with_fileglob:
58 | - "tests/test_*.yml"
59 | - "tests/2.0/test_*.yml"
60 |
61 | - name: Register test files
62 | ansible.builtin.shell: "ls {{ goss_test_directory }}/test_*.yml"
63 | register: test_files
64 | changed_when: test_files.rc != 0
65 |
66 | - name: Execute Goss tests
67 | ansible.builtin.command: "{{ goss_dst }} -g {{ item }} validate --format {{ goss_format }}"
68 | register: test_results
69 | changed_when: test_results.rc != 0
70 | with_items: "{{ test_files.stdout_lines }}"
71 |
72 | - name: Display details about the Goss results
73 | ansible.builtin.debug:
74 | msg: "{{ item.stdout_lines }}"
75 | with_items: "{{ test_results.results }}"
76 |
77 | - name: Fail when tests fail
78 | ansible.builtin.fail:
79 | msg: "Goss failed to validate"
80 | when: item.rc != 0
81 | with_items: "{{ test_results.results }}"
82 |
--------------------------------------------------------------------------------
/molecule/zookeeper/verify.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This is an example playbook to execute goss tests.
3 | # Tests need distributed to the appropriate ansible host/groups
4 | # prior to execution by `goss validate`.
5 | #
6 | # The goss ansible module is installed with molecule. The ANSIBLE_LIBRARY
7 | # path is updated appropriately on `molecule verify`.
8 |
9 | # Details about ansible module:
10 | # - https://github.com/indusbox/goss-ansible
11 |
12 | - name: Verify
13 | hosts: kafka1
14 | become: true
15 | gather_facts: true
16 | vars:
17 | goss_version: v0.4.9
18 | goss_arch: amd64
19 | goss_dst: /usr/local/bin/goss
20 | goss_sha256sum: 87dd36cfa1b8b50554e6e2ca29168272e26755b19ba5438341f7c66b36decc19
21 | goss_url: "https://github.com/goss-org/goss/releases/download/{{ goss_version }}/goss-linux-{{ goss_arch }}"
22 | goss_test_directory: /tmp
23 | goss_format: documentation
24 |
25 | vars_files:
26 | - ../../defaults/main/main.yml
27 | - ../../defaults/main/kafka-cfg.yml
28 | - group_vars/kafka.yml
29 |
30 | tasks:
31 | - name: Download and install Goss
32 | ansible.builtin.get_url:
33 | url: "{{ goss_url }}"
34 | dest: "{{ goss_dst }}"
35 | checksum: "sha256:{{ goss_sha256sum }}"
36 | mode: 0755
37 | register: download_goss
38 | until: download_goss is succeeded
39 | retries: 3
40 |
41 | - name: Copy Goss tests to remote version 3.x and above
42 | ansible.builtin.template:
43 | src: "{{ item }}"
44 | dest: "{{ goss_test_directory }}/{{ item | basename }}"
45 | mode: "0755"
46 | when: kafka_version is version('3.0.0', ">=")
47 | with_fileglob:
48 | - "tests/test_*.yml"
49 | - "tests/3.0/test_*.yml"
50 |
51 | - name: Copy Goss tests to remote version 2.x and below
52 | ansible.builtin.template:
53 | src: "{{ item }}"
54 | dest: "{{ goss_test_directory }}/{{ item | basename }}"
55 | mode: "0755"
56 | when: kafka_version is version('3.0.0', "<")
57 | with_fileglob:
58 | - "tests/test_*.yml"
59 | - "tests/2.0/test_*.yml"
60 |
61 | - name: Register test files
62 | ansible.builtin.shell: "ls {{ goss_test_directory }}/test_*.yml"
63 | register: test_files
64 | changed_when: test_files.rc != 0
65 |
66 | - name: Execute Goss tests
67 | ansible.builtin.command: "{{ goss_dst }} -g {{ item }} validate --format {{ goss_format }}"
68 | register: test_results
69 | changed_when: test_results.rc != 0
70 | with_items: "{{ test_files.stdout_lines }}"
71 |
72 | - name: Display details about the Goss results
73 | ansible.builtin.debug:
74 | msg: "{{ item.stdout_lines }}"
75 | with_items: "{{ test_results.results }}"
76 |
77 | - name: Fail when tests fail
78 | ansible.builtin.fail:
79 | msg: "Goss failed to validate"
80 | when: item.rc != 0
81 | with_items: "{{ test_results.results }}"
82 |
--------------------------------------------------------------------------------
/templates/log4j.properties.j2:
--------------------------------------------------------------------------------
1 | log4j.rootLogger={{ kafka_log_level }}, stdout
2 |
3 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
4 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
5 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
6 |
7 | log4j.appender.kafkaAppender=org.apache.log4j.RollingFileAppender
8 | log4j.appender.kafkaAppender.MaxFileSize=50MB
9 | log4j.appender.kafkaAppender.MaxBackupIndex=4
10 | log4j.appender.kafkaAppender.File={{ kafka_log_path }}/kafka-server.log
11 | log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
12 | log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
13 |
14 | log4j.appender.stateChangeAppender=org.apache.log4j.RollingFileAppender
15 | log4j.appender.stateChangeAppender.MaxFileSize=50MB
16 | log4j.appender.stateChangeAppender.MaxBackupIndex=4
17 | log4j.appender.stateChangeAppender.File={{ kafka_log_path }}/state-change.log
18 | log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
19 | log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
20 |
21 | log4j.appender.controllerAppender=org.apache.log4j.RollingFileAppender
22 | log4j.appender.controllerAppender.MaxFileSize=50MB
23 | log4j.appender.controllerAppender.MaxBackupIndex=4
24 | log4j.appender.controllerAppender.File={{ kafka_log_path }}/controller.log
25 | log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
26 | log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
27 |
28 | # Turn on all our debugging info
29 | #log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender
30 | #log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender
31 | #log4j.logger.kafka.perf=DEBUG, kafkaAppender
32 | #log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender
33 | #log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG
34 | log4j.logger.kafka={{ kafka_log_level }}, kafkaAppender
35 |
36 | # Tracing requests results in large logs
37 | #log4j.appender.requestAppender=org.apache.log4j.RollingFileAppender
38 | #log4j.appender.requestAppender.MaxFileSize=50MB
39 | #log4j.appender.requestAppender.MaxBackupIndex=4
40 | #log4j.appender.requestAppender.File={{ kafka_log_path }}/kafka-request.log
41 | #log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
42 | #log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
43 | #
44 | #log4j.logger.kafka.network.RequestChannel$=TRACE, requestAppender
45 | #log4j.additivity.kafka.network.RequestChannel$=false
46 | #
47 | #log4j.logger.kafka.network.Processor=TRACE, requestAppender
48 | #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
49 | #log4j.additivity.kafka.server.KafkaApis=false
50 | #log4j.logger.kafka.request.logger=TRACE, requestAppender
51 | #log4j.additivity.kafka.request.logger=false
52 |
53 | log4j.logger.kafka.controller={{ kafka_log_level }}, controllerAppender
54 | log4j.additivity.kafka.controller=false
55 |
56 | log4j.logger.state.change.logger={{ kafka_log_level }}, stateChangeAppender
57 | log4j.additivity.state.change.logger=false
58 |
--------------------------------------------------------------------------------
/templates/log4j2.properties.j2:
--------------------------------------------------------------------------------
1 | status = {{ kafka_log_level }}
2 | name = KafkaLog4j2
3 |
4 | # Console Appender
5 | appender.stdout.type = Console
6 | appender.stdout.name = STDOUT
7 | appender.stdout.layout.type = PatternLayout
8 | appender.stdout.layout.pattern = [%d] %p %m (%c)%n
9 |
10 | # Kafka Rolling File Appender
11 | appender.kafka.type = RollingFile
12 | appender.kafka.name = KAFKA
13 | appender.kafka.fileName = {{ kafka_log_path }}/kafka-server.log
14 | appender.kafka.filePattern = {{ kafka_log_path }}/kafka-server.log.%i
15 | appender.kafka.layout.type = PatternLayout
16 | appender.kafka.layout.pattern = [%d] %p %m (%c)%n
17 | appender.kafka.policies.type = Policies
18 | appender.kafka.policies.size.type = SizeBasedTriggeringPolicy
19 | appender.kafka.policies.size.size = 50MB
20 | appender.kafka.strategy.type = DefaultRolloverStrategy
21 | appender.kafka.strategy.max = 4
22 |
23 | # State Change Rolling File Appender
24 | appender.stateChange.type = RollingFile
25 | appender.stateChange.name = STATECHANGE
26 | appender.stateChange.fileName = {{ kafka_log_path }}/state-change.log
27 | appender.stateChange.filePattern = {{ kafka_log_path }}/state-change.log.%i
28 | appender.stateChange.layout.type = PatternLayout
29 | appender.stateChange.layout.pattern = [%d] %p %m (%c)%n
30 | appender.stateChange.policies.type = Policies
31 | appender.stateChange.policies.size.type = SizeBasedTriggeringPolicy
32 | appender.stateChange.policies.size.size = 50MB
33 | appender.stateChange.strategy.type = DefaultRolloverStrategy
34 | appender.stateChange.strategy.max = 4
35 |
36 | # Controller Rolling File Appender
37 | appender.controller.type = RollingFile
38 | appender.controller.name = CONTROLLER
39 | appender.controller.fileName = {{ kafka_log_path }}/controller.log
40 | appender.controller.filePattern = {{ kafka_log_path }}/controller.log.%i
41 | appender.controller.layout.type = PatternLayout
42 | appender.controller.layout.pattern = [%d] %p %m (%c)%n
43 | appender.controller.policies.type = Policies
44 | appender.controller.policies.size.type = SizeBasedTriggeringPolicy
45 | appender.controller.policies.size.size = 50MB
46 | appender.controller.strategy.type = DefaultRolloverStrategy
47 | appender.controller.strategy.max = 4
48 |
49 | # Root Logger
50 | rootLogger.level = {{ kafka_log_level }}
51 | rootLogger.appenderRefs = stdout
52 | rootLogger.appenderRef.stdout.ref = STDOUT
53 |
54 | # Kafka Logger
55 | logger.kafka.name = kafka
56 | logger.kafka.level = {{ kafka_log_level }}
57 | logger.kafka.additivity = false
58 | logger.kafka.appenderRefs = kafka
59 | logger.kafka.appenderRef.kafka.ref = KAFKA
60 |
61 | # Kafka Controller Logger
62 | logger.kafkaController.name = kafka.controller
63 | logger.kafkaController.level = {{ kafka_log_level }}
64 | logger.kafkaController.additivity = false
65 | logger.kafkaController.appenderRefs = controller
66 | logger.kafkaController.appenderRef.controller.ref = CONTROLLER
67 |
68 | # State Change Logger
69 | logger.stateChange.name = state.change.logger
70 | logger.stateChange.level = {{ kafka_log_level }}
71 | logger.stateChange.additivity = false
72 | logger.stateChange.appenderRefs = stateChange
73 | logger.stateChange.appenderRef.stateChange.ref = STATECHANGE
74 |
--------------------------------------------------------------------------------
/tasks/install.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: KAFKA | Installing dependencies
4 | ansible.builtin.apt:
5 | pkg: "{{ kafka_required_libs }}"
6 | state: present
7 | become: true
8 | tags:
9 | skip_ansible_lint
10 |
11 | - name: KAFKA | Ensure kafka group
12 | ansible.builtin.group:
13 | name: "{{ kafka_group }}"
14 |
15 | - name: KAFKA | Ensure kafka user
16 | ansible.builtin.user:
17 | name: "{{ kafka_user }}"
18 | group: "{{ kafka_group }}"
19 | shell: /bin/false
20 |
21 | - name: KAFKA | Create install path
22 | ansible.builtin.file:
23 | path: "{{ kafka_install_path }}"
24 | state: directory
25 | owner: "{{ kafka_user }}"
26 | group: "{{ kafka_group }}"
27 | mode: "0755"
28 |
29 | - name: KAFKA | Create data path
30 | ansible.builtin.file:
31 | path: "{{ kafka_data_path }}"
32 | mode: "0760"
33 | state: directory
34 | owner: "{{ kafka_user }}"
35 | group: "{{ kafka_group }}"
36 |
37 | - name: KAFKA | Check prev installation
38 | ansible.builtin.command: bash -c "ls {{ kafka_install_path }}/libs"
39 | register: kafka_check
40 | changed_when: false
41 | ignore_errors: true
42 |
43 | - name: KAFKA | Get installed package version
44 | ansible.builtin.command: bash -c "ls {{ kafka_install_path }}/libs | grep 'kafka_.*\.jar'"
45 | register: kafka_installed_version
46 | changed_when: false
47 | when: kafka_check is success
48 |
49 | - name: KAFKA | Check kafka version
50 | ansible.builtin.command: bash -c "ls {{ kafka_install_path }}/libs | grep 'kafka_{{ kafka_scala_version }}-{{ kafka_version }}.jar'"
51 | register: kafka_version_check
52 | changed_when: false
53 | ignore_errors: true
54 |
55 | - name: KAFKA | Check if upgrading to 4.x
56 | ansible.builtin.debug:
57 | msg: "Checking if performing a \"safe\" upgrade from Kafka 3.x to 4.x"
58 | changed_when: true
59 | notify: message about upgrading
60 | when:
61 | - kafka_check is success
62 | - kafka_installed_version.stdout | regex_search('^kafka_[^|-]+-(\\d+\\.\\d+\\.\\d+)(?=\\.jar)', '\\1') | first is version('3.3', '>=')
63 | - kafka_installed_version.stdout | regex_search('^kafka_[^|-]+-(\\d+\\.\\d+\\.\\d+)(?=\\.jar)', '\\1') | first is version('4.0', '<')
64 | - kafka_version is version('4.0.0', '>=')
65 |
66 | - name: KAFKA | Stop service
67 | ansible.builtin.systemd:
68 | name: kafka
69 | state: stopped
70 | when: kafka_check is success and (kafka_force_reinstall or kafka_version_check is failed or kafka_check.stdout == "")
71 |
72 | - name: KAFKA | Remove old version
73 | ansible.builtin.file:
74 | path: "{{ kafka_install_path }}/{{ item }}"
75 | state: absent
76 | with_items: "{{ kafka_removeable_folders }}"
77 | when: kafka_check is success and (kafka_force_reinstall or kafka_version_check is failed or kafka_check.stdout == "")
78 |
79 | - name: KAFKA | Untar kafka
80 | ansible.builtin.unarchive:
81 | extra_opts: ['--strip-components=1']
82 | src: "{{ kafka_sources_url }}"
83 | remote_src: true
84 | dest: "{{ kafka_install_path }}"
85 | owner: "{{ kafka_user }}"
86 | group: "{{ kafka_group }}"
87 | when: 'kafka_force_reinstall or kafka_version_check is failed or kafka_version_check.stdout == ""'
88 |
--------------------------------------------------------------------------------
/molecule/default/molecule.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependency:
3 | name: galaxy
4 |
5 | driver:
6 | name: docker
7 |
8 | lint: |
9 | yamllint .
10 | ansible-lint .
11 |
12 | platforms:
13 | - name: kafka1
14 | hostname: kafka1
15 | image: ${MOLECULE_DISTRO:-idealista/jdk:17.0.8-bullseye-temurin-jdk}
16 | privileged: false
17 | capabilities:
18 | - SYS_ADMIN
19 | tmpfs:
20 | - /tmp
21 | - /run
22 | - /run/lock
23 | volumes:
24 | - '/sys/fs/cgroup:/sys/fs/cgroup:rw'
25 | - '/var/run/docker.sock:/var/run/docker.sock'
26 | groups:
27 | - kafka
28 | command: '/lib/systemd/systemd'
29 | stop_signal: 'RTMIN+3'
30 | networks:
31 | - name: kafka-network
32 | exposed_ports:
33 | - 9091/tcp
34 | - 9092/tcp
35 | - 9093/tcp
36 | published_ports:
37 | - 0.0.0.0:19091:9091/tcp
38 | - 0.0.0.0:19092:9092/tcp
39 | - 0.0.0.0:19093:9093/tcp
40 | cgroupns_mode: host
41 |
42 | - name: kafka2
43 | hostname: kafka2
44 | image: ${MOLECULE_DISTRO:-idealista/jdk:17.0.8-bullseye-temurin-jdk}
45 | privileged: false
46 | capabilities:
47 | - SYS_ADMIN
48 | tmpfs:
49 | - /tmp
50 | - /run
51 | - /run/lock
52 | volumes:
53 | - '/sys/fs/cgroup:/sys/fs/cgroup:rw'
54 | - '/var/run/docker.sock:/var/run/docker.sock'
55 | groups:
56 | - kafka
57 | command: '/lib/systemd/systemd'
58 | stop_signal: 'RTMIN+3'
59 | networks:
60 | - name: kafka-network
61 | exposed_ports:
62 | - 9091/tcp
63 | - 9092/tcp
64 | - 9093/tcp
65 | published_ports:
66 | - 0.0.0.0:29091:9091/tcp
67 | - 0.0.0.0:29092:9092/tcp
68 | - 0.0.0.0:29093:9093/tcp
69 | cgroupns_mode: host
70 |
71 | - name: kafka3
72 | hostname: kafka3
73 | image: ${MOLECULE_DISTRO:-idealista/jdk:17.0.8-bullseye-temurin-jdk}
74 | privileged: false
75 | capabilities:
76 | - SYS_ADMIN
77 | tmpfs:
78 | - /tmp
79 | - /run
80 | - /run/lock
81 | volumes:
82 | - '/sys/fs/cgroup:/sys/fs/cgroup:rw'
83 | - '/var/run/docker.sock:/var/run/docker.sock'
84 | groups:
85 | - kafka
86 | command: '/lib/systemd/systemd'
87 | stop_signal: 'RTMIN+3'
88 | networks:
89 | - name: kafka-network
90 | exposed_ports:
91 | - 9091/tcp
92 | - 9092/tcp
93 | - 9093/tcp
94 | published_ports:
95 | - 0.0.0.0:39091:9091/tcp
96 | - 0.0.0.0:39092:9092/tcp
97 | - 0.0.0.0:39093:9093/tcp
98 | cgroupns_mode: host
99 |
100 | provisioner:
101 | name: ansible
102 | env:
103 | ANSIBLE_ROLES_PATH: ${MOLECULE_PROJECT_DIRECTORY}/..
104 | lint:
105 | - name: ansible-lint
106 | - name: yamllint
107 | config_options:
108 | defaults:
109 | callback_result_format: yaml
110 | callbacks_enabled: timer, profile_tasks, profile_roles
111 | show_per_host_start: true
112 | show_custom_stats: true
113 | fact_caching: yaml
114 | # fact_caching_connection: ./tmp/facts_cache
115 |
116 | scenario:
117 | name: default
118 |
119 | verifier:
120 | name: ansible
121 |
--------------------------------------------------------------------------------
/.github/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, gender identity and expression, level of experience,
9 | nationality, personal appearance, race, religion, or sexual identity and
10 | orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | * Using welcoming and inclusive language
18 | * Being respectful of differing viewpoints and experiences
19 | * Gracefully accepting constructive criticism
20 | * Focusing on what is best for the community
21 | * Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | * The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | * Trolling, insulting/derogatory comments, and personal or political attacks
28 | * Public or private harassment
29 | * Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | * Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 |
35 | ## Our Responsibilities
36 |
37 | Project maintainers are responsible for clarifying the standards of acceptable
38 | behavior and are expected to take appropriate and fair corrective action in
39 | response to any instances of unacceptable behavior.
40 |
41 | Project maintainers have the right and responsibility to remove, edit, or
42 | reject comments, commits, code, wiki edits, issues, and other contributions
43 | that are not aligned to this Code of Conduct, or to ban temporarily or
44 | permanently any contributor for other behaviors that they deem inappropriate,
45 | threatening, offensive, or harmful.
46 |
47 | ## Scope
48 |
49 | This Code of Conduct applies both within project spaces and in public spaces
50 | when an individual is representing the project or its community. Examples of
51 | representing a project or community include using an official project e-mail
52 | address, posting via an official social media account, or acting as an appointed
53 | representative at an online or offline event. Representation of a project may be
54 | further defined and clarified by project maintainers.
55 |
56 | ## Enforcement
57 |
58 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
59 | reported by contacting the project team at [labs@idealista.com](mailto:labs@idealista.com). All
60 | complaints will be reviewed and investigated and will result in a response that
61 | is deemed necessary and appropriate to the circumstances. The project team is
62 | obligated to maintain confidentiality with regard to the reporter of an incident.
63 | Further details of specific enforcement policies may be posted separately.
64 |
65 | Project maintainers who do not follow or enforce the Code of Conduct in good
66 | faith may face temporary or permanent repercussions as determined by other
67 | members of the project's leadership.
68 |
69 | ## Attribution
70 |
71 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
72 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
73 |
74 | [homepage]: https://www.contributor-covenant.org
75 |
--------------------------------------------------------------------------------
/tasks/3.0/topics.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: KAFKA | Check kafka topics
4 | ansible.builtin.command: >
5 | {{ kafka_install_path }}/bin/kafka-topics.sh
6 | --list
7 | --bootstrap-server 0.0.0.0:{{ kafka_port }}
8 | register: kafka_topics_listed
9 | changed_when: false
10 |
11 | - name: KAFKA | Topics in server
12 | ansible.builtin.set_fact:
13 | kafka_topics_origin: "{{ kafka_topics_listed.stdout_lines }}"
14 |
15 | - name: KAFKA | Topics to create
16 | ansible.builtin.set_fact:
17 | kafka_topics_to_create: "{{ kafka_topics | not_in_list('name', kafka_topics_origin) }}"
18 |
19 | - name: KAFKA | Topics to check
20 | ansible.builtin.set_fact:
21 | kafka_topics_to_check: "{{ kafka_topics | in_list('name', kafka_topics_origin) }}"
22 |
23 | - name: KAFKA | Obtain info of topics to check
24 | ansible.builtin.command: >
25 | {{ kafka_install_path }}/bin/kafka-topics.sh
26 | --topic {{ item.name }}
27 | --describe
28 | --bootstrap-server 0.0.0.0:{{ kafka_port }}
29 | with_items:
30 | - "{{ kafka_topics_to_check }}"
31 | register: kafka_topics_described_full
32 | changed_when: false
33 |
34 | - name: KAFKA | Topics to alter
35 | ansible.builtin.set_fact:
36 | kafka_topics_to_alter: |
37 | {{ kafka_topics_described_full.results |
38 | map(attribute='stdout_lines') |
39 | map('first') |
40 | map('regex_findall', '^Topic:(.*)\tPartitionCount:([0-9]+)\tReplicationFactor:([0-9]+)\tConfigs:') |
41 | flatten_list |
42 | map('zip_dict', ['name', 'partitions', 'replicas']) |
43 | list
44 | }}
45 |
46 | - name: KAFKA | Topics to remove
47 | ansible.builtin.set_fact:
48 | kafka_topics_to_remove: |
49 | {{ kafka_topics_origin |
50 | difference(kafka_topics | map(attribute='name') |
51 | list) |
52 | filter_evaluated(kafka_filter_function_not_removable_topic) |
53 | list }}
54 |
55 | - name: KAFKA | Create kafka topics
56 | ansible.builtin.command: >
57 | {{ kafka_install_path }}/bin/kafka-topics.sh
58 | --topic {{ item.name }}
59 | --partitions {{ item.partitions }}
60 | --replication-factor {{ item.replicas }}
61 | --create
62 | --if-not-exists
63 | --bootstrap-server 0.0.0.0:{{ kafka_port }}
64 | with_items:
65 | - "{{ kafka_topics_to_create }}"
66 | tags:
67 | skip_ansible_lint
68 |
69 | - name: KAFKA | Alter kafka topics
70 | ansible.builtin.command: >
71 | {{ kafka_install_path }}/bin/kafka-topics.sh
72 | --bootstrap-server 0.0.0.0:{{ kafka_port }}
73 | --topic {{ item.name }}
74 | --partitions {{ item.partitions }}
75 | --alter
76 | --if-exists
78 | with_items:
79 | - "{{ kafka_topics_to_check | difference(kafka_topics_to_alter) }}"
80 | when: kafka_topics_to_alter != []
81 | tags:
82 | skip_ansible_lint
83 |
84 | - name: KAFKA | Remove kafka topics
85 | ansible.builtin.command: >
86 | {{ kafka_install_path }}/bin/kafka-topics.sh
87 | --bootstrap-server 0.0.0.0:{{ kafka_port }}
88 | --topic {{ item }}
89 | --delete
90 | --if-exists
92 | with_items:
93 | - "{{ kafka_topics_to_remove }}"
94 | when: kafka_delete_topic_enable == 'true'
95 | register: cmd_output
96 | changed_when: cmd_output.rc != 0
97 |
98 | - name: KAFKA | Configure kafka topics
99 | ansible.builtin.include_tasks: topic_config.yml
100 | with_items:
101 | - "{{ kafka_topics_config }}"
102 |
--------------------------------------------------------------------------------
/tasks/2.0/topics.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: KAFKA | Check kafka topics
4 | ansible.builtin.command: >
5 | {{ kafka_install_path }}/bin/kafka-topics.sh
6 | --zookeeper {{ kafka_zookeeper_hosts | join(',') }}
7 | --list
8 | register: kafka_topics_listed
9 | changed_when: false
10 |
11 | - name: KAFKA | Topics in server
12 | ansible.builtin.set_fact:
13 | kafka_topics_origin: "{{ kafka_topics_listed.stdout_lines }}"
14 |
15 | - name: KAFKA | Topics to create
16 | ansible.builtin.set_fact:
17 | kafka_topics_to_create: "{{ kafka_topics | not_in_list('name', kafka_topics_origin) }}"
18 |
19 | - name: KAFKA | Topics to check
20 | ansible.builtin.set_fact:
21 | kafka_topics_to_check: "{{ kafka_topics | in_list('name', kafka_topics_origin) }}"
22 |
23 | - name: KAFKA | Obtain info of topics to check
24 | ansible.builtin.command: >
25 | {{ kafka_install_path }}/bin/kafka-topics.sh
26 | --zookeeper {{ kafka_zookeeper_hosts | join(',') }}
27 | --topic {{ item.name }}
28 | --describe
29 | with_items:
30 | - "{{ kafka_topics_to_check }}"
31 | register: kafka_topics_described_full
32 | changed_when: false
33 |
34 | - name: KAFKA | Topics to alter
35 | ansible.builtin.set_fact:
36 | kafka_topics_to_alter: |
37 | {{ kafka_topics_described_full.results |
38 | map(attribute='stdout_lines') |
39 | map('first') |
40 | map('regex_findall', '^Topic:(.*)\tPartitionCount:([0-9]+)\tReplicationFactor:([0-9]+)\tConfigs:') |
41 | flatten_list |
42 | map('zip_dict', ['name', 'partitions', 'replicas']) |
43 | list
44 | }}
45 |
46 | - name: KAFKA | Topics to remove
47 | ansible.builtin.set_fact:
48 | kafka_topics_to_remove: |
49 | {{ kafka_topics_origin |
50 | difference(kafka_topics | map(attribute='name') |
51 | list) |
52 | filter_evaluated(kafka_filter_function_not_removable_topic) |
53 | list }}
54 |
55 | - name: KAFKA | Create kafka topics
56 | ansible.builtin.command: >
57 | {{ kafka_install_path }}/bin/kafka-topics.sh
58 | --zookeeper {{ kafka_zookeeper_hosts | join(',') }}
59 | --topic {{ item.name }}
60 | --partitions {{ item.partitions }}
61 | --replication-factor {{ item.replicas }}
62 | --create
63 | --if-not-exists
64 | --force
65 | with_items:
66 | - "{{ kafka_topics_to_create }}"
67 | tags:
68 | skip_ansible_lint
69 |
70 | - name: KAFKA | Alter kafka topics
71 | ansible.builtin.command: >
72 | {{ kafka_install_path }}/bin/kafka-topics.sh
73 | --zookeeper {{ kafka_zookeeper_hosts | join(',') }}
74 | --topic {{ item.name }}
75 | --partitions {{ item.partitions }}
76 | --alter
77 | --if-exists
78 | --force
79 | with_items:
80 | - "{{ kafka_topics_to_check | difference(kafka_topics_to_alter) }}"
81 | when: kafka_topics_to_alter != []
82 | tags:
83 | skip_ansible_lint
84 |
85 | - name: KAFKA | Remove kafka topics
86 | ansible.builtin.command: >
87 | {{ kafka_install_path }}/bin/kafka-topics.sh
88 | --zookeeper {{ kafka_zookeeper_hosts | join(',') }}
89 | --topic {{ item }}
90 | --delete
91 | --if-exists
92 | --force
93 | with_items:
94 | - "{{ kafka_topics_to_remove }}"
95 | when: kafka_delete_topic_enable == 'true'
96 | register: cmd_output
97 | changed_when: cmd_output.rc != 0
98 |
99 | - name: KAFKA | Configure kafka topics
100 | ansible.builtin.include_tasks: topic_config.yml
101 | with_items:
102 | - "{{ kafka_topics_config }}"
103 |
--------------------------------------------------------------------------------
/molecule/default/group_vars/kafka.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | kafka_version: 4.0.0
4 |
5 | kafka_private_tmp: false
6 |
7 | kafka_jvm_performance_opts: "-XX:MetaspaceSize=96m -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:G1HeapRegionSize=16M -XX:MinMetaspaceFreeRatio=50 -XX:MaxMetaspaceFreeRatio=80"
8 |
9 | kafka_controller_quorum_bootstrap_servers: kafka1:9093,kafka2:9093,kafka3:9093
10 | kafka_controller_quorum_voters: "1@kafka1:9093,2@kafka2:9093,3@kafka3:9093"
11 |
12 | kafka_brokers_port: 9091
13 | kafka_clients_port: 9092
14 | kafka_controller_port: 9093
15 |
16 | kafka_advertised_ports:
17 | kafka1:
18 | clients_advertised_port: 19092
19 | brokers_advertised_port: 19091
20 | controller_advertised_port: 19093
21 | kafka2:
22 | clients_advertised_port: 29092
23 | brokers_advertised_port: 29091
24 | controller_advertised_port: 29093
25 | kafka3:
26 | clients_advertised_port: 39092
27 | brokers_advertised_port: 39091
28 | controller_advertised_port: 39093
29 |
30 | kafka_listeners_list:
31 | # - name: BROKERS
32 | # host: "0.0.0.0:{{ kafka_brokers_port }}"
33 | # advertised_host: "localhost:{{ kafka_advertised_ports[inventory_hostname]['brokers_advertised_port'] }}" # Necessary config to use localhost and the ports published by molecule docker for testing purposes
34 | # protocol: PLAINTEXT
35 | - name: BROKERS
36 | host: "0.0.0.0:{{ kafka_brokers_port }}"
37 | advertised_host: "{{ kafka_host_name }}:{{ kafka_brokers_port }}"
38 | protocol: PLAINTEXT
39 | - name: CLIENTS
40 | host: "0.0.0.0:{{ kafka_clients_port }}"
41 | advertised_host: "{{ kafka_host_name }}:{{ kafka_clients_port }}"
42 | protocol: PLAINTEXT
43 | - name: CONTROLLER
44 | host: "0.0.0.0:{{ kafka_controller_port }}"
45 | advertised_host: "{{ kafka_host_name }}:{{ kafka_controller_port }}"
46 | protocol: PLAINTEXT
47 |
48 | kafka_inter_broker_listener_name: BROKERS
49 | kafka_controller_listener_names: CONTROLLER
50 |
51 | kafka_xmx: "768m"
52 | kafka_xms: "512m"
53 |
54 | kafka_listeners: >-
55 | {%- for listener in kafka_listeners_list -%}
56 | {{ listener.name }}://{{ listener.host }}
57 | {%- if not loop.last -%},{%- endif -%}
58 | {%- endfor -%}
59 |
60 | kafka_advertised_listeners: >-
61 | {%- for listener in kafka_listeners_list -%}
62 | {{ listener.name }}://{{ listener.advertised_host }}
63 | {%- if not loop.last -%},{%- endif -%}
64 | {%- endfor -%}
65 |
66 | kafka_security_protocol_map: >-
67 | {%- for listener in kafka_listeners_list -%}
68 | {{ listener.name }}:{{ listener.protocol }}
69 | {%- if not loop.last -%},{%- endif -%}
70 | {%- endfor -%},SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
71 |
72 | kafka_initial_controllers: "{{ groups['kafka'] | map('extract', hostvars, 'kafka_initial_controller') | join(',') }}"
73 |
74 | kafka_topics:
75 | - name: 'test'
76 | partitions: '3'
77 | replicas: '3'
78 | - name: 'test2'
79 | partitions: '5'
80 | replicas: '1'
81 | - name: 'compacted-topic'
82 | partitions: '3'
83 | replicas: '3'
84 |
85 | kafka_topics_config:
86 | - name: 'test'
87 | delete.retention.ms: 100000
88 | max.message.bytes: 1024
89 | - name: 'test2'
90 | delete.retention.ms: 100000
91 | max.message.bytes: 2048
92 | - name: 'compacted-topic'
93 | cleanup.policy: compact
94 | min.compaction.lag.ms: 30000
95 | max.compaction.lag.ms: 120000
96 | delete.retention.ms: 60000
97 | segment.ms: 10000
98 |
99 | kafka_extra_properties:
100 | - key: message.max.bytes
101 | value: 409715200
102 |
--------------------------------------------------------------------------------
/tasks/acls.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: KAFKA | Get current kafka ACLs in server
4 | ansible.builtin.command: >
5 | {{ kafka_install_path }}/bin/kafka-acls.sh
6 | --bootstrap-server {{ kafka_acl_bootstrap_server }}:{{ kafka_acl_port }}
7 | --list
8 | register: current_raw_kafka_acls_in_server
9 | changed_when: false
10 |
11 | - name: KAFKA | Set var with raw ACLs in server
12 | ansible.builtin.set_fact:
13 | current_kafka_acls_in_server: "{{ current_raw_kafka_acls_in_server.stdout | regex_findall('(?s)(Current ACLs.*?)(?=Current ACLs.*|$)') | list }}"
14 |
15 | - name: KAFKA | Set var with ACLs Resources in server
16 | ansible.builtin.set_fact:
17 | current_kafka_acls_resources_in_server: |
18 | {{
19 | current_kafka_acls_in_server |
20 | map('regex_findall', 'resourceType=(.*), name=(.*), patternType=(.*)\)') |
21 | flatten_list |
22 | map('zip_dict', ['resource_type', 'names', 'pattern_type']) |
23 | list
24 | }}
25 |
26 | - name: KAFKA | Set var with all ACLs in server
27 | ansible.builtin.set_fact:
28 | current_kafka_acls: |
29 | {{
30 | current_kafka_acls |
31 | default([]) |
32 | union(
33 | current_kafka_acls_in_server[index]
34 | | split('\n')
35 | | map('regex_findall', 'principal=(.*), host=(.*), operation=(.*), permissionType=(.*)\)')
36 | | flatten_list
37 | | map('zip_dict', ['principals', 'hosts', 'operations', 'permission_type'])
38 | | map('combine', item)
39 | )
40 | }}
41 | loop: "{{ current_kafka_acls_resources_in_server }}"
42 | loop_control:
43 | index_var: index
44 | when:
45 | - item.resource_type in current_kafka_acls_in_server[index]
46 | - item.names in current_kafka_acls_in_server[index]
47 | - item.pattern_type in current_kafka_acls_in_server[index]
48 | - current_kafka_acls_in_server | length > 0
49 |
50 | - name: KAFKA | Set var with already ACLs in server
51 | ansible.builtin.set_fact:
52 | current_created_kafka_acls: |
53 | {{
54 | current_created_kafka_acls |
55 | default([]) |
56 | union(
57 | current_kafka_acls
58 | | selectattr('resource_type', 'match', ('(?i)' ~ item.resource_type))
59 | | selectattr('names', 'in', item.names)
60 | | selectattr('pattern_type', 'match', ('(?i)' ~ item.pattern_type | default('LITERAL')))
61 | | selectattr('principals', 'in', item.principals)
62 | | selectattr('hosts', 'in', item.hosts)
63 | | selectattr('operations', 'in', item.operations|upper)
64 | | selectattr('permission_type', 'match', ('(?i)' ~ item.permission_type))
65 | )
66 | }}
67 | loop: "{{ kafka_acls }}"
68 | when: current_kafka_acls_in_server | length > 0
69 |
70 | - name: KAFKA | Remove ACLs
71 | ansible.builtin.command:
72 | cmd: "{{ lookup('template', 'kafka-acls-command.j2') }}"
73 | vars:
74 | acl_action: remove
75 | loop: "{{ current_kafka_acls if kafka_acls == [] else current_kafka_acls | difference(current_created_kafka_acls) }}"
76 | # loop: "{{ current_kafka_acls | difference(current_created_kafka_acls) }}"
77 | loop_control:
78 | loop_var: acl
79 | when: current_kafka_acls_in_server | length > 0
80 | register: kafka_acls_create_output
81 | changed_when: kafka_acls_create_output.rc == 0
82 | failed_when: kafka_acls_create_output.rc != 0
83 |
84 | - name: KAFKA | Create ACLs
85 | ansible.builtin.command:
86 | cmd: "{{ lookup('template', 'kafka-acls-command.j2') }}"
87 | vars:
88 | acl_action: add
89 | loop: "{{ kafka_acls }}"
90 | loop_control:
91 | loop_var: acl
92 | register: kafka_acls_create_output
93 | changed_when: kafka_acls_create_output.rc == 0
94 | failed_when: kafka_acls_create_output.rc != 0
95 |
--------------------------------------------------------------------------------
/tasks/config.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: KAFKA | Create kafka config and log paths
4 | ansible.builtin.file:
5 | path: "{{ item }}"
6 | state: directory
7 | owner: "{{ kafka_user }}"
8 | group: "{{ kafka_group }}"
9 | recurse: true
10 | with_items:
11 | - "{{ kafka_conf_path }}"
12 | - "{{ kafka_log_path }}"
13 |
14 | - name: KAFKA | Copy server config
15 | block:
16 | - name: KAFKA | Copy server.properties config
17 | ansible.builtin.template:
18 | src: "{{ kafka_server_template_path }}"
19 | dest: "{{ kafka_conf_path }}/server.properties"
20 | mode: "0640"
21 | owner: "{{ kafka_user }}"
22 | group: "{{ kafka_group }}"
23 | when: not kafka_use_kraft or ('broker' in kafka_process_roles | lower and 'controller' in kafka_process_roles | lower)
24 | notify: Restart kafka
25 | register: bootstrap_storage_controller
26 | tags: [server_properties]
27 |
28 | - name: KAFKA | Copy broker.properties config
29 | ansible.builtin.template:
30 | src: "{{ kafka_broker_template_path }}"
31 | dest: "{{ kafka_conf_path }}/broker.properties"
32 | mode: "0640"
33 | owner: "{{ kafka_user }}"
34 | group: "{{ kafka_group }}"
35 | when:
36 | - kafka_use_kraft
37 | - "'broker' in kafka_process_roles | lower"
38 | - "'controller' not in kafka_process_roles | lower"
39 | notify: Restart kafka
40 | tags: [server_properties]
41 |
42 | - name: KAFKA | Copy controller config
43 | ansible.builtin.template:
44 | src: "{{ kafka_controller_template_path }}"
45 | dest: "{{ kafka_conf_path }}/controller.properties"
46 | mode: "0640"
47 | owner: "{{ kafka_user }}"
48 | group: "{{ kafka_group }}"
49 | when:
50 | - kafka_use_kraft
51 | - "'controller' in kafka_process_roles | lower"
52 | - "'broker' not in kafka_process_roles | lower"
53 | notify: Restart kafka
54 | register: bootstrap_storage_controller
55 | tags: [server_properties]
56 |
57 | - name: KAFKA | Bootstrap storage controller
58 | ansible.builtin.command: "{{ kafka_controller_bootstrap_command }}"
59 | args:
60 | creates: "{{ kafka_data_path }}/meta.properties"
61 | when:
62 | - kafka_use_kraft
63 | - bootstrap_storage_controller is defined
64 | become: true
65 | become_user: "{{ kafka_user }}"
66 |
67 | - name: KAFKA | Copy log properties
68 | ansible.builtin.template:
69 | src: "{{ kafka_log4j_template_path }}"
70 | dest: "{{ kafka_conf_path }}/{{ kafka_log4j_file_name }}"
71 | mode: "0644"
72 | owner: "{{ kafka_user }}"
73 | group: "{{ kafka_group }}"
74 | notify: Restart kafka
75 | - name: KAFKA | Enable GC log
76 | ansible.builtin.replace:
77 | path: "{{ kafka_install_path }}/bin/kafka-server-start.sh"
78 | regexp: ^EXTRA_ARGS=\${EXTRA_ARGS-'-name kafkaServer'}
79 | replace: EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'}
80 | when: kafka_gc_log_enabled
81 |
82 | - name: KAFKA | Disable GC log
83 | ansible.builtin.replace:
84 | path: "{{ kafka_install_path }}/bin/kafka-server-start.sh"
85 | regexp: ^EXTRA_ARGS=\${EXTRA_ARGS-'-name kafkaServer -loggc'}
86 | replace: EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer'}
87 | when: not kafka_gc_log_enabled
88 |
89 | - name: KAFKA | Copy optional config
90 | ansible.builtin.template:
91 | src: "{{ item.src }}"
92 | dest: "{{ kafka_conf_path }}/{{ item.name }}"
93 | mode: "0640"
94 | owner: "{{ kafka_user }}"
95 | group: "{{ kafka_group }}"
96 | with_items: "{{ kafka_optional_conf_files }}"
97 | when: kafka_optional_conf_files is defined
98 | notify: Restart kafka
99 | tags: [server_properties]
100 |
--------------------------------------------------------------------------------
/tasks/agent.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: KAFKA | agent installation | Install required packages
4 | ansible.builtin.apt:
5 | pkg: "{{ item }}"
6 | state: present
7 | update_cache: true
8 | when: kafka_agents_required_libs is defined
9 | with_items: '{{ kafka_agents_required_libs }}'
10 |
11 | - name: KAFKA | agent installation | Ensure install path
12 | ansible.builtin.file:
13 | path: "{{ kafka_install_path }}/{{ item.name }}"
14 | state: directory
15 | owner: "{{ kafka_user }}"
16 | group: "{{ kafka_group }}"
17 | mode: "0755"
18 | with_items: '{{ kafka_agents_config }}'
19 |
20 | - name: KAFKA | agent installation | Check if agent is installed
21 | ansible.builtin.stat:
22 | path: "{{ kafka_install_path }}/{{ item.0.name }}/{{ item.1 | basename }}"
23 | register: agent_file_exists
24 | with_subelements:
25 | - "{{ kafka_agents_config }}"
26 | - java_opts
27 |
28 | - name: KAFKA | agent installation | Check agent version
29 | ansible.builtin.command: "cat {{ kafka_install_path }}/{{ item.0.name }}/version.txt"
30 | register: agent_version_checked
31 | changed_when: false
32 | ignore_errors: true
33 | with_subelements:
34 | - "{{ kafka_agents_config }}"
35 | - java_opts
36 |
37 | - name: KAFKA | agent installation | Download agent
38 | ansible.builtin.get_url:
39 | dest: "/tmp"
40 | url: "{{ item.download_url }}"
41 | owner: "{{ kafka_user }}"
42 | group: "{{ kafka_group }}"
43 | mode: "0644"
44 | register: agent_downloaded
45 | loop: "{{ kafka_agents_config | flatten(levels=1) }}"
46 | loop_control:
47 | index_var: index
48 | when: >
49 | item.download_url | basename | splitext | last in kafka_supported_agents_extensions and
50 | (kafka_agents_force_reinstall or not
51 | agent_file_exists.results[index].stat.exists or
52 | item.version != agent_version_checked.results[index].stdout)
53 |
54 | - name: KAFKA | agent installation | Unarchive package
55 | ansible.builtin.unarchive:
56 | src: "/tmp/{{ item.download_url | basename }}"
57 | dest: "{{ kafka_install_path }}"
58 | remote_src: true
59 | owner: "{{ kafka_user }}"
60 | group: "{{ kafka_group }}"
61 | mode: "0755"
62 | when: item.download_url | basename | splitext | last in kafka_supported_agents_extensions and agent_downloaded.changed
63 | with_items: '{{ kafka_agents_config }}'
64 | tags:
65 | - skip_ansible_lint
66 |
67 | - name: KAFKA | agent installation | Download agent jar
68 | ansible.builtin.get_url:
69 | dest: "{{ kafka_install_path }}/{{ item.name }}"
70 | url: "{{ item.download_url }}"
71 | owner: "{{ kafka_user }}"
72 | group: "{{ kafka_group }}"
73 | mode: "0644"
74 | loop: '{{ kafka_agents_config | flatten(levels=1) }}'
75 | register: agent_jar_downloaded
76 | when: item.download_url | basename | splitext | last not in kafka_supported_agents_extensions
77 |
78 | - name: KAFKA | agent installation | Copy version number file
79 | ansible.builtin.template:
80 | src: "agent-version.txt.j2"
81 | dest: "{{ kafka_install_path }}/{{ item.name }}/version.txt"
82 | owner: "{{ kafka_user }}"
83 | group: "{{ kafka_group }}"
84 | mode: "0644"
85 | with_items: '{{ kafka_agents_config }}'
86 |
87 | - name: KAFKA | agent installation | Check configuration files
88 | ansible.builtin.set_fact:
89 | kafka_agents_configuration: "{{ kafka_agents_configuration|default([]) + [ {'name': item.0.name, 'file': item.1, 'params': item.0.params}] }}"
90 | with_subelements:
91 | - "{{ kafka_agents_config }}"
92 | - configuration_files
93 |
94 | - name: KAFKA | agent installation | Configure the Java agent
95 | ansible.builtin.template:
96 | src: "{{ playbook_dir }}/templates/agents/{{ item.name }}/{{ item.file }}.j2"
97 | dest: "{{ kafka_install_path }}/{{ item.name }}/{{ item.file }}"
98 | owner: "{{ kafka_user }}"
99 | group: "{{ kafka_group }}"
100 | mode: "0644"
101 | notify: Restart kafka
102 | with_items: '{{ kafka_agents_configuration }}'
103 |
104 | - name: KAFKA | agent installation | Check java options
105 | ansible.builtin.set_fact:
106 | kafka_agent_java_opts: "{{ kafka_agent_java_opts | default([]) + [item.1] }}"
107 | notify: Restart kafka
108 | with_subelements:
109 | - "{{ kafka_agents_config }}"
110 | - java_opts
111 |
--------------------------------------------------------------------------------
/templates/log4j2.yaml.j2:
--------------------------------------------------------------------------------
1 | Configuration:
2 | name: KafkaLog4j2
3 | Appenders:
4 | Console:
5 | name: STDOUT
6 | PatternLayout:
7 | pattern: "{{ kafka_log_pattern }}"
8 |
9 | RollingFile:
10 | - name: KafkaAppender
11 | fileName: "{{ kafka_log_path }}/kafka-server.log"
12 | filePattern: "{{ kafka_log_path }}/kafka-server.log.%i"
13 | PatternLayout:
14 | pattern: "{{ kafka_log_pattern }}"
15 | SizeBasedTriggeringPolicy:
16 | size: 50MB
17 | DefaultRolloverStrategy:
18 | max: 4
19 | # State Change appender
20 | - name: StateChangeAppender
21 | fileName: "{{ kafka_log_path }}/state-change.log"
22 | filePattern: "{{ kafka_log_path }}/state-change.log.%i"
23 | PatternLayout:
24 | pattern: "{{ kafka_log_pattern }}"
25 | SizeBasedTriggeringPolicy:
26 | size: 50MB
27 | DefaultRolloverStrategy:
28 | max: 4
29 | # Controller appender
30 | - name: ControllerAppender
31 | fileName: "{{ kafka_log_path }}/controller.log"
32 | filePattern: "{{ kafka_log_path }}/controller.log.%i"
33 | PatternLayout:
34 | pattern: "{{ kafka_log_pattern }}"
35 | SizeBasedTriggeringPolicy:
36 | size: 50MB
37 | DefaultRolloverStrategy:
38 | max: 4
39 | # Request appender
40 | - name: RequestAppender
41 | fileName: "{{ kafka_log_path }}/kafka-request.log"
42 | filePattern: "{{ kafka_log_path }}/kafka-request.log.%d{yyyy-MM-dd-HH}"
43 | PatternLayout:
44 | pattern: "{{ kafka_log_pattern }}"
45 | TimeBasedTriggeringPolicy:
46 | modulate: true
47 | interval: 1
48 | # Cleaner appender
49 | - name: CleanerAppender
50 | fileName: "{{ kafka_log_path }}/log-cleaner.log"
51 | filePattern: "{{ kafka_log_path }}/log-cleaner.log.%d{yyyy-MM-dd-HH}"
52 | PatternLayout:
53 | pattern: "{{ kafka_log_pattern }}"
54 | TimeBasedTriggeringPolicy:
55 | modulate: true
56 | interval: 1
57 | # Authorizer appender
58 | - name: AuthorizerAppender
59 | fileName: "{{ kafka_log_path }}/kafka-authorizer.log"
60 | filePattern: "{{ kafka_log_path }}/kafka-authorizer.log.%d{yyyy-MM-dd-HH}"
61 | PatternLayout:
62 | pattern: "{{ kafka_log_pattern }}"
63 | TimeBasedTriggeringPolicy:
64 | modulate: true
65 | interval: 1
66 |
67 | Loggers:
68 | Root:
69 | level: "{{ kafka_log_level }}"
70 | AppenderRef:
71 | - ref: STDOUT
72 | - ref: KafkaAppender
73 | Logger:
74 | # Kafka logger
75 | - name: kafka
76 | level: "{{ kafka_log_level }}"
77 | additivity: false
78 | AppenderRef:
79 | - ref: KafkaAppender
80 | # Kafka org.apache logger
81 | - name: org.apache.kafka
82 | level: "{{ kafka_log_level }}"
83 | # Kafka request logger
84 | - name: kafka.request.logger
85 | level: "{{ kafka_log_level }}"
86 | additivity: false
87 | AppenderRef:
88 | ref: RequestAppender
89 | # Kafka network RequestChannel$ logger
90 | - name: kafka.network.RequestChannel$
91 | level: "{{ kafka_log_level }}"
92 | additivity: false
93 | AppenderRef:
94 | ref: RequestAppender
95 | # Controller logger
96 | - name: org.apache.kafka.controller
97 | level: "{{ kafka_log_level }}"
98 | additivity: false
99 | AppenderRef:
100 | ref: ControllerAppender
101 | # LogCleaner logger
102 | - name: kafka.log.LogCleaner
103 | level: "{{ kafka_log_level }}"
104 | additivity: false
105 | AppenderRef:
106 | ref: CleanerAppender
107 | # State change logger
108 | - name: state.change.logger
109 | level: "{{ kafka_log_level }}"
110 | additivity: false
111 | AppenderRef:
112 | ref: StateChangeAppender
113 | # Authorizer logger
114 | - name: kafka.authorizer.logger
115 | level: "{{ kafka_log_level }}"
116 | additivity: false
117 | AppenderRef:
118 | ref: AuthorizerAppender
119 | # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE
120 | # for additional output related to the handling of requests
121 | # - name: kafka.network.Processor
122 | # level: TRACE
123 | # additivity: false
124 | # AppenderRef:
125 | # ref:
126 | # - name: kafka.server.KafkaApis
127 | # level: TRACE
128 | # additivity: false
129 | # AppenderRef:
130 | # ref: RequestAppender
131 |
--------------------------------------------------------------------------------
/.github/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to Idealista
2 |
3 | :+1::tada: First off, thanks for taking the time to contribute! :tada::+1:
4 |
5 | The following is a set of guidelines for contributing to Idealista's repositories, which are hosted in the [Idealista Organization](https://github.com/idealista) on GitHub. These are mostly guidelines, not rules. Use your best judgment, and feel free to propose changes to this document in a pull request.
6 |
7 | #### Table Of Contents
8 |
9 | [Code of Conduct](#code-of-conduct)
10 |
11 | [How Can I Contribute?](#how-can-i-contribute)
12 | * [Reporting Bugs](#reporting-bugs)
13 | * [Suggesting Enhancements](#suggesting-enhancements)
14 | * [Pull Requests](#pull-requests)
15 | * [Changelog](#changelog)
16 |
17 | [Styleguides](#styleguides)
18 | * [Git Commit Messages](#git-commit-messages)
19 |
20 | ## Code of Conduct
21 |
22 | This project and everyone participating in it is governed by the [Idealista Code of Conduct](CODE_OF_CONDUCT.md). By participating, you are expected to uphold this code. Please report unacceptable behavior to [labs@idealista.com](mailto:labs@idealista.com).
23 |
24 |
25 | ## How Can I Contribute?
26 |
27 | ### Reporting Bugs
28 |
29 | This section guides you through submitting a bug report for Idealista. Following these guidelines helps maintainers and the community understand your report :pencil:, reproduce the behavior :computer: :computer:, and find related reports :mag_right:.
30 |
31 | Before creating bug reports, please check [this list](#before-submitting-a-bug-report) as you might find out that you don't need to create one. When you are creating a bug report, please [include as many details as possible](#how-do-i-submit-a-good-bug-report). Fill out [the required template](ISSUE_TEMPLATE.md), the information it asks for helps us resolve issues faster.
32 |
33 | > **Note:** If you find a **Closed** issue that seems like it is the same thing that you're experiencing, open a new issue and include a link to the original issue in the body of your new one.
34 |
35 | #### Before Submitting A Bug Report
36 |
37 | * **Check the latest version.** Check if you can reproduce the problem in the latest version of the project.
38 | * **Check the FAQ of the project** for a list of common questions and problems.
39 | * **Perform a [cursory search](https://github.com/issues?q=+is%3Aissue+user%3Aidealista)** to see if the problem has already been reported. If it has **and the issue is still open**, add a comment to the existing issue instead of opening a new one.
40 |
41 | #### How Do I Submit A (Good) Bug Report?
42 |
43 | Bugs are tracked as [GitHub issues](https://guides.github.com/features/issues/). Create an issue on the project repository and provide the following information by filling in [the template](ISSUE_TEMPLATE.md).
44 |
45 | Explain the problem and include additional details to help maintainers reproduce the problem:
46 |
47 | * **Use a clear and descriptive title** for the issue to identify the problem.
48 | * **Describe the exact steps which reproduce the problem** in as many details as possible.
49 | * **Describe the behavior you observed after following the steps** and point out what exactly is the problem with that behavior.
50 | * **Explain which behavior you expected to see instead and why.**
51 |
52 | ### Suggesting Enhancements
53 |
54 | This section guides you through submitting an enhancement suggestion for Idealista, including completely new features and minor improvements to existing functionality. Following these guidelines helps maintainers and the community understand your suggestion :pencil: and find related suggestions :mag_right:.
55 |
56 | Before creating enhancement suggestions, please check [this list](#before-submitting-an-enhancement-suggestion) as you might find out that you don't need to create one. When you are creating an enhancement suggestion, please [include as many details as possible](#how-do-i-submit-a-good-enhancement-suggestion). Fill in [the template](ISSUE_TEMPLATE.md), including the steps that you imagine you would take if the feature you're requesting existed.
57 |
58 | #### Before Submitting An Enhancement Suggestion
59 |
60 | * **Check the latest version.** Check if you can reproduce the problem in the latest version of the project.
61 | * **Check the FAQ of the project** for a list of common questions and problems.
62 | * **Perform a [cursory search](https://github.com/issues?q=+is%3Aissue+user%3Aidealista)** to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
63 |
64 | #### How Do I Submit A (Good) Enhancement Suggestion?
65 |
66 | Enhancement suggestions are tracked as [GitHub issues](https://guides.github.com/features/issues/). Create an issue on the project repository and provide the following information by filling in [the template](ISSUE_TEMPLATE.md):
67 |
68 | * **Use a clear and descriptive title** for the issue to identify the suggestion.
69 | * **Provide a step-by-step description of the suggested enhancement** in as many details as possible.
70 | * **Provide specific examples to demonstrate the steps**.
71 | * **Describe the current behavior** and **explain which behavior you expected to see instead** and why.
72 | * **Explain why this enhancement would be useful**.
73 | * **List some other text editors or applications where this enhancement exists.**
74 | * **Specify which version you're using.**
75 |
76 | ### Pull Requests
77 |
78 | * Fill in [the required template](PULL_REQUEST_TEMPLATE.md)
79 | * Any pull request should have **idealista:develop** as its base branch.
80 |
81 | ### Changelog
82 |
83 | Every project has a CHANGELOG.md file. Once your code is ready to be merged, please add the issue under the **Unreleased** section as explained:
84 |
85 | * For an enhancement, add the issue under the **Added** subsection (create it if it doesn't exist)
86 | * For a fixed bug, add the issue under the **Fixed** subsection (create it if it doesn't exist)
87 | * For an improvement, add the issue under the **Changed** subsection (create it if it doesn't exist)
88 |
89 | Then write the issue info this way:
90 |
91 | - *[#29](https://github.com/idealista/nginx-role/issues/29) Support debian stretch* @jmonterrubio
92 |
93 | ## Styleguides
94 |
95 | ### Git Commit Messages
96 |
97 | * Use the present tense ("Add feature" not "Added feature")
98 | * Use the imperative mood ("Move cursor to..." not "Moves cursor to...")
99 | * Limit the first line to 72 characters or less
100 | * Reference issues and pull requests liberally after the first line
101 |
--------------------------------------------------------------------------------
/defaults/main/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## General
3 |
4 | kafka_version: 4.0.0
5 | kafka_scala_version: 2.13 # Recommended
6 | kafka_use_kraft: true # Use KRaft mode (Kafka Raft Metadata mode) instead of Zookeeper
7 |
8 | ## Service options
9 |
10 | kafka_private_tmp: true
11 |
12 | # Owner
13 | kafka_user: kafka
14 | kafka_group: kafka
15 |
16 | # start on boot
17 | kafka_service_enabled: true
18 | # current state: started, stopped
19 | kafka_service_state: started
20 | kafka_service_state_timeout: 300
21 | kafka_service_file_path: /etc/systemd/system/kafka.service
22 |
23 | # Files & Paths
24 | kafka_install_path: /opt/kafka
25 | kafka_conf_path: /etc/kafka
26 | kafka_data_path: "{{ kafka_install_path }}/data"
27 | kafka_log_path: /var/log/kafka
28 |
29 | # Brokers
30 | kafka_process_roles: "broker,controller" # broker, controller, or both
31 | kafka_cluster_uuid: "{{ 'kafka_role' | to_uuid | uuid_to_base64 }}"
32 |
33 | # Controllers
34 | kafka_controller_bootstrap_command: "{{ kafka_install_path }}/bin/kafka-storage.sh format --cluster-id {{ kafka_cluster_uuid }} --initial-controllers {{ kafka_initial_controllers }} -c {{ kafka_conf_path }}/server.properties --ignore-formatted"
35 |
36 | # Logging
37 | kafka_log_level: WARN
38 | kafka_log_pattern: "[%d] %p %m (%c)%n"
39 | kafka_gc_log_enabled: false
40 | kafka_log4j_opts: "{{ ('-Dlog4j.configurationFile=' ~ kafka_conf_path ~ '/' ~ kafka_log4j_file_name) if kafka_version is version('4.0.0', '>=') else ('-Dlog4j.configuration=file:' ~ kafka_conf_path ~ '/' ~ kafka_log4j_file_name) }}"
41 |
42 | # JVM
43 | kafka_xmx: "{{ (ansible_memtotal_mb / 2) | int }}m"
44 | kafka_xms: "{{ (ansible_memtotal_mb / 2) | int }}m"
45 | kafka_jmx_port: 9010
46 | kafka_opts: ""
47 | kafka_jvm_performance_opts: ""
48 |
49 | # Service properties
50 |
51 | # The id of the broker. This must be set to a unique integer for each broker.
52 | # List of dict (i.e. {kafka_hosts:[{host:,id:},{host:,id:},...]})
53 | kafka_hosts:
54 | - host: "{{ inventory_hostname }}" # the machine running
55 | id: 0
56 | # Switch to enable topic deletion or not, default value is false
57 | kafka_delete_topic_enable: 'false'
58 |
59 | # Switch to enable auto create topic or not, default value is true
60 | kafka_auto_create_topics: 'true'
61 |
62 | kafka_topics: []
63 | kafka_not_removable_topics: ['KSTREAM-AGGREGATE-STATE-STORE'] # Contains filter
64 |
65 | # The address the socket server listens on. It will get the value returned from
66 | # java.net.InetAddress.getCanonicalHostName() if not configured.
67 | # FORMAT:
68 | # listeners = security_protocol://host_name:port
69 | # EXAMPLE:
70 | # listeners = PLAINTEXT://your.host.name:9092
71 | # listeners: PLAINTEXT://{{kafka_host}}:{{kafka_port}}
72 | kafka_host_name: "{{ ansible_nodename }}"
73 | kafka_port: 9092
74 |
75 | # Hostname and port the broker will advertise to producers and consumers. If not set,
76 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
77 | # returned from java.net.InetAddress.getCanonicalHostName().
78 | # kafka_advertised_listeners: PLAINTEXT://your.host.name:9092
79 |
80 | # The number of threads handling network requests
81 | kafka_num_network_threads: 3
82 | # The number of threads doing disk I/O
83 | kafka_num_io_threads: 8
84 | # The send buffer (SO_SNDBUF) used by the socket server
85 | kafka_socket_send_buffer_bytes: 102400
86 | # The receive buffer (SO_RCVBUF) used by the socket server
87 | kafka_socket_receive_buffer_bytes: 102400
88 | # The maximum size of a request that the socket server will accept (protection against OOM)
89 | kafka_socket_request_max_bytes: 104857600
90 |
91 | # The default number of log partitions per topic. More partitions allow greater
92 | # parallelism for consumption, but this will also result in more files across
93 | # the brokers.
94 | kafka_num_partitions: 1
95 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
96 | # This value is recommended to be increased for installations with data dirs located in RAID array.
97 | kafka_num_recovery_threads_per_data_dir: 1
98 |
99 | # The number of messages to accept before forcing a flush of data to disk
100 | kafka_log_flush_interval_messages: 10000
101 | # The maximum amount of time a message can sit in a log before we force a flush
102 | kafka_log_flush_interval_ms: 1000
103 |
104 | # The minimum age of a log file to be eligible for deletion
105 | kafka_log_retention_hours: 168
106 | # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
107 | # segments don't drop below log.retention.bytes.
108 | kafka_log_retention_bytes: 1073741824
109 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
110 | kafka_log_segment_bytes: 1073741824
111 | # The interval at which log segments are checked to see if they can be deleted according
112 | # to the retention policies
113 | kafka_log_retention_check_interval_ms: 300000
114 |
115 | # Zookeeper connection string (see zookeeper docs for details).
116 | # This is a comma separated host:port pairs, each corresponding to a zk
117 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
118 | # You can also append an optional chroot string to the urls to specify the
119 | # root directory for all kafka znodes.
120 | kafka_zookeeper_hosts:
121 | - localhost:2181
122 | # Timeout in ms for connecting to zookeeper
123 | kafka_zookeeper_connection_timeout_ms: 6000
124 |
125 | # The replication factor for the offsets topic (set higher to ensure availability).
126 | # Internal topic creation will fail until the cluster size meets this replication factor requirement.
127 | kafka_offsets_topic_replication_factor: 3
128 |
129 | # The replication factor for the transaction topic (set higher to ensure availability).
130 | # Internal topic creation will fail until the cluster size meets this replication factor requirement.
131 | kafka_transaction_state_log_replication_factor: 3
132 |
133 | # Overridden min.insync.replicas config for the transaction topic.
134 | kafka_transaction_state_log_min_isr: 2
135 |
136 | ## Miscellaneous
137 | kafka_force_reinstall: false
138 |
139 | ### Templates path
140 | kafka_log4j_template_path: log4j2.yaml.j2
141 | kafka_log4j_file_name: log4j2.yaml
142 | kafka_server_template_path: "{{ 'server.properties.kraft.j2' if kafka_use_kraft else 'server.properties.j2' }}"
143 | kafka_broker_template_path: broker.properties.kraft.j2
144 | kafka_controller_template_path: controller.properties.kraft.j2
145 | kafka_service_template_path: kafka.service.j2
146 |
147 | ## Extra properties
148 | kafka_topics_config: []
149 | # - name: 'test'
150 | # delete.retention.ms: 100000
151 | # max.message.bytes: 1024
152 | kafka_extra_properties: []
153 | # - key: message.max.bytes
154 | # value: 409715200
155 |
156 | kafka_mirror: "https://archive.apache.org/dist/kafka"
157 | kafka_download_url: "https://downloads.apache.org/kafka"
158 | kafka_package_name: "kafka_{{ kafka_scala_version }}-{{ kafka_version }}"
159 | kafka_package: "{{ kafka_package_name }}.tgz"
160 | kafka_sources_url: "{{ kafka_download_url }}/{{ kafka_version }}/{{ kafka_package }}"
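# Example (illustrative values): pin the build or point the download at an internal mirror via group_vars
# kafka_version: "4.0.0"
# kafka_scala_version: "2.13"
# kafka_download_url: "https://your.internal.mirror/kafka"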
161 |
162 |
163 | ## Agent configuration (optional)
164 | kafka_agents_required_libs:
165 | - unzip
166 | - tar
167 | - apt-transport-https
168 |
169 | kafka_agents_force_reinstall: false
170 |
171 | # kafka_agents_config:
172 | # - name: "agent_name"
173 | # download_url: "download_url"
174 | # version: "x.x.x"
175 | # java_opts:
176 | # - '-javaagent:{{ kafka_root_path }}/agent_name/agent_file'
177 | # configuration_files:
178 | # - "configuration_file.yml"
179 | # params: {
180 | # application_name: "application_name",
181 | # license_key: "license_key"
182 | # }
183 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | [](https://travis-ci.com/idealista/kafka_role)
4 | [](https://galaxy.ansible.com/idealista/kafka_role)
5 |
6 | # Kafka Ansible role
7 |
8 | This Ansible role installs Apache Kafka on a Debian environment. The server is installed from sources.
9 |
10 | - [Getting Started](#getting-started)
11 | - [Prerequisites](#prerequisites)
12 | - [Installing](#installing)
13 | - [Usage](#usage)
14 | - [Testing](#testing)
15 | - [Built With](#built-with)
16 | - [Versioning](#versioning)
17 | - [Authors](#authors)
18 | - [License](#license)
19 | - [Contributing](#contributing)
20 |
21 | ## Getting Started
22 |
23 | These instructions will get you a copy of the role for your Ansible Playbook. Once launched, it will install an [Apache Kafka](https://kafka.apache.org/) distributed streaming platform on a Debian environment.
24 |
25 | > [!IMPORTANT]
26 | > From role version 3.0.0, Kafka v4.0.0 is installed in KRaft mode (without Zookeeper) and requires JDK 11 or higher; additionally, Log4j 2.x is used. See the [usage](#usage) section for more details.
27 |
28 | ### Prerequisites
29 |
30 | Ansible >= 2.9 installed.
31 |
32 | For testing purposes, [Molecule](https://molecule.readthedocs.io/) >= 3.x.x with [Docker](https://www.docker.com/) as driver and [Goss](https://github.com/aelsabbahy/goss) as verifier.
35 |
36 | ### Installing
37 |
38 | Create or add to your roles dependency file (e.g. requirements.yml):
39 |
40 | ```yml
41 | - src: idealista.kafka_role
42 | version: 3.0.0
43 | name: kafka_role
44 | ```
45 |
46 | Install the role with ansible-galaxy command:
47 |
48 | ```sh
49 | ansible-galaxy install -p roles -r requirements.yml -f
50 | ```
51 |
52 | Use in a playbook:
53 |
54 | ```yml
55 | ---
56 | - hosts: someserver
57 | roles:
58 | - role: kafka_role
59 | ```
60 |
61 | ## Usage
62 |
63 | Check the [defaults](defaults/main/main.yml) properties file to see the possible configuration properties.
64 |
65 | This role now supports Kafka in KRaft mode and it is the default, so if you were using this role before, please set the necessary variables to keep your previous configuration working.
66 |
67 | To maintain compatibility with previous versions of the role, setting `kafka_use_kraft: false` makes the role install Kafka in Zookeeper mode as before, using the old templates for the configuration files.
68 |
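For instance, a minimal sketch of that legacy setup (the hostnames are placeholders; `kafka_use_kraft` and `kafka_zookeeper_hosts` are the role variables shown in the defaults):

```yml
kafka_use_kraft: false
kafka_zookeeper_hosts:
  - zk1.example.com:2181
  - zk2.example.com:2181
  - zk3.example.com:2181
```
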
69 | These are examples of the minimum variables you may need to set to get a working Kafka cluster in KRaft mode using the new templates and the `kafka_cfg`, `broker_cfg` and `controller_cfg` variables (check [kafka-cfg](defaults/main/kafka-cfg.yml) for the default cfg):
70 |
71 | At broker level or host_vars level:
72 |
73 | ```yml
74 | # Unique identifier for each Kafka node
75 | kafka_node_id: 1
76 |
77 | # We use kafka_node_id to generate a unique uuid for each node; you can set a value of your choice
78 | kafka_node_uuid: "{{ kafka_node_id | to_uuid | uuid_to_base64 }}"
79 |
80 | # Controller URI for this node
81 | kafka_controller_uri: "{{ kafka_node_id }}@{{ ansible_host }}:{{ kafka_controller_port }}"
82 |
83 | # Initial controller for this node if the node acts as controller
84 | kafka_initial_controller: "{{ kafka_controller_uri }}:{{ kafka_node_uuid }}"
85 | ```
86 |
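For instance, with three nodes the per-host assignment could look like this (hypothetical file names, mirroring the molecule default scenario layout):

```yml
# host_vars/kafka1.yml
kafka_node_id: 1

# host_vars/kafka2.yml
kafka_node_id: 2

# host_vars/kafka3.yml
kafka_node_id: 3
```
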
87 | At a general level or group_vars level:
88 |
89 | ```yml
90 | # The roles the Kafka process plays, set here in general for all the nodes; alternatively you can set it at host_vars level
91 | kafka_process_roles: "broker,controller" # broker, controller, or both
92 |
93 | # A unique identifier for the Kafka cluster; you can set it to any value, but it must be the same for all the nodes in the cluster
94 | kafka_cluster_uuid: "{{ 'kafka_role' | to_uuid | uuid_to_base64 }}"
95 |
96 | # List of all the controllers in the cluster with their node id and host/ip
97 | # An example to generate the controller quorum voters value could be:
98 | kafka_controller_quorum_voters: "{{ groups['brokers'] | map('extract', hostvars, 'kafka_controller_uri') | join(',') }}"
99 |
100 | # List of all the listeners for the cluster
101 | # An example to generate the listeners value could be:
102 | kafka_listeners: >-
103 | {%- for listener in kafka_listeners_list -%}
104 | {{ listener.name }}://{{ listener.host }}
105 | {%- if not loop.last -%},{%- endif -%}
106 | {%- endfor -%}
107 |
108 | # Where kafka_listeners_list looks like:
109 | kafka_listeners_list:
110 | - name: BROKERS
111 | host: "0.0.0.0:{{ kafka_brokers_port }}"
112 | advertised_host: "{{ kafka_host_name }}:{{ kafka_brokers_port }}"
113 | protocol: PLAINTEXT
114 | - name: CLIENTS
115 | host: "0.0.0.0:{{ kafka_clients_port }}"
116 | advertised_host: "{{ kafka_host_name }}:{{ kafka_clients_port }}"
117 | protocol: PLAINTEXT
118 | - name: CONTROLLER
119 | host: "0.0.0.0:{{ kafka_controller_port }}"
120 | advertised_host: "{{ kafka_host_name }}:{{ kafka_controller_port }}"
121 | protocol: PLAINTEXT
122 |
123 | # Kafka inter broker and controller listener names
124 | kafka_inter_broker_listener_name: BROKERS
125 | kafka_controller_listener_names: CONTROLLER
126 |
127 | # Kafka listeners using the kafka_listeners_list variable
128 | kafka_advertised_listeners: >-
129 | {%- for listener in kafka_listeners_list -%}
130 | {{ listener.name }}://{{ listener.advertised_host }}
131 | {%- if not loop.last -%},{%- endif -%}
132 | {%- endfor -%}
133 |
134 | # Map of listener name to security protocol using the kafka_listeners_list variable
135 | kafka_security_protocol_map: >-
136 | {%- for listener in kafka_listeners_list -%}
137 | {{ listener.name }}:{{ listener.protocol }}
138 | {%- if not loop.last -%},{%- endif -%}
139 | {%- endfor -%},SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
140 |
141 | # kafka controller quorum bootstrap servers using the inventory "brokers" group
142 | kafka_controller_quorum_bootstrap_servers: "{{ groups['brokers'] | map('extract', hostvars, 'ansible_host') | product([':' + kafka_controller_port | string]) | map('join') | join(',') }}"
143 |
144 | # Set these if you want to customize the log4j configuration, or if you are using Kafka < 4.0.0 and need the Log4j 1.x configuration
145 | kafka_log4j_template_path: log4j2.yaml.j2
146 | kafka_log4j_file_name: log4j2.yaml
147 | ```
148 |
149 | > [!CAUTION]
150 | > These are only examples and should be adapted to your specific needs.
151 |
152 | Additionally, though not required, we recommend setting the following variables too:
153 |
154 | ```yml
155 | kafka_xmx: "to_your_value"
156 | kafka_xms: "to_your_value"
157 | ```
158 |
159 | Kafka topics can be configured through the role. Just define the topics like:
160 |
161 | ```yml
162 | kafka_topics:
163 | - name: 'test'
164 | partitions: '3'
165 | replicas: '3'
166 | - name: 'test2'
167 | partitions: '3'
168 | replicas: '1'
169 | ```
170 |
171 | Enable the delete topic variable if you want to be able to remove topics from the cluster, as shown below.
172 |
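A minimal sketch (`kafka_delete_topic_enable` is the role variable rendered into `delete.topic.enable` in the server properties):

```yml
kafka_delete_topic_enable: true
```
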
173 | The number of partitions can be modified, but the replicas cannot. Please keep this in mind when creating topics.
174 |
175 | Also notice that you can't decrease the number of partitions of an existing topic.
176 |
177 | > [!NOTE]
178 | > Ansible does not support generating base64-encoded UUIDs, so we developed a custom filter plugin for this role. It is included here, but if you want to use this feature elsewhere, you must copy the `filter_plugins` folder from this repo into your Ansible project.
179 |
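A sketch of what such a filter plugin can look like, assuming a standard Ansible `FilterModule` layout (illustrative only; the actual plugin shipped in `filter_plugins/` may differ):

```python
# Hypothetical reimplementation of the uuid_to_base64 filter, for illustration.
import base64
import uuid


def uuid_to_base64(uuid_str):
    """Encode a UUID string as URL-safe base64 without padding (the 22-char KRaft ID format)."""
    return base64.urlsafe_b64encode(uuid.UUID(uuid_str).bytes).decode().rstrip("=")


class FilterModule(object):
    """Make the filter available to Jinja2 templates and variables."""

    def filters(self):
        return {"uuid_to_base64": uuid_to_base64}
```
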
180 | ## Testing
181 |
182 | ### Install dependencies
183 |
184 | ```sh
185 | $ pipenv sync
186 | ```
187 |
188 | For more information read the [pipenv docs](https://pipenv-fork.readthedocs.io/en/latest/).
189 |
190 | ### Run the tests
191 |
192 | ```sh
193 | $ pipenv run molecule test
194 | ```
195 |
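The repo ships `default`, `zookeeper` and `agents` molecule scenarios; to run a single one, pass its name:

```sh
$ pipenv run molecule test -s zookeeper
```
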
196 | ## Built With
197 |
198 | 
199 |
200 | ## Versioning
201 |
202 | For the versions available, see the [tags on this repository](https://github.com/idealista/kafka_role/tags).
203 |
204 | Additionally, you can see what changed in each version in the [CHANGELOG.md](CHANGELOG.md) file.
205 |
206 | ## Authors
207 |
208 | - **Idealista** - *Work with* - [idealista](https://github.com/idealista)
209 |
210 | See also the list of [contributors](https://github.com/idealista/kafka_role/contributors) who participated in this project.
211 |
212 | ## License
213 |
214 | 
215 |
216 | This project is licensed under the [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0) license - see the [LICENSE](LICENSE) file for details.
217 |
218 | ## Contributing
219 |
220 | Please read [CONTRIBUTING.md](.github/CONTRIBUTING.md) for details on our code of conduct, and the process for submitting pull requests to us.
221 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Change Log
2 |
3 | All notable changes to this project will be documented in this file.
4 | This project adheres to [Semantic Versioning](http://semver.org/) and [Keep a changelog](https://github.com/olivierlacan/keep-a-changelog).
5 |
6 | ## [Unreleased](https://github.com/idealista/kafka_role/tree/develop)
7 |
8 | ## [3.0.0](https://github.com/idealista/kafka_role/tree/3.0.0) (2025-09-03)
9 |
10 | ## Added
11 |
12 | - Updated role to support Kafka 4.X versions
13 |
14 | ## Changed
15 |
16 | - Updated default Kafka version to 4.0.0
17 | - Refactored configuration templates for better maintainability
18 | - Updated molecule test scenarios
19 | - Improved error handling and validation
21 | - Updated Readme with examples and explanations
22 |
23 | ## Fixed
24 |
25 | - Resolved compatibility issues with newer Python versions
26 |
27 | ## [2.0.0](https://github.com/idealista/kafka_role/tree/2.0.0) (2024-02-05)
28 |
29 | ## Added
30 |
31 | - ACLs handling
32 |
33 | ## Changed
34 |
35 | - Updated role to allow install of 3.X versions
36 | - Updated test-requirements
37 | - Updated linting
38 | - Updated molecule default scenario
39 | - Updated molecule tests
40 | - Updated travis config file
41 | - [.gitattributes linguist detection](https://github.com/idealista/kafka_role/pull/104) by @YaraGomezSaiz
42 | - [Service listening timeout parametrizable](https://github.com/idealista/kafka_role/pull/98) by @devnix
43 |
44 | ## Fixed
45 |
46 | - Update tasks modules to use FQDN
47 |
48 | ## Deprecated
49 |
50 | - Buster general support
51 |
52 |
53 | ## [1.15.0](https://github.com/idealista/kafka_role/tree/1.15.0) (2021-12-17)
54 |
55 | ## Added
56 |
57 | - *[#95](https://github.com/idealista/kafka_role/issues/95) Support for agent installation.* @jmonterrubio
58 |
59 | ## [1.14.0](https://github.com/idealista/kafka_role/tree/1.14.0) (2020-12-03)
60 |
61 | ## Changed
62 |
63 | - *[#81](https://github.com/idealista/kafka_role/issues/81) Allowing to override kafka_mirror uri via group_vars* @dortegau
64 |
65 | ## [1.13.2](https://github.com/idealista/kafka_role/tree/1.13.2) (2019-08-06)
66 |
67 | ## Fixed
68 |
69 | - *[#78](https://github.com/idealista/kafka-role/issues/78) Fixed not removable topics was only considering the first one in the list* @jmonterrubio
70 |
71 | ## [1.13.1](https://github.com/idealista/kafka_role/tree/1.13.1) (2019-04-01)
72 |
73 | ## Fixed
74 |
75 | - *[#75](https://github.com/idealista/kafka-role/issues/75) Default kafka version 2.2.0* @frantsao
76 |
77 | ## Changed
78 |
79 | - *[#73](https://github.com/idealista/kafka_role/issues/73) Rename role* @frantsao
80 |
81 | ## [1.13.0](https://github.com/idealista/kafka_role/tree/1.13.0) (2019-03-20)
82 |
83 | ## Added
84 |
85 | - *[#70](https://github.com/idealista/kafka_role/issues/70) Provide list of not removable topics* @jmonterrubio
86 |
87 | ## [1.12.1](https://github.com/idealista/kafka_role/tree/1.12.1) (2019-02-19)
88 |
89 | ## Fixed
90 |
91 | - *[#65](https://github.com/idealista/kafka_role/issues/65) Remove space before extra properties* @jmonterrubio
92 |
93 | ## [1.12.0](https://github.com/idealista/kafka_role/tree/1.12.0) (2019-02-12)
94 |
95 | ## [Full Changelog](https://github.com/idealista/kafka_role/compare/1.11.0...1.12.0)
96 |
97 | ## Added
98 |
99 | - *[#62](https://github.com/idealista/kafka_role/issues/62) Provide the KAFKA_JVM_PERFORMANCE_OPTS* @jmonterrubio
100 |
101 | ## [1.11.0](https://github.com/idealista/kafka_role/tree/1.11.0) (2019-02-05)
102 |
103 | ## Fixed
104 |
105 | - *[#59](https://github.com/idealista/kafka_role/issues/59) Fix scala version cannot be set* @jmonterrubio
106 |
107 | ## Changed
108 |
109 | - *[#59](https://github.com/idealista/kafka_role/issues/59) Set 2.12 as default scala version* @jmonterrubio
110 |
111 | ## [1.10.0](https://github.com/idealista/kafka_role/tree/1.10.0) (2019-01-30)
112 |
113 | ## Added
114 |
115 | - *[#56](https://github.com/idealista/kafka_role/issues/56) Configure topic properties* @jmonterrubio
116 |
117 | ## Changed
118 |
119 | - *[#56](https://github.com/idealista/kafka_role/issues/56) Remove compatibility with ansible < 2.7* @jmonterrubio
120 |
121 | ## [1.9.0](https://github.com/idealista/kafka_role/tree/1.9.0) (2018-12-18)
122 |
123 | ## Changed
124 |
125 | - *Default kafka version 2.1.0* @jmonterrubio
126 |
127 | ## Fixed
128 |
129 | - *[#53](https://github.com/idealista/kafka_role/issues/53) Fix problem removing topics in python 3* @jmonterrubio
130 |
131 | ## [1.8.1](https://github.com/idealista/kafka_role/tree/1.8.1) (2018-11-13)
132 |
133 | ## Fixed
134 |
135 | - *[#51](https://github.com/idealista/kafka_role/issues/51) Fix alter kafka topics* @jmonterrubio
136 |
137 | ## [1.8.0](https://github.com/idealista/kafka_role/tree/1.8.0) (2018-10-31)
138 |
139 | ## Added
140 |
141 | - *Add Kafka_OPTS hook* @john-delivuk
142 |
143 | ## Fixed
144 |
145 | - *[#48](https://github.com/idealista/kafka_role/issues/48) Fix error creating topics in a second launch* @jmonterrubio
146 |
147 | ## [1.7.1](https://github.com/idealista/kafka_role/tree/1.7.1) (2018-10-29)
148 |
149 | ## Fixed
150 |
151 | - *[#45](https://github.com/idealista/kafka_role/issues/45) Avoided remove all internal topics* @jmonterrubio
152 |
153 | ## [1.7.0](https://github.com/idealista/kafka_role/tree/1.7.0) (2018-10-29)
154 |
155 | ## Fixed
156 |
157 | - *[#40](https://github.com/idealista/kafka_role/issues/40) Avoided remove __consumer_offsets internal topic* @jmonterrubio
158 |
159 | ## Added
160 |
161 | - *[#39](https://github.com/idealista/kafka_role/issues/39) Add extra configuration properties* @jmonterrubio
162 |
163 | ## [1.6.0](https://github.com/idealista/kafka_role/tree/1.6.0) (2018-10-09)
164 |
165 | ## Added
166 |
167 | - *[#33](https://github.com/idealista/kafka_role/issues/33) Enable or disable GC log by configuration* @jmonterrubio
168 | - *[#35](https://github.com/idealista/kafka_role/issues/35) Remove old kafka installation* @jmonterrubio
169 | - *[#36](https://github.com/idealista/kafka_role/issues/36) Configure topics from role* @jmonterrubio
170 |
171 | ## Changed
172 |
173 | - *[#34](https://github.com/idealista/kafka_role/issues/34) Use goss instead of testinfra for molecule test* @jmonterrubio
174 |
175 | ## [1.5.0](https://github.com/idealista/kafka_role/tree/1.5.0) (2018-06-06)
176 |
177 | ## [Full Changelog](https://github.com/idealista/kafka_role/compare/1.4.0...1.5.0)
178 |
179 | ## Added
180 |
181 | - *[#28](https://github.com/idealista/kafka_role/issues/28) Adding new variables to server.properties template* @amanzanotejon
182 | - *[#30](https://github.com/idealista/kafka_role/issues/30) Kafka.service, log4j.properties and server.properties can be provided via playbooks* @jnogol
183 |
184 | ## [1.4.0](https://github.com/idealista/kafka_role/tree/1.4.0) (2018-06-06)
185 |
186 | ## [Full Changelog](https://github.com/idealista/kafka_role/compare/1.3.1...1.4.0)
187 |
188 | ## Changed
189 |
190 | - *[#20](https://github.com/idealista/kafka_role/issues/20) Update to kafka v1.1* @eskabetxe
191 |
192 | ## Fixed
193 |
194 | - *[#26](https://github.com/idealista/kafka_role/issues/26) Testinfra tests don't pass under Vagrant* @eskabetxe
195 |
196 | ## [1.3.1](https://github.com/idealista/kafka_role/tree/1.3.1) (2018-02-27)
197 |
198 | ## [Full Changelog](https://github.com/idealista/kafka_role/compare/1.3.0...1.3.1)
199 |
200 | ## Changed
201 |
202 | - *[#23](https://github.com/idealista/kafka_role/issues/23) Using Vagrant hostmanager instead of Landrush* @dortegau
203 |
204 | ## Fixed
205 |
206 | - *[#22](https://github.com/idealista/kafka_role/pull/22) Picking up Xms correctly from KAFKA_HEAP_OPTS environment variable* @didumgai
207 |
208 | ## [1.3.0](https://github.com/idealista/kafka_role/tree/1.3.0) (2018-02-01)
209 |
210 | ## [Full Changelog](https://github.com/idealista/kafka_role/compare/1.2.0...1.3.0)
211 |
212 | ## Added
213 |
214 | - *[#13](https://github.com/idealista/kafka_role/issues/13) Enable delete topics property* @jmonterrubio
215 | - *[#12](https://github.com/idealista/kafka_role/issues/12) Enable auto create topics property* @jmonterrubio
216 |
217 | ## Changed
218 |
219 | - *Use uvigo's mirror to download Kafka* @jnogol
220 | - *[#16](https://github.com/idealista/kafka_role/pull/16) Remove Java role dependency* @maqdev
221 |
222 | ## Fixed
223 |
224 | - *Fix Kafka errored version* @jnogol
225 | - *[#16](https://github.com/idealista/kafka_role/pull/16) Create data path if not exists* @maqdev
226 |
227 | ## [1.2.0](https://github.com/idealista/kafka_role/tree/1.2.0) (2017-05-19)
228 |
229 | ## [Full Changelog](https://github.com/idealista/kafka_role/compare/1.1.0...1.2.0)
230 |
231 | ## Changed
232 |
233 | - *[#5](https://github.com/idealista/kafka_role/issues/5) Added kafka_hosts var to fix broker id issue* @jmonterrubio
234 |
235 | ## [1.1.0](https://github.com/idealista/kafka_role/tree/1.1.0) (2017-04-04)
236 |
237 | ## [Full Changelog](https://github.com/idealista/kafka_role/compare/1.0.0...1.1.0)
238 |
239 | ## Changed
240 |
241 | - *[#2](https://github.com/idealista/kafka_role/issues/2) Change defaults vars and improve installation* @jmonterrubio
242 |
243 | ## [1.0.0](https://github.com/idealista/kafka_role/tree/1.0.0) (2017-02-28)
244 |
245 | ## Added
246 |
247 | - *First release*
248 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
10 |
11 | "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
12 |
13 | "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
14 |
15 | "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
16 |
17 | "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
18 |
19 | "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
20 |
21 | "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
22 |
23 | "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
24 |
25 | "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
26 |
27 | "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
28 |
29 | 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
30 |
31 | 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
32 |
33 | 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
34 |
35 | You must give any other recipients of the Work or Derivative Works a copy of this License; and
36 |
37 | You must cause any modified files to carry prominent notices stating that You changed the files; and
38 |
39 | You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
40 |
41 | If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
42 | You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
43 | 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
44 |
45 | 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
46 |
47 | 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
48 |
49 | 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
50 |
51 | 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
52 |
53 | END OF TERMS AND CONDITIONS
54 |
55 | APPENDIX: How to apply the Apache License to your work
56 |
57 | To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
58 |
59 |
60 | Copyright [yyyy] [name of copyright owner]
61 |
62 | Licensed under the Apache License, Version 2.0 (the "License");
63 | you may not use this file except in compliance with the License.
64 | You may obtain a copy of the License at
65 |
66 | http://www.apache.org/licenses/LICENSE-2.0
67 |
68 | Unless required by applicable law or agreed to in writing, software
69 | distributed under the License is distributed on an "AS IS" BASIS,
70 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
71 | See the License for the specific language governing permissions and
72 | limitations under the License.
73 |
74 | /**
75 | * Copyright Idealista S.A.
76 | *
77 | * Licensed under the Apache License, Version 2.0 (the "License");
78 | * you may not use this file except in compliance with the License.
79 | * You may obtain a copy of the License at
80 | *
81 | * http://www.apache.org/licenses/LICENSE-2.0
82 | *
83 | * Unless required by applicable law or agreed to in writing, software
84 | * distributed under the License is distributed on an "AS IS" BASIS,
85 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
86 | * See the License for the specific language governing permissions and
87 | * limitations under the License.
88 | */
89 |
--------------------------------------------------------------------------------
/templates/server.properties.j2:
--------------------------------------------------------------------------------
1 | ##
2 | # {{ ansible_managed }}
3 |
4 | ############################# Server Basics #############################
5 |
6 | # The id of the broker. This must be set to a unique integer for each broker.
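{# Picks the broker id from the kafka_hosts entry whose host matches this inventory hostname; emits broker.id=0 when kafka_hosts is undefined or an entry has no host key. #}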
7 | {% if kafka_hosts is defined %}
8 | {% for server in kafka_hosts %}
9 | {% if server.host is defined %}
10 | {% if server.host == inventory_hostname %}
11 | broker.id={{ server.id }}
12 | {% endif %}
13 | {% else %}
14 | broker.id=0
15 | {% endif %}
16 | {% endfor %}
17 | {% else %}
18 | broker.id=0
19 | {% endif %}
20 |
21 | {% if kafka_delete_topic_enable is defined %}
22 | delete.topic.enable={{ kafka_delete_topic_enable }}
23 | {% endif %}
24 |
25 | {% if kafka_auto_create_topics is defined %}
26 | auto.create.topics.enable={{ kafka_auto_create_topics }}
27 | {% endif %}
28 |
29 | ############################# Socket Server Settings #############################
30 |
31 | # The address the socket server listens on. It will get the value returned from
32 | # java.net.InetAddress.getCanonicalHostName() if not configured.
33 | # FORMAT:
34 | # listeners = security_protocol://host_name:port
35 | # EXAMPLE:
36 | # listeners = PLAINTEXT://your.host.name:9092
37 | {% if kafka_listeners is defined %}
38 | listeners=
39 | {%- for listener in kafka_listeners %}
40 | {{- listener.name }}://{{ listener.host }}{{ "," if not loop.last else "" }}
41 | {%- endfor %}
42 | {% else %}
43 | listeners=PLAINTEXT://{{ kafka_host_name }}:{{ kafka_port }}
44 | {% endif %}
45 |
46 |
47 | {% if kafka_listeners is defined %}
48 | # Map between listener names and security protocols. This must be defined for the same security protocol to be usable in more than one port or IP. For example, internal and external traffic can be separated even if SSL is required for both. Concretely, the user could define listeners with names INTERNAL and EXTERNAL and this property as: `INTERNAL:SSL,EXTERNAL:SSL`. As shown, key and value are separated by a colon and map entries are separated by commas. Each listener name should only appear once in the map. Different security (SSL and SASL) settings can be configured for each listener by adding a normalised prefix (the listener name is lowercased) to the config name. For example, to set a different keystore for the INTERNAL listener, a config with name listener.name.internal.ssl.keystore.location would be set. If the config for the listener name is not set, the config will fallback to the generic config (i.e. ssl.keystore.location). Note that in KRaft a default mapping from the listener names defined by controller.listener.names to PLAINTEXT is assumed if no explicit mapping is provided and no other security protocol is in use.
49 | listener.security.protocol.map=
50 | {%- for listener in kafka_listeners %}
51 | {{- listener.name }}:{{ listener.protocol }}{{ "," if not loop.last else "" }}
52 | {%- endfor %}
53 | {% endif %}
54 |
55 | {% if kafka_inter_broker_listener_name is defined %}
56 | # Name of listener used for communication between brokers. If this is unset, the listener name is defined by security.inter.broker.protocol. It is an error to set this and security.inter.broker.protocol properties at the same time.
57 | inter.broker.listener.name={{ kafka_inter_broker_listener_name }}
58 | {% endif %}
59 |
60 | {% if kafka_control_plane_listener_name is defined %}
61 | # Name of the listener used for communication between the controller and brokers.
62 | control.plane.listener.name={{ kafka_control_plane_listener_name }}
63 | {% endif %}
64 |
65 | {% if kafka_listeners is defined %}
66 | # Hostname and port the broker will advertise to producers and consumers. If not set,
67 | # it uses the value for "listeners" if configured. Otherwise, it will use the value
68 | # returned from java.net.InetAddress.getCanonicalHostName().
69 | advertised.listeners=
70 | {%- for listener in kafka_listeners %}
71 | {%- if listener.advertised_host is defined %}
72 | {{- listener.name }}://{{ listener.advertised_host }}{{ "," if not loop.last else "" }}
73 | {%- endif %}
74 | {%- endfor %}
75 | {% endif %}
76 |
77 | # The number of threads handling network requests
78 | num.network.threads={{ kafka_num_network_threads }}
79 |
80 | # The number of threads doing disk I/O
81 | num.io.threads={{ kafka_num_io_threads }}
82 |
83 | # The send buffer (SO_SNDBUF) used by the socket server
84 | socket.send.buffer.bytes={{ kafka_socket_send_buffer_bytes }}
85 |
86 | # The receive buffer (SO_RCVBUF) used by the socket server
87 | socket.receive.buffer.bytes={{ kafka_socket_receive_buffer_bytes }}
88 |
89 | # The maximum size of a request that the socket server will accept (protection against OOM)
90 | socket.request.max.bytes={{ kafka_socket_request_max_bytes }}
91 |
92 | ############################# Security #############################
93 |
94 | # The list of SASL mechanisms enabled in the Kafka server. The list may contain any mechanism for which a security provider is available. Only GSSAPI is enabled by default.
95 | {% if kafka_sasl_enabled_mechanisms is defined %}
96 | sasl.enabled.mechanisms={{ kafka_sasl_enabled_mechanisms }}
97 | {% endif %}
98 |
99 | {% if kafka_sasl_mechanism_inter_broker_protocol is defined %}
100 | sasl.mechanism.inter.broker.protocol={{ kafka_sasl_mechanism_inter_broker_protocol }}
101 | {% endif %}
102 |
103 | {% if kafka_allow_everyone_if_no_acl_found is defined %}
104 | allow.everyone.if.no.acl.found={{ kafka_allow_everyone_if_no_acl_found }}
105 | {% endif %}
106 |
107 | {% if kafka_authorizer_class_name is defined %}
108 | authorizer.class.name={{ kafka_authorizer_class_name }}
109 | {% endif %}
110 |
111 | {% if kafka_listeners_sasl_jaas_config is defined %}
112 | {# listener.name.clients.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
113 | username="admin" \
114 | password="admin-secret" \
115 | user_admin="admin-secret"; #}
116 | {% for listener_config in kafka_listeners_sasl_jaas_config %}
117 | listener.name.{{ listener_config.name|lower }}.{{ listener_config.mechanism|lower }}.sasl.jaas.config={{ listener_config.module }} required \
118 | {% if listener_config.broker_auth is defined %}
119 | username="{{ listener_config.broker_auth.user }}" \
120 | password="{{ listener_config.broker_auth.pass }}" \
121 | {% endif %}
122 | {% if listener_config.acl_auth is defined %}
123 | {% for user in listener_config.acl_auth %}
124 | user_{{ user.user }}="{{ user.pass }}"{{ ";" if loop.last else " \\"}}
125 | {% endfor %}
126 | {% endif %}
127 | {% endfor %}
128 | {% endif %}
129 |
130 | {% if kafka_super_users is defined %}
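{# Renders e.g. kafka_super_users: ['admin', 'bob'] as super.users=User:admin;User:bob; #}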
131 | super.users={{ ['User:'] | product(kafka_super_users) | map('join') | list | product([';']) | map('join') | list | join }}
132 | {% endif %}
133 |
134 |
135 | ############################# Internal Topic Settings #############################
136 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
137 | # For anything other than development testing, a value greater than 1, such as 3, is recommended to ensure availability.
138 | offsets.topic.replication.factor={{ kafka_offsets_topic_replication_factor }}
139 | transaction.state.log.replication.factor={{ kafka_transaction_state_log_replication_factor }}
140 | transaction.state.log.min.isr={{ kafka_transaction_state_log_min_isr }}
141 |
142 |
143 | ############################# Log Basics #############################
144 |
145 | # A comma separated list of directories under which to store log files
146 | log.dirs={{ kafka_data_path }}
147 |
148 | # The number of logical partitions per topic per server. More partitions allow greater parallelism
149 | # for consumption, but also mean more files.
150 | num.partitions={{ kafka_num_partitions }}
151 |
152 | # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
153 | # Increasing this value is recommended for installations with data dirs located in a RAID array.
154 | num.recovery.threads.per.data.dir={{ kafka_num_recovery_threads_per_data_dir }}
155 |
156 | ############################# Log Flush Policy #############################
157 |
158 | # Messages are immediately written to the filesystem but by default we only fsync() to sync
159 | # the OS cache lazily. The following configurations control the flush of data to disk.
160 | # There are a few important trade-offs here:
161 | # 1. Durability: Unflushed data may be lost if you are not using replication.
162 | # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
163 | # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
164 | # The settings below allow one to configure the flush policy to flush data after a period of time or
165 | # every N messages (or both). This can be done globally and overridden on a per-topic basis.
166 |
167 | # The number of messages to accept before forcing a flush of data to disk
168 | {% if kafka_log_flush_interval_messages is defined %}
169 | log.flush.interval.messages={{ kafka_log_flush_interval_messages }}
170 | {% endif %}
171 |
172 | # The maximum amount of time a message can sit in a log before we force a flush
173 | {% if kafka_log_flush_interval_ms is defined %}
174 | log.flush.interval.ms={{ kafka_log_flush_interval_ms }}
175 | {% endif %}
176 |
177 | ############################# Log Retention Policy #############################
178 |
179 | # The following configurations control the disposal of log segments. The policy can
180 | # be set to delete segments after a period of time, or after a given size has accumulated.
181 | # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
182 | # from the end of the log.
183 |
184 | # The minimum age of a log file to be eligible for deletion
185 | log.retention.hours={{ kafka_log_retention_hours }}
186 |
187 | # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
188 | # segments don't drop below log.retention.bytes.
189 | log.retention.bytes={{ kafka_log_retention_bytes }}
190 |
191 | # The maximum size of a log segment file. When this size is reached a new log segment will be created.
192 | log.segment.bytes={{ kafka_log_segment_bytes }}
193 |
194 | # The interval at which log segments are checked to see if they can be deleted according
195 | # to the retention policies
196 | log.retention.check.interval.ms={{ kafka_log_retention_check_interval_ms }}
197 |
198 | ############################# Zookeeper #############################
199 | {% if kafka_zookeeper_hosts is defined %}
200 | # Zookeeper connection string (see zookeeper docs for details).
202 | # This is a comma-separated list of host:port pairs, each corresponding to a zk
202 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
203 | # You can also append an optional chroot string to the urls to specify the
204 | # root directory for all kafka znodes.
205 | zookeeper.connect={{ kafka_zookeeper_hosts | join(',') }}
206 |
207 | # Timeout in ms for connecting to zookeeper
208 | zookeeper.connection.timeout.ms={{ kafka_zookeeper_connection_timeout_ms }}
209 |
210 | {% if kafka_zookeeper_sasl_enabled is defined %}
211 | zookeeper.sasl.enabled={{ kafka_zookeeper_sasl_enabled }}
212 | {% endif %}
213 | {% endif %}
214 |
215 | ############################# Extra Properties #############################
216 | {% if kafka_extra_properties is defined %}
217 | {% for config in kafka_extra_properties %}
218 | {{ config.key }}={{ config.value }}
219 | {% endfor %}
220 | {% endif %}
221 |
--------------------------------------------------------------------------------
/molecule/agents/templates/agents/newrelic/newrelic.yml.j2:
--------------------------------------------------------------------------------
1 | # This file configures the New Relic Agent. New Relic monitors
2 | # Java applications with deep visibility and low overhead. For more details and additional
3 | # configuration options visit https://docs.newrelic.com/docs/java/java-agent-configuration.
4 | #
5 | # {{ ansible_managed }}
6 | #
7 | # This section is for settings common to all environments.
8 | # Do not add anything above this next line.
9 | common: &default_settings
10 |
11 | # ============================== LICENSE KEY ===============================
12 | # You must specify the license key associated with your New Relic
13 | # account. For example, if your license key is 12345 use this:
14 | # license_key: '12345'
15 | # The key binds your Agent's data to your account in the New Relic service.
16 | license_key: '{{ item.params.license_key }}'
17 |
18 | # Agent Enabled
19 | # Use this setting to disable the agent instead of removing it from the startup command.
20 | # Default is true.
21 | agent_enabled: true
22 |
23 | # Set the name of your application as you'd like it show up in New Relic.
24 | # If enable_auto_app_naming is false, the agent reports all data to this application.
25 | # Otherwise, the agent reports only background tasks (transactions for non-web applications)
26 | # to this application. To report data to more than one application
27 | # (useful for rollup reporting), separate the application names with ";".
28 | # For example, to report data to "My Application" and "My Application 2" use this:
29 | # app_name: My Application;My Application 2
30 | # This setting is required. Up to 3 different application names can be specified.
31 | # The first application name must be unique.
32 | app_name: {{ item.params.application_name }}
33 |
34 | # To enable high security, set this property to true. When in high
35 | # security mode, the agent will use SSL and obfuscated SQL. Additionally,
36 | # request parameters and message parameters will not be sent to New Relic.
37 | high_security: false
38 |
39 | # Set to true to enable support for auto app naming.
40 | # The name of each web app is detected automatically
41 | # and the agent reports data separately for each one.
42 | # This provides a finer-grained performance breakdown for
43 | # web apps in New Relic.
44 | # Default is false.
45 | enable_auto_app_naming: false
46 |
47 | # Set to true to enable component-based transaction naming.
48 | # Set to false to use the URI of a web request as the name of the transaction.
49 | # Default is true.
50 | enable_auto_transaction_naming: true
51 |
52 | # The agent uses its own log file to keep its logging
53 | # separate from that of your application. Specify the log level here.
54 | # This setting is dynamic, so changes do not require restarting your application.
55 | # The levels in increasing order of verboseness are:
56 | # off, severe, warning, info, fine, finer, finest
57 | # Default is info.
58 | log_level: info
59 |
60 | # Log all data sent to and from New Relic in plain text.
61 | # This setting is dynamic, so changes do not require restarting your application.
62 | # Default is false.
63 | audit_mode: false
64 |
65 | # The number of backup log files to save.
66 | # Default is 1.
67 | log_file_count: 1
68 |
69 | # The maximum number of kbytes to write to any one log file.
70 | # The log_file_count must be set greater than 1.
71 | # Default is 0 (no limit).
72 | log_limit_in_kbytes: 0
73 |
74 | # Override other log rolling configuration and roll the logs daily.
75 | # Default is false.
76 | log_daily: false
77 |
78 | # The name of the log file.
79 | # Default is newrelic_agent.log.
80 | log_file_name: newrelic_agent.log
81 |
82 | # The log file directory.
83 | # Default is the logs directory in the newrelic.jar parent directory.
84 | #log_file_path:
85 |
86 | # The agent communicates with New Relic via https by
87 | # default. If you want to communicate with newrelic via http,
88 | # then turn off SSL by setting this value to false.
89 | # This work is done asynchronously to the threads that process your
90 | # application code, so response times will not be directly affected
91 | # by this change.
92 | # Default is true.
93 | ssl: true
94 |
95 | # Proxy settings for connecting to the New Relic server:
96 | # If a proxy is used, the host setting is required. Other settings
97 | # are optional. Default port is 8080. The username and password
98 | # settings will be used to authenticate to Basic Auth challenges
99 | # from a proxy server.
100 | #proxy_host: hostname
101 | #proxy_port: 8080
102 | #proxy_user: username
103 | #proxy_password: password
104 |
105 | # Limits the number of lines to capture for each stack trace.
106 | # Default is 30
107 | max_stack_trace_lines: 30
108 |
109 | # Provides the ability to configure the attributes sent to New Relic. These
110 | # attributes can be found in transaction traces, traced errors, Insight's
111 | # transaction events, and Insight's page views.
112 | attributes:
113 |
114 | # When true, attributes will be sent to New Relic. The default is true.
115 | enabled: true
116 |
117 | # A comma separated list of attribute keys whose values should
118 | # be sent to New Relic.
119 | #include:
120 |
121 | # A comma separated list of attribute keys whose values should
122 | # not be sent to New Relic.
123 | #exclude:
124 |
125 |
126 | # Transaction tracer captures deep information about slow
127 | # transactions and sends this to the New Relic service once a
128 | # minute. Included in the transaction is the exact call sequence of
129 | # the transactions including any SQL statements issued.
130 | transaction_tracer:
131 |
132 | # Transaction tracer is enabled by default. Set this to false to turn it off.
133 | # This feature is not available to Lite accounts and is automatically disabled.
134 | # Default is true.
135 | enabled: true
136 |
137 | # Threshold in seconds for when to collect a transaction
138 | # trace. When the response time of a controller action exceeds
139 | # this threshold, a transaction trace will be recorded and sent to
140 | # New Relic. Valid values are any float value, or (default) "apdex_f",
141 | # which will use the threshold for the "Frustrated" Apdex level
142 | # (greater than four times the apdex_t value).
143 | # Default is apdex_f.
144 | transaction_threshold: apdex_f
145 |
146 | # When transaction tracer is on, SQL statements can optionally be
147 | # recorded. The recorder has three modes, "off" which sends no
148 | # SQL, "raw" which sends the SQL statement in its original form,
149 | # and "obfuscated", which strips out numeric and string literals.
150 | # Default is obfuscated.
151 | record_sql: obfuscated
152 |
153 | # Set this to true to log SQL statements instead of recording them.
154 | # SQL is logged using the record_sql mode.
155 | # Default is false.
156 | log_sql: false
157 |
158 | # Threshold in seconds for when to collect stack trace for a SQL
159 | # call. In other words, when SQL statements exceed this threshold,
160 | # then capture and send to New Relic the current stack trace. This is
161 | # helpful for pinpointing where long SQL calls originate from.
162 | # Default is 0.5 seconds.
163 | stack_trace_threshold: 0.5
164 |
165 | # Determines whether the agent will capture query plans for slow
166 | # SQL queries. Only supported for MySQL and PostgreSQL.
167 | # Default is true.
168 | explain_enabled: true
169 |
170 | # Threshold for query execution time below which query plans will not
171 | # not be captured. Relevant only when `explain_enabled` is true.
172 | # Default is 0.5 seconds.
173 | explain_threshold: 0.5
174 |
175 | # Use this setting to control the variety of transaction traces.
176 | # The higher the setting, the greater the variety.
177 | # Set this to 0 to always report the slowest transaction trace.
178 | # Default is 20.
179 | top_n: 20
180 |
181 | # Error collector captures information about uncaught exceptions and
182 | # sends them to New Relic for viewing.
183 | error_collector:
184 |
185 | # This property enables the collection of errors. If the property is not
186 | # set or the property is set to false, then errors will not be collected.
187 | # Default is true.
188 | enabled: true
189 |
190 | # Use this property to exclude specific exceptions from being reported as errors
191 | # by providing a comma separated list of full class names.
192 | # The default is to exclude akka.actor.ActorKilledException. If you want to override
193 | # this, you must provide any new value as an empty list is ignored.
194 | ignore_errors: akka.actor.ActorKilledException
195 |
196 | # Use this property to exclude specific http status codes from being reported as errors
197 | # by providing a comma separated list of status codes.
198 | # The default is to exclude 404s. If you want to override
199 | # this, you must provide any new value as an empty list is ignored.
200 | ignore_status_codes: 404
201 |
202 | # Transaction Events are used for Histograms and Percentiles. Unaggregated data is collected
203 | # for each web transaction and sent to the server on harvest.
204 | transaction_events:
205 |
206 | # Set to false to disable transaction events.
207 | # Default is true.
208 | enabled: true
209 |
210 | # Events are collected up to the configured amount. Afterwards, events are sampled to
211 | # maintain an even distribution across the harvest cycle.
212 | # Default is 2000. Setting to 0 will disable.
213 | max_samples_stored: 2000
214 |
215 | # Cross Application Tracing adds request and response headers to
216 | # external calls using supported HTTP libraries to provide better
217 | # performance data when calling applications monitored by other New Relic Agents.
218 | cross_application_tracer:
219 |
220 | # Set to false to disable cross application tracing.
221 | # Default is true.
222 | enabled: true
223 |
224 | # Thread profiler measures wall clock time, CPU time, and method call counts
225 | # in your application's threads as they run.
226 | # This feature is not available to Lite accounts and is automatically disabled.
227 | thread_profiler:
228 |
229 | # Set to false to disable the thread profiler.
230 | # Default is true.
231 | enabled: true
232 |
233 | # New Relic Real User Monitoring gives you insight into the performance real users are
234 | # experiencing with your website. This is accomplished by measuring the time it takes for
235 | # your users' browsers to download and render your web pages by injecting a small amount
236 | # of JavaScript code into the header and footer of each page.
237 | browser_monitoring:
238 |
239 | # By default the agent automatically inserts API calls in compiled JSPs to
240 | # inject the monitoring JavaScript into web pages. Not all rendering engines are supported.
241 | # See https://docs.newrelic.com/docs/java/real-user-monitoring-in-java#manual_instrumentation
242 | # for instructions to add these manually to your pages.
243 | # Set this attribute to false to turn off this behavior.
244 | auto_instrument: true
245 |
246 | class_transformer:
247 | # This instrumentation reports the name of the user principal returned from
248 | # HttpServletRequest.getUserPrincipal() when servlets and filters are invoked.
249 | com.newrelic.instrumentation.servlet-user:
250 | enabled: false
251 |
252 | com.newrelic.instrumentation.spring-aop-2:
253 | enabled: false
254 |
255 | # Classes loaded by classloaders in this list will not be instrumented.
256 | # This is a useful optimization for runtimes which use classloaders to
257 | # load dynamic classes which the agent would not instrument.
258 | classloader_excludes:
259 | groovy.lang.GroovyClassLoader$InnerLoader,
260 | org.codehaus.groovy.runtime.callsite.CallSiteClassLoader,
261 | com.collaxa.cube.engine.deployment.BPELClassLoader,
262 | org.springframework.data.convert.ClassGeneratingEntityInstantiator$ObjectInstantiatorClassGenerator,
263 | org.mvel2.optimizers.impl.asm.ASMAccessorOptimizer$ContextClassLoader,
264 | gw.internal.gosu.compiler.SingleServingGosuClassLoader,
265 |
266 | # User-configurable custom labels for this agent. Labels are name-value pairs.
267 | # There is a maximum of 64 labels per agent. Names and values are limited to 255 characters.
268 | # Names and values may not contain colons (:) or semicolons (;).
269 | labels:
270 |
271 | # An example label
272 | #label_name: label_value
273 |
274 |
275 | # Application Environments
276 | # ------------------------------------------
277 | # Environment specific settings are in this section.
278 | # You can use the environment to override the default settings.
279 | # For example, to change the app_name setting.
280 | # Use -Dnewrelic.environment= on the Java startup command line
281 | # to set the environment.
282 | # The default environment is production.
283 |
284 | # NOTE if your application has other named environments, you should
285 | # provide configuration settings for these environments here.
286 |
287 | development:
288 | <<: *default_settings
289 | app_name: My Application (Development)
290 |
291 | test:
292 | <<: *default_settings
293 | app_name: My Application (Test)
294 |
295 | production:
296 | <<: *default_settings
297 |
298 | staging:
299 | <<: *default_settings
300 | app_name: My Application (Staging)
--------------------------------------------------------------------------------