├── README.md
├── _config.yml
├── group_vars
│   └── all
├── inventory
├── roles
│   ├── common
│   │   └── tasks
│   │       └── main.yml
│   ├── confluent
│   │   └── tasks
│   │       ├── install.yml
│   │       ├── kafka-rest.yml
│   │       ├── kafka.yml
│   │       ├── main.yml
│   │       ├── prereq.yml
│   │       ├── schema-registry.yml
│   │       └── zookeeper.yml
│   ├── docker-ce
│   │   ├── README.md
│   │   ├── defaults
│   │   │   └── main.yml
│   │   ├── handlers
│   │   │   └── main.yml
│   │   └── tasks
│   │       ├── lvm.yml
│   │       ├── main.yml
│   │       └── proxy.yml
│   ├── firewalld
│   │   ├── defaults
│   │   │   └── main.yml
│   │   ├── handlers
│   │   │   └── main.yml
│   │   └── tasks
│   │       └── main.yml
│   └── init
│       ├── defaults
│       │   └── main.yml
│       ├── files
│       │   ├── id_rsa.pub
│       │   └── selinux_policy
│       │       ├── ifconfig_custom.te
│       │       └── sshd_pol_create_userdir.te
│       ├── handlers
│       │   └── main.yml
│       ├── tasks
│       │   ├── bash.yml
│       │   ├── dns.yml
│       │   ├── main.yml
│       │   ├── selinux.yml
│       │   ├── sshd.yml
│       │   └── user.yml
│       └── vars
│           ├── CentOS-7.yml
│           └── main.yml
└── site.yml
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Confluent Cluster

## Intro

Create a Confluent cluster using the Confluent community edition and Docker CE (yes, the community edition too).

## Requirements

* 3 nodes running CentOS 7
* SELinux enabled
* firewalld enabled
* Docker CE 17.03.1
* Ansible >= 2.3

## Install

* Define your servers in your inventory file.

* Run the playbook using this inventory.

Example:

```
sudo ansible-playbook site.yml -i inventory
```

To check the installation:

```
[juan.enciso@satelite confluent-cluster]$ sudo ansible -i inventory -m shell -a "docker ps -a" confluent-cluster
confluent02.iplanet.work | SUCCESS | rc=0 >>
CONTAINER ID   IMAGE                                   COMMAND                  CREATED       STATUS          PORTS   NAMES
a4f44e608bf7   confluentinc/cp-kafka-rest:3.2.1        "/etc/confluent/do..."   5 hours ago   Up 10 seconds           kafka-rest
6cefd811e43a   confluentinc/cp-schema-registry:3.2.1   "/etc/confluent/do..."   5 hours ago   Up 10 seconds           schema-registry
2b8f602d9c2d   confluentinc/cp-kafka:3.2.1             "/etc/confluent/do..."   5 hours ago   Up 10 seconds           kafka
38b6a9f4e7bb   confluentinc/cp-zookeeper:3.2.1         "/etc/confluent/do..."   5 hours ago   Up 10 seconds           zk

confluent03.iplanet.work | SUCCESS | rc=0 >>
CONTAINER ID   IMAGE                                   COMMAND                  CREATED       STATUS          PORTS   NAMES
30ed1efd16ce   confluentinc/cp-kafka-rest:3.2.1        "/etc/confluent/do..."   5 hours ago   Up 10 seconds           kafka-rest
ba148a13acc3   confluentinc/cp-schema-registry:3.2.1   "/etc/confluent/do..."   5 hours ago   Up 10 seconds           schema-registry
c78e3d9effa9   confluentinc/cp-kafka:3.2.1             "/etc/confluent/do..."   5 hours ago   Up 10 seconds           kafka
7176a584768b   confluentinc/cp-zookeeper:3.2.1         "/etc/confluent/do..."   5 hours ago   Up 10 seconds           zk

confluent01.iplanet.work | SUCCESS | rc=0 >>
CONTAINER ID   IMAGE                                   COMMAND                  CREATED       STATUS          PORTS   NAMES
a9a3f4e623b6   confluentinc/cp-kafka-rest:3.2.1        "/etc/confluent/do..."   5 hours ago   Up 10 seconds           kafka-rest
55aee8c87e3c   confluentinc/cp-schema-registry:3.2.1   "/etc/confluent/do..."   5 hours ago   Up 10 seconds           schema-registry
694f4b01d90e   confluentinc/cp-kafka:3.2.1             "/etc/confluent/do..."   5 hours ago   Up 10 seconds           kafka
7b3878c7aeb6   confluentinc/cp-zookeeper:3.2.1         "/etc/confluent/do..."   5 hours ago   Up 10 seconds           zk

[juan.enciso@satelite confluent-cluster]$
```
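Beyond `docker ps`, the services can be probed directly. A minimal smoke test, assuming the hostnames from the example inventory (the REST Proxy listens on 8082 and ZooKeeper on 2181, per `group_vars/all`):

```
curl http://confluent01.iplanet.work:8082/topics
echo ruok | nc confluent01.iplanet.work 2181   # a healthy ZooKeeper answers "imok"
```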
## Notes

If you are behind a proxy server, set the variable ***proxy_enabled: True*** in `group_vars/all` (the proxy host and port are taken from ***proxy_server*** and ***proxy_port***).
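The flag can also be enabled for a single run without editing `group_vars/all`, using Ansible's standard extra-vars mechanism:

```
sudo ansible-playbook site.yml -i inventory -e proxy_enabled=True
```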
## Author

Juan Enciso

juan.enciso@gmail.com
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
theme: jekyll-theme-cayman
--------------------------------------------------------------------------------
/group_vars/all:
--------------------------------------------------------------------------------
---
## Init
unicred_init: False

## Proxy
proxy_server: 192.168.0.20
proxy_port: 8080
proxy_url: http://{{ proxy_server }}:{{ proxy_port }}
proxy_enabled: False
proxy_env:
  http_proxy: "{{ proxy_url }}"
  https_proxy: "{{ proxy_url }}"
  no_proxy: "127.0.0.1, localhost"

## Confluent
container_uid: 65534

kafka_data_dir: /var/lib/kafka/data
kafka_log_dir: /var/log/kafka
kafka_auto_create_topics_enable: true
kafka_delete_topic_enable: true

zookeeper_data_dir: /var/lib/zookeeper/data
zookeeper_txnlogs_dir: /var/lib/zookeeper/log

## SELINUX
selinux_enabled: True

## Firewall Rules
firewalld_disabled: False

firewalld_rules:
  zookeeper-server:
    port: 2888
    protocol: tcp
    state: enabled
    zone: public
    permanent: true
  zookeeper-server-cluster:
    port: 3888
    protocol: tcp
    state: enabled
    zone: public
    permanent: true
  zookeeper-client:
    port: 2181
    protocol: tcp
    state: enabled
    zone: public
    permanent: true
  zookeeper-jmx:
    port: 8989
    protocol: tcp
    state: enabled
    zone: public
    permanent: true
  kafka:
    port: 9092
    protocol: tcp
    state: enabled
    zone: public
    permanent: true
  kafka-schema-registry:
    port: 8081
    protocol: tcp
    state: enabled
    zone: public
    permanent: true
  kafka-rest:
    port: 8082
    protocol: tcp
    state: enabled
    zone: public
    permanent: true
--------------------------------------------------------------------------------
/inventory:
--------------------------------------------------------------------------------
[confluent-cluster]
confluent01.iplanet.work
confluent02.iplanet.work
confluent03.iplanet.work
--------------------------------------------------------------------------------
/roles/common/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: common | Install core packages
  yum: name={{ item }} state=present
  with_items:
    - vim-enhanced
    - net-tools
    - bind-utils
    - lrzsz
    - wget
    - lsof
    - nfs-utils
    - epel-release
    - libselinux-python
    - ntpdate
    - tcpdump
    - telnet
    - setroubleshoot
    - setools
    - policycoreutils-python
    - yum-utils
    - git
    - unzip
    - mlocate
  tags:
    - common
--------------------------------------------------------------------------------
/roles/confluent/tasks/install.yml:
--------------------------------------------------------------------------------
---
- name: confluent | Create data directories
  file: path={{ item }} owner={{ container_uid }} group={{ container_uid }} state=directory
  with_items:
    - "{{ kafka_data_dir }}"
    - "{{ kafka_log_dir }}"
    - "{{ zookeeper_data_dir }}"
    - "{{ zookeeper_txnlogs_dir }}"
--------------------------------------------------------------------------------
/roles/confluent/tasks/kafka-rest.yml:
--------------------------------------------------------------------------------
---
- name: confluent | Download docker kafka-rest image
  docker_image: name={{ item }}
  with_items:
    - confluentinc/cp-kafka-rest:3.2.1

- name: confluent | Run kafka-rest
  docker_container:
    user: "{{ container_uid }}"
    name: "kafka-rest"
    image: confluentinc/cp-kafka-rest:3.2.1
    state: started
    restart_policy: on-failure
    network_mode: host
    restart: no
    env:
      KAFKA_REST_ZOOKEEPER_CONNECT: "localhost:2181"
      KAFKA_REST_HOST_NAME: "{{ ansible_default_ipv4.address }}"
      KAFKA_REST_LISTENERS: "http://{{ ansible_default_ipv4.address }}:8082"
      KAFKA_REST_SCHEMA_REGISTRY_URL: "http://{{ ansible_default_ipv4.address }}:8081"
--------------------------------------------------------------------------------
/roles/confluent/tasks/kafka.yml:
--------------------------------------------------------------------------------
---
- name: confluent | Download docker kafka images
  docker_image: name={{ item }}
  with_items:
    - confluentinc/cp-kafka:3.2.1

- name: confluent | set zk_listen servers
  set_fact:
    zk_listen: "{% for h in groups['confluent-cluster'] %}{{ hostvars[h]['ansible_default_ipv4'].address }}:2181{% if not loop.last %},{% endif %}{% endfor %}"

- name: confluent | Run kafka container
  docker_container:
    user: "{{ container_uid }}"
    name: "kafka"
    image: confluentinc/cp-kafka:3.2.1
    state: started
    restart_policy: on-failure
    network_mode: host
    restart: no
    volumes:
      - "{{ kafka_data_dir }}:{{ kafka_data_dir }}"
      - "{{ kafka_log_dir }}:{{ kafka_log_dir }}"
    env:
      KAFKA_ZOOKEEPER_CONNECT: "{{ zk_listen }}"
      KAFKA_BROKER_ID: "{{ groups['confluent-cluster'].index(inventory_hostname) + 1 }}"
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://{{ ansible_fqdn }}:9092"
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "{{ kafka_auto_create_topics_enable }}"
      KAFKA_DELETE_TOPIC_ENABLE: "{{ kafka_delete_topic_enable }}"
--------------------------------------------------------------------------------
/roles/confluent/tasks/main.yml:
--------------------------------------------------------------------------------
---
- include: prereq.yml
  tags: prereq

- include: install.yml
  tags: install

- include: zookeeper.yml
  tags: zookeeper

- include: kafka.yml
  tags: kafka

- include: schema-registry.yml
  tags: schema-registry

- include: kafka-rest.yml
  tags: kafka-rest
--------------------------------------------------------------------------------
/roles/confluent/tasks/prereq.yml:
--------------------------------------------------------------------------------
---
- name: confluent | Install java
  yum: name=java-headless state=present

- name: confluent | Install prerequisite packages
  yum: name={{ item }} state=present
  with_items:
    - python-pip

- name: confluent | Install pip packages
  pip: name=docker-py
--------------------------------------------------------------------------------
/roles/confluent/tasks/schema-registry.yml:
--------------------------------------------------------------------------------
---
- name: confluent | Download docker schema-registry image
  docker_image: name={{ item }}
  with_items:
    - confluentinc/cp-schema-registry:3.2.1

- name: confluent | Run schema-registry
  docker_container:
    user: "{{ container_uid }}"
    name: "schema-registry"
    image: confluentinc/cp-schema-registry:3.2.1
    state: started
    restart_policy: on-failure
    network_mode: host
    restart: no
    env:
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: "localhost:2181"
      SCHEMA_REGISTRY_HOST_NAME: "{{ ansible_default_ipv4.address }}"
      SCHEMA_REGISTRY_LISTENERS: "http://{{ ansible_default_ipv4.address }}:8081"
      SCHEMA_REGISTRY_DEBUG: true
--------------------------------------------------------------------------------
/roles/confluent/tasks/zookeeper.yml:
--------------------------------------------------------------------------------
---
- name: confluent | Download docker zookeeper images
  docker_image: name={{ item }}
  with_items:
    - confluentinc/cp-zookeeper:3.2.1

- name: confluent | set zookeeper cluster
  set_fact:
    zk_cluster: "{% for h in groups['confluent-cluster'] %}{{ hostvars[h]['ansible_default_ipv4'].address }}:2888:3888{% if not loop.last %};{% endif %}{% endfor %}"

- name: confluent | Run zookeeper container
  docker_container:
    user: "{{ container_uid }}"
    name: "zk"
    image: confluentinc/cp-zookeeper:3.2.1
    network_mode: host
    state: started
    restart: no
    restart_policy: on-failure
    volumes:
      - "{{ zookeeper_data_dir }}:{{ zookeeper_data_dir }}"
      - "{{ zookeeper_txnlogs_dir }}:{{ zookeeper_txnlogs_dir }}"
    env:
      ZOOKEEPER_SERVER_ID: "{{ groups['confluent-cluster'].index(inventory_hostname) + 1 }}"
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
      ZOOKEEPER_INIT_LIMIT: 5
      ZOOKEEPER_SYNC_LIMIT: 2
      ZOOKEEPER_SERVERS: "{{ zk_cluster }}"
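For illustration, here is what the two generated facts look like for the three inventory hosts, assuming (hypothetically) they resolve to 192.168.1.11 through 192.168.1.13. Each node's `ZOOKEEPER_SERVER_ID` and `KAFKA_BROKER_ID` is simply its 1-based position in the `confluent-cluster` group:

```
zk_listen:  "192.168.1.11:2181,192.168.1.12:2181,192.168.1.13:2181"
zk_cluster: "192.168.1.11:2888:3888;192.168.1.12:2888:3888;192.168.1.13:2888:3888"
```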
--------------------------------------------------------------------------------
/roles/docker-ce/README.md:
--------------------------------------------------------------------------------
## Docker CE Provision

### Requirements

* 1 disk /dev/sdb (~60GB)

If your device is different from sdb, change:

```
docker_lvm_device: /dev/sdX
```

If you prefer not to use LVM, change:

```
docker_lvm_storage: no
```
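For example, a node whose spare disk shows up as /dev/vdb (a hypothetical device name) could override the default in its host_vars or in `group_vars/all`:

```
docker_lvm_device: /dev/vdb
```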
--------------------------------------------------------------------------------
/roles/docker-ce/defaults/main.yml:
--------------------------------------------------------------------------------
---
## DOCKER

docker_version: 17.03.1
docker_lvm_storage: yes
docker_lvm_device: /dev/sdb

## PROXY Settings
proxy_enabled: no
proxy_server: 192.168.0.20
proxy_port: 8080
proxy_url: http://{{ proxy_server }}:{{ proxy_port }}
--------------------------------------------------------------------------------
/roles/docker-ce/handlers/main.yml:
--------------------------------------------------------------------------------
- name: restart docker
  systemd: name=docker state=restarted daemon_reload=yes
--------------------------------------------------------------------------------
/roles/docker-ce/tasks/lvm.yml:
--------------------------------------------------------------------------------
---
- name: docker | Configure disk partition
  parted:
    device: "{{ docker_lvm_device }}"
    number: 1
    flags: [ lvm ]
    state: present

- name: docker | Configure VG
  lvg:
    vg: docker
    pvs: "{{ docker_lvm_device }}1"
  register: vg_created

- name: docker | Configure LV thinpool
  lvol:
    vg: docker
    lv: thinpool
    size: 95%VG
    opts: --wipesignatures y
  register: lv_created_thinpool
  when: vg_created.changed

- name: docker | Configure LV thinpoolmeta
  lvol:
    vg: docker
    lv: thinpoolmeta
    size: 1%VG
    opts: --wipesignatures y
  register: lv_created_thinpoolmeta
  when: vg_created.changed

- name: docker | Convert LV to thin pool
  command: lvconvert -y --zero n -c 512K --thinpool docker/thinpool --poolmetadata docker/thinpoolmeta
  when:
    - lv_created_thinpool.changed
    - lv_created_thinpoolmeta.changed

- name: docker | Setup Profile thinpool
  copy:
    content: |
      activation {
        thin_pool_autoextend_threshold=80
        thin_pool_autoextend_percent=20
      }
    dest: /etc/lvm/profile/docker-thinpool.profile
  register: thinpool_profile

- name: docker | Enable profile LV docker thinpool
  command: lvchange --metadataprofile docker-thinpool docker/thinpool
  when: thinpool_profile.changed

- name: docker | Enable monitoring LVM
  command: lvs -o+seg_monitor
  when: thinpool_profile.changed

- name: docker | Create directory /etc/docker
  file: path=/etc/docker state=directory

- name: docker | Enable LVM storage in Docker service
  copy:
    content: |
      {
        "storage-driver": "devicemapper",
        "storage-opts": [
          "dm.thinpooldev=/dev/mapper/docker-thinpool",
          "dm.use_deferred_removal=true",
          "dm.use_deferred_deletion=true"
        ]
      }
    dest: /etc/docker/daemon.json
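Once the role has run, the thin pool and the storage driver can be verified with standard LVM and Docker tooling (not part of the role):

```
sudo lvs -o lv_name,lv_size,data_percent,metadata_percent docker
sudo docker info | grep -i 'storage driver'
```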
--------------------------------------------------------------------------------
/roles/docker-ce/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: docker | Install prereq packages
  yum: name={{ item }} state=present
  with_items:
    - yum-utils
    - device-mapper-persistent-data
    - lvm2

- name: docker | Install repo
  yum_repository:
    name: docker-ce-stable
    description: Docker CE Stable - $basearch
    baseurl: https://download.docker.com/linux/centos/7/$basearch/stable
    file: docker-ce
    gpgcheck: yes
    gpgkey: https://download.docker.com/linux/centos/gpg

- name: docker | Install docker-ce
  yum: name=docker-ce-{{ docker_version }}.ce-1.el7.centos

- name: docker | LVM configuration
  include: lvm.yml
  when: docker_lvm_storage

- name: docker | proxy configuration
  include: proxy.yml
  when: proxy_enabled

- name: docker | Start Service
  systemd: name=docker state=started enabled=yes
--------------------------------------------------------------------------------
/roles/docker-ce/tasks/proxy.yml:
--------------------------------------------------------------------------------
---
- name: docker-ce | Set proxy environment in docker.service
  lineinfile:
    path: /usr/lib/systemd/system/docker.service
    regexp: '^Environment='
    line: 'Environment="HTTP_PROXY={{ proxy_url }}" "HTTPS_PROXY={{ proxy_url }}" "NO_PROXY=localhost,127.0.0.1"'
    insertafter: '^\[Service\]'
  notify: restart docker
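Whether the proxy variables were picked up by the restarted daemon can be checked with systemd itself:

```
systemctl show --property=Environment docker
```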
--------------------------------------------------------------------------------
/roles/firewalld/defaults/main.yml:
--------------------------------------------------------------------------------
---
firewalld_disabled: False
firewalld_rules:
  ssh:
    port: 22
    protocol: tcp
    state: enabled
    zone: public
    permanent: true
--------------------------------------------------------------------------------
/roles/firewalld/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: restart firewalld
  service: name=firewalld state=restarted enabled=yes
--------------------------------------------------------------------------------
/roles/firewalld/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: Ensure package firewalld is present
  yum: name=firewalld state=present
  when: firewalld_disabled is not defined or not firewalld_disabled

- name: Enable firewalld
  service: name=firewalld enabled=yes state=started
  when: firewalld_disabled is not defined or not firewalld_disabled

- name: Update firewall rules
  firewalld: port={{ item.value.port }}/{{ item.value.protocol }} permanent={{ item.value.permanent }} state={{ item.value.state }} zone={{ item.value.zone }} immediate=True
  with_dict: "{{ firewalld_rules }}"
  notify: restart firewalld
  when: firewalld_disabled is not defined or not firewalld_disabled

- name: Disable firewalld
  service: name=firewalld enabled=no state=stopped
  when: firewalld_disabled
  ignore_errors: True
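After the play, the applied rules can be listed with the firewalld CLI; with the defaults from `group_vars/all` the public zone should show the ZooKeeper, Kafka, Schema Registry and REST Proxy ports:

```
sudo firewall-cmd --zone=public --list-ports
```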
--------------------------------------------------------------------------------
/roles/init/defaults/main.yml:
--------------------------------------------------------------------------------
---
selinux_enabled: True
selinux_mode: enforcing
selinux_policy_modules:
  - sshd_pol_create_userdir
  - ifconfig_custom
--------------------------------------------------------------------------------
/roles/init/files/id_rsa.pub:
--------------------------------------------------------------------------------
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDXCarzuTqxXSSXLeYLYHKr2G32nxryaxmGF1yJpjggg59DOuTkA6q7e8P9JMbMtS7+D3lhLg+CY8xGKA69I0GpTGwHvwrc9IX6Yf61FPkcY/abxu+wPE5SvaGwmWq2MxfIAhTlkp89tZfEIPEbXPoStuIey2D1zcnOhEYgWrPWuWDvRAhcOzvwuFw9um5Bbx2XqAdTOmeN2U9YUb9QOYAOZ95DoX+RzmpdY4lERl9niVlQ9gFIOVC18MHPc14piRAQYw6mDZLCLzdHz9ckcdXtJ6iyo9y1I7qo4f+jRfkFOJwDYtIvYJYy/VhrtsgfK07IQmBiWFr5B07CHdMnz9L1 root@mgmt01.iplanet.work
--------------------------------------------------------------------------------
/roles/init/files/selinux_policy/ifconfig_custom.te:
--------------------------------------------------------------------------------
module ifconfig_custom 1.0;

require {
        type ifconfig_t;
        type hypervkvp_device_t;
        class chr_file { read write };
}

#============= ifconfig_t ==============
allow ifconfig_t hypervkvp_device_t:chr_file { read write };
--------------------------------------------------------------------------------
/roles/init/files/selinux_policy/sshd_pol_create_userdir.te:
--------------------------------------------------------------------------------
module sshd_pol_create_userdir 1.0;

require {
        type unconfined_t;
        type oddjob_mkhomedir_exec_t;
        class file entrypoint;
}

#============= unconfined_t ==============
allow unconfined_t oddjob_mkhomedir_exec_t:file entrypoint;
--------------------------------------------------------------------------------
/roles/init/handlers/main.yml:
--------------------------------------------------------------------------------
- name: restart sssd
  service: name=sssd state=restarted

- name: restart sshd
  service: name=sshd state=restarted
--------------------------------------------------------------------------------
/roles/init/tasks/bash.yml:
--------------------------------------------------------------------------------
---
- name: Add bashrc content
  blockinfile:
    dest: /etc/bashrc
    insertafter: EOF
    marker: "# {mark} ANSIBLE MANAGED BLOCK"
    block: |
      export HISTTIMEFORMAT="%h/%d/%Y - %H:%M:%S "
      export HISTSIZE='15000'
      export HISTFILESIZE='15000'

- name: Add profile
  blockinfile:
    dest: /etc/profile
    insertafter: EOF
    block: |
      function log2syslog
      {
        declare COMMAND
        COMMAND=$(fc -ln -0)
        logger -p local1.notice -t bash -i -- "`who am i | awk '{print $1,$5}'`:${USER}:$$:${COMMAND}"
      }
      trap log2syslog DEBUG
--------------------------------------------------------------------------------
/roles/init/tasks/dns.yml:
--------------------------------------------------------------------------------
---
- name: Insert line for DNS in /etc/resolv.conf
  lineinfile:
    dest: /etc/resolv.conf
    regexp: "^{{ item.name }} {{ item.ip }}"
    line: "{{ item.name }} {{ item.ip }}"
    insertafter: "^search"
  with_items:
    - { name: 'nameserver', ip: '192.168.1.1' }
    - { name: 'nameserver', ip: '8.8.8.8' }
--------------------------------------------------------------------------------
/roles/init/tasks/main.yml:
--------------------------------------------------------------------------------
---
- include: dns.yml

- include: sshd.yml

- include: user.yml

- include: bash.yml

- include: selinux.yml
  when: selinux_enabled
--------------------------------------------------------------------------------
/roles/init/tasks/selinux.yml:
--------------------------------------------------------------------------------
---
- name: Install tools
  yum: name={{ item }} state=present
  with_items:
    - policycoreutils-python
    - libselinux-python

- name: Create directory selinux custom
  file: path=/opt/selinux_custom state=directory

- name: Set selinux config
  selinux: policy=targeted state={{ selinux_mode }}

- name: Copy SELinux type enforcement file
  copy: src=selinux_policy/{{ item }}.te dest=/opt/selinux_custom/
  with_items: "{{ selinux_policy_modules }}"
  register: selinux_policy

- name: Compile SELinux module file
  command: checkmodule -M -m -o /opt/selinux_custom/{{ item }}.mod /opt/selinux_custom/{{ item }}.te
  with_items: "{{ selinux_policy_modules }}"
  when: selinux_policy.changed

- name: Build SELinux policy package
  command: semodule_package -o /opt/selinux_custom/{{ item }}.pp -m /opt/selinux_custom/{{ item }}.mod
  with_items: "{{ selinux_policy_modules }}"
  when: selinux_policy.changed

- name: Load SELinux policy package
  command: semodule -i /opt/selinux_custom/{{ item }}.pp
  with_items: "{{ selinux_policy_modules }}"
  when: selinux_policy.changed
--------------------------------------------------------------------------------
/roles/init/tasks/sshd.yml:
--------------------------------------------------------------------------------
---
- name: Restrict root login to key-based auth via sshd
  lineinfile:
    dest: /etc/ssh/sshd_config
    regexp: "PermitRootLogin"
    line: "PermitRootLogin without-password"
    owner: root
    group: root
    mode: 0600
  notify: restart sshd

- name: Permit RSAAuthentication
  lineinfile:
    dest: /etc/ssh/sshd_config
    regexp: "RSAAuthentication"
    line: "RSAAuthentication yes"
    owner: root
    group: root
    mode: 0600
  notify: restart sshd

- name: Deny GSSAPIAuthentication
  lineinfile:
    dest: /etc/ssh/sshd_config
    regexp: "^GSSAPIAuthentication"
    line: "GSSAPIAuthentication no"
    owner: root
    group: root
    mode: 0600
  notify: restart sshd

- name: no DNS
  lineinfile:
    dest: /etc/ssh/sshd_config
    regexp: "^(#)?UseDNS"
    line: "UseDNS no"
    owner: root
    group: root
    mode: 0600
  notify: restart sshd
--------------------------------------------------------------------------------
/roles/init/tasks/user.yml:
--------------------------------------------------------------------------------
---
- name: Change root password
  user:
    name: root
    state: present
    password: "$6$qXycnebvEHg.ZJl1$boOZcpfalH13JYI0nEDN5TGY0poF18ZeZoKV1BO9Vq173c1tYqiQQrSrfllbQ03hxg10tWY7ZiRxGQOx.ctmQ/"

- name: Change ansible password
  user:
    name: ansible
    state: present
    password: "$6$qXycnebvEHg.ZJl1$boOZcpfalH13JYI0nEDN5TGY0poF18ZeZoKV1BO9Vq173c1tYqiQQrSrfllbQ03hxg10tWY7ZiRxGQOx.ctmQ/"

- name: Copy SSH Key - user ansible
  authorized_key:
    user: ansible
    key: "{{ lookup('file', './id_rsa.pub') }}"
    key_options: 'no-port-forwarding'

- name: Add user ansible to sudoers
  lineinfile:
    dest: /etc/sudoers
    regexp: ^ansible
    line: "ansible ALL=(root) NOPASSWD: ALL"
--------------------------------------------------------------------------------
/roles/init/vars/CentOS-7.yml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jenciso/confluent-cluster/265a626359f8d5ccb5d16ac32aa1ab39b53eeeee/roles/init/vars/CentOS-7.yml
--------------------------------------------------------------------------------
/roles/init/vars/main.yml:
--------------------------------------------------------------------------------
---
ldap_server01: pr001.domain.com.br
ldap_server02: pr002.domain.com.br
--------------------------------------------------------------------------------
/site.yml:
--------------------------------------------------------------------------------
- name: Confluent Platform
  hosts: "confluent-cluster"
  remote_user: ansible
  become: true
  roles:
    - common
    - init
    - firewalld
    - docker-ce
    - confluent
  environment: "{{ proxy_env }}"
--------------------------------------------------------------------------------
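Since roles/confluent/tasks/main.yml tags each include, individual components can be re-run in isolation once the cluster exists, for example:

```
sudo ansible-playbook site.yml -i inventory --tags kafka
```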