├── .gitignore
├── scheme
│   ├── consul.png
│   └── scheme_current.png
├── provisioning
│   ├── roles
│   │   ├── 08_zabbix
│   │   │   ├── 03_web-optimization
│   │   │   │   ├── files
│   │   │   │   │   ├── override_nginx.conf
│   │   │   │   │   └── override_php-fpm.conf
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── 01_zabbix_install
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   ├── templates
│   │   │   │   │   └── zabbix.conf.php.j2
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── 02_nginx-php-fpm
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   ├── templates
│   │   │   │   │   └── zabbix.conf.j2
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   └── 04_zabbix_createDB
│   │   │       └── tasks
│   │   │           └── main.yml
│   │   ├── 07_pgsql-client
│   │   │   ├── templates
│   │   │   │   └── pgpass.j2
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── 07_pgsql-client.yml
│   │   ├── 05_consul-cluster.yml
│   │   ├── 02_hl-client_docker-yandextank
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── files
│   │   │   │   └── yandextank
│   │   │   │       ├── hl-zabbix01.xml
│   │   │   │       └── hl-zabbix02.xml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── load.yaml.j2
│   │   ├── 03_keepalived-haproxy.yml
│   │   ├── 04_pgconpool
│   │   │   ├── 05_install_odyssey
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── templates
│   │   │   │       └── odyssey.conf.j2
│   │   │   ├── 06_keepalived
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   ├── files
│   │   │   │   │   └── keepalived.service
│   │   │   │   ├── templates
│   │   │   │   │   ├── master_keepalived.conf.j2
│   │   │   │   │   └── slave_keepalived.conf.j2
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── 01_preparation_for_rpms_assembly
│   │   │   │   ├── templates
│   │   │   │   │   └── otus-odyssey-vip-manager.conf.j2
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── 03_build_odyssey_binary
│   │   │   │   ├── files
│   │   │   │   │   ├── FindPostgreSQL.cmake
│   │   │   │   │   └── postgres.h
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── 04_build_odyssey_rpm
│   │   │   │   ├── files
│   │   │   │   │   └── FindPostgreSQL_rpmbuild.cmake
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   └── 02_build_vip-manager_rpm
│   │   │       └── tasks
│   │   │           └── main.yml
│   │   ├── 01_tuning_OS.yml
│   │   ├── 02_hl-client_docker-yandextank.yml
│   │   ├── 01_tuning_OS
│   │   │   ├── 02_zabbix-agent_install
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   └── 01_tuning_OS
│   │   │       ├── handlers
│   │   │       │   └── main.yml
│   │   │       ├── files
│   │   │       │   └── screenrc
│   │   │       └── tasks
│   │   │           └── main.yml
│   │   ├── 05_consul-cluster
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   ├── consul-server.service.j2
│   │   │   │   └── consul-server.json.j2
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── 09_mamonsu
│   │   │   ├── 02_mamonsu_install
│   │   │   │   ├── files
│   │   │   │   │   ├── mamonsu2_sysconfig
│   │   │   │   │   ├── mamonsu_logrotate
│   │   │   │   │   ├── mamonsu2.service
│   │   │   │   │   └── biggest_tables.py
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── templates
│   │   │   │       └── mamonsu_agent.conf.j2
│   │   │   ├── 01_mamonsu_build_and_create_repo
│   │   │   │   ├── templates
│   │   │   │   │   └── mamonsu.conf.j2
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   └── 03_mamonsu_zabbix-postgres
│   │   │       └── tasks
│   │   │           └── main.yml
│   │   ├── 06_pgsql-patroni
│   │   │   ├── 04_vip-manager_install
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   ├── files
│   │   │   │   │   └── vip-manager.service
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── templates
│   │   │   │       └── vip-manager.yml.j2
│   │   │   ├── 02_patroni-server
│   │   │   │   ├── files
│   │   │   │   │   └── patroni.service
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── templates
│   │   │   │       └── patroni.yml.j2
│   │   │   ├── 01_consul-client
│   │   │   │   ├── templates
│   │   │   │   │   └── consul-client.json.j2
│   │   │   │   ├── files
│   │   │   │   │   └── consul-client.service
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   └── 03_pgsql_optimization
│   │   │       └── tasks
│   │   │           └── main.yml
│   │   ├── 10_pacemaker
│   │   │   ├── 02_pacemaker_create_cluster
│   │   │   │   ├── handlers
│   │   │   │   │   └── main.yml
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   └── 01_pacemaker_install
│   │   │       └── tasks
│   │   │           └── main.yml
│   │   ├── 06_pgsql-patroni.yml
│   │   ├── 03_keepalived-haproxy
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── files
│   │   │   │   └── keepalived.service
│   │   │   ├── templates
│   │   │   │   ├── master_keepalived.conf.j2
│   │   │   │   ├── slave_keepalived.conf.j2
│   │   │   │   └── haproxy.cfg.j2
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── 10_pacemaker.yml
│   │   ├── 08_zabbix.yml
│   │   ├── 09_mamonsu.yml
│   │   └── 04_pgconpool.yml
│   ├── 00_all.yml
│   ├── variables
│   └── hosts_vagrant
├── tests
│   ├── locust
│   │   ├── results
│   │   │   ├── LA.png
│   │   │   ├── locust.png
│   │   │   ├── response_code_time.png
│   │   │   └── summary_servers_load.png
│   │   ├── test_resquest.py
│   │   └── locust_zabbix.py
│   └── tank
│       ├── files
│       │   ├── 03_db01.png
│       │   ├── 01_web01.png
│       │   ├── 02_web02_1.png
│       │   ├── 02_web02_2.png
│       │   ├── 03_db02_1.png
│       │   ├── 03_db02_2.png
│       │   ├── 03_db02_3.png
│       │   ├── 04_web01.png
│       │   ├── 04_web02.png
│       │   ├── target
│       │   │   ├── target_dashboard.png
│       │   │   ├── target_custom_screens_all-zabbix.png
│       │   │   ├── target_custom_screens_all_servers.png
│       │   │   └── target_custom_screens_hl-zabbix-LA.png
│       │   └── 03_db02_vacuum.md
│       ├── yandextank
│       │   └── load.yaml
│       ├── 04_web01.md
│       └── 02_web02.md
└── provisioning_proxmox
    ├── roles
    │   ├── 08_zabbix
    │   │   ├── 03_web-optimization
    │   │   │   ├── files
    │   │   │   │   ├── override_nginx.conf
    │   │   │   │   └── override_php-fpm.conf
    │   │   │   ├── handlers
    │   │   │   │   └── main.yml
    │   │   │   └── tasks
    │   │   │       └── main.yml
    │   │   ├── 02_nginx-php-fpm
    │   │   │   ├── handlers
    │   │   │   │   └── main.yml
    │   │   │   ├── templates
    │   │   │   │   └── zabbix.conf.j2
    │   │   │   └── tasks
    │   │   │       └── main.yml
    │   │   ├── 01_zabbix_install
    │   │   │   ├── templates
    │   │   │   │   └── zabbix.conf.php.j2
    │   │   │   └── tasks
    │   │   │       └── main.yml
    │   │   └── 04_zabbix_createDB
    │   │       └── tasks
    │   │           └── main.yml
    │   ├── 07_pgsql-client
    │   │   ├── templates
    │   │   │   └── pgpass.j2
    │   │   └── tasks
    │   │       └── main.yml
    │   ├── 07_pgsql-client.yml
    │   ├── 05_consul-cluster.yml
    │   ├── 03_keepalived-haproxy.yml
    │   ├── 02_hl-client_docker-yandextank
    │   │   ├── handlers
    │   │   │   └── main.yml
    │   │   ├── files
    │   │   │   └── yandextank
    │   │   │       ├── hl-zabbix01.xml
    │   │   │       └── hl-zabbix02.xml
    │   │   ├── tasks
    │   │   │   └── main.yml
    │   │   └── templates
    │   │       └── load.yaml.j2
    │   ├── 01_tuning_OS.yml
    │   ├── 02_hl-client_docker-yandextank.yml
    │   ├── 04_pgconpool
    │   │   ├── 05_install_odyssey
    │   │   │   ├── handlers
    │   │   │   │   └── main.yml
    │   │   │   ├── tasks
    │   │   │   │   └── main.yml
    │   │   │   └── templates
    │   │   │       └── odyssey.conf.j2
    │   │   ├── 06_keepalived
    │   │   │   ├── handlers
    │   │   │   │   └── main.yml
    │   │   │   ├── files
    │   │   │   │   └── keepalived.service
    │   │   │   ├── templates
    │   │   │   │   ├── master_keepalived.conf.j2
    │   │   │   │   └── slave_keepalived.conf.j2
    │   │   │   └── tasks
    │   │   │       └── main.yml
    │   │   ├── 01_preparation_for_rpms_assembly
    │   │   │   ├── templates
    │   │   │   │   └── otus-odyssey-vip-manager.conf.j2
    │   │   │   └── tasks
    │   │   │       └── main.yml
    │   │   ├── 03_build_odyssey_binary
    │   │   │   ├── files
    │   │   │   │   ├── FindPostgreSQL.cmake
    │   │   │   │   └── postgres.h
    │   │   │   └── tasks
    │   │   │       └── main.yml
    │   │   ├── 04_build_odyssey_rpm
    │   │   │   ├── files
    │   │   │   │   └── FindPostgreSQL_rpmbuild.cmake
    │   │   │   └── tasks
    │   │   │       └── main.yml
    │   │   └── 02_build_vip-manager_rpm
    │   │       └── tasks
    │   │           └── main.yml
    │   ├── 01_tuning_OS
    │   │   ├── 02_zabbix-agent_install
    │   │   │   ├── handlers
    │   │   │   │   └── main.yml
    │   │   │   └── tasks
    │   │   │       └── main.yml
    │   │   └── 01_tuning_OS
    │   │       ├── handlers
    │   │       │   └── main.yml
    │   │       └── files
    │   │           ├── hosts.redhat.tmpl
    │   │           └── screenrc
    │   ├── 09_mamonsu
    │   │   ├── 02_mamonsu_install
    │   │   │   ├── files
    │   │   │   │   ├── mamonsu2_sysconfig
    │   │   │   │   ├── mamonsu_logrotate
    │   │   │   │   ├── mamonsu2.service
    │   │   │   │   └── biggest_tables.py
    │   │   │   ├── tasks
    │   │   │   │   └── main.yml
    │   │   │   └── templates
    │   │   │       └── mamonsu_agent.conf.j2
    │   │   ├── 01_mamonsu_build_and_create_repo
    │   │   │   ├── templates
    │   │   │   │   └── mamonsu.conf.j2
    │   │   │   └── tasks
    │   │   │       └── main.yml
    │   │   └── 03_mamonsu_zabbix-postgres
    │   │       └── tasks
    │   │           └── main.yml
    │   ├── 06_pgsql-patroni
    │   │   ├── 04_vip-manager_install
    │   │   │   ├── handlers
    │   │   │   │   └── main.yml
    │   │   │   ├── files
    │   │   │   │   └── vip-manager.service
    │   │   │   ├── tasks
    │   │   │   │   └── main.yml
    │   │   │   └── templates
    │   │   │       └── vip-manager.yml.j2
    │   │   ├── 02_patroni-server
    │   │   │   ├── files
    │   │   │   │   └── patroni.service
    │   │   │   ├── tasks
    │   │   │   │   └── main.yml
    │   │   │   └── templates
    │   │   │       └── patroni.yml.j2
    │   │   └── 01_consul-client
    │   │       ├── templates
    │   │       │   └── consul-client.json.j2
    │   │       ├── files
    │   │       │   └── consul-client.service
    │   │       └── tasks
    │   │           └── main.yml
    │   ├── 10_pacemaker
    │   │   ├── 02_pacemaker_create_cluster
    │   │   │   ├── handlers
    │   │   │   │   └── main.yml
    │   │   │   └── tasks
    │   │   │       └── main.yml
    │   │   └── 01_pacemaker_install
    │   │       └── tasks
    │   │           └── main.yml
    │   ├── 06_pgsql-patroni.yml
    │   ├── 03_keepalived-haproxy
    │   │   ├── handlers
    │   │   │   └── main.yml
    │   │   ├── files
    │   │   │   └── keepalived.service
    │   │   ├── templates
    │   │   │   ├── master_keepalived.conf.j2
    │   │   │   ├── slave_keepalived.conf.j2
    │   │   │   └── haproxy.cfg.j2
    │   │   └── tasks
    │   │       └── main.yml
    │   ├── 10_pacemaker.yml
    │   ├── 08_zabbix.yml
    │   ├── 09_mamonsu.yml
    │   ├── 04_pgconpool.yml
    │   └── 05_consul-cluster
    │       ├── templates
    │       │   ├── consul-server.service.j2
    │       │   └── consul-server.json.j2
    │       └── tasks
    │           └── main.yml
    ├── ansible.cfg
    ├── 00_all.yml
    ├── hosts
    ├── hosts_ip
    └── variables

/.gitignore:
--------------------------------------------------------------------------------
# Ignore any file named token.txt
token.txt
--------------------------------------------------------------------------------
/scheme/consul.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/scheme/consul.png
--------------------------------------------------------------------------------
/scheme/scheme_current.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/scheme/scheme_current.png
--------------------------------------------------------------------------------
/provisioning/roles/08_zabbix/03_web-optimization/files/override_nginx.conf:
--------------------------------------------------------------------------------
[Service]
LimitNOFILE=16384
--------------------------------------------------------------------------------
/provisioning/roles/08_zabbix/03_web-optimization/files/override_php-fpm.conf:
--------------------------------------------------------------------------------
[Service]
LimitNOFILE=16384
--------------------------------------------------------------------------------
/tests/locust/results/LA.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/locust/results/LA.png
--------------------------------------------------------------------------------
/tests/tank/files/03_db01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/tank/files/03_db01.png
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/08_zabbix/03_web-optimization/files/override_nginx.conf:
--------------------------------------------------------------------------------
[Service]
LimitNOFILE=16384
--------------------------------------------------------------------------------
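The override_*.conf files above are systemd drop-in snippets that raise the nginx and php-fpm open-file limit to 16384. The 03_web-optimization tasks are not reproduced in this dump, but drop-ins of this kind land under /etc/systemd/system/&lt;service&gt;.service.d/ (an assumption about the install path), and they only take effect after a daemon-reload plus service restart — which is exactly what the role's handlers, shown further down, perform.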
/tests/locust/results/locust.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/locust/results/locust.png
--------------------------------------------------------------------------------
/tests/tank/files/01_web01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/tank/files/01_web01.png
--------------------------------------------------------------------------------
/tests/tank/files/02_web02_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/tank/files/02_web02_1.png
--------------------------------------------------------------------------------
/tests/tank/files/02_web02_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/tank/files/02_web02_2.png
--------------------------------------------------------------------------------
/tests/tank/files/03_db02_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/tank/files/03_db02_1.png
--------------------------------------------------------------------------------
/tests/tank/files/03_db02_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/tank/files/03_db02_2.png
--------------------------------------------------------------------------------
/tests/tank/files/03_db02_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/tank/files/03_db02_3.png
--------------------------------------------------------------------------------
/tests/tank/files/04_web01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/tank/files/04_web01.png
--------------------------------------------------------------------------------
/tests/tank/files/04_web02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/tank/files/04_web02.png
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/08_zabbix/03_web-optimization/files/override_php-fpm.conf:
--------------------------------------------------------------------------------
[Service]
LimitNOFILE=16384
--------------------------------------------------------------------------------
/provisioning/roles/07_pgsql-client/templates/pgpass.j2:
--------------------------------------------------------------------------------
*:*:*:postgres:{{ PASS_POSTGRES_FOR_DB }}
*:*:*:zabbix:{{ PASS_ZAB_FOR_DB }}
--------------------------------------------------------------------------------
/tests/locust/results/response_code_time.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/locust/results/response_code_time.png
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/07_pgsql-client/templates/pgpass.j2:
--------------------------------------------------------------------------------
*:*:*:postgres:{{ PASS_POSTGRES_FOR_DB }}
*:*:*:zabbix:{{ PASS_ZAB_FOR_DB }}
--------------------------------------------------------------------------------
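pgpass.j2 renders the standard libpq ~/.pgpass format (host:port:database:user:password, with * as a wildcard); the 07_pgsql-client role, whose tasks appear later in this dump, installs it to /root/.pgpass with mode 0600. Any libpq-based client can then authenticate without an explicit password. A minimal sketch with psycopg2 (the role installs python2-psycopg2; the host and database names below are placeholders, not values from the repo):

    import psycopg2

    # No password argument: libpq falls back to /root/.pgpass and matches the
    # *:*:*:zabbix:<password> line rendered from pgpass.j2.
    # "pgsql-vip.hl.local" and "zabbix" are assumed names for illustration.
    conn = psycopg2.connect(host="pgsql-vip.hl.local", user="zabbix", dbname="zabbix")
    with conn.cursor() as cur:
        cur.execute("SELECT version()")
        print(cur.fetchone()[0])
    conn.close()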
/tests/locust/results/summary_servers_load.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/locust/results/summary_servers_load.png
--------------------------------------------------------------------------------
/tests/tank/files/target/target_dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/tank/files/target/target_dashboard.png
--------------------------------------------------------------------------------
/provisioning/roles/07_pgsql-client.yml:
--------------------------------------------------------------------------------
- name: 07_pgsql-client
  hosts:
    - web
  become: true
  roles:
    - 07_pgsql-client
--------------------------------------------------------------------------------
/provisioning_proxmox/ansible.cfg:
--------------------------------------------------------------------------------
[defaults]
host_key_checking = false
#inventory = hosts
inventory = hosts_ip
#roles_path = roles
--------------------------------------------------------------------------------
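ansible.cfg points the Proxmox variant at the hosts_ip inventory (a hostname-based hosts file sits alongside it, commented out). Judging by the playbooks in this dump, both variants expect the same group layout: web (with individual hosts web1 and web2), client, balancer, dcs, database, and pg_conpool (with pg_conpool1 as the package-build host). The Vagrant variant keeps these groups in hosts_vagrant, and shared values live in the variables file.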
/provisioning/roles/05_consul-cluster.yml:
--------------------------------------------------------------------------------
- name: 05_consul-cluster
  hosts:
    - dcs
  become: true
  roles:
    - 05_consul-cluster
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/07_pgsql-client.yml:
--------------------------------------------------------------------------------
- name: 07_pgsql-client
  hosts:
    - web
  become: true
  roles:
    - 07_pgsql-client
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/05_consul-cluster.yml:
--------------------------------------------------------------------------------
- name: 05_consul-cluster
  hosts:
    - dcs
  become: true
  roles:
    - 05_consul-cluster
--------------------------------------------------------------------------------
/provisioning/roles/02_hl-client_docker-yandextank/handlers/main.yml:
--------------------------------------------------------------------------------
- name: docker restart
  systemd:
    name: docker
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning/roles/03_keepalived-haproxy.yml:
--------------------------------------------------------------------------------
- name: 03_keepalived-haproxy
  hosts:
    - balancer
  become: true
  roles:
    - 03_keepalived-haproxy
--------------------------------------------------------------------------------
/tests/tank/files/target/target_custom_screens_all-zabbix.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/tank/files/target/target_custom_screens_all-zabbix.png
--------------------------------------------------------------------------------
/tests/tank/files/target/target_custom_screens_all_servers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/tank/files/target/target_custom_screens_all_servers.png
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/03_keepalived-haproxy.yml:
--------------------------------------------------------------------------------
- name: 03_keepalived-haproxy
  hosts:
    - balancer
  become: true
  roles:
    - 03_keepalived-haproxy
--------------------------------------------------------------------------------
/tests/tank/files/target/target_custom_screens_hl-zabbix-LA.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timlok/otus-highload/HEAD/tests/tank/files/target/target_custom_screens_hl-zabbix-LA.png
--------------------------------------------------------------------------------
/provisioning/roles/04_pgconpool/05_install_odyssey/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: odyssey restart
  systemd:
    name: odyssey
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/02_hl-client_docker-yandextank/handlers/main.yml:
--------------------------------------------------------------------------------
- name: docker restart
  systemd:
    name: docker
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning/roles/01_tuning_OS.yml:
--------------------------------------------------------------------------------
- name: 01_tuning_OS
  hosts: all
  become: true
  roles:
    - 01_tuning_OS/01_tuning_OS
    - 01_tuning_OS/02_zabbix-agent_install
--------------------------------------------------------------------------------
/provisioning/roles/02_hl-client_docker-yandextank.yml:
--------------------------------------------------------------------------------
- name: 02_hl-client_docker-yandextank
  hosts: client
  become: true
  roles:
    - 02_hl-client_docker-yandextank
--------------------------------------------------------------------------------
/provisioning/roles/08_zabbix/01_zabbix_install/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: zabbix-server stopped
  systemd:
    name: zabbix-server
    state: stopped
    enabled: no
--------------------------------------------------------------------------------
/tests/locust/test_resquest.py:
--------------------------------------------------------------------------------
import requests as r

# One-off smoke test: request the Zabbix screens page and print the HTTP status.
base_url = "http://10.51.21.56:8080"
response = r.post(base_url + "/zabbix/screens.php?elementid=23")
print(response.status_code)
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/01_tuning_OS.yml:
--------------------------------------------------------------------------------
- name: 01_tuning_OS
  hosts: all
  become: true
  roles:
    - 01_tuning_OS/01_tuning_OS
    - 01_tuning_OS/02_zabbix-agent_install
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/02_hl-client_docker-yandextank.yml:
--------------------------------------------------------------------------------
- name: 02_hl-client_docker-yandextank
  hosts: client
  become: true
  roles:
    - 02_hl-client_docker-yandextank
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/04_pgconpool/05_install_odyssey/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: odyssey restart
  systemd:
    name: odyssey
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning/roles/01_tuning_OS/02_zabbix-agent_install/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: zabbix-agent restart
  service:
    name: zabbix-agent
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/01_tuning_OS/02_zabbix-agent_install/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: zabbix-agent restart
  service:
    name: zabbix-agent
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning/roles/04_pgconpool/06_keepalived/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: keepalived restart
  systemd:
    daemon_reload: yes
    name: keepalived
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning/roles/05_consul-cluster/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: consul-server restarted
  systemd:
    daemon_reload: yes
    name: consul-server
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning/roles/09_mamonsu/02_mamonsu_install/files/mamonsu2_sysconfig:
--------------------------------------------------------------------------------
CONFIG=/etc/mamonsu/agent.conf
PIDDIR=/var/run/mamonsu
PIDFILE=/var/run/mamonsu/mamonsu2.pid
LOGFILE=/var/log/mamonsu/agent.log
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/04_pgconpool/06_keepalived/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: keepalived restart
  systemd:
    daemon_reload: yes
    name: keepalived
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/09_mamonsu/02_mamonsu_install/files/mamonsu2_sysconfig:
--------------------------------------------------------------------------------
CONFIG=/etc/mamonsu/agent.conf
PIDDIR=/var/run/mamonsu
PIDFILE=/var/run/mamonsu/mamonsu2.pid
LOGFILE=/var/log/mamonsu/agent.log
--------------------------------------------------------------------------------
/provisioning/roles/06_pgsql-patroni/04_vip-manager_install/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: vip-manager start and enable
  systemd:
    name: vip-manager
    state: started
    daemon_reload: yes
    enabled: yes
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/06_pgsql-patroni/04_vip-manager_install/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: vip-manager start and enable
  systemd:
    name: vip-manager
    state: started
    daemon_reload: yes
    enabled: yes
--------------------------------------------------------------------------------
/provisioning/roles/01_tuning_OS/01_tuning_OS/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: system restart
  reboot:
    reboot_timeout: 120

- name: chronyd start and enable
  systemd:
    name: chronyd
    state: started
    enabled: yes
--------------------------------------------------------------------------------
/provisioning/roles/02_hl-client_docker-yandextank/files/yandextank/hl-zabbix01.xml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/provisioning/roles/02_hl-client_docker-yandextank/files/yandextank/hl-zabbix02.xml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/provisioning_proxmox/roles/02_hl-client_docker-yandextank/files/yandextank/hl-zabbix01.xml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/provisioning_proxmox/roles/02_hl-client_docker-yandextank/files/yandextank/hl-zabbix02.xml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/provisioning/roles/08_zabbix/02_nginx-php-fpm/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: php-fpm restart
  systemd:
    name: php-fpm
    state: restarted
    enabled: yes

- name: nginx restart
  systemd:
    name: nginx
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/08_zabbix/02_nginx-php-fpm/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: php-fpm restart
  systemd:
    name: php-fpm
    state: restarted
    enabled: yes

- name: nginx restart
  systemd:
    name: nginx
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning/roles/10_pacemaker/02_pacemaker_create_cluster/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: corosync enable
  systemd:
    name: corosync
    state: started
    enabled: yes

- name: pacemaker enable
  systemd:
    name: pacemaker
    state: started
    enabled: yes
--------------------------------------------------------------------------------
/provisioning/roles/06_pgsql-patroni.yml:
--------------------------------------------------------------------------------
- name: 06_pgsql-patroni
  hosts:
    - database
  become: true
  roles:
    - 06_pgsql-patroni/01_consul-client
    - 06_pgsql-patroni/02_patroni-server
    - 06_pgsql-patroni/03_pgsql_optimization
    - 06_pgsql-patroni/04_vip-manager_install
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/10_pacemaker/02_pacemaker_create_cluster/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: corosync enable
  systemd:
    name: corosync
    state: started
    enabled: yes

- name: pacemaker enable
  systemd:
    name: pacemaker
    state: started
    enabled: yes
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/06_pgsql-patroni.yml:
--------------------------------------------------------------------------------
- name: 06_pgsql-patroni
  hosts:
    - database
  become: true
  roles:
    - 06_pgsql-patroni/01_consul-client
    - 06_pgsql-patroni/02_patroni-server
    - 06_pgsql-patroni/03_pgsql_optimization
    - 06_pgsql-patroni/04_vip-manager_install
--------------------------------------------------------------------------------
/provisioning/roles/03_keepalived-haproxy/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: keepalived restart
  systemd:
    daemon_reload: yes
    name: keepalived
    state: restarted
    enabled: yes

- name: haproxy restart
  systemd:
    name: haproxy
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning/roles/10_pacemaker.yml:
--------------------------------------------------------------------------------
- name: 10_pacemaker_install
  hosts:
    - web
  become: true
  roles:
    - 10_pacemaker/01_pacemaker_install

- name: 10_pacemaker_create_cluster
  hosts:
    - web1
  become: true
  roles:
    - 10_pacemaker/02_pacemaker_create_cluster
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/03_keepalived-haproxy/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: keepalived restart
  systemd:
    daemon_reload: yes
    name: keepalived
    state: restarted
    enabled: yes

- name: haproxy restart
  systemd:
    name: haproxy
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/10_pacemaker.yml:
--------------------------------------------------------------------------------
- name: 10_pacemaker_install
  hosts:
    - web
  become: true
  roles:
    - 10_pacemaker/01_pacemaker_install

- name: 10_pacemaker_create_cluster
  hosts:
    - web1
  become: true
  roles:
    - 10_pacemaker/02_pacemaker_create_cluster
--------------------------------------------------------------------------------
/provisioning/roles/09_mamonsu/02_mamonsu_install/files/mamonsu_logrotate:
--------------------------------------------------------------------------------
/var/log/mamonsu/agent.log {
    daily
    rotate 7
    compress
    delaycompress
    missingok
    notifempty
    create 0640 mamonsu mamonsu
    sharedscripts
    postrotate
        /usr/bin/systemctl restart mamonsu2.service
    endscript
}
--------------------------------------------------------------------------------
/provisioning/roles/08_zabbix.yml:
--------------------------------------------------------------------------------
- name: 08_zabbix_install
  hosts:
    - web
  become: true
  roles:
    - 08_zabbix/01_zabbix_install
    - 08_zabbix/02_nginx-php-fpm
    - 08_zabbix/03_web-optimization

- name: 08_zabbix_create_DB
  hosts:
    - web1
  become: true
  roles:
    - 08_zabbix/04_zabbix_createDB
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/09_mamonsu/02_mamonsu_install/files/mamonsu_logrotate:
--------------------------------------------------------------------------------
/var/log/mamonsu/agent.log {
    daily
    rotate 7
    compress
    delaycompress
    missingok
    notifempty
    create 0640 mamonsu mamonsu
    sharedscripts
    postrotate
        /usr/bin/systemctl restart mamonsu2.service
    endscript
}
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/08_zabbix.yml:
--------------------------------------------------------------------------------
- name: 08_zabbix_install
  hosts:
    - web
  become: true
  roles:
    - 08_zabbix/01_zabbix_install
    - 08_zabbix/02_nginx-php-fpm
    - 08_zabbix/03_web-optimization

- name: 08_zabbix_create_DB
  hosts:
    - web1
  become: true
  roles:
    - 08_zabbix/04_zabbix_createDB
--------------------------------------------------------------------------------
/provisioning/roles/06_pgsql-patroni/04_vip-manager_install/files/vip-manager.service:
--------------------------------------------------------------------------------
[Unit]
Description=Manages Virtual IP for Patroni
Before=patroni.service

[Service]
Type=simple

ExecStart=/usr/bin/vip-manager -config=/etc/default/vip-manager.yml

#Restart=on-failure
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/provisioning/roles/06_pgsql-patroni/02_patroni-server/files/patroni.service:
--------------------------------------------------------------------------------
[Unit]
Description=Runners to orchestrate a high-availability PostgreSQL
After=syslog.target network.target
[Service]
Type=simple
User=postgres
Group=postgres
ExecStart=/bin/patroni /etc/patroni.yml
KillMode=process
TimeoutSec=30
Restart=no
[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/06_pgsql-patroni/04_vip-manager_install/files/vip-manager.service:
--------------------------------------------------------------------------------
[Unit]
Description=Manages Virtual IP for Patroni
Before=patroni.service

[Service]
Type=simple

ExecStart=/usr/bin/vip-manager -config=/etc/default/vip-manager.yml

#Restart=on-failure
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
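Worth noting in the units above: vip-manager.service declares Before=patroni.service and Restart=always with RestartSec=10, so the VIP watcher starts ahead of Patroni and is revived if it crashes, while patroni.service deliberately runs with Restart=no — presumably so that a dead Patroni process results in a leader change in the DCS (and therefore a VIP move via vip-manager) rather than a silent local restart.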
/provisioning_proxmox/roles/06_pgsql-patroni/02_patroni-server/files/patroni.service:
--------------------------------------------------------------------------------
[Unit]
Description=Runners to orchestrate a high-availability PostgreSQL
After=syslog.target network.target
[Service]
Type=simple
User=postgres
Group=postgres
ExecStart=/bin/patroni /etc/patroni.yml
KillMode=process
TimeoutSec=30
Restart=no
[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/provisioning/roles/10_pacemaker/01_pacemaker_install/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: install pcs, pacemaker, corosync
  yum:
    name:
      - pcs
      - pacemaker
      - corosync
    state: latest

- name: change password for hacluster user
  shell:
    echo {{ PASS_HACLUSTER_USER }} | passwd --stdin hacluster

- name: start pcsd.service
  systemd:
    name: pcsd
    state: started
    enabled: yes
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/10_pacemaker/01_pacemaker_install/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: install pcs, pacemaker, corosync
  yum:
    name:
      - pcs
      - pacemaker
      - corosync
    state: latest

- name: change password for hacluster user
  shell:
    echo {{ PASS_HACLUSTER_USER }} | passwd --stdin hacluster

- name: start pcsd.service
  systemd:
    name: pcsd
    state: started
    enabled: yes
--------------------------------------------------------------------------------
/provisioning/roles/09_mamonsu.yml:
--------------------------------------------------------------------------------
- name: 09_mamonsu_build_and_create_repo
  hosts:
    - web2
  become: true
  roles:
    - 09_mamonsu/01_mamonsu_build_and_create_repo

- name: 09_mamonsu_install
  hosts:
    - web
  become: true
  roles:
    - 09_mamonsu/02_mamonsu_install

- name: 09_mamonsu_zabbix-postgres
  hosts:
    - web1
  become: true
  roles:
    - 09_mamonsu/03_mamonsu_zabbix-postgres
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/09_mamonsu.yml:
--------------------------------------------------------------------------------
- name: 09_mamonsu_build_and_create_repo
  hosts:
    - web2
  become: true
  roles:
    - 09_mamonsu/01_mamonsu_build_and_create_repo

- name: 09_mamonsu_install
  hosts:
    - web
  become: true
  roles:
    - 09_mamonsu/02_mamonsu_install

- name: 09_mamonsu_zabbix-postgres
  hosts:
    - web1
  become: true
  roles:
    - 09_mamonsu/03_mamonsu_zabbix-postgres
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/01_tuning_OS/01_tuning_OS/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: system restart
  reboot:
    reboot_timeout: 120

- name: cloud-init restart
  systemd:
    name: cloud-init
    state: restarted

- name: chronyd start and enable
  systemd:
    name: chronyd
    state: started
    enabled: yes

- name: qemu-guest-agent restart
  systemd:
    name: qemu-guest-agent
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning/00_all.yml:
--------------------------------------------------------------------------------
---
- import_playbook: roles/01_tuning_OS.yml
- import_playbook: roles/02_hl-client_docker-yandextank.yml
- import_playbook: roles/03_keepalived-haproxy.yml
- import_playbook: roles/04_pgconpool.yml
- import_playbook: roles/05_consul-cluster.yml
- import_playbook: roles/06_pgsql-patroni.yml
- import_playbook: roles/07_pgsql-client.yml
- import_playbook: roles/08_zabbix.yml
- import_playbook: roles/09_mamonsu.yml
- import_playbook: roles/10_pacemaker.yml
--------------------------------------------------------------------------------
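00_all.yml simply imports the ten numbered playbooks in dependency order, so a full converge is one run of ansible-playbook 00_all.yml from the provisioning_proxmox directory (ansible.cfg already selects the hosts_ip inventory); in the Vagrant variant the inventory has to be passed explicitly, e.g. ansible-playbook -i hosts_vagrant 00_all.yml from provisioning. Any numbered playbook can also be replayed on its own the same way.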
/provisioning/roles/04_pgconpool.yml:
--------------------------------------------------------------------------------
- name: 04_pgconpool_build_packages
  hosts:
    - pg_conpool1
  become: true
  roles:
    - 04_pgconpool/01_preparation_for_rpms_assembly
    - 04_pgconpool/02_build_vip-manager_rpm
    #- 04_pgconpool/03_build_odyssey_binary
    - 04_pgconpool/04_build_odyssey_rpm

- name: 04_pgconpool_install_odyssey
  hosts:
    - pg_conpool
  become: true
  roles:
    - 04_pgconpool/05_install_odyssey
    - 04_pgconpool/06_keepalived
--------------------------------------------------------------------------------
/provisioning_proxmox/00_all.yml:
--------------------------------------------------------------------------------
---
- import_playbook: roles/01_tuning_OS.yml
- import_playbook: roles/02_hl-client_docker-yandextank.yml
- import_playbook: roles/03_keepalived-haproxy.yml
- import_playbook: roles/04_pgconpool.yml
- import_playbook: roles/05_consul-cluster.yml
- import_playbook: roles/06_pgsql-patroni.yml
- import_playbook: roles/07_pgsql-client.yml
- import_playbook: roles/08_zabbix.yml
- import_playbook: roles/09_mamonsu.yml
- import_playbook: roles/10_pacemaker.yml
--------------------------------------------------------------------------------
/provisioning/roles/09_mamonsu/01_mamonsu_build_and_create_repo/templates/mamonsu.conf.j2:
--------------------------------------------------------------------------------
server {
    listen 8081;
    #server_name {{ HOST_NAME_WEB_VIP }}.{{ FAKE_DOMAIN }};
    # return 301 https://$server_name$request_uri; # enforce https

    access_log /var/log/nginx/mamonsu.access.log;
    error_log /var/log/nginx/mamonsu.error.log;

    location / {
        root /usr/share/nginx/html;
        # index index.php index.html index.htm;
        autoindex on;
    }

}
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/04_pgconpool.yml:
--------------------------------------------------------------------------------
- name: 04_pgconpool_build_packages
  hosts:
    - pg_conpool1
  become: true
  roles:
    - 04_pgconpool/01_preparation_for_rpms_assembly
    - 04_pgconpool/02_build_vip-manager_rpm
    #- 04_pgconpool/03_build_odyssey_binary
    - 04_pgconpool/04_build_odyssey_rpm

- name: 04_pgconpool_install_odyssey
  hosts:
    - pg_conpool
  become: true
  roles:
    - 04_pgconpool/05_install_odyssey
    - 04_pgconpool/06_keepalived
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/09_mamonsu/01_mamonsu_build_and_create_repo/templates/mamonsu.conf.j2:
--------------------------------------------------------------------------------
server {
    listen 8081;
    #server_name {{ HOST_NAME_WEB_VIP }}.{{ FAKE_DOMAIN }};
    # return 301 https://$server_name$request_uri; # enforce https

    access_log /var/log/nginx/mamonsu.access.log;
    error_log /var/log/nginx/mamonsu.error.log;

    location / {
        root /usr/share/nginx/html;
        # index index.php index.html index.htm;
        autoindex on;
    }

}
--------------------------------------------------------------------------------
/provisioning/roles/04_pgconpool/01_preparation_for_rpms_assembly/templates/otus-odyssey-vip-manager.conf.j2:
--------------------------------------------------------------------------------
server {
    listen 8081;
    #server_name {{ HOST_NAME_WEB_VIP }}.{{ FAKE_DOMAIN }};
    # return 301 https://$server_name$request_uri; # enforce https

    access_log /var/log/nginx/otus-odyssey-vip-manager.access.log;
    error_log /var/log/nginx/otus-odyssey-vip-manager.error.log;

    location / {
        root /usr/share/nginx/html;
        # index index.php index.html index.htm;
        autoindex on;
    }

}
--------------------------------------------------------------------------------
/provisioning/roles/08_zabbix/03_web-optimization/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: memcached restart
  systemd:
    name: memcached
    state: restarted
    enabled: yes

- name: redis restart
  systemd:
    name: redis
    state: restarted
    enabled: yes

- name: php-fpm restart
  systemd:
    name: php-fpm
    daemon_reload: yes
    state: restarted
    enabled: yes

- name: nginx restart
  systemd:
    name: nginx
    daemon_reload: yes
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/04_pgconpool/01_preparation_for_rpms_assembly/templates/otus-odyssey-vip-manager.conf.j2:
--------------------------------------------------------------------------------
server {
    listen 8081;
    #server_name {{ HOST_NAME_WEB_VIP }}.{{ FAKE_DOMAIN }};
    # return 301 https://$server_name$request_uri; # enforce https

    access_log /var/log/nginx/otus-odyssey-vip-manager.access.log;
    error_log /var/log/nginx/otus-odyssey-vip-manager.error.log;

    location / {
        root /usr/share/nginx/html;
        # index index.php index.html index.htm;
        autoindex on;
    }

}
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/08_zabbix/03_web-optimization/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: memcached restart
  systemd:
    name: memcached
    state: restarted
    enabled: yes

- name: redis restart
  systemd:
    name: redis
    state: restarted
    enabled: yes

- name: php-fpm restart
  systemd:
    name: php-fpm
    daemon_reload: yes
    state: restarted
    enabled: yes

- name: nginx restart
  systemd:
    name: nginx
    daemon_reload: yes
    state: restarted
    enabled: yes
--------------------------------------------------------------------------------
/provisioning/roles/03_keepalived-haproxy/files/keepalived.service:
--------------------------------------------------------------------------------
[Unit]
Description=LVS and VRRP High Availability Monitor
#After=syslog.target network-online.target
After=syslog.target network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=forking
PIDFile=/var/run/keepalived.pid
KillMode=process
EnvironmentFile=-/etc/sysconfig/keepalived
ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/provisioning/roles/04_pgconpool/06_keepalived/files/keepalived.service:
--------------------------------------------------------------------------------
[Unit]
Description=LVS and VRRP High Availability Monitor
#After=syslog.target network-online.target
After=syslog.target network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=forking
PIDFile=/var/run/keepalived.pid
KillMode=process
EnvironmentFile=-/etc/sysconfig/keepalived
ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/03_keepalived-haproxy/files/keepalived.service:
--------------------------------------------------------------------------------
[Unit]
Description=LVS and VRRP High Availability Monitor
#After=syslog.target network-online.target
After=syslog.target network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=forking
PIDFile=/var/run/keepalived.pid
KillMode=process
EnvironmentFile=-/etc/sysconfig/keepalived
ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/04_pgconpool/06_keepalived/files/keepalived.service:
--------------------------------------------------------------------------------
[Unit]
Description=LVS and VRRP High Availability Monitor
#After=syslog.target network-online.target
After=syslog.target network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=forking
PIDFile=/var/run/keepalived.pid
KillMode=process
EnvironmentFile=-/etc/sysconfig/keepalived
ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/provisioning/roles/06_pgsql-patroni/01_consul-client/templates/consul-client.json.j2:
--------------------------------------------------------------------------------
{
  "advertise_addr": "{{ ansible_eth1.ipv4.address }}",
  "datacenter": "{{ CONSUL_DATACENTER }}",
  "data_dir": "/var/lib/consul",
  "domain": "consul",
  "enable_script_checks": true,
  "enable_syslog": true,
  "log_level": "INFO",
  "encrypt": "{{ CONSUL_KEY }}",
  "leave_on_terminate": true,
  "rejoin_after_leave": true,
  "server": false,
  "start_join": [
    "{{ HOST_NAME_DCS_01 }}",
    "{{ HOST_NAME_DCS_02 }}",
    "{{ HOST_NAME_DCS_03 }}"
  ]
}
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/06_pgsql-patroni/01_consul-client/templates/consul-client.json.j2:
--------------------------------------------------------------------------------
{
  "advertise_addr": "{{ ansible_eth0.ipv4.address }}",
  "datacenter": "{{ CONSUL_DATACENTER }}",
  "data_dir": "/var/lib/consul",
  "domain": "consul",
  "enable_script_checks": true,
  "enable_syslog": true,
  "log_level": "INFO",
  "encrypt": "{{ CONSUL_KEY }}",
  "leave_on_terminate": true,
  "rejoin_after_leave": true,
  "server": false,
  "start_join": [
    "{{ HOST_NAME_DCS_01 }}",
    "{{ HOST_NAME_DCS_02 }}",
    "{{ HOST_NAME_DCS_03 }}"
  ]
}
--------------------------------------------------------------------------------
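The consul-client config above joins each database node to the dcs cluster as a non-server agent; Patroni then keeps its cluster state (including the leader key) in this Consul KV store, and vip-manager — installed by the next sub-role — watches that key to decide where the database VIP lives. A stdlib-only sketch of inspecting the same state through the local agent's HTTP API; the scope name "main" and the service/ prefix are assumptions here (the real values come from patroni.yml.j2 and vip-manager.yml.j2, whose contents are not included in this dump):

    import json
    import urllib.request

    CONSUL = "http://127.0.0.1:8500"  # local consul agent, default HTTP port
    SCOPE = "main"                    # assumed Patroni scope name

    # Patroni's Consul backend keeps cluster state under service/<scope>/;
    # ?raw returns the stored value (the current leader) without JSON wrapping.
    with urllib.request.urlopen(f"{CONSUL}/v1/kv/service/{SCOPE}/leader?raw") as resp:
        print("leader:", resp.read().decode())

    # Members seen by this agent: the dcs servers plus the clients that
    # joined via the start_join list above.
    with urllib.request.urlopen(f"{CONSUL}/v1/agent/members") as resp:
        for member in json.load(resp):
            print(member["Name"], member["Addr"])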
/provisioning/roles/04_pgconpool/03_build_odyssey_binary/files/FindPostgreSQL.cmake:
--------------------------------------------------------------------------------
# - Try to find the PostgreSQL libraries
#
#  POSTGRESQL_INCLUDE_DIR - PostgreSQL include directory
#  POSTGRESQL_LIBRARY - PostgreSQL library

include(FindPackageHandleStandardArgs)

find_path(
    POSTGRESQL_INCLUDE_DIR
    NAMES common/base64.h common/saslprep.h common/scram-common.h
    PATH_SUFFIXES pgsql-11/include/server
)

find_library(
    POSTGRESQL_LIBRARY
    NAMES libpq.a
    PATH_SUFFIXES pgsql-11/lib/
)

find_package_handle_standard_args(
    POSTGRESQL
    REQUIRED_VARS POSTGRESQL_LIBRARY POSTGRESQL_INCLUDE_DIR
)
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/04_pgconpool/03_build_odyssey_binary/files/FindPostgreSQL.cmake:
--------------------------------------------------------------------------------
# - Try to find the PostgreSQL libraries
#
#  POSTGRESQL_INCLUDE_DIR - PostgreSQL include directory
#  POSTGRESQL_LIBRARY - PostgreSQL library

include(FindPackageHandleStandardArgs)

find_path(
    POSTGRESQL_INCLUDE_DIR
    NAMES common/base64.h common/saslprep.h common/scram-common.h
    PATH_SUFFIXES pgsql-11/include/server
)

find_library(
    POSTGRESQL_LIBRARY
    NAMES libpq.a
    PATH_SUFFIXES pgsql-11/lib/
)

find_package_handle_standard_args(
    POSTGRESQL
    REQUIRED_VARS POSTGRESQL_LIBRARY POSTGRESQL_INCLUDE_DIR
)
--------------------------------------------------------------------------------
/provisioning/roles/05_consul-cluster/templates/consul-server.service.j2:
--------------------------------------------------------------------------------
# Consul-server systemd service unit file
[Unit]
Description=Consul-server Service Discovery Agent
Documentation=https://www.consul.io/
After=network-online.target
Wants=network-online.target

[Service]
#Environment=GOMAXPROCS=2
#PIDFile=/run/consul.pid
Type=simple
User=consul
Group=consul
ExecStart=/usr/local/bin/consul agent \
    -node={{ ansible_hostname }} \
    -config-dir=/etc/consul.d

ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGINT
TimeoutStopSec=5
Restart=on-failure
RestartSec=5
#SyslogIdentifier=consul-server

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/05_consul-cluster/templates/consul-server.service.j2:
--------------------------------------------------------------------------------
# Consul-server systemd service unit file
[Unit]
Description=Consul-server Service Discovery Agent
Documentation=https://www.consul.io/
After=network-online.target
Wants=network-online.target

[Service]
#Environment=GOMAXPROCS=2
#PIDFile=/run/consul.pid
Type=simple
User=consul
Group=consul
ExecStart=/usr/local/bin/consul agent \
    -node={{ ansible_hostname }} \
    -config-dir=/etc/consul.d

ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGINT
TimeoutStopSec=5
Restart=on-failure
RestartSec=5
#SyslogIdentifier=consul-server

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/provisioning/roles/06_pgsql-patroni/01_consul-client/files/consul-client.service:
--------------------------------------------------------------------------------
# Consul-client systemd service unit file
[Unit]
Description=Consul-client Service Discovery Agent
Documentation=https://www.consul.io/
After=network-online.target
Wants=network-online.target

[Service]
#Environment=GOMAXPROCS=2
#PIDFile=/run/consul.pid
Type=simple
User=consul
Group=consul
ExecStart=/usr/local/bin/consul agent \
    -client=0.0.0.0 \
    -bind=0.0.0.0 \
    -config-dir=/etc/consul.d

ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGINT
TimeoutStopSec=5
Restart=on-failure

#SyslogIdentifier=consul-client

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/06_pgsql-patroni/01_consul-client/files/consul-client.service:
--------------------------------------------------------------------------------
# Consul-client systemd service unit file
[Unit]
Description=Consul-client Service Discovery Agent
Documentation=https://www.consul.io/
After=network-online.target
Wants=network-online.target

[Service]
#Environment=GOMAXPROCS=2
#PIDFile=/run/consul.pid
Type=simple
User=consul
Group=consul
ExecStart=/usr/local/bin/consul agent \
    -client=0.0.0.0 \
    -bind=0.0.0.0 \
    -config-dir=/etc/consul.d

ExecReload=/bin/kill -HUP $MAINPID
KillSignal=SIGINT
TimeoutStopSec=5
Restart=on-failure

#SyslogIdentifier=consul-client

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/provisioning/roles/04_pgconpool/05_install_odyssey/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: add otus-odyssey-vip-manager repository
  yum_repository:
    name: otus-odyssey-vip-manager
    description: Repository with odyssey and vip-manager rpm packages
    baseurl:
      - http://{{ HOST_NAME_PG_CON_POOL_01 }}.{{ FAKE_DOMAIN }}:8081/repo
      - http://{{ HOST_IP_PG_CON_POOL_01 }}:8081/repo
    gpgcheck: no
    enabled: yes

- name: install odyssey from repo
  yum:
    name: odyssey
    state: latest

- name: put odyssey.conf template
  template:
    src: odyssey.conf.j2
    dest: /etc/odyssey/odyssey.conf
    owner: root
    group: root
    mode: '0644'
  notify:
    - odyssey restart
--------------------------------------------------------------------------------
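This repository definition closes the loop on the 04_pgconpool.yml play: pg_conpool1 builds the vip-manager and odyssey rpms and serves them from the nginx vhost defined in otus-odyssey-vip-manager.conf.j2 (port 8081, autoindex on), and the two baseurl entries let the pool nodes fetch packages by FQDN or, failing DNS, by IP.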
/provisioning_proxmox/roles/04_pgconpool/05_install_odyssey/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: add otus-odyssey-vip-manager repository
  yum_repository:
    name: otus-odyssey-vip-manager
    description: Repository with odyssey and vip-manager rpm packages
    baseurl:
      - http://{{ HOST_NAME_PG_CON_POOL_01 }}.{{ FAKE_DOMAIN }}:8081/repo
      - http://{{ HOST_IP_PG_CON_POOL_01 }}:8081/repo
    gpgcheck: no
    enabled: yes

- name: install odyssey from repo
  yum:
    name: odyssey
    state: latest

- name: put odyssey.conf template
  template:
    src: odyssey.conf.j2
    dest: /etc/odyssey/odyssey.conf
    owner: root
    group: root
    mode: '0644'
  notify:
    - odyssey restart
--------------------------------------------------------------------------------
/provisioning/roles/07_pgsql-client/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: import postgresql repo key from a url
  rpm_key:
    key: https://download.postgresql.org/pub/repos/yum/RPM-GPG-KEY-PGDG-11
    state: present

- name: install postgresql vanilla repo
  yum:
    name: https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm
    state: installed

- name: install PostgreSQL, web-browsers
  yum:
    name:
      - postgresql11
      - postgresql11-contrib
      - python2-psycopg2
      - elinks
      - lynx
    state: latest

- name: put pgpass template
  template:
    src: pgpass.j2
    dest: /root/.pgpass
    owner: root
    group: root
    mode: '0600'
--------------------------------------------------------------------------------
/provisioning_proxmox/roles/07_pgsql-client/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: import postgresql repo key from a url
  rpm_key:
    key: https://download.postgresql.org/pub/repos/yum/RPM-GPG-KEY-PGDG-11
    state: present

- name: install postgresql vanilla repo
  yum:
    name: https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm
    state: installed

- name: install PostgreSQL, web-browsers
  yum:
    name:
      - postgresql11
      - postgresql11-contrib
      - python2-psycopg2
      - elinks
      - lynx
    state: latest

- name: put pgpass template
  template:
    src: pgpass.j2
    dest: /root/.pgpass
    owner: root
    group: root
    mode: '0600'
--------------------------------------------------------------------------------
/tests/locust/locust_zabbix.py:
--------------------------------------------------------------------------------
from locust import HttpLocust, TaskSet

# how to run:
# locust -f locust_zabbix.py --host=http://10.51.21.56:8080
#
# guest login link, just in case:
# http://10.51.21.56:8080/zabbix/index.php?enter=guest


def index(l):
    l.client.get("/zabbix/index.php?enter=guest")

def graphs(l):
    l.client.get("/zabbix/screens.php?elementid=23")

def dashboard(l):
    l.client.get("/zabbix/zabbix.php?action=dashboard.view&dashboardid=1")



class UserBehavior(TaskSet):
    tasks = {graphs: 1, dashboard: 1}

    def on_start(self):
        index(self)

    def on_stop(self):
        graphs(self)

class WebsiteUser(HttpLocust):
    task_set = UserBehavior
    min_wait = 5000
    max_wait = 9000
--------------------------------------------------------------------------------
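locust_zabbix.py above is written against the pre-1.0 Locust API (HttpLocust/TaskSet, with min_wait/max_wait in milliseconds). For reference, the same scenario on Locust >= 1.0 would look roughly like this — an equivalent sketch, not a file from the repo:

    from locust import HttpUser, task, between

    # Port of locust_zabbix.py: HttpLocust -> HttpUser, the tasks dict -> @task,
    # min_wait/max_wait (ms) -> wait_time (seconds).
    class WebsiteUser(HttpUser):
        wait_time = between(5, 9)

        def on_start(self):
            # guest login, as in the original index() helper
            self.client.get("/zabbix/index.php?enter=guest")

        @task
        def graphs(self):
            self.client.get("/zabbix/screens.php?elementid=23")

        @task
        def dashboard(self):
            self.client.get("/zabbix/zabbix.php?action=dashboard.view&dashboardid=1")

It is launched the same way: locust -f locust_zabbix.py --host=http://10.51.21.56:8080.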
index(self) 26 | 27 | def on_stop(self): 28 | graphs(self) 29 | 30 | class WebsiteUser(HttpLocust): 31 | task_set = UserBehavior 32 | min_wait = 5000 33 | max_wait = 9000 34 | -------------------------------------------------------------------------------- /provisioning/roles/09_mamonsu/02_mamonsu_install/files/mamonsu2.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Zabbix active agent mamonsu2 3 | After=syslog.target network.target 4 | After=network-online.target 5 | Wants=network-online.target 6 | 7 | [Service] 8 | SyslogIdentifier=mamonsu2 9 | #Type=simple 10 | Type=forking 11 | Restart=on-failure 12 | PIDFile=/var/run/mamonsu/mamonsu2.pid 13 | EnvironmentFile=/etc/sysconfig/mamonsu2 14 | ExecStartPre=/bin/sh -c 'rm -rf $PIDDIR && mkdir $PIDDIR && touch $PIDFILE && chown -R mamonsu. $PIDDIR' 15 | ExecStart=/usr/bin/python /usr/bin/mamonsu -d -a /etc/mamonsu/plugins -c $CONFIG -p $PIDFILE 16 | ExecStopPost=/bin/rm -f $PIDFILE 17 | ExecStop=/bin/kill -SIGTERM $MAINPID 18 | #User=mamonsu 19 | #Group=mamonsu 20 | KillMode=process 21 | #KillMode=control-group 22 | RestartSec=10s 23 | 24 | [Install] 25 | WantedBy=multi-user.target 26 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/09_mamonsu/02_mamonsu_install/files/mamonsu2.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Zabbix active agent mamonsu2 3 | After=syslog.target network.target 4 | After=network-online.target 5 | Wants=network-online.target 6 | 7 | [Service] 8 | SyslogIdentifier=mamonsu2 9 | #Type=simple 10 | Type=forking 11 | Restart=on-failure 12 | PIDFile=/var/run/mamonsu/mamonsu2.pid 13 | EnvironmentFile=/etc/sysconfig/mamonsu2 14 | ExecStartPre=/bin/sh -c 'rm -rf $PIDDIR && mkdir $PIDDIR && touch $PIDFILE && chown -R mamonsu. 
$PIDDIR' 15 | ExecStart=/usr/bin/python /usr/bin/mamonsu -d -a /etc/mamonsu/plugins -c $CONFIG -p $PIDFILE 16 | ExecStopPost=/bin/rm -f $PIDFILE 17 | ExecStop=/bin/kill -SIGTERM $MAINPID 18 | #User=mamonsu 19 | #Group=mamonsu 20 | KillMode=process 21 | #KillMode=control-group 22 | RestartSec=10s 23 | 24 | [Install] 25 | WantedBy=multi-user.target 26 | -------------------------------------------------------------------------------- /provisioning/roles/03_keepalived-haproxy/templates/master_keepalived.conf.j2: -------------------------------------------------------------------------------- 1 | global_defs { 2 | # Keepalived process identifier 3 | router_id haproxy_DH 4 | } 5 | # Script used to check if HAProxy is running 6 | vrrp_script check_haproxy { 7 | script "/usr/bin/killall -0 haproxy" 8 | interval 2 9 | weight 2 10 | } 11 | # Virtual interface 12 | # The priority specifies the order in which the assigned interface takes over in a failover 13 | vrrp_instance balancer { 14 | state MASTER 15 | interface {{ IP_BALANCER_NIC }} 16 | virtual_router_id 51 17 | priority 101 18 | # The virtual ip address shared between the two loadbalancers 19 | virtual_ipaddress { 20 | {{ HOST_IP_BALANCER_VIP }} 21 | } 22 | track_script { 23 | check_haproxy 24 | } 25 | authentication { 26 | auth_type PASS 27 | auth_pass {{ PASS_FOR_KEEPALIVED }} 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /provisioning/roles/03_keepalived-haproxy/templates/slave_keepalived.conf.j2: -------------------------------------------------------------------------------- 1 | global_defs { 2 | # Keepalived process identifier 3 | router_id haproxy_DH_passive 4 | } 5 | # Script used to check if HAProxy is running 6 | vrrp_script check_haproxy { 7 | script "/usr/bin/killall -0 haproxy" 8 | interval 2 9 | weight 2 10 | } 11 | # Virtual interface 12 | # The priority specifies the order in which the assigned interface takes over in a failover 13 | vrrp_instance balancer { 14 | state BACKUP 15 | interface {{ IP_BALANCER_NIC }} 16 | virtual_router_id 51 17 | priority 100 18 | # The virtual ip address shared between the two loadbalancers 19 | virtual_ipaddress { 20 | {{ HOST_IP_BALANCER_VIP }} 21 | } 22 | track_script { 23 | check_haproxy 24 | } 25 | authentication { 26 | auth_type PASS 27 | auth_pass {{ PASS_FOR_KEEPALIVED }} 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/03_keepalived-haproxy/templates/master_keepalived.conf.j2: -------------------------------------------------------------------------------- 1 | global_defs { 2 | # Keepalived process identifier 3 | router_id haproxy_DH 4 | } 5 | # Script used to check if HAProxy is running 6 | vrrp_script check_haproxy { 7 | script "/usr/bin/killall -0 haproxy" 8 | interval 2 9 | weight 2 10 | } 11 | # Virtual interface 12 | # The priority specifies the order in which the assigned interface takes over in a failover 13 | vrrp_instance balancer { 14 | state MASTER 15 | interface {{ IP_BALANCER_NIC }} 16 | virtual_router_id 51 17 | priority 101 18 | # The virtual ip address shared between the two loadbalancers 19 | virtual_ipaddress { 20 | {{ HOST_IP_BALANCER_VIP }} 21 | } 22 | track_script { 23 | check_haproxy 24 | } 25 | authentication { 26 | auth_type PASS 27 | auth_pass {{ PASS_FOR_KEEPALIVED }} 28 | } 29 | } 30 | -------------------------------------------------------------------------------- 
/provisioning_proxmox/roles/03_keepalived-haproxy/templates/slave_keepalived.conf.j2: -------------------------------------------------------------------------------- 1 | global_defs { 2 | # Keepalived process identifier 3 | router_id haproxy_DH_passive 4 | } 5 | # Script used to check if HAProxy is running 6 | vrrp_script check_haproxy { 7 | script "/usr/bin/killall -0 haproxy" 8 | interval 2 9 | weight 2 10 | } 11 | # Virtual interface 12 | # The priority specifies the order in which the assigned interface takes over in a failover 13 | vrrp_instance balancer { 14 | state BACKUP 15 | interface {{ IP_BALANCER_NIC }} 16 | virtual_router_id 51 17 | priority 100 18 | # The virtual ip address shared between the two loadbalancers 19 | virtual_ipaddress { 20 | {{ HOST_IP_BALANCER_VIP }} 21 | } 22 | track_script { 23 | check_haproxy 24 | } 25 | authentication { 26 | auth_type PASS 27 | auth_pass {{ PASS_FOR_KEEPALIVED }} 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /provisioning/roles/04_pgconpool/06_keepalived/templates/master_keepalived.conf.j2: -------------------------------------------------------------------------------- 1 | global_defs { 2 | # Keepalived process identifier 3 | router_id odyssey_DH 4 | } 5 | # Script used to check if odyssey is running 6 | vrrp_script check_odyssey { 7 | script "/usr/bin/killall -0 odyssey" 8 | interval 2 9 | weight 2 10 | } 11 | # Virtual interface 12 | # The priority specifies the order in which the assigned interface takes over in a failover 13 | vrrp_instance odyssey { 14 | state MASTER 15 | interface {{ IP_PG_CON_POOL_NIC }} 16 | virtual_router_id 55 17 | priority 101 18 | # The virtual ip address shared between the two loadbalancers 19 | virtual_ipaddress { 20 | {{ HOST_IP_PG_CON_POOL_VIP }} 21 | } 22 | track_script { 23 | check_odyssey 24 | } 25 | authentication { 26 | auth_type PASS 27 | auth_pass {{ PASS_FOR_PG_CON_POOL_KEEPALIVED }} 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /provisioning/roles/04_pgconpool/06_keepalived/templates/slave_keepalived.conf.j2: -------------------------------------------------------------------------------- 1 | global_defs { 2 | # Keepalived process identifier 3 | router_id odyssey_DH_passive 4 | } 5 | # Script used to check if odyssey is running 6 | vrrp_script check_odyssey { 7 | script "/usr/bin/killall -0 odyssey" 8 | interval 2 9 | weight 2 10 | } 11 | # Virtual interface 12 | # The priority specifies the order in which the assigned interface takes over in a failover 13 | vrrp_instance odyssey { 14 | state BACKUP 15 | interface {{ IP_PG_CON_POOL_NIC }} 16 | virtual_router_id 55 17 | priority 100 18 | # The virtual ip address shared between the two loadbalancers 19 | virtual_ipaddress { 20 | {{ HOST_IP_PG_CON_POOL_VIP }} 21 | } 22 | track_script { 23 | check_odyssey 24 | } 25 | authentication { 26 | auth_type PASS 27 | auth_pass {{ PASS_FOR_PG_CON_POOL_KEEPALIVED }} 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /provisioning/roles/04_pgconpool/04_build_odyssey_rpm/files/FindPostgreSQL_rpmbuild.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find the PostgreSQL libraries 2 | # 3 | # POSTGRESQL_INCLUDE_DIR - PostgreSQL include directory 4 | # POSTGRESQL_LIBRARY - PostgreSQL library 5 | # PQ_LIBRARY - PostgreSQL PQ library 6 | 7 | find_path( 8 | POSTGRESQL_INCLUDE_DIR 9 | NAMES common/base64.h 
common/saslprep.h common/scram-common.h common/sha2.h 10 | PATH_SUFFIXES pgsql-11/include/server 11 | ) 12 | 13 | find_library( 14 | POSTGRESQL_LIBRARY 15 | NAMES pgcommon 16 | HINTS "/usr/pgsql-11/include/server/" 17 | PATH_SUFFIXES pgsql-11/lib/ 18 | ) 19 | 20 | find_library( 21 | PQ_LIBRARY 22 | NAMES libpq.a 23 | PATH_SUFFIXES pgsql-11/lib/ 24 | ) 25 | 26 | find_package_handle_standard_args( 27 | POSTGRESQL 28 | REQUIRED_VARS POSTGRESQL_LIBRARY PQ_LIBRARY POSTGRESQL_INCLUDE_DIR 29 | ) 30 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/04_pgconpool/06_keepalived/templates/master_keepalived.conf.j2: -------------------------------------------------------------------------------- 1 | global_defs { 2 | # Keepalived process identifier 3 | router_id odyssey_DH 4 | } 5 | # Script used to check if odyssey is running 6 | vrrp_script check_odyssey { 7 | script "/usr/bin/killall -0 odyssey" 8 | interval 2 9 | weight 2 10 | } 11 | # Virtual interface 12 | # The priority specifies the order in which the assigned interface takes over in a failover 13 | vrrp_instance odyssey { 14 | state MASTER 15 | interface {{ IP_PG_CON_POOL_NIC }} 16 | virtual_router_id 55 17 | priority 101 18 | # The virtual ip address shared between the two loadbalancers 19 | virtual_ipaddress { 20 | {{ HOST_IP_PG_CON_POOL_VIP }} 21 | } 22 | track_script { 23 | check_odyssey 24 | } 25 | authentication { 26 | auth_type PASS 27 | auth_pass {{ PASS_FOR_PG_CON_POOL_KEEPALIVED }} 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/04_pgconpool/06_keepalived/templates/slave_keepalived.conf.j2: -------------------------------------------------------------------------------- 1 | global_defs { 2 | # Keepalived process identifier 3 | router_id odyssey_DH_passive 4 | } 5 | # Script used to check if odyssey is running 6 | vrrp_script check_odyssey { 7 | script "/usr/bin/killall -0 odyssey" 8 | interval 2 9 | weight 2 10 | } 11 | # Virtual interface 12 | # The priority specifies the order in which the assigned interface takes over in a failover 13 | vrrp_instance odyssey { 14 | state BACKUP 15 | interface {{ IP_PG_CON_POOL_NIC }} 16 | virtual_router_id 55 17 | priority 100 18 | # The virtual ip address shared between the two loadbalancers 19 | virtual_ipaddress { 20 | {{ HOST_IP_PG_CON_POOL_VIP }} 21 | } 22 | track_script { 23 | check_odyssey 24 | } 25 | authentication { 26 | auth_type PASS 27 | auth_pass {{ PASS_FOR_PG_CON_POOL_KEEPALIVED }} 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/04_pgconpool/04_build_odyssey_rpm/files/FindPostgreSQL_rpmbuild.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find the PostgreSQL libraries 2 | # 3 | # POSTGRESQL_INCLUDE_DIR - PostgreSQL include directory 4 | # POSTGRESQL_LIBRARY - PostgreSQL library 5 | # PQ_LIBRARY - PostgreSQL PQ library 6 | 7 | find_path( 8 | POSTGRESQL_INCLUDE_DIR 9 | NAMES common/base64.h common/saslprep.h common/scram-common.h common/sha2.h 10 | PATH_SUFFIXES pgsql-11/include/server 11 | ) 12 | 13 | find_library( 14 | POSTGRESQL_LIBRARY 15 | NAMES pgcommon 16 | HINTS "/usr/pgsql-11/include/server/" 17 | PATH_SUFFIXES pgsql-11/lib/ 18 | ) 19 | 20 | find_library( 21 | PQ_LIBRARY 22 | NAMES libpq.a 23 | PATH_SUFFIXES pgsql-11/lib/ 24 | ) 25 | 26 | find_package_handle_standard_args( 27 | POSTGRESQL 28 | 
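# find_package_handle_standard_args sets POSTGRESQL_FOUND only if every variable listed under REQUIRED_VARS was resolved above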
REQUIRED_VARS POSTGRESQL_LIBRARY PQ_LIBRARY POSTGRESQL_INCLUDE_DIR 29 | ) 30 | -------------------------------------------------------------------------------- /provisioning/roles/04_pgconpool/03_build_odyssey_binary/files/postgres.h: -------------------------------------------------------------------------------- 1 | #ifndef ODYSSEY_POSTGRES_H 2 | #define ODYSSEY_POSTGRES_H 3 | 4 | /* 5 | * Odyssey. 6 | * 7 | * Scalable PostgreSQL connection pooler. 8 | */ 9 | 10 | #define int8 int8_t 11 | #define uint8 uint8_t 12 | #define uint16 uint16_t 13 | #define uint32 uint32_t 14 | #define uint64 uint64_t 15 | 16 | #define lengthof(array) (sizeof (array) / sizeof ((array)[0])) 17 | #define pg_hton32(x) htobe32(x) 18 | 19 | #define pg_attribute_noreturn() _NORETURN 20 | 21 | #define HIGHBIT (0x80) 22 | #define IS_HIGHBIT_SET(ch) ((unsigned char)(ch) & HIGHBIT) 23 | 24 | #define FRONTEND 25 | 26 | #include 27 | #include 28 | #include 29 | #include 30 | 31 | #endif /* ODYSSEY_POSTGRES_H */ 32 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/04_pgconpool/03_build_odyssey_binary/files/postgres.h: -------------------------------------------------------------------------------- 1 | #ifndef ODYSSEY_POSTGRES_H 2 | #define ODYSSEY_POSTGRES_H 3 | 4 | /* 5 | * Odyssey. 6 | * 7 | * Scalable PostgreSQL connection pooler. 8 | */ 9 | 10 | #define int8 int8_t 11 | #define uint8 uint8_t 12 | #define uint16 uint16_t 13 | #define uint32 uint32_t 14 | #define uint64 uint64_t 15 | 16 | #define lengthof(array) (sizeof (array) / sizeof ((array)[0])) 17 | #define pg_hton32(x) htobe32(x) 18 | 19 | #define pg_attribute_noreturn() _NORETURN 20 | 21 | #define HIGHBIT (0x80) 22 | #define IS_HIGHBIT_SET(ch) ((unsigned char)(ch) & HIGHBIT) 23 | 24 | #define FRONTEND 25 | 26 | #include 27 | #include 28 | #include 29 | #include 30 | 31 | #endif /* ODYSSEY_POSTGRES_H */ 32 | -------------------------------------------------------------------------------- /tests/tank/yandextank/load.yaml: -------------------------------------------------------------------------------- 1 | phantom: 2 | address: 10.51.21.50 3 | # address: 10.51.21.57:8080 4 | uris: 5 | - "/zabbix/screens.php?elementid=23" 6 | - "/zabbix/screens.php?elementid=25" 7 | - "/zabbix/screens.php?elementid=30" 8 | # - "/zabbix/zabbix.php?action=dashboard.view&dashboardid=1" 9 | # - "/zabbix/screens.php?elementid=23" 10 | # - "/zabbix/screens.php?elementid=24" 11 | # - "/zabbix/zabbix.php?action=dashboard.view&ddreset=1" 12 | ssl: false 13 | load_profile: 14 | load_type: rps 15 | # schedule: line(1, 60, 200s) 16 | # schedule: line(1, 125, 200s) 17 | schedule: line(1, 200, 200s) 18 | # ammofile: /var/loadtest/ammo_local.txt 19 | # ammo_type: uripost 20 | autostop: 21 | autostop: 22 | - net(xx,25,10) 23 | console: 24 | enabled: true 25 | telegraf: 26 | enabled: false 27 | uploader: 28 | # enabled: false 29 | enabled: true 30 | package: yandextank.plugins.DataUploader 31 | token_file: token.txt 32 | operator: timlok 33 | -------------------------------------------------------------------------------- /provisioning/roles/04_pgconpool/03_build_odyssey_binary/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: clone odyssey repo 3 | git: 4 | repo: https://github.com/yandex/odyssey.git 5 | dest: /root/odyssey 6 | 7 | - name: copy FindPostgreSQL.cmake 8 | copy: 9 | src: FindPostgreSQL.cmake 10 | dest: /root/odyssey/cmake/FindPostgreSQL.cmake 11 | owner: root 
12 | group: root 13 | mode: '0644' 14 | 15 | - name: copy postgres.h 16 | copy: 17 | src: postgres.h 18 | dest: /root/odyssey/sources/postgres.h 19 | owner: root 20 | group: root 21 | mode: '0644' 22 | 23 | - name: create build directory 24 | file: 25 | path: /root/odyssey/build 26 | state: directory 27 | owner: root 28 | group: root 29 | mode: '0755' 30 | 31 | - name: generating a Makefile for assembling the odyssey binary 32 | shell: cd /root/odyssey/build && cmake -DCMAKE_BUILD_TYPE=Release .. 33 | 34 | - name: assemble the odyssey binary 35 | make: 36 | chdir: /root/odyssey/build 37 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/04_pgconpool/03_build_odyssey_binary/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: clone odyssey repo 3 | git: 4 | repo: https://github.com/yandex/odyssey.git 5 | dest: /root/odyssey 6 | 7 | - name: copy FindPostgreSQL.cmake 8 | copy: 9 | src: FindPostgreSQL.cmake 10 | dest: /root/odyssey/cmake/FindPostgreSQL.cmake 11 | owner: root 12 | group: root 13 | mode: '0644' 14 | 15 | - name: copy postgres.h 16 | copy: 17 | src: postgres.h 18 | dest: /root/odyssey/sources/postgres.h 19 | owner: root 20 | group: root 21 | mode: '0644' 22 | 23 | - name: create build directory 24 | file: 25 | path: /root/odyssey/build 26 | state: directory 27 | owner: root 28 | group: root 29 | mode: '0755' 30 | 31 | - name: generating a Makefile for assembling the odyssey binary 32 | shell: cd /root/odyssey/build && cmake -DCMAKE_BUILD_TYPE=Release .. 33 | 34 | - name: assemble the odyssey binary 35 | make: 36 | chdir: /root/odyssey/build 37 | -------------------------------------------------------------------------------- /provisioning/roles/02_hl-client_docker-yandextank/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install device-mapper-persistent-data, lvm2 3 | yum: 4 | name: device-mapper-persistent-data, lvm2 5 | state: latest 6 | 7 | - name: import docker repo key from a url 8 | rpm_key: 9 | key: https://download.docker.com/linux/centos/gpg 10 | state: present 11 | 12 | - name: add repo docker-ce 13 | shell: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo 14 | 15 | - name: install docker-ce, docker-ce-cli, containerd.io 16 | yum: 17 | name: docker-ce, docker-ce-cli, containerd.io 18 | state: latest 19 | notify: 20 | - docker restart 21 | 22 | - name: copy yandextank directory 23 | copy: 24 | src: yandextank 25 | dest: /root/ 26 | owner: root 27 | group: root 28 | 29 | - name: put load.yaml template 30 | template: 31 | src: load.yaml.j2 32 | dest: /root/yandextank/load.yaml 33 | owner: root 34 | group: root 35 | mode: '0644' 36 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/02_hl-client_docker-yandextank/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install device-mapper-persistent-data, lvm2 3 | yum: 4 | name: device-mapper-persistent-data, lvm2 5 | state: latest 6 | 7 | - name: import docker repo key from a url 8 | rpm_key: 9 | key: https://download.docker.com/linux/centos/gpg 10 | state: present 11 | 12 | - name: add repo docker-ce 13 | shell: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo 14 | 15 | - name: install docker-ce, docker-ce-cli, containerd.io 16 | yum: 
17 | name: docker-ce, docker-ce-cli, containerd.io 18 | state: latest 19 | notify: 20 | - docker restart 21 | 22 | - name: copy yandextank directory 23 | copy: 24 | src: yandextank 25 | dest: /root/ 26 | owner: root 27 | group: root 28 | 29 | - name: put load.yaml template 30 | template: 31 | src: load.yaml.j2 32 | dest: /root/yandextank/load.yaml 33 | owner: root 34 | group: root 35 | mode: '0644' 36 | -------------------------------------------------------------------------------- /provisioning/roles/05_consul-cluster/templates/consul-server.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "advertise_addr": "{{ ansible_eth1.ipv4.address }}", 3 | "bind_addr": "{{ ansible_eth1.ipv4.address }}", 4 | "bootstrap_expect": 3, 5 | "client_addr": "0.0.0.0", 6 | "datacenter": "{{ CONSUL_DATACENTER }}", 7 | "data_dir": "/var/lib/consul", 8 | "domain": "consul", 9 | "enable_script_checks": true, 10 | "dns_config": { 11 | "enable_truncate": true, 12 | "only_passing": true 13 | }, 14 | "enable_syslog": true, 15 | "log_level": "INFO", 16 | "encrypt": "{{ CONSUL_KEY }}", 17 | "leave_on_terminate": true, 18 | "rejoin_after_leave": true, 19 | "retry_join": [ 20 | "{{ HOST_NAME_DCS_01 }}", 21 | "{{ HOST_NAME_DCS_02 }}", 22 | "{{ HOST_NAME_DCS_03 }}" 23 | ], 24 | "server": true, 25 | "start_join": [ 26 | "{{ HOST_NAME_DCS_01 }}", 27 | "{{ HOST_NAME_DCS_02 }}", 28 | "{{ HOST_NAME_DCS_03 }}" 29 | ], 30 | "ui": true 31 | } 32 | -------------------------------------------------------------------------------- /provisioning/roles/06_pgsql-patroni/04_vip-manager_install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add otus-odyssey-vip-manager repository 3 | yum_repository: 4 | name: otus-odyssey-vip-manager 5 | description: Repository with odyssey and vip-manager rpm packages 6 | baseurl: 7 | - "http://{{ HOST_NAME_PG_CON_POOL_01 }}.{{ FAKE_DOMAIN }}:8081/repo" 8 | - "http://{{ HOST_IP_PG_CON_POOL_01 }}:8081/repo" 9 | gpgcheck: no 10 | enabled: yes 11 | 12 | - name: install vip-manager from repo 13 | yum: 14 | name: vip-manager 15 | state: latest 16 | 17 | - name: copy vip-manager.service 18 | copy: 19 | src: vip-manager.service 20 | dest: /usr/lib/systemd/system/vip-manager.service 21 | owner: root 22 | group: root 23 | mode: '0644' 24 | 25 | - name: put vip-manager.yml template 26 | template: 27 | src: vip-manager.yml.j2 28 | dest: /etc/default/vip-manager.yml 29 | owner: root 30 | group: root 31 | mode: '0644' 32 | notify: 33 | - vip-manager start and enable 34 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/05_consul-cluster/templates/consul-server.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "advertise_addr": "{{ ansible_eth0.ipv4.address }}", 3 | "bind_addr": "{{ ansible_eth0.ipv4.address }}", 4 | "bootstrap_expect": 3, 5 | "client_addr": "0.0.0.0", 6 | "datacenter": "{{ CONSUL_DATACENTER }}", 7 | "data_dir": "/var/lib/consul", 8 | "domain": "consul", 9 | "enable_script_checks": true, 10 | "dns_config": { 11 | "enable_truncate": true, 12 | "only_passing": true 13 | }, 14 | "enable_syslog": true, 15 | "log_level": "INFO", 16 | "encrypt": "{{ CONSUL_KEY }}", 17 | "leave_on_terminate": true, 18 | "rejoin_after_leave": true, 19 | "retry_join": [ 20 | "{{ HOST_NAME_DCS_01 }}", 21 | "{{ HOST_NAME_DCS_02 }}", 22 | "{{ HOST_NAME_DCS_03 }}" 23 | ], 24 | "server": true, 25 | 
"start_join": [ 26 | "{{ HOST_NAME_DCS_01 }}", 27 | "{{ HOST_NAME_DCS_02 }}", 28 | "{{ HOST_NAME_DCS_03 }}" 29 | ], 30 | "ui": true 31 | } 32 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/01_tuning_OS/01_tuning_OS/files/hosts.redhat.tmpl: -------------------------------------------------------------------------------- 1 | ## template:jinja 2 | {# 3 | This file /etc/cloud/templates/hosts.redhat.tmpl is only utilized 4 | if enabled in cloud-config. Specifically, in order to enable it 5 | you need to add the following to config: 6 | manage_etc_hosts: True 7 | -#} 8 | # Your system has configured 'manage_etc_hosts' as True. 9 | # As a result, if you wish for changes to this file to persist 10 | # then you will need to either 11 | # a.) make changes to the master file in /etc/cloud/templates/hosts.redhat.tmpl 12 | # b.) change or remove the value of 'manage_etc_hosts' in 13 | # /etc/cloud/cloud.cfg or cloud-config from user-data 14 | # 15 | # The following lines are desirable for IPv4 capable hosts 16 | 127.0.0.1 {{fqdn}} {{hostname}} 17 | 127.0.0.1 localhost.localdomain localhost 18 | 127.0.0.1 localhost4.localdomain4 localhost4 19 | 20 | # The following lines are desirable for IPv6 capable hosts 21 | ::1 {{fqdn}} {{hostname}} 22 | ::1 localhost.localdomain localhost 23 | ::1 localhost6.localdomain6 localhost6 24 | -------------------------------------------------------------------------------- /provisioning/roles/02_hl-client_docker-yandextank/templates/load.yaml.j2: -------------------------------------------------------------------------------- 1 | phantom: 2 | address: {{ HOST_IP_BALANCER_VIP }} 3 | # address: {{ HOST_NAME_BALANCER_VIP }}.{{ FAKE_DOMAIN }} 4 | uris: 5 | # - "/zabbix/screens.php?elementid=23" 6 | # - "/zabbix/screens.php?elementid=25" 7 | # - "/zabbix/screens.php?elementid=26" 8 | - "/zabbix/zabbix.php?action=dashboard.view&dashboardid=1" 9 | # - "/zabbix/screens.php?elementid=23" 10 | # - "/zabbix/screens.php?elementid=24" 11 | # - "/zabbix/zabbix.php?action=dashboard.view&ddreset=1" 12 | ssl: false 13 | load_profile: 14 | load_type: rps 15 | schedule: line(1, 60, 200s) 16 | # ammofile: /var/loadtest/ammo_local.txt 17 | # ammo_type: uripost 18 | console: 19 | enabled: true 20 | telegraf: 21 | enabled: false 22 | # config: hl-zabbix02.xml 23 | # enabled: true 24 | # kill_old: true 25 | # ssh_timeout: 15s 26 | # package: yandextank.plugins.Telegraf 27 | uploader: 28 | # enabled: false 29 | enabled: true 30 | package: yandextank.plugins.DataUploader 31 | token_file: token.txt 32 | operator: timlok 33 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/02_hl-client_docker-yandextank/templates/load.yaml.j2: -------------------------------------------------------------------------------- 1 | phantom: 2 | address: {{ HOST_IP_BALANCER_VIP }} 3 | # address: {{ HOST_NAME_BALANCER_VIP }}.{{ FAKE_DOMAIN }} 4 | uris: 5 | - "/zabbix/screens.php?elementid=23" 6 | - "/zabbix/screens.php?elementid=25" 7 | - "/zabbix/screens.php?elementid=26" 8 | # - "/zabbix/zabbix.php?action=dashboard.view&dashboardid=1" 9 | # - "/zabbix/screens.php?elementid=23" 10 | # - "/zabbix/screens.php?elementid=24" 11 | # - "/zabbix/zabbix.php?action=dashboard.view&ddreset=1" 12 | ssl: false 13 | load_profile: 14 | load_type: rps 15 | schedule: line(1, 60, 200s) 16 | # ammofile: /var/loadtest/ammo_local.txt 17 | # ammo_type: uripost 18 | console: 19 | enabled: true 20 | telegraf: 21 | 
enabled: false 22 | # config: hl-zabbix02.xml 23 | # enabled: true 24 | # kill_old: true 25 | # ssh_timeout: 15s 26 | # package: yandextank.plugins.Telegraf 27 | uploader: 28 | # enabled: false 29 | enabled: true 30 | package: yandextank.plugins.DataUploader 31 | token_file: token.txt 32 | operator: timlok 33 | -------------------------------------------------------------------------------- /provisioning/roles/08_zabbix/01_zabbix_install/templates/zabbix.conf.php.j2: -------------------------------------------------------------------------------- 1 | 'http://localhost:9200', 26 | // 'text' => 'http://localhost:9200' 27 | //]; 28 | // Value types stored in Elasticsearch. 29 | //$HISTORY['types'] = ['uint', 'text']; 30 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/08_zabbix/01_zabbix_install/templates/zabbix.conf.php.j2: -------------------------------------------------------------------------------- 1 | 'http://localhost:9200', 26 | // 'text' => 'http://localhost:9200' 27 | //]; 28 | // Value types stored in Elasticsearch. 29 | //$HISTORY['types'] = ['uint', 'text']; 30 | -------------------------------------------------------------------------------- /provisioning/roles/01_tuning_OS/02_zabbix-agent_install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: import zabbix repo key from a url 3 | rpm_key: 4 | key: https://repo.zabbix.com/RPM-GPG-KEY-ZABBIX-A14FE591 5 | state: present 6 | 7 | - name: install zabbix repo 8 | yum: 9 | name: https://repo.zabbix.com/zabbix/4.2/rhel/7/x86_64/zabbix-release-4.2-2.el7.noarch.rpm 10 | state: installed 11 | 12 | - name: install zabbix packages 13 | yum: 14 | name: 15 | - zabbix-agent 16 | - zabbix-get 17 | state: latest 18 | 19 | - name: put zabbix_agentd.conf template for servers 20 | template: 21 | src: server_zabbix_agentd.conf.j2 22 | dest: /etc/zabbix/zabbix_agentd.conf 23 | owner: root 24 | group: root 25 | mode: '0644' 26 | notify: 27 | - zabbix-agent restart 28 | when: "'hl-zabbix0' in ansible_hostname" 29 | 30 | - name: put zabbix_agentd.conf template for clients 31 | template: 32 | src: client_zabbix_agentd.conf.j2 33 | dest: /etc/zabbix/zabbix_agentd.conf 34 | owner: root 35 | group: root 36 | mode: '0644' 37 | notify: 38 | - zabbix-agent restart 39 | when: "'hl-zabbix0' not in ansible_hostname" 40 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/01_tuning_OS/02_zabbix-agent_install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: import zabbix repo key from a url 3 | rpm_key: 4 | key: https://repo.zabbix.com/RPM-GPG-KEY-ZABBIX-A14FE591 5 | state: present 6 | 7 | - name: install zabbix repo 8 | yum: 9 | name: https://repo.zabbix.com/zabbix/4.2/rhel/7/x86_64/zabbix-release-4.2-2.el7.noarch.rpm 10 | state: installed 11 | 12 | - name: install zabbix packages 13 | yum: 14 | name: 15 | - zabbix-agent 16 | - zabbix-get 17 | state: latest 18 | 19 | - name: put zabbix_agentd.conf template for servers 20 | template: 21 | src: server_zabbix_agentd.conf.j2 22 | dest: /etc/zabbix/zabbix_agentd.conf 23 | owner: root 24 | group: root 25 | mode: '0644' 26 | notify: 27 | - zabbix-agent restart 28 | when: "'hl-zabbix0' in ansible_hostname" 29 | 30 | - name: put zabbix_agentd.conf template for clients 31 | template: 32 | src: client_zabbix_agentd.conf.j2 33 | dest: /etc/zabbix/zabbix_agentd.conf 34 
| owner: root 35 | group: root 36 | mode: '0644' 37 | notify: 38 | - zabbix-agent restart 39 | when: "'hl-zabbix0' not in ansible_hostname" 40 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/06_pgsql-patroni/04_vip-manager_install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add otus-odyssey-vip-manager repository 3 | yum_repository: 4 | name: otus-odyssey-vip-manager 5 | description: Repository with odyssey and vip-manager rpm packages 6 | baseurl: 7 | - "http://{{ HOST_NAME_PG_CON_POOL_01 }}.{{ FAKE_DOMAIN }}:8081/repo" 8 | - "http://{{ HOST_IP_PG_CON_POOL_01 }}:8081/repo" 9 | gpgcheck: no 10 | enabled: yes 11 | tags: 12 | - vip-manager_install 13 | 14 | - name: install vip-manager from repo 15 | yum: 16 | name: vip-manager 17 | state: latest 18 | tags: 19 | - vip-manager_install 20 | 21 | - name: copy vip-manager.service 22 | copy: 23 | src: vip-manager.service 24 | dest: /usr/lib/systemd/system/vip-manager.service 25 | owner: root 26 | group: root 27 | mode: '0644' 28 | tags: 29 | - vip-manager_install 30 | 31 | - name: put vip-manager.yml template 32 | template: 33 | src: vip-manager.yml.j2 34 | dest: /etc/default/vip-manager.yml 35 | owner: root 36 | group: root 37 | mode: '0644' 38 | notify: 39 | - vip-manager start and enable 40 | tags: 41 | - vip-manager_install 42 | -------------------------------------------------------------------------------- /provisioning/roles/08_zabbix/01_zabbix_install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: import zabbix repo key from a url 3 | rpm_key: 4 | key: https://repo.zabbix.com/RPM-GPG-KEY-ZABBIX-A14FE591 5 | state: present 6 | 7 | - name: install zabbix repo 8 | yum: 9 | name: https://repo.zabbix.com/zabbix/4.2/rhel/7/x86_64/zabbix-release-4.2-2.el7.noarch.rpm 10 | state: installed 11 | 12 | - name: install zabbix packages 13 | yum: 14 | name: 15 | - zabbix-server-pgsql 16 | - zabbix-web-pgsql 17 | - zabbix-get 18 | state: latest 19 | 20 | - name: edit /etc/zabbix/zabbix_server.conf 21 | blockinfile: 22 | path: /etc/zabbix/zabbix_server.conf 23 | block: | 24 | ListenIP=127.0.0.1,{{ IP_ZAB_CLUSTER }} 25 | SourceIP={{ IP_ZAB_CLUSTER }} 26 | DBHost={{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }} 27 | DBPort={{ PORT_FOR_DB_CLIENT }} 28 | DBPassword={{ PASS_ZAB_FOR_DB }} 29 | StartDiscoverers=5 30 | 31 | - name: put zabbix.conf.php template 32 | template: 33 | src: zabbix.conf.php.j2 34 | dest: /etc/zabbix/web/zabbix.conf.php 35 | owner: root 36 | group: root 37 | mode: '0644' 38 | notify: 39 | - zabbix-server stopped 40 | -------------------------------------------------------------------------------- /provisioning/roles/01_tuning_OS/01_tuning_OS/files/screenrc: -------------------------------------------------------------------------------- 1 | # GNU Screen - main configuration file 2 | # All other .screenrc files will source this file to inherit settings. 3 | # Author: Christian Wills - cwills.sys@gmail.com 4 | 5 | # Allow bold colors - necessary for some reason 6 | attrcolor b ".I" 7 | 8 | # Tell screen how to set colors. 
AB = background, AF=foreground 9 | termcapinfo xterm 'Co#256:AB=\E[48;5;%dm:AF=\E[38;5;%dm' 10 | 11 | # Enables use of shift-PgUp and shift-PgDn 12 | termcapinfo xterm|xterms|xs|rxvt ti@:te@ 13 | 14 | # Erase background with current bg color 15 | defbce "on" 16 | 17 | # Enable 256 color term 18 | term xterm-256color 19 | 20 | # Cache 10000 lines for scroll back 21 | defscrollback 10000 22 | 23 | # New mail notification 24 | #backtick 101 30 15 $HOME/bin/mailstatus.sh 25 | 26 | hardstatus alwayslastline 27 | # Very nice tabbed colored hardstatus line 28 | hardstatus string '%{= Kd} %{= Kd}%-w%{= Kr}[%{= KW}%n %t%{= Kr}]%{= Kd}%+w %-= %{KG} %H%{KW}|%{KY}%101`%{KW}|%D %M %d %Y%{= Kc} %C%A%{-}' 29 | 30 | # change command character from ctrl-a to ctrl-b (emacs users may want this) 31 | #escape ^Bb 32 | 33 | # Hide hardstatus: ctrl-a f 34 | #bind f eval "hardstatus ignore" 35 | # Show hardstatus: ctrl-a F 36 | #bind F eval "hardstatus alwayslastline" 37 | 38 | nethack on 39 | hardstatus on 40 | startup_message off 41 | screen 0 42 | vbell on 43 | multiuser on 44 | 45 | -------------------------------------------------------------------------------- /provisioning/roles/04_pgconpool/01_preparation_for_rpms_assembly/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: import postgresql repo key from a url 3 | rpm_key: 4 | key: https://download.postgresql.org/pub/repos/yum/RPM-GPG-KEY-PGDG-11 5 | state: present 6 | 7 | - name: install postgresql vanilla repo 8 | yum: 9 | name: https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm 10 | state: installed 11 | 12 | - name: install required packages 13 | yum: 14 | name: 15 | - nginx 16 | - wget 17 | - git 18 | - make 19 | - rpm-build 20 | - python2-devel 21 | - python-setuptools 22 | - redhat-lsb-core 23 | - rpmdevtools 24 | - createrepo 25 | - yum-utils 26 | - cmake 27 | - gcc 28 | - openssl 29 | - openssl-devel 30 | - postgresql11-devel 31 | - golang 32 | - ruby-devel 33 | - rubygems 34 | state: latest 35 | 36 | - name: create directory for rpm otus-odyssey-vip-manager repository 37 | file: 38 | path: /usr/share/nginx/html/repo 39 | state: directory 40 | mode: '0755' 41 | 42 | - name: put template otus-odyssey-vip-manager.conf for nginx 43 | template: 44 | src: otus-odyssey-vip-manager.conf.j2 45 | dest: /etc/nginx/conf.d/otus-odyssey-vip-manager.conf 46 | owner: root 47 | group: root 48 | mode: '0644' 49 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/01_tuning_OS/01_tuning_OS/files/screenrc: -------------------------------------------------------------------------------- 1 | # GNU Screen - main configuration file 2 | # All other .screenrc files will source this file to inherit settings. 3 | # Author: Christian Wills - cwills.sys@gmail.com 4 | 5 | # Allow bold colors - necessary for some reason 6 | attrcolor b ".I" 7 | 8 | # Tell screen how to set colors. 
AB = background, AF=foreground 9 | termcapinfo xterm 'Co#256:AB=\E[48;5;%dm:AF=\E[38;5;%dm' 10 | 11 | # Enables use of shift-PgUp and shift-PgDn 12 | termcapinfo xterm|xterms|xs|rxvt ti@:te@ 13 | 14 | # Erase background with current bg color 15 | defbce "on" 16 | 17 | # Enable 256 color term 18 | term xterm-256color 19 | 20 | # Cache 10000 lines for scroll back 21 | defscrollback 10000 22 | 23 | # New mail notification 24 | #backtick 101 30 15 $HOME/bin/mailstatus.sh 25 | 26 | hardstatus alwayslastline 27 | # Very nice tabbed colored hardstatus line 28 | hardstatus string '%{= Kd} %{= Kd}%-w%{= Kr}[%{= KW}%n %t%{= Kr}]%{= Kd}%+w %-= %{KG} %H%{KW}|%{KY}%101`%{KW}|%D %M %d %Y%{= Kc} %C%A%{-}' 29 | 30 | # change command character from ctrl-a to ctrl-b (emacs users may want this) 31 | #escape ^Bb 32 | 33 | # Hide hardstatus: ctrl-a f 34 | #bind f eval "hardstatus ignore" 35 | # Show hardstatus: ctrl-a F 36 | #bind F eval "hardstatus alwayslastline" 37 | 38 | nethack on 39 | hardstatus on 40 | startup_message off 41 | screen 0 42 | vbell on 43 | multiuser on 44 | 45 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/04_pgconpool/01_preparation_for_rpms_assembly/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: import postgresql repo key from a url 3 | rpm_key: 4 | key: https://download.postgresql.org/pub/repos/yum/RPM-GPG-KEY-PGDG-11 5 | state: present 6 | 7 | - name: install postgresql vanilla repo 8 | yum: 9 | name: https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm 10 | state: installed 11 | 12 | - name: install required packages 13 | yum: 14 | name: 15 | - nginx 16 | - wget 17 | - git 18 | - make 19 | - rpm-build 20 | - python2-devel 21 | - python-setuptools 22 | - redhat-lsb-core 23 | - rpmdevtools 24 | - createrepo 25 | - yum-utils 26 | - cmake 27 | - gcc 28 | - openssl 29 | - openssl-devel 30 | - postgresql11-devel 31 | - golang 32 | - ruby-devel 33 | - rubygems 34 | state: latest 35 | 36 | - name: create directory for rpm otus-odyssey-vip-manager repository 37 | file: 38 | path: /usr/share/nginx/html/repo 39 | state: directory 40 | mode: '0755' 41 | 42 | - name: put template otus-odyssey-vip-manager.conf for nginx 43 | template: 44 | src: otus-odyssey-vip-manager.conf.j2 45 | dest: /etc/nginx/conf.d/otus-odyssey-vip-manager.conf 46 | owner: root 47 | group: root 48 | mode: '0644' 49 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/08_zabbix/01_zabbix_install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: import zabbix repo key from a url 3 | rpm_key: 4 | key: https://repo.zabbix.com/RPM-GPG-KEY-ZABBIX-A14FE591 5 | state: present 6 | 7 | - name: install zabbix repo 8 | yum: 9 | name: https://repo.zabbix.com/zabbix/4.2/rhel/7/x86_64/zabbix-release-4.2-2.el7.noarch.rpm 10 | state: installed 11 | 12 | - name: install zabbix packages 13 | yum: 14 | name: 15 | - zabbix-server-pgsql 16 | - zabbix-web-pgsql 17 | - zabbix-get 18 | state: latest 19 | 20 | - name: edit /etc/zabbix/zabbix_server.conf 21 | blockinfile: 22 | path: /etc/zabbix/zabbix_server.conf 23 | block: | 24 | ListenIP=127.0.0.1,{{ IP_ZAB_CLUSTER }} 25 | SourceIP={{ IP_ZAB_CLUSTER }} 26 | DBHost={{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }} 27 | DBPort={{ PORT_FOR_DB_CLIENT }} 28 | DBPassword={{ PASS_ZAB_FOR_DB }} 29 | 
StartDiscoverers=5 30 | tags: 31 | - change_name_HOST_NAME_PG_CON_POOL_VIP 32 | 33 | - name: put zabbix.conf.php template 34 | template: 35 | src: zabbix.conf.php.j2 36 | dest: /etc/zabbix/web/zabbix.conf.php 37 | owner: root 38 | group: root 39 | mode: '0644' 40 | tags: 41 | - change_name_HOST_NAME_PG_CON_POOL_VIP 42 | 43 | - name: zabbix-server stopped 44 | systemd: 45 | name: zabbix-server 46 | state: stopped 47 | enabled: no 48 | -------------------------------------------------------------------------------- /provisioning/roles/08_zabbix/02_nginx-php-fpm/templates/zabbix.conf.j2: -------------------------------------------------------------------------------- 1 | #http://server_ip:8080/zabbix 2 | 3 | server { 4 | listen 8080; 5 | #server_name {{ HOST_NAME_WEB_VIP }}.{{ FAKE_DOMAIN }}; 6 | 7 | ########## example ########## 8 | # server_name zabbix01.otus; 9 | #If the client can resolve the name zabbix01.otus, server_name can be 10 | #uncommented. In that case, however, nginx will serve this config only 11 | #when the name and port in the client request match exactly: 12 | #http://zabbix01.otus:8080/zabbix/ 13 | ############################ 14 | 15 | # listen 80; 16 | # server_name {{ HOST_NAME_WEB_VIP }}.{{ FAKE_DOMAIN }}; 17 | 18 | root /usr/share; 19 | set $root_path /usr/share; 20 | 21 | access_log /var/log/nginx/zabbix.access.log; 22 | error_log /var/log/nginx/zabbix.error.log; 23 | 24 | 25 | location / { 26 | index index.php index.html index.htm; 27 | } 28 | 29 | location ~ \.php$ { 30 | fastcgi_pass unix:/var/run/php-fpm/php-fpm.sock; 31 | fastcgi_index index.php; 32 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 33 | include fastcgi_params; 34 | fastcgi_param PHP_VALUE " 35 | max_execution_time = 300 36 | memory_limit = 128M 37 | post_max_size = 16M 38 | upload_max_filesize = 2M 39 | max_input_time = 300 40 | max_input_vars = 10000 41 | date.timezone = {{ TIMEZONE_OS }} 42 | always_populate_raw_post_data = -1 43 | "; 44 | fastcgi_buffers 8 256k; 45 | fastcgi_buffer_size 128k; 46 | fastcgi_intercept_errors on; 47 | fastcgi_busy_buffers_size 256k; 48 | fastcgi_temp_file_write_size 256k; 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/08_zabbix/02_nginx-php-fpm/templates/zabbix.conf.j2: -------------------------------------------------------------------------------- 1 | #http://server_ip:8080/zabbix 2 | 3 | server { 4 | listen 8080; 5 | #server_name {{ HOST_NAME_WEB_VIP }}.{{ FAKE_DOMAIN }}; 6 | 7 | ########## example ########## 8 | # server_name zabbix01.otus; 9 | #If the client can resolve the name zabbix01.otus, server_name can be 10 | #uncommented. In that case, however, nginx will serve this config only 11 | #when the name and port in the client request match exactly: 12 | #http://zabbix01.otus:8080/zabbix/ 13 | ############################ 14 | 15 | # listen 80; 16 | # server_name {{ HOST_NAME_WEB_VIP }}.{{ FAKE_DOMAIN }}; 17 | 18 | root /usr/share; 19 | set $root_path /usr/share; 20 | 21 | access_log /var/log/nginx/zabbix.access.log; 22 | error_log /var/log/nginx/zabbix.error.log; 23 | 24 | 25 | location / { 26 | index index.php index.html index.htm; 27 | } 28 | 29 | location ~ \.php$ { 30 | fastcgi_pass unix:/var/run/php-fpm/php-fpm.sock; 31 | fastcgi_index index.php; 32 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 33 | include fastcgi_params; 34 | fastcgi_param PHP_VALUE " 35 | max_execution_time = 300 36 | memory_limit = 128M 37 | post_max_size = 16M 38 | upload_max_filesize = 2M 39 | max_input_time = 300 40 | max_input_vars = 10000 41 | date.timezone = {{ TIMEZONE_OS }} 42 | always_populate_raw_post_data = -1 43 | "; 44 | fastcgi_buffers 8 256k; 45 | fastcgi_buffer_size 128k; 46 | fastcgi_intercept_errors on; 47 | fastcgi_busy_buffers_size 256k; 48 | fastcgi_temp_file_write_size 256k; 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /provisioning/roles/09_mamonsu/01_mamonsu_build_and_create_repo/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install required packages 3 | yum: 4 | name: 5 | - git 6 | - make 7 | - rpm-build 8 | - python2-devel 9 | - python-setuptools 10 | - redhat-lsb-core 11 | - rpmdevtools 12 | - rpm-build 13 | - createrepo 14 | - yum-utils 15 | state: latest 16 | 17 | - name: clone mamonsu repo 18 | git: 19 | repo: https://github.com/postgrespro/mamonsu.git 20 | dest: /root/mamonsu 21 | 22 | - name: build the mamonsu rpm package 23 | make: 24 | chdir: /root/mamonsu 25 | target: rpm 26 | 27 | #- name: clone repo and build the mamonsu rpm package 28 | # shell: | 29 | # cd /root && \ 30 | # git clone https://github.com/postgrespro/mamonsu.git && \ 31 | # cd mamonsu && \ 32 | # make rpm 33 | 34 | - name: create directory for rpm mamonsu repository 35 | file: 36 | path: /usr/share/nginx/html/repo 37 | state: directory 38 | mode: '0755' 39 | 40 | - name: copy rpm-file to the repo directory 41 | copy: 42 | src: /root/mamonsu/mamonsu-2.4.1-1.el7.noarch.rpm 43 | dest: /usr/share/nginx/html/repo/mamonsu-2.4.1-1.el7.noarch.rpm 44 | remote_src: yes 45 | 46 | - name: put template mamonsu.conf for nginx 47 | template: 48 | src: mamonsu.conf.j2 49 | dest: /etc/nginx/conf.d/mamonsu.conf 50 | owner: root 51 | group: root 52 | mode: '0644' 53 | 54 | - name: createrepo mamonsu 55 | shell: createrepo /usr/share/nginx/html/repo/ 56 | 57 | - name: nginx restart 58 | systemd: 59 | name: nginx 60 | state: restarted 61 | enabled: yes 62 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/09_mamonsu/01_mamonsu_build_and_create_repo/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install required packages 3 | yum: 4 | name: 5 | - git 6 | - make 7 | - rpm-build 8 | - python2-devel 9 | - python-setuptools 10 | - redhat-lsb-core 11 | - rpmdevtools 12 | - rpm-build 13 | - createrepo 14 | - yum-utils 15 | state: latest 16 | 17 | - name: clone mamonsu repo 18 | git: 19 | repo: https://github.com/postgrespro/mamonsu.git 20 | dest: /root/mamonsu 21 | 22 |
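# 'make rpm' in the cloned source tree produces mamonsu-2.4.1-1.el7.noarch.rpm, the exact filename the copy task below depends on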
- name: build the mamonsu rpm package 23 | make: 24 | chdir: /root/mamonsu 25 | target: rpm 26 | 27 | #- name: clone repo and build the mamonsu rpm package 28 | # shell: | 29 | # cd /root && \ 30 | # git clone https://github.com/postgrespro/mamonsu.git && \ 31 | # cd mamonsu && \ 32 | # make rpm 33 | 34 | - name: create directory for rpm mamonsu repository 35 | file: 36 | path: /usr/share/nginx/html/repo 37 | state: directory 38 | mode: '0755' 39 | 40 | - name: copy rpm-file to the repo directory 41 | copy: 42 | src: /root/mamonsu/mamonsu-2.4.1-1.el7.noarch.rpm 43 | dest: /usr/share/nginx/html/repo/mamonsu-2.4.1-1.el7.noarch.rpm 44 | remote_src: yes 45 | 46 | - name: put template mamonsu.conf for nginx 47 | template: 48 | src: mamonsu.conf.j2 49 | dest: /etc/nginx/conf.d/mamonsu.conf 50 | owner: root 51 | group: root 52 | mode: '0644' 53 | 54 | - name: createrepo mamonsu 55 | shell: createrepo /usr/share/nginx/html/repo/ 56 | 57 | - name: nginx restart 58 | systemd: 59 | name: nginx 60 | state: restarted 61 | enabled: yes 62 | -------------------------------------------------------------------------------- /provisioning/roles/10_pacemaker/02_pacemaker_create_cluster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: pacemaker create active-passive cluster from three resources (zabbix-server, mamonsu, VIP) 3 | shell: | 4 | pcs cluster auth {{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }} {{ HOST_NAME_WEB02 }}.{{ FAKE_DOMAIN }} -u hacluster -p {{ PASS_HACLUSTER_USER }} --force 5 | pcs cluster setup --force --name cl-zabbix {{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }} {{ HOST_NAME_WEB02 }}.{{ FAKE_DOMAIN }} 6 | pcs cluster start --all 7 | pcs property set no-quorum-policy=ignore 8 | pcs property set stonith-enabled=false 9 | pcs resource create cluster_vip ocf:heartbeat:IPaddr2 ip="{{ IP_ZAB_CLUSTER }}" cidr_netmask="24" nic="{{ IP_ZAB_CLUSTER_NIC }}" op monitor interval="4s" 10 | pcs resource create zabbix_server systemd:zabbix-server op monitor interval=10s 11 | pcs resource create mamonsu2 systemd:mamonsu2 op monitor interval=30s start-delay=10s 12 | pcs constraint colocation add zabbix_server cluster_vip INFINITY 13 | pcs constraint colocation set mamonsu2 sequential=false set zabbix_server cluster_vip 14 | pcs constraint order cluster_vip then zabbix_server 15 | pcs constraint order zabbix_server then mamonsu2 16 | notify: 17 | - corosync enable 18 | - pacemaker enable 19 | 20 | - name: corosync enable 21 | systemd: 22 | name: corosync 23 | state: started 24 | enabled: yes 25 | delegate_to: "{{ item }}" 26 | with_items: 27 | - "{{ groups['web2'] }}" 28 | 29 | - name: pacemaker enable 30 | systemd: 31 | name: pacemaker 32 | state: started 33 | enabled: yes 34 | delegate_to: "{{ item }}" 35 | with_items: 36 | - "{{ groups['web2'] }}" 37 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/10_pacemaker/02_pacemaker_create_cluster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: pacemaker create active-passive cluster from three resources (zabbix-server, mamonsu, VIP) 3 | shell: | 4 | pcs cluster auth {{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }} {{ HOST_NAME_WEB02 }}.{{ FAKE_DOMAIN }} -u hacluster -p {{ PASS_HACLUSTER_USER }} --force 5 | pcs cluster setup --force --name cl-zabbix {{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }} {{ HOST_NAME_WEB02 }}.{{ FAKE_DOMAIN }} 6 | pcs cluster start --all 7 | pcs property set 
no-quorum-policy=ignore 8 | pcs property set stonith-enabled=false 9 | pcs resource create cluster_vip ocf:heartbeat:IPaddr2 ip="{{ IP_ZAB_CLUSTER }}" cidr_netmask="24" nic="{{ IP_ZAB_CLUSTER_NIC }}" op monitor interval="4s" 10 | pcs resource create zabbix_server systemd:zabbix-server op monitor interval=10s 11 | pcs resource create mamonsu2 systemd:mamonsu2 op monitor interval=30s start-delay=10s 12 | pcs constraint colocation add zabbix_server cluster_vip INFINITY 13 | pcs constraint colocation set mamonsu2 sequential=false set zabbix_server cluster_vip 14 | pcs constraint order cluster_vip then zabbix_server 15 | pcs constraint order zabbix_server then mamonsu2 16 | notify: 17 | - corosync enable 18 | - pacemaker enable 19 | 20 | - name: corosync enable 21 | systemd: 22 | name: corosync 23 | state: started 24 | enabled: yes 25 | delegate_to: "{{ item }}" 26 | with_items: 27 | - "{{ groups['web2'] }}" 28 | 29 | - name: pacemaker enable 30 | systemd: 31 | name: pacemaker 32 | state: started 33 | enabled: yes 34 | delegate_to: "{{ item }}" 35 | with_items: 36 | - "{{ groups['web2'] }}" 37 | -------------------------------------------------------------------------------- /provisioning/roles/08_zabbix/02_nginx-php-fpm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install nginx and php-fpm 3 | yum: 4 | name: 5 | - nginx 6 | - php, php-fpm, php-pgsql, php-pear, php-cgi, php-common, php-ldap, php-mbstring, php-snmp, php-gd, php-xml, php-gettext, php-bcmath 7 | state: latest 8 | 9 | - name: change ownership of the php-session and zabbix-web directories 10 | file: 11 | path: "{{ item }}" 12 | state: directory 13 | mode: '0755' 14 | owner: nginx 15 | group: nginx 16 | #recurse: yes 17 | with_items: 18 | - /var/lib/php/session 19 | - /etc/zabbix/web 20 | 21 | - name: set timezone in php.ini 22 | lineinfile: 23 | path: /etc/php.ini 24 | regexp: '^(;[d]|[d])ate\.timezone\s=($|\s\"[a-zA-Z]*/[a-zA-Z]*\")' 25 | backrefs: yes 26 | line: date.timezone = "{{ TIMEZONE_OS }}" 27 | 28 | - name: editing php-fpm.d/www.conf 29 | shell: | 30 | sed -i 's/;catch_workers_output = yes/catch_workers_output = yes/g' /etc/php-fpm.d/www.conf 31 | sed -i 's/user = apache/user = nginx/g' /etc/php-fpm.d/www.conf 32 | sed -i 's/group = apache/group = nginx/g' /etc/php-fpm.d/www.conf 33 | sed -i 's/listen = 127.0.0.1:9000/listen = \/run\/php-fpm\/php-fpm.sock/g' /etc/php-fpm.d/www.conf 34 | sed -i 's/;listen.owner = nobody/listen.owner = nginx/g' /etc/php-fpm.d/www.conf 35 | sed -i 's/;listen.group = nobody/listen.group = nginx/g' /etc/php-fpm.d/www.conf 36 | sed -i 's/;listen.mode = 0660/listen.mode = 0660/g' /etc/php-fpm.d/www.conf 37 | notify: 38 | - php-fpm restart 39 | 40 | - name: put template zabbix.conf for nginx 41 | template: 42 | src: zabbix.conf.j2 43 | dest: /etc/nginx/conf.d/zabbix.conf 44 | owner: root 45 | group: root 46 | mode: '0644' 47 | notify: 48 | - nginx restart 49 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/08_zabbix/02_nginx-php-fpm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install nginx and php-fpm 3 | yum: 4 | name: 5 | - nginx 6 | - php, php-fpm, php-pgsql, php-pear, php-cgi, php-common, php-ldap, php-mbstring, php-snmp, php-gd, php-xml, php-gettext, php-bcmath 7 | state: latest 8 | 9 | - name: change ownership of the php-session and zabbix-web directories 10 | file: 11 | path: "{{ item }}" 12 | state: directory 13 
| mode: '0755' 14 | owner: nginx 15 | group: nginx 16 | #recurse: yes 17 | with_items: 18 | - /var/lib/php/session 19 | - /etc/zabbix/web 20 | 21 | - name: set timezone in php.ini 22 | lineinfile: 23 | path: /etc/php.ini 24 | regexp: '^(;[d]|[d])ate\.timezone\s=($|\s\"[a-zA-Z]*/[a-zA-Z]*\")' 25 | backrefs: yes 26 | line: date.timezone = "{{ TIMEZONE_OS }}" 27 | 28 | - name: editing php-fpm.d/www.conf 29 | shell: | 30 | sed -i 's/;catch_workers_output = yes/catch_workers_output = yes/g' /etc/php-fpm.d/www.conf 31 | sed -i 's/user = apache/user = nginx/g' /etc/php-fpm.d/www.conf 32 | sed -i 's/group = apache/group = nginx/g' /etc/php-fpm.d/www.conf 33 | sed -i 's/listen = 127.0.0.1:9000/listen = \/run\/php-fpm\/php-fpm.sock/g' /etc/php-fpm.d/www.conf 34 | sed -i 's/;listen.owner = nobody/listen.owner = nginx/g' /etc/php-fpm.d/www.conf 35 | sed -i 's/;listen.group = nobody/listen.group = nginx/g' /etc/php-fpm.d/www.conf 36 | sed -i 's/;listen.mode = 0660/listen.mode = 0660/g' /etc/php-fpm.d/www.conf 37 | notify: 38 | - php-fpm restart 39 | 40 | - name: put template zabbix.conf for nginx 41 | template: 42 | src: zabbix.conf.j2 43 | dest: /etc/nginx/conf.d/zabbix.conf 44 | owner: root 45 | group: root 46 | mode: '0644' 47 | notify: 48 | - nginx restart 49 | -------------------------------------------------------------------------------- /tests/tank/04_web01.md: -------------------------------------------------------------------------------- 1 | # Testing with more CPU cores and RAM on the web servers 2 | 3 | The tests were aimed at the VIP address of the http balancers, so traffic was spread across the two already optimized web servers. 4 | 5 | [vip-manager](https://github.com/cybertec-postgresql/vip-manager) was brought in to move the patroni cluster VIP address automatically - this software connects to the DCS and monitors the state of the current cluster leader. odyssey and patroni were reconfigured accordingly. 6 | 7 | In every test, traffic followed the route 8 | 9 | ```yandex.tank > haproxy > 2 x web > odyssey > vip-manager+postgresql``` 10 | 11 | The odyssey connection pool ran in session mode. 12 | 13 | The test parameters stayed the same - ramping up to 200 rps over 200 seconds 14 | 15 | ```yaml 16 | schedule: line(1, 200, 200s) 17 | ``` 18 | 19 | ## Tests 20 | 21 | ### web - 2 CPU cores, 6 GB RAM 22 | 23 | the autostop module fired\ 24 | there was one deadlock at the end of the test\ 25 | [https://overload.yandex.net/232144](https://overload.yandex.net/232144) 26 | 27 | ![04_web01.png](files/04_web01.png) 28 | 29 | ### web - 4 CPU cores, 7 GB RAM 30 | 31 | [https://overload.yandex.net/232208](https://overload.yandex.net/232208) 32 | 33 | ![04_web02.png](files/04_web02.png) 34 | 35 | ## Result 36 | 37 | Since the frontend (the web servers plus zabbix-server, relative to the DB) could now run faster, the php-fpm processes no longer hold DB connections open for lack of CPU resources. As a result, the number of processed transactions grew by more than a thousand while the overall load on the DB dropped, and the DB can evidently sustain an even higher load. The complex screens clearly show the difference in the servers' hardware usage and in DB performance. 
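The autostop mentioned above is driven by the criterion configured in tests/tank/yandextank/load.yaml (shown earlier in this listing); the relevant fragment:

```yaml
autostop:
  autostop:
    - net(xx,25,10)   # abort the run once non-zero network error codes stay above the threshold for 10 seconds
```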
38 | -------------------------------------------------------------------------------- /provisioning/roles/06_pgsql-patroni/02_patroni-server/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: import postgresql repo key from a url 3 | rpm_key: 4 | key: https://download.postgresql.org/pub/repos/yum/RPM-GPG-KEY-PGDG-11 5 | state: present 6 | 7 | - name: install postgresql vanilla repo 8 | yum: 9 | name: https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm 10 | state: installed 11 | 12 | - name: install PostgreSQL, gcc and python packages 13 | yum: 14 | name: 15 | - postgresql11-server 16 | - postgresql11 17 | - postgresql11-contrib 18 | - pg_top11 19 | - python2-psycopg2 20 | - gcc 21 | - python-devel 22 | - python-setuptools 23 | - python2-pip 24 | state: latest 25 | 26 | - name: postgresql stopped 27 | systemd: 28 | name: postgresql-11 29 | state: stopped 30 | enabled: no 31 | 32 | - name: upgrade pip setuptools 33 | pip: 34 | name: setuptools, pip 35 | extra_args: --upgrade 36 | state: latest 37 | 38 | - name: install python-packages, patroni from pip 39 | pip: 40 | name: virtualenv, python-etcd, python-consul, patroni, psycopg2==2.7, urllib3 41 | extra_args: --upgrade 42 | state: latest 43 | 44 | - name: put patroni.yml template 45 | template: 46 | src: patroni.yml.j2 47 | dest: /etc/patroni.yml 48 | owner: root 49 | group: root 50 | mode: '0644' 51 | 52 | - name: copy patroni.service 53 | copy: 54 | src: patroni.service 55 | dest: /etc/systemd/system/patroni.service 56 | owner: root 57 | group: root 58 | mode: '0644' 59 | 60 | - name: force systemd to reread configs and patroni start 61 | systemd: 62 | daemon_reload: yes 63 | name: patroni 64 | state: restarted 65 | enabled: yes 66 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/06_pgsql-patroni/02_patroni-server/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: import postgresql repo key from a url 3 | rpm_key: 4 | key: https://download.postgresql.org/pub/repos/yum/RPM-GPG-KEY-PGDG-11 5 | state: present 6 | 7 | - name: install postgresql vanilla repo 8 | yum: 9 | name: https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm 10 | state: installed 11 | 12 | - name: install PostgreSQL, gcc and python packages 13 | yum: 14 | name: 15 | - postgresql11-server 16 | - postgresql11 17 | - postgresql11-contrib 18 | - pg_top11 19 | - python2-psycopg2 20 | - gcc 21 | - python-devel 22 | - python-setuptools 23 | - python2-pip 24 | state: latest 25 | 26 | - name: postgresql stopped 27 | systemd: 28 | name: postgresql-11 29 | state: stopped 30 | enabled: no 31 | 32 | - name: upgrade pip setuptools 33 | pip: 34 | name: setuptools, pip 35 | extra_args: --upgrade 36 | state: latest 37 | 38 | - name: install python-packages, patroni from pip 39 | pip: 40 | name: virtualenv, python-etcd, python-consul, patroni, psycopg2==2.7, urllib3 41 | extra_args: --upgrade 42 | state: latest 43 | 44 | - name: put patroni.yml template 45 | template: 46 | src: patroni.yml.j2 47 | dest: /etc/patroni.yml 48 | owner: root 49 | group: root 50 | mode: '0644' 51 | 52 | - name: copy patroni.service 53 | copy: 54 | src: patroni.service 55 | dest: /etc/systemd/system/patroni.service 56 | owner: root 57 | group: root 58 | mode: '0644' 59 | 60 | - name: force systemd to reread configs and 
patroni start 61 | systemd: 62 | daemon_reload: yes 63 | name: patroni 64 | state: restarted 65 | enabled: yes 66 | -------------------------------------------------------------------------------- /provisioning/roles/06_pgsql-patroni/04_vip-manager_install/templates/vip-manager.yml.j2: -------------------------------------------------------------------------------- 1 | # config for vip-manager by Cybertec Schönig & Schönig GmbH 2 | 3 | # time (in milliseconds) after which vip-manager wakes up and checks if it needs to register or release ip addresses. 4 | interval: 1000 5 | 6 | # the etcd or consul key which vip-manager will regularly poll. 7 | key: "/service/otus/leader" 8 | # if the value of the above key matches the NodeName (often the hostname of this host), vip-manager will try to add the virtual ip address to the interface specified in Iface 9 | nodename: "{{ ansible_hostname }}" 10 | 11 | ip: {{ HOST_IP_PG_VIP }} # the virtual ip address to manage 12 | mask: 24 # netmask for the virtual ip 13 | iface: {{ IP_VIPMANAGER_NIC }} #interface to which the virtual ip will be added 14 | 15 | # how the virtual ip should be managed. we currently support "ip addr add/remove" through shell commands or the Hetzner api 16 | hosting_type: basic # possible values: basic, hetzner . 17 | 18 | endpoint_type: consul # etcd or consul 19 | # a list that contains all endpoints to which etcd could talk. 20 | endpoints: 21 | - http://127.0.0.1:8500/v1/kv 22 | #- http://192.168.0.42:2379 23 | # A single list-item is also fine. 24 | # consul will always only use the first entry from this list. 25 | # For consul, you'll obviously need to change the port to 8500. Unless you're using a different one. Maybe you're a rebel and are running consul on port 2379? Just to confuse people? Why would you do that? Oh, I get it. 26 | 27 | #etcd_user: "patroni" 28 | #etcd_password: "Julian's secret password" 29 | # don't worry about parameter with a prefix that doesn't match the endpoint_type. You can write anything there, I won't even look at it. 30 | #consul_token: "Julian's secret token" 31 | consul_token: "{{ CONSUL_KEY }}" 32 | 33 | #how often things should be retried and how long to wait between retries. 
(currently only affects arpClient) 34 | retry_num: 2 35 | retry_after: 250 #in milliseconds 36 | -------------------------------------------------------------------------------- /provisioning/roles/09_mamonsu/02_mamonsu_install/files/biggest_tables.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from mamonsu.plugins.pgsql.plugin import PgsqlPlugin as Plugin 4 | from mamonsu.plugins.pgsql.pool import Pooler 5 | 6 | 7 | class BiggestTables(Plugin): 8 | 9 | # every 5 min 10 | Interval = 5 * 60 11 | # only 10 biggest tables 12 | Limit = 10 13 | 14 | def run(self, zbx): 15 | tables = [] 16 | for info_dbs in Pooler.query('select datname \ 17 | from pg_catalog.pg_database where datistemplate = false'): 18 | for info_sizes in Pooler.query("select n.nspname, c.relname, \ 19 | pg_catalog.pg_total_relation_size(c.oid) as size from \ 20 | pg_catalog.pg_class c left join pg_catalog.pg_namespace n \ 21 | on n.oid = c.relnamespace \ 22 | where c.relkind IN ('r','v','m','S','f','') \ 23 | order by size \ 24 | desc limit {0};".format(self.Limit), info_dbs[0]): 25 | table_name = '{0}.{1}.{2}'.format( 26 | info_dbs[0], info_sizes[0], info_sizes[1]) 27 | tables.append({'{#TABLE}': table_name}) 28 | zbx.send('pgsql.table.size[{0}]'.format( 29 | table_name), info_sizes[2]) 30 | zbx.send('pgsql.table.discovery[]', zbx.json({'data': tables})) 31 | 32 | def discovery_rules(self, template): 33 | rule = { 34 | 'name': 'Biggest table discovery', 35 | 'key': 'pgsql.table.discovery[]', 36 | 'filter': '{#TABLE}:.*' 37 | } 38 | items = [ 39 | {'key': 'pgsql.table.size[{#TABLE}]', 40 | 'name': 'Table {#TABLE}: size', 41 | 'units': Plugin.UNITS.bytes, 42 | 'value_type': Plugin.VALUE_TYPE.numeric_unsigned, 43 | 'delay': self.Interval}] 44 | return template.discovery_rule(rule=rule, items=items) 45 | -------------------------------------------------------------------------------- /provisioning/roles/09_mamonsu/02_mamonsu_install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add mamonsu repository 3 | yum_repository: 4 | name: mamonsu 5 | description: mamonsu 6 | baseurl: 7 | - http://{{ HOST_NAME_WEB02 }}.{{ FAKE_DOMAIN }}:8081/repo 8 | - http://{{ HOST_IP_WEB02 }}:8081/repo 9 | gpgcheck: no 10 | enabled: yes 11 | 12 | - name: install mamonsu from repo 13 | yum: 14 | name: mamonsu 15 | state: latest 16 | 17 | - name: move mamonsu init.d script 18 | copy: 19 | src: /etc/init.d/mamonsu 20 | remote_src: yes 21 | dest: /root/mamonsu_initd_bck 22 | 23 | - name: delete init.d script mamonsu 24 | file: 25 | path: /etc/init.d/mamonsu 26 | state: absent 27 | 28 | - name: copy logrotate mamonsu config 29 | copy: 30 | src: mamonsu_logrotate 31 | dest: /etc/logrotate.d/mamonsu 32 | owner: root 33 | group: root 34 | mode: '0644' 35 | 36 | - name: restart rsyslog and force systemd to reread service configuration 37 | systemd: 38 | name: rsyslog 39 | state: restarted 40 | daemon-reload: yes 41 | 42 | - name: put agent.conf template 43 | template: 44 | src: mamonsu_agent.conf.j2 45 | dest: /etc/mamonsu/agent.conf 46 | owner: root 47 | group: root 48 | mode: '0644' 49 | 50 | - name: copy plugin, zabbix-template and systemd-unit files 51 | copy: 52 | src: "{{ item.src }}" 53 | dest: "{{ item.dest }}" 54 | owner: root 55 | group: root 56 | mode: '0644' 57 | with_items: 58 | - { src: "biggest_tables.py", dest: "/etc/mamonsu/plugins/biggest_tables.py" } 59 | - { src: "zbx_export_template.xml",
dest: "/usr/share/mamonsu/template.xml" } 60 | - { src: "mamonsu2_sysconfig", dest: "/etc/sysconfig/mamonsu2" } 61 | - { src: "mamonsu2.service", dest: "/etc/systemd/system/mamonsu2.service" } 62 | 63 | - name: mamonsu2 stopped and disabled 64 | systemd: 65 | name: mamonsu2 66 | state: stopped 67 | daemon-reload: yes 68 | enabled: no 69 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/09_mamonsu/02_mamonsu_install/files/biggest_tables.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from mamonsu.plugins.pgsql.plugin import PgsqlPlugin as Plugin 4 | from mamonsu.plugins.pgsql.pool import Pooler 5 | 6 | 7 | class BiggestTables(Plugin): 8 | 9 | # every 5 min 10 | Interval = 5 * 60 11 | # only 10 biggest tables 12 | Limit = 10 13 | 14 | def run(self, zbx): 15 | tables = [] 16 | for info_dbs in Pooler.query('select datname \ 17 | from pg_catalog.pg_database where datistemplate = false'): 18 | for info_sizes in Pooler.query("select n.nspname, c.relname, \ 19 | pg_catalog.pg_total_relation_size(c.oid) as size from \ 20 | pg_catalog.pg_class c left join pg_catalog.pg_namespace n \ 21 | on n.oid = c.relnamespace \ 22 | where c.relkind IN ('r','v','m','S','f','') \ 23 | order by size \ 24 | desc limit {0};".format(self.Limit), info_dbs[0]): 25 | table_name = '{0}.{1}.{2}'.format( 26 | info_dbs[0], info_sizes[0], info_sizes[1]) 27 | tables.append({'{#TABLE}': table_name}) 28 | zbx.send('pgsql.table.size[{0}]'.format( 29 | table_name), info_sizes[2]) 30 | zbx.send('pgsql.table.discovery[]', zbx.json({'data': tables})) 31 | 32 | def discovery_rules(self, template): 33 | rule = { 34 | 'name': 'Biggest table discovery', 35 | 'key': 'pgsql.table.discovery[]', 36 | 'filter': '{#TABLE}:.*' 37 | } 38 | items = [ 39 | {'key': 'pgsql.table.size[{#TABLE}]', 40 | 'name': 'Table {#TABLE}: size', 41 | 'units': Plugin.UNITS.bytes, 42 | 'value_type': Plugin.VALUE_TYPE.numeric_unsigned, 43 | 'delay': self.Interval}] 44 | return template.discovery_rule(rule=rule, items=items) 45 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/06_pgsql-patroni/04_vip-manager_install/templates/vip-manager.yml.j2: -------------------------------------------------------------------------------- 1 | # config for vip-manager by Cybertec Schönig & Schönig GmbH 2 | 3 | # time (in milliseconds) after which vip-manager wakes up and checks if it needs to register or release ip addresses. 4 | interval: 1000 5 | 6 | # the etcd or consul key which vip-manager will regularly poll. 7 | key: "/service/otus/leader" 8 | # if the value of the above key matches the NodeName (often the hostname of this host), vip-manager will try to add the virtual ip address to the interface specified in Iface 9 | nodename: "{{ ansible_hostname }}" 10 | 11 | ip: {{ HOST_IP_PG_VIP }} # the virtual ip address to manage 12 | mask: 24 # netmask for the virtual ip 13 | iface: {{ IP_VIPMANAGER_NIC }} #interface to which the virtual ip will be added 14 | 15 | # how the virtual ip should be managed. we currently support "ip addr add/remove" through shell commands or the Hetzner api 16 | hosting_type: basic # possible values: basic, hetzner . 17 | 18 | endpoint_type: consul # etcd or consul 19 | # a list that contains all endpoints to which etcd could talk. 20 | endpoints: 21 | - http://127.0.0.1:8500/v1/kv 22 | #- http://192.168.0.42:2379 23 | # A single list-item is also fine. 
24 | # consul will always only use the first entry from this list. 25 | # For consul, you'll obviously need to change the port to 8500. Unless you're using a different one. Maybe you're a rebel and are running consul on port 2379? Just to confuse people? Why would you do that? Oh, I get it. 26 | 27 | #etcd_user: "patroni" 28 | #etcd_password: "Julian's secret password" 29 | # don't worry about parameter with a prefix that doesn't match the endpoint_type. You can write anything there, I won't even look at it. 30 | #consul_token: "Julian's secret token" 31 | consul_token: "{{ CONSUL_KEY }}" 32 | 33 | #how often things should be retried and how long to wait between retries. (currently only affects arpClient) 34 | retry_num: 2 35 | retry_after: 250 #in milliseconds 36 | -------------------------------------------------------------------------------- /provisioning/roles/06_pgsql-patroni/02_patroni-server/templates/patroni.yml.j2: -------------------------------------------------------------------------------- 1 | scope: {{ FAKE_DOMAIN }} 2 | name: {{ ansible_hostname }} 3 | 4 | restapi: 5 | listen: {{ ansible_eth1.ipv4.address }}:8008 6 | connect_address: {{ ansible_eth1.ipv4.address }}:8008 7 | 8 | consul: 9 | host: 127.0.0.1:8500 10 | scheme: http 11 | register_service: true 12 | 13 | bootstrap: 14 | dcs: 15 | ttl: 30 16 | loop_wait: 10 17 | retry_timeout: 10 18 | maximum_lag_on_failover: 1048576 19 | postgresql: 20 | use_pg_rewind: true 21 | 22 | initdb: 23 | - encoding: UTF8 24 | - data-checksums 25 | 26 | pg_hba: 27 | - host replication replicator 127.0.0.1/32 md5 28 | - host replication replicator {{ HOST_NAME_PG01 }} md5 29 | - host replication replicator {{ HOST_NAME_PG02 }} md5 30 | - host replication replicator {{ HOST_NAME_PG03 }} md5 31 | - host replication replicator {{ HOST_NAME_PG01 }}.{{ FAKE_DOMAIN }} md5 32 | - host replication replicator {{ HOST_NAME_PG02 }}.{{ FAKE_DOMAIN }} md5 33 | - host replication replicator {{ HOST_NAME_PG03 }}.{{ FAKE_DOMAIN }} md5 34 | - host all all {{ NET_POSTGRES_ALLOW }} md5 35 | 36 | users: 37 | admin: 38 | password: admin 39 | options: 40 | - createrole 41 | - createdb 42 | 43 | postgresql: 44 | listen: 0.0.0.0:5432 45 | bin_dir: /usr/pgsql-11/bin 46 | connect_address: {{ ansible_eth1.ipv4.address }}:5432 47 | data_dir: /var/lib/pgsql/11/data 48 | pgpass: /tmp/pgpass 49 | authentication: 50 | replication: 51 | username: replicator 52 | password: {{ PASS_POSTGRES_FOR_DB }} 53 | superuser: 54 | username: postgres 55 | password: {{ PASS_POSTGRES_FOR_DB }} 56 | parameters: 57 | unix_socket_directories: '.,/tmp' 58 | 59 | tags: 60 | nofailover: false 61 | noloadbalance: false 62 | clonefrom: false 63 | nosync: false 64 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/06_pgsql-patroni/02_patroni-server/templates/patroni.yml.j2: -------------------------------------------------------------------------------- 1 | scope: {{ FAKE_DOMAIN }} 2 | name: {{ ansible_hostname }} 3 | 4 | restapi: 5 | listen: {{ ansible_eth0.ipv4.address }}:8008 6 | connect_address: {{ ansible_eth0.ipv4.address }}:8008 7 | 8 | consul: 9 | host: 127.0.0.1:8500 10 | scheme: http 11 | register_service: true 12 | 13 | bootstrap: 14 | dcs: 15 | ttl: 30 16 | loop_wait: 10 17 | retry_timeout: 10 18 | maximum_lag_on_failover: 1048576 19 | postgresql: 20 | use_pg_rewind: true 21 | 22 | initdb: 23 | - encoding: UTF8 24 | - data-checksums 25 | 26 | pg_hba: 27 | - host replication replicator 127.0.0.1/32 md5 28 | - host 
replication replicator {{ HOST_NAME_PG01 }} md5 29 | - host replication replicator {{ HOST_NAME_PG02 }} md5 30 | - host replication replicator {{ HOST_NAME_PG03 }} md5 31 | - host replication replicator {{ HOST_NAME_PG01 }}.{{ FAKE_DOMAIN }} md5 32 | - host replication replicator {{ HOST_NAME_PG02 }}.{{ FAKE_DOMAIN }} md5 33 | - host replication replicator {{ HOST_NAME_PG03 }}.{{ FAKE_DOMAIN }} md5 34 | - host all all {{ NET_POSTGRES_ALLOW }} md5 35 | 36 | users: 37 | admin: 38 | password: admin 39 | options: 40 | - createrole 41 | - createdb 42 | 43 | postgresql: 44 | listen: 0.0.0.0:5432 45 | bin_dir: /usr/pgsql-11/bin 46 | connect_address: {{ ansible_eth0.ipv4.address }}:5432 47 | data_dir: /var/lib/pgsql/11/data 48 | pgpass: /tmp/pgpass 49 | authentication: 50 | replication: 51 | username: replicator 52 | password: {{ PASS_POSTGRES_FOR_DB }} 53 | superuser: 54 | username: postgres 55 | password: {{ PASS_POSTGRES_FOR_DB }} 56 | parameters: 57 | unix_socket_directories: '.,/tmp' 58 | 59 | tags: 60 | nofailover: false 61 | noloadbalance: false 62 | clonefrom: false 63 | nosync: false 64 | -------------------------------------------------------------------------------- /provisioning/roles/04_pgconpool/02_build_vip-manager_rpm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install fpm with gem 3 | gem: 4 | name: fpm 5 | user_install: no 6 | state: latest 7 | 8 | - name: install ffi version 1.11.2 with gem 9 | gem: 10 | name: ffi 11 | version: 1.11.2 12 | user_install: no 13 | state: present 14 | 15 | - name: create a symbolic link for fpm 16 | file: 17 | src: /usr/local/bin/fpm 18 | dest: /usr/sbin/fpm 19 | owner: root 20 | group: root 21 | state: link 22 | 23 | - name: create go directory in home root user 24 | file: 25 | path: /root/go 26 | state: directory 27 | owner: root 28 | group: root 29 | mode: '0755' 30 | 31 | - name: modify .bash_profile 32 | blockinfile: 33 | dest: /root/.bash_profile 34 | block: | 35 | export GOPATH=$HOME/go 36 | # export GOBIN=$GOPATH/bin 37 | # export PATH=$GOBIN:$PATH:/root/go/bin 38 | marker: "# {mark} ANSIBLE MANAGED BLOCK - changes for golang" 39 | insertafter: EOF 40 | create: yes 41 | 42 | - name: download vip-manager 43 | shell: go get github.com/cybertec-postgresql/vip-manager 44 | # environment: 45 | # GOPATH: $HOME/go 46 | 47 | - name: assembly of the rpm package vip-manager 48 | make: 49 | chdir: /root/go/src/github.com/cybertec-postgresql/vip-manager 50 | target: package-rpm 51 | 52 | - name: find vip-manager rpm 53 | find: 54 | paths: /root/go/src/github.com/cybertec-postgresql/vip-manager 55 | recurse: yes 56 | patterns: 'vip-manager[-_][^debug].*(.rpm)' 57 | use_regex: yes 58 | register: vip_manager_rpm 59 | 60 | - name: copy rpm-file to the repo directory 61 | copy: 62 | src: "{{ item.path }}" 63 | dest: /usr/share/nginx/html/repo 64 | remote_src: yes 65 | with_items: 66 | "{{ vip_manager_rpm.files }}" 67 | 68 | - name: createrepo otus-odyssey-vip-manager 69 | shell: createrepo /usr/share/nginx/html/repo/ 70 | 71 | - name: nginx restart 72 | systemd: 73 | name: nginx 74 | state: restarted 75 | enabled: yes 76 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/04_pgconpool/02_build_vip-manager_rpm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install fpm with gem 3 | gem: 4 | name: fpm 5 | user_install: no 6 | state: latest 7 | 8 | - name: install 
ffi version 1.11.2 with gem 9 | gem: 10 | name: ffi 11 | version: 1.11.2 12 | user_install: no 13 | state: present 14 | 15 | - name: create a symbolic link for fpm 16 | file: 17 | src: /usr/local/bin/fpm 18 | dest: /usr/sbin/fpm 19 | owner: root 20 | group: root 21 | state: link 22 | 23 | - name: create go directory in home root user 24 | file: 25 | path: /root/go 26 | state: directory 27 | owner: root 28 | group: root 29 | mode: '0755' 30 | 31 | - name: modify .bash_profile 32 | blockinfile: 33 | dest: /root/.bash_profile 34 | block: | 35 | export GOPATH=$HOME/go 36 | # export GOBIN=$GOPATH/bin 37 | # export PATH=$GOBIN:$PATH:/root/go/bin 38 | marker: "# {mark} ANSIBLE MANAGED BLOCK - changes for golang" 39 | insertafter: EOF 40 | create: yes 41 | 42 | - name: download vip-manager 43 | shell: go get github.com/cybertec-postgresql/vip-manager 44 | # environment: 45 | # GOPATH: $HOME/go 46 | 47 | - name: assembly of the rpm package vip-manager 48 | make: 49 | chdir: /root/go/src/github.com/cybertec-postgresql/vip-manager 50 | target: package-rpm 51 | 52 | - name: find vip-manager rpm 53 | find: 54 | paths: /root/go/src/github.com/cybertec-postgresql/vip-manager 55 | recurse: yes 56 | patterns: 'vip-manager[-_][^debug].*(.rpm)' 57 | use_regex: yes 58 | register: vip_manager_rpm 59 | 60 | - name: copy rpm-file to the repo directory 61 | copy: 62 | src: "{{ item.path }}" 63 | dest: /usr/share/nginx/html/repo 64 | remote_src: yes 65 | with_items: 66 | "{{ vip_manager_rpm.files }}" 67 | 68 | - name: createrepo otus-odyssey-vip-manager 69 | shell: createrepo /usr/share/nginx/html/repo/ 70 | 71 | - name: nginx restart 72 | systemd: 73 | name: nginx 74 | state: restarted 75 | enabled: yes 76 | -------------------------------------------------------------------------------- /provisioning/roles/04_pgconpool/06_keepalived/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install keepalived 3 | yum: 4 | name: 5 | - keepalived 6 | state: latest 7 | 8 | - name: edit sysctl.conf 9 | sysctl: 10 | name: "{{ item.key }}" 11 | value: "{{ item.value }}" 12 | sysctl_set: yes 13 | state: present 14 | reload: yes 15 | with_items: 16 | - { key: "net.core.rmem_max", value: "16777216" } 17 | - { key: "net.ipv4.tcp_rmem", value: "4096 87380 16777216" } 18 | - { key: "net.core.wmem_max", value: "16777216" } 19 | - { key: "net.ipv4.tcp_wmem", value: "4096 16384 16777216" } 20 | - { key: "net.ipv4.tcp_fin_timeout", value: "20" } 21 | - { key: "net.ipv4.tcp_tw_reuse", value: "1" } 22 | - { key: "net.core.netdev_max_backlog", value: "10000" } 23 | - { key: "net.ipv4.ip_local_port_range", value: "15000 65001" } 24 | - { key: "net.ipv4.ip_nonlocal_bind", value: "1" } 25 | - { key: "net.ipv4.ip_forward", value: "1" } 26 | - { key: "net.ipv4.conf.all.forwarding", value: "1" } 27 | 28 | - name: copy keepalived.service 29 | copy: 30 | src: keepalived.service 31 | dest: /usr/lib/systemd/system/keepalived.service 32 | owner: root 33 | group: root 34 | mode: '0644' 35 | 36 | - name: put keepalived.conf template to master 37 | template: 38 | src: master_keepalived.conf.j2 39 | dest: /etc/keepalived/keepalived.conf 40 | owner: root 41 | group: root 42 | mode: '0644' 43 | notify: 44 | - keepalived restart 45 | when: "ansible_facts['hostname'] == HOST_NAME_PG_CON_POOL_01|string" 46 | #when: ansible_facts['hostname'] == "{{ HOST_NAME_PG_CON_POOL_01 }}" 47 | 48 | - name: put keepalived.conf template to slave 49 | template: 50 | src: slave_keepalived.conf.j2 51 | dest: 
/etc/keepalived/keepalived.conf 52 | owner: root 53 | group: root 54 | mode: '0644' 55 | notify: 56 | - keepalived restart 57 | when: "ansible_facts['hostname'] == HOST_NAME_PG_CON_POOL_02|string" 58 | #when: ansible_facts['hostname'] == "{{ HOST_NAME_PG_CON_POOL_02 }}" 59 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/09_mamonsu/02_mamonsu_install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add mamonsu repository 3 | yum_repository: 4 | name: mamonsu 5 | description: mamonsu 6 | baseurl: 7 | - http://{{ HOST_NAME_WEB02 }}.{{ FAKE_DOMAIN }}:8081/repo 8 | - http://{{ HOST_IP_WEB02 }}:8081/repo 9 | gpgcheck: no 10 | enabled: yes 11 | 12 | - name: install mamonsu from repo 13 | yum: 14 | name: mamonsu 15 | state: latest 16 | 17 | - name: move mamonsu init.d script 18 | copy: 19 | src: /etc/init.d/mamonsu 20 | remote_src: yes 21 | dest: /root/mamonsu_initd_bck 22 | 23 | - name: delete init.d script mamonsu 24 | file: 25 | path: /etc/init.d/mamonsu 26 | state: absent 27 | 28 | - name: copy logrotate mamonsu config 29 | copy: 30 | src: mamonsu_logrotate 31 | dest: /etc/logrotate.d/mamonsu 32 | owner: root 33 | group: root 34 | mode: '0644' 35 | 36 | - name: restart rsyslog and force systemd to reread service configuration 37 | systemd: 38 | name: rsyslog 39 | state: restarted 40 | daemon-reload: yes 41 | 42 | - name: put agent.conf template 43 | template: 44 | src: mamonsu_agent.conf.j2 45 | dest: /etc/mamonsu/agent.conf 46 | owner: root 47 | group: root 48 | mode: '0644' 49 | tags: 50 | - change_name_HOST_NAME_PG_CON_POOL_VIP 51 | 52 | - name: copy plugin, zabbix-template and systemd-unit files 53 | copy: 54 | src: "{{ item.src }}" 55 | dest: "{{ item.dest }}" 56 | owner: root 57 | group: root 58 | mode: '0644' 59 | with_items: 60 | - { src: "biggest_tables.py", dest: "/etc/mamonsu/plugins/biggest_tables.py" } 61 | - { src: "zbx_export_template.xml", dest: "/usr/share/mamonsu/template.xml" } 62 | - { src: "mamonsu2_sysconfig", dest: "/etc/sysconfig/mamonsu2" } 63 | - { src: "mamonsu2.service", dest: "/etc/systemd/system/mamonsu2.service" } 64 | 65 | - name: mamonsu2 stopped and disabled 66 | systemd: 67 | name: mamonsu2 68 | state: stopped 69 | daemon-reload: yes 70 | enabled: no 71 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/04_pgconpool/06_keepalived/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install keepalived 3 | yum: 4 | name: 5 | - keepalived 6 | state: latest 7 | 8 | - name: edit sysctl.conf 9 | sysctl: 10 | name: "{{ item.key }}" 11 | value: "{{ item.value }}" 12 | sysctl_set: yes 13 | state: present 14 | reload: yes 15 | with_items: 16 | - { key: "net.core.rmem_max", value: "16777216" } 17 | - { key: "net.ipv4.tcp_rmem", value: "4096 87380 16777216" } 18 | - { key: "net.core.wmem_max", value: "16777216" } 19 | - { key: "net.ipv4.tcp_wmem", value: "4096 16384 16777216" } 20 | - { key: "net.ipv4.tcp_fin_timeout", value: "20" } 21 | - { key: "net.ipv4.tcp_tw_reuse", value: "1" } 22 | - { key: "net.core.netdev_max_backlog", value: "10000" } 23 | - { key: "net.ipv4.ip_local_port_range", value: "15000 65001" } 24 | - { key: "net.ipv4.ip_nonlocal_bind", value: "1" } 25 | - { key: "net.ipv4.ip_forward", value: "1" } 26 | - { key: "net.ipv4.conf.all.forwarding", value: "1" } 27 | 28 | - name: copy keepalived.service
29 | copy: 30 | src: keepalived.service 31 | dest: /usr/lib/systemd/system/keepalived.service 32 | owner: root 33 | group: root 34 | mode: '0644' 35 | 36 | - name: put keepalived.conf template to master 37 | template: 38 | src: master_keepalived.conf.j2 39 | dest: /etc/keepalived/keepalived.conf 40 | owner: root 41 | group: root 42 | mode: '0644' 43 | notify: 44 | - keepalived restart 45 | when: "ansible_facts['hostname'] == HOST_NAME_PG_CON_POOL_01|string" 46 | #when: ansible_facts['hostname'] == "{{ HOST_NAME_PG_CON_POOL_01 }}" 47 | 48 | - name: put keepalived.conf template to slave 49 | template: 50 | src: slave_keepalived.conf.j2 51 | dest: /etc/keepalived/keepalived.conf 52 | owner: root 53 | group: root 54 | mode: '0644' 55 | notify: 56 | - keepalived restart 57 | when: "ansible_facts['hostname'] == HOST_NAME_PG_CON_POOL_02|string" 58 | #when: ansible_facts['hostname'] == "{{ HOST_NAME_PG_CON_POOL_02 }}" 59 | -------------------------------------------------------------------------------- /provisioning/roles/04_pgconpool/04_build_odyssey_rpm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create rpmbuild directory 3 | file: 4 | path: /root/rpmbuild/SOURCES/ 5 | state: directory 6 | owner: root 7 | group: root 8 | mode: '0755' 9 | 10 | - name: clone odyssey repo for rpm 11 | git: 12 | repo: https://github.com/yandex/odyssey.git 13 | dest: /root/rpmbuild/SOURCES/odyssey 14 | 15 | - name: copy FindPostgreSQL.cmake for rpmbuild 16 | copy: 17 | src: FindPostgreSQL_rpmbuild.cmake 18 | dest: /root/rpmbuild/SOURCES/odyssey/cmake/FindPostgreSQL.cmake 19 | owner: root 20 | group: root 21 | mode: '0644' 22 | 23 | - name: compress directory odyssey into odyssey.tar.gz 24 | archive: 25 | path: /root/rpmbuild/SOURCES/odyssey 26 | dest: /root/rpmbuild/SOURCES/odyssey.tar.gz 27 | format: gz 28 | force_archive: true 29 | 30 | - name: dependency check and assembly rpm package odyssey 31 | shell: | 32 | yum-builddep /root/rpmbuild/SOURCES/odyssey/scripts/centos/odyssey.spec 33 | rpmbuild -bb /root/rpmbuild/SOURCES/odyssey/scripts/centos/odyssey.spec 34 | 35 | - name: find odyssey rpm 36 | find: 37 | paths: /root/rpmbuild/RPMS/x86_64 38 | recurse: yes 39 | patterns: 'odyssey-[^debuginfo].*(\.el7\.x86_64\.rpm)' 40 | use_regex: yes 41 | register: odyssey_rpm 42 | 43 | #- name: debug registered var 44 | # debug: 45 | # var: odyssey_rpm 46 | # 47 | #- name: test shell for registered var 48 | # shell: echo {{ odyssey_rpm.files }} > /tmp/testfie 49 | # 50 | #- name: copy test 51 | # copy: 52 | # src: "{{ item.path }}" 53 | # dest: /tmp 54 | # remote_src: yes 55 | # with_items: 56 | # "{{ odyssey_rpm.files }}" 57 | 58 | - name: copy rpm-file to the repo directory 59 | copy: 60 | src: "{{ item.path }}" 61 | dest: /usr/share/nginx/html/repo 62 | remote_src: yes 63 | with_items: 64 | "{{ odyssey_rpm.files }}" 65 | 66 | - name: createrepo otus-odyssey-vip-manager 67 | shell: createrepo /usr/share/nginx/html/repo/ 68 | 69 | - name: nginx restart 70 | systemd: 71 | name: nginx 72 | state: restarted 73 | enabled: yes 74 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/04_pgconpool/04_build_odyssey_rpm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create rpmbuild directory 3 | file: 4 | path: /root/rpmbuild/SOURCES/ 5 | state: directory 6 | owner: root 7 | group: root 8 | mode: '0755' 9 | 10 | - name: clone odyssey repo 
for rpm 11 | git: 12 | repo: https://github.com/yandex/odyssey.git 13 | dest: /root/rpmbuild/SOURCES/odyssey 14 | 15 | - name: copy FindPostgreSQL.cmake for rpmbuild 16 | copy: 17 | src: FindPostgreSQL_rpmbuild.cmake 18 | dest: /root/rpmbuild/SOURCES/odyssey/cmake/FindPostgreSQL.cmake 19 | owner: root 20 | group: root 21 | mode: '0644' 22 | 23 | - name: compress directory odyssey into odyssey.tar.gz 24 | archive: 25 | path: /root/rpmbuild/SOURCES/odyssey 26 | dest: /root/rpmbuild/SOURCES/odyssey.tar.gz 27 | format: gz 28 | force_archive: true 29 | 30 | - name: dependency check and assembly rpm package odyssey 31 | shell: | 32 | yum-builddep /root/rpmbuild/SOURCES/odyssey/scripts/centos/odyssey.spec 33 | rpmbuild -bb /root/rpmbuild/SOURCES/odyssey/scripts/centos/odyssey.spec 34 | 35 | - name: find odyssey rpm 36 | find: 37 | paths: /root/rpmbuild/RPMS/x86_64 38 | recurse: yes 39 | patterns: 'odyssey-[^debuginfo].*(\.el7\.x86_64\.rpm)' 40 | use_regex: yes 41 | register: odyssey_rpm 42 | 43 | #- name: debug registered var 44 | # debug: 45 | # var: odyssey_rpm 46 | # 47 | #- name: test shell for registered var 48 | # shell: echo {{ odyssey_rpm.files }} > /tmp/testfie 49 | # 50 | #- name: copy test 51 | # copy: 52 | # src: "{{ item.path }}" 53 | # dest: /tmp 54 | # remote_src: yes 55 | # with_items: 56 | # "{{ odyssey_rpm.files }}" 57 | 58 | - name: copy rpm-file to the repo directory 59 | copy: 60 | src: "{{ item.path }}" 61 | dest: /usr/share/nginx/html/repo 62 | remote_src: yes 63 | with_items: 64 | "{{ odyssey_rpm.files }}" 65 | 66 | - name: createrepo otus-odyssey-vip-manager 67 | shell: createrepo /usr/share/nginx/html/repo/ 68 | 69 | - name: nginx restart 70 | systemd: 71 | name: nginx 72 | state: restarted 73 | enabled: yes 74 | -------------------------------------------------------------------------------- /provisioning/roles/03_keepalived-haproxy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install keepalived, haproxy 3 | yum: 4 | name: 5 | - keepalived 6 | - haproxy 7 | state: latest 8 | 9 | - name: edit sysctl.conf 10 | sysctl: 11 | name: "{{ item.key }}" 12 | value: "{{ item.value }}" 13 | sysctl_set: yes 14 | state: present 15 | reload: yes 16 | with_items: 17 | - { key: "net.core.rmem_max", value: "16777216" } 18 | - { key: "net.ipv4.tcp_rmem", value: "4096 87380 16777216" } 19 | - { key: "net.core.wmem_max", value: "16777216" } 20 | - { key: "net.ipv4.tcp_wmem", value: "4096 16384 16777216" } 21 | - { key: "net.ipv4.tcp_fin_timeout", value: "20" } 22 | - { key: "net.ipv4.tcp_tw_reuse", value: "1" } 23 | - { key: "net.core.netdev_max_backlog", value: "10000" } 24 | - { key: "net.ipv4.ip_local_port_range", value: "15000 65001" } 25 | - { key: "net.ipv4.ip_nonlocal_bind", value: "1" } 26 | - { key: "net.ipv4.ip_forward", value: "1" } 27 | - { key: "net.ipv4.conf.all.forwarding", value: "1" } 28 | 29 | - name: copy keepalived.service 30 | copy: 31 | src: keepalived.service 32 | dest: /usr/lib/systemd/system/keepalived.service 33 | owner: root 34 | group: root 35 | mode: '0644' 36 | 37 | - name: put keepalived.conf template to master 38 | template: 39 | src: master_keepalived.conf.j2 40 | dest: /etc/keepalived/keepalived.conf 41 | owner: root 42 | group: root 43 | mode: '0644' 44 | notify: 45 | - keepalived restart 46 | when: "ansible_facts['hostname'] == HOST_NAME_BALANCER_01|string" 47 | #when: ansible_facts['hostname'] == "{{ HOST_NAME_BALANCER_01 }}" 48 | 49 | - name: put keepalived.conf template to slave 50 
| template: 51 | src: slave_keepalived.conf.j2 52 | dest: /etc/keepalived/keepalived.conf 53 | owner: root 54 | group: root 55 | mode: '0644' 56 | notify: 57 | - keepalived restart 58 | when: "ansible_facts['hostname'] == HOST_NAME_BALANCER_02|string" 59 | #when: ansible_facts['hostname'] == "{{ HOST_NAME_BALANCER_02 }}" 60 | 61 | - name: put haproxy.cfg template 62 | template: 63 | src: haproxy.cfg.j2 64 | dest: /etc/haproxy/haproxy.cfg 65 | owner: root 66 | group: root 67 | mode: '0644' 68 | notify: 69 | - haproxy restart 70 | -------------------------------------------------------------------------------- /provisioning/roles/05_consul-cluster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: edit /etc/hosts (comment line) 3 | replace: 4 | path: "{{ HOSTS_FILE }}" 5 | regexp: '(^(127\.0\.0\.1)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)$)' 6 | replace: '#\1' 7 | tags: 8 | - update_hosts 9 | 10 | - name: install jq 11 | yum: 12 | name: jq 13 | state: latest 14 | 15 | - name: download consul 16 | get_url: 17 | url: https://releases.hashicorp.com/consul/{{ CONSUL_VERSION }}/consul_{{ CONSUL_VERSION }}_linux_amd64.zip 18 | dest: /tmp/consul_{{ CONSUL_VERSION }}_linux_amd64.zip 19 | mode: '0600' 20 | 21 | - name: extract consul zip-archive 22 | unarchive: 23 | remote_src: yes 24 | src: /tmp/consul_{{ CONSUL_VERSION }}_linux_amd64.zip 25 | dest: /usr/local/bin/ 26 | 27 | - name: create system group "consul" 28 | group: 29 | name: consul 30 | system: yes 31 | state: present 32 | 33 | - name: add system user "consul" 34 | user: 35 | name: consul 36 | group: consul 37 | shell: /sbin/nologin 38 | home: /var/lib/consul 39 | system: yes 40 | 41 | - name: set permissions for consul data directories 42 | file: 43 | path: "{{ item }}" 44 | state: directory 45 | owner: consul 46 | group: consul 47 | mode: '0775' 48 | with_items: 49 | - /var/lib/consul 50 | - /etc/consul.d 51 | 52 | #- name: create directory for consul config files 53 | # file: 54 | # path: /etc/consul.d 55 | # state: directory 56 | # owner: consul 57 | # group: consul 58 | # mode: '0775' 59 | 60 | - name: put consul-server.service template 61 | template: 62 | src: consul-server.service.j2 63 | dest: /etc/systemd/system/consul-server.service 64 | owner: root 65 | group: root 66 | mode: '0644' 67 | 68 | #generate encryption key that will be used as the "encrypt" entry of ALL CONSUL NODES 69 | #CONSUL_KEY=$(consul keygen); echo $CONSUL_KEY 70 | #sgIjP/24ugFcKfJ5DJ1Ob29dB5jgzwZbXY0lED3RM9w= 71 | #see ansible variable CONSUL_KEY 72 | 73 | - name: put bootstrap consul configuration template 74 | template: 75 | src: consul-server.json.j2 76 | dest: /etc/consul.d/consul-server.json 77 | owner: root 78 | group: root 79 | mode: '0644' 80 | notify: 81 | - consul-server restarted 82 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/03_keepalived-haproxy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install keepalived, haproxy 3 | yum: 4 | name: 5 | - keepalived 6 | - haproxy 7 | state: latest 8 | 9 | - name: edit sysctl.conf 10 | sysctl: 11 | name: "{{ item.key }}" 12 | value: "{{ item.value }}" 13 | sysctl_set: yes 14 | state: present 15 | reload: yes 16 | with_items: 17 | - { key: "net.core.rmem_max", value: "16777216" } 18 | - { key: "net.ipv4.tcp_rmem", value: "4096 87380 16777216" } 19 | - { key: "net.core.wmem_max", value: "16777216" } 20 | - { key: "net.ipv4.tcp_wmem", value: "4096 16384 16777216" } 21 | - { key: "net.ipv4.tcp_fin_timeout", value: "20" } 22 | - { key: "net.ipv4.tcp_tw_reuse", value: "1" } 23 | - { key: "net.core.netdev_max_backlog", value: "10000" } 24 | - { key: "net.ipv4.ip_local_port_range", value: "15000 65001" } 25 | - { key: "net.ipv4.ip_nonlocal_bind", value: "1" } 26 | - { key: "net.ipv4.ip_forward", value: "1" } 27 | - { key: "net.ipv4.conf.all.forwarding", value: "1" } 28 | 29 | - name: copy keepalived.service 30 | copy: 31 | src: keepalived.service 32 | dest: /usr/lib/systemd/system/keepalived.service 33 | owner: root 34 | group: root 35 | mode: '0644' 36 | 37 | - name: put keepalived.conf template to master 38 | template: 39 | src: master_keepalived.conf.j2 40 | dest: /etc/keepalived/keepalived.conf 41 | owner: root 42 | group: root 43 | mode: '0644' 44 | notify: 45 | - keepalived restart 46 | when: "ansible_facts['hostname'] == HOST_NAME_BALANCER_01|string" 47 | #when: ansible_facts['hostname'] == "{{ HOST_NAME_BALANCER_01 }}" 48 | 49 | - name: put keepalived.conf template to slave 50 | template: 51 | src: slave_keepalived.conf.j2 52 | dest: /etc/keepalived/keepalived.conf 53 | owner: root 54 | group: root 55 | mode: '0644' 56 | notify: 57 | - keepalived restart 58 | when: "ansible_facts['hostname'] == HOST_NAME_BALANCER_02|string" 59 | #when: ansible_facts['hostname'] == "{{ HOST_NAME_BALANCER_02 }}" 60 | 61 | - name: put haproxy.cfg template 62 | template: 63 | src: haproxy.cfg.j2 64 | dest: /etc/haproxy/haproxy.cfg 65 | owner: root 66 | group: root 67 | mode: '0644' 68 | notify: 69 | - haproxy restart 70 | -------------------------------------------------------------------------------- /provisioning/roles/06_pgsql-patroni/01_consul-client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: edit /etc/hosts (comment line) 3 | replace: 4 | path: "{{ HOSTS_FILE }}" 5 | regexp: '(^(127\.0\.0\.1)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)$)' 6 | replace: '#\1' 7 | tags: 8 | - update_hosts 9 | 10 | - name: install jq 11 | yum: 12 | name: jq 13 | state: latest 14 | 15 | - name: download consul 16 | get_url: 17 | url: https://releases.hashicorp.com/consul/{{ CONSUL_VERSION }}/consul_{{ CONSUL_VERSION }}_linux_amd64.zip 18 | dest: /tmp/consul_{{ CONSUL_VERSION }}_linux_amd64.zip 19 | mode: '0600' 20 | 21 | - name: extract consul zip-archive 22 | unarchive: 23 | remote_src: yes 24 | src: /tmp/consul_{{ CONSUL_VERSION }}_linux_amd64.zip 25 | dest: /usr/local/bin/ 26 | 27 | - name: create system group "consul" 28 | group: 29 | name: consul 30 | system: yes 31 | state: present 32 | 33 | - name: add system user "consul" 34 | user: 35 | name: consul 36 | group: consul 37 | shell: /sbin/nologin 38 | home: /var/lib/consul 39 | system: yes 40 | 41 | - name: set permissions for consul data directories 42 | file: 43 | path: "{{ item }}" 44 | state: directory 45 | owner: consul 46 | group: consul 47 | mode: '0775' 48 | with_items: 49 | - /var/lib/consul 50 | - /etc/consul.d 51 | 52 | #- name: create directory for consul config files 53 | # file: 54 | # path: /etc/consul.d 55 | # state: directory 56 | # owner: consul 57 | # group: consul 58 | # mode: '0775' 59 | 60 | - name: copy consul-client.service 61 | copy: 62 | src: consul-client.service 63 | dest: /etc/systemd/system/consul-client.service 64 | owner: root 65 | group: root 66 | mode: '0644' 67 | 68 | #generate encryption key that will be used as the "encrypt" entry of ALL CONSUL NODES 69 | #CONSUL_KEY=$(consul keygen); echo $CONSUL_KEY 70 | #sgIjP/24ugFcKfJ5DJ1Ob29dB5jgzwZbXY0lED3RM9w= 71 | #see ansible variable CONSUL_KEY 72 | 73 | - name: put bootstrap consul configuration template 74 | template: 75 | src: consul-client.json.j2 76 | dest: /etc/consul.d/consul-client.json 77 | owner: root 78 | group: root 79 | mode: '0644' 80 | 81 | - name: consul-client restarted 82 | systemd: 83 | daemon_reload: yes 84 | name: consul-client 85 | state: restarted 86 | enabled: yes 87 | -------------------------------------------------------------------------------- /provisioning/roles/08_zabbix/03_web-optimization/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install packages for php 3 | yum: 4 | name: 5 | - php-pecl-apcu 6 | - php-pecl-zendopcache 7 | - memcached 8 | - memcached-devel 9 | - php-pecl-memcache 10 | - redis 11 | - php-pecl-redis 12 | state: latest 13 | notify: 14 | - memcached restart 15 | - redis restart 16 | 17 | - name: editing php-fpm.d-www.conf 18 | shell: | 19 | sed -i 's/pm = dynamic/pm = static/g' /etc/php-fpm.d/www.conf 20 | sed -i 's/pm.max_children = 50/pm.max_children = 150/g' /etc/php-fpm.d/www.conf 21 | sed -i 's/;rlimit_files = 1024/rlimit_files = 16384/g' /etc/php-fpm.d/www.conf 22 | sed -i 's/;pm.max_requests = 500/pm.max_requests = 500/g' /etc/php-fpm.d/www.conf 23 | 24 | - name: create directory for php-fpm.service override.conf 25 | file: 26 | path: /etc/systemd/system/php-fpm.service.d 27 | state: directory 28 | mode: '0755' 29 | 30 | - name: copy override.conf (increase the number of open files for php-fpm to 16384) 31 | copy: 32 | src: override_php-fpm.conf 33 | dest: /etc/systemd/system/php-fpm.service.d/override.conf 34 | owner: root 35 | group: root 36 | mode: '0644' 37 | notify: 38 | - php-fpm restart 39 | 40 | - name: editing /etc/nginx/nginx.conf 41 | shell: sed -i 's/worker_connections 1024;/worker_connections 16384;/g' /etc/nginx/nginx.conf 42 | 43 | - name: create directory for nginx.service override.conf 44 | file: 45 | path: /etc/systemd/system/nginx.service.d 46 | state: directory 47 | mode: '0755' 48 | 49 | - name: copy override.conf (increase the number of open files for nginx to 16384) 50 | copy: 51 | src: override_nginx.conf 52 | dest: /etc/systemd/system/nginx.service.d/override.conf 53 | owner: root 54 | group: root 55 | mode: '0644' 56 | notify: 57 | - nginx restart 58 | 59 | - name: edit sysctl.conf 60 | sysctl: 61 | name: "{{ item.key }}" 62 | value: "{{ item.value }}" 63 | sysctl_set: yes 64 | state: present 65 | reload: yes 66 | with_items: 67 | - { key: "net.core.rmem_max", value: "16777216" } 68 | - { key: "net.ipv4.tcp_rmem", value: "4096 87380 16777216" } 69 | - { key: "net.core.wmem_max", value: "16777216" } 70 | - { key: "net.ipv4.tcp_wmem", value: "4096 16384 16777216" } 71 | - { key: "net.ipv4.tcp_fin_timeout", value: "20" } 72 | - { key: "net.ipv4.tcp_tw_reuse", value: "1" } 73 | - { key: "net.core.netdev_max_backlog", value: "10000" } 74 | - { key: "net.ipv4.ip_local_port_range", value: "15000 65001" } 75 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/08_zabbix/03_web-optimization/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install packages for php 3 | yum: 4 | name: 5 | - php-pecl-apcu 6 | - php-pecl-zendopcache 7 | - memcached 8 | - memcached-devel 9 | - php-pecl-memcache 10 | - redis 11 | - php-pecl-redis 12 | state: latest 13 | notify: 14 | - memcached restart 15 | - redis restart 16 | 17 | - name: editing php-fpm.d-www.conf 18 | shell: | 19 | sed -i 's/pm = dynamic/pm = static/g' /etc/php-fpm.d/www.conf 20 | sed -i 's/pm.max_children = 50/pm.max_children = 800/g' /etc/php-fpm.d/www.conf 21 | sed -i 's/;rlimit_files = 1024/rlimit_files = 16384/g' /etc/php-fpm.d/www.conf 22 | sed -i 's/;pm.max_requests = 500/pm.max_requests = 500/g' /etc/php-fpm.d/www.conf 23 | 24 | - name: create directory for php-fpm.service override.conf 25 | file: 26 | path: /etc/systemd/system/php-fpm.service.d 27 | state: directory 28 | mode: '0755' 29 | 30 | - name: copy override.conf (increase the number of open files for php-fpm to 16384) 31 | copy: 32 | src: override_php-fpm.conf 33 | dest: /etc/systemd/system/php-fpm.service.d/override.conf 34 | owner: root 35 | group: root 36 | mode: '0644' 37 | notify: 38 | - php-fpm restart 39 | 40 | - name: editing /etc/nginx/nginx.conf 41 | shell: sed -i 's/worker_connections 1024;/worker_connections 16384;/g' /etc/nginx/nginx.conf 42 | 43 | - name: create directory for nginx.service override.conf 44 | file: 45 | path: /etc/systemd/system/nginx.service.d 46 | state: directory 47 | mode: '0755' 48 | 49 | - name: copy override.conf (increase the number of open files for nginx to 16384) 50 | copy: 51 | src: override_nginx.conf 52 | dest: /etc/systemd/system/nginx.service.d/override.conf 53 | owner: root 54 | group: root 55 | mode: '0644' 56 | notify: 57 | - nginx restart 58 | 59 | - name: edit sysctl.conf 60 | sysctl: 61 | name: "{{ item.key }}" 62 | value: "{{ item.value }}" 63 | sysctl_set: yes 64 | state: present 65 | reload: yes 66 | with_items: 67 | - { key: "net.core.rmem_max", value: "16777216" } 68 | - { key: "net.ipv4.tcp_rmem", value: "4096 87380 16777216" } 69 | - { key: "net.core.wmem_max", value: "16777216" } 70 | - { key: "net.ipv4.tcp_wmem", value: "4096 16384 16777216" } 71 | - { key: "net.ipv4.tcp_fin_timeout", value: "20" } 72 | - { key: "net.ipv4.tcp_tw_reuse", value: "1" } 73 | - { key: "net.core.netdev_max_backlog", value: "10000" } 74 | - { key: "net.ipv4.ip_local_port_range", value: "15000 65001" } 75 | -------------------------------------------------------------------------------- /provisioning_proxmox/hosts: -------------------------------------------------------------------------------- 1 | [balancer] 2 | hl-balancer01 ansible_user=otus ansible_host=hl-balancer01 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 3 | hl-balancer02 ansible_user=otus ansible_host=hl-balancer02 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 4 | 5 | [pg_conpool] 6 | #hl-pg-haproxy ansible_user=otus ansible_host=hl-pg-haproxy ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 7 | hl-pg-conpool01 ansible_user=otus ansible_host=hl-pg-conpool01 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 8 | hl-pg-conpool02 ansible_user=otus ansible_host=hl-pg-conpool02 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 9 | 10 | [pg_conpool1] 11 | hl-pg-conpool01 ansible_user=otus ansible_host=hl-pg-conpool01 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 12 | 13 | [dcs] 14 | #hl-etcd ansible_user=otus ansible_host=hl-etcd ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 15 | hl-dcs01 ansible_user=otus ansible_host=hl-dcs01 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 16 | hl-dcs02 ansible_user=otus
ansible_host=hl-dcs02 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 17 | hl-dcs03 ansible_user=otus ansible_host=hl-dcs03 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 18 | 19 | [database] 20 | hl-pg01 ansible_user=otus ansible_host=hl-pg01 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 21 | hl-pg02 ansible_user=otus ansible_host=hl-pg02 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 22 | hl-pg03 ansible_user=otus ansible_host=hl-pg03 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 23 | 24 | [database1] 25 | hl-pg01 ansible_user=otus ansible_host=hl-pg01 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 26 | 27 | [web] 28 | hl-zabbix01 ansible_user=otus ansible_host=hl-zabbix01 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 29 | hl-zabbix02 ansible_user=otus ansible_host=hl-zabbix02 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 30 | 31 | [web1] 32 | hl-zabbix01 ansible_user=otus ansible_host=hl-zabbix01 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 33 | 34 | [web2] 35 | hl-zabbix02 ansible_user=otus ansible_host=hl-zabbix02 ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 36 | 37 | #[pg_conpool_dcs] 38 | #hl-pg-haproxy ansible_user=otus ansible_host=hl-pg-haproxy ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 39 | #hl-etcd ansible_user=otus ansible_host=hl-etcd ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 40 | 41 | [client] 42 | hl-client ansible_user=otus ansible_host=hl-client ansible_port=22 ansible_private_key_file=/home/otus/.ssh/hlstand 43 | -------------------------------------------------------------------------------- /provisioning_proxmox/hosts_ip: -------------------------------------------------------------------------------- 1 | [balancer] 2 | hl-balancer01 ansible_user=otus ansible_host=10.51.21.51 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 3 | hl-balancer02 ansible_user=otus ansible_host=10.51.21.52 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 4 | 5 | [pg_conpool] 6 | #hl-pg-haproxy ansible_user=otus ansible_host=10.51.21.59 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 7 | hl-pg-conpool01 ansible_user=otus ansible_host=10.51.21.54 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 8 | hl-pg-conpool02 ansible_user=otus ansible_host=10.51.21.55 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 9 | 10 | [pg_conpool1] 11 | hl-pg-conpool01 ansible_user=otus ansible_host=10.51.21.54 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 12 | 13 | [dcs] 14 | #hl-etcd ansible_user=otus ansible_host=10.51.21.64 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 15 | hl-dcs01 ansible_user=otus ansible_host=10.51.21.61 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 16 | hl-dcs02 ansible_user=otus ansible_host=10.51.21.62 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 17 | hl-dcs03 ansible_user=otus ansible_host=10.51.21.63 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 18 | 19 | [database] 20 | hl-pg01 ansible_user=otus ansible_host=10.51.21.65 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 21 | hl-pg02 ansible_user=otus ansible_host=10.51.21.66 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 22 | hl-pg03 ansible_user=otus ansible_host=10.51.21.67 ansible_port=22 
ansible_private_key_file=/home/timur/.ssh/id_rsa 23 | 24 | [database1] 25 | hl-pg01 ansible_user=otus ansible_host=10.51.21.65 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 26 | 27 | [web] 28 | hl-zabbix01 ansible_user=otus ansible_host=10.51.21.57 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 29 | hl-zabbix02 ansible_user=otus ansible_host=10.51.21.58 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 30 | 31 | [web1] 32 | hl-zabbix01 ansible_user=otus ansible_host=10.51.21.57 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 33 | 34 | [web2] 35 | hl-zabbix02 ansible_user=otus ansible_host=10.51.21.58 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 36 | 37 | #[pg_conpool_dcs] 38 | #hl-pg-haproxy ansible_user=otus ansible_host=10.51.21.59 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 39 | #hl-etcd ansible_user=otus ansible_host=10.51.21.64 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 40 | 41 | [client] 42 | hl-client ansible_user=otus ansible_host=10.51.21.70 ansible_port=22 ansible_private_key_file=/home/timur/.ssh/id_rsa 43 | -------------------------------------------------------------------------------- /provisioning/variables: -------------------------------------------------------------------------------- 1 | --- 2 | TIMEZONE_OS: 'Asia/Novosibirsk' 3 | #TIMEZONE_OS: 'Europe/Moscow' 4 | 5 | HOSTS_FILE: '/etc/hosts' 6 | #HOSTS_FILE: '/etc/cloud/templates/hosts.redhat.tmpl' 7 | 8 | #zabbix 9 | IP_ZAB_CLUSTER: '10.51.21.56' 10 | PASS_ZAB_FOR_DB: 'zabb_xPas5' 11 | PORT_FOR_DB_CLIENT: '5000' 12 | USER_FOR_WEB_GUI_ZABBIX: 'Admin' 13 | PASS_FOR_WEB_GUI_ZABBIX: 'zabbix' 14 | 15 | #postgres 16 | PASS_POSTGRES_FOR_DB: 'gfhjkm' 17 | NET_POSTGRES_ALLOW: '10.51.21.0/24' 18 | 19 | #odyssey-keepalived 20 | PASS_ODYSSEY_USER: 'odyssey' 21 | IP_PG_CON_POOL_NIC: 'eth1' 22 | PASS_FOR_PG_CON_POOL_KEEPALIVED: 'och4Ohc8kaequej6' 23 | 24 | #pacemaker 25 | PASS_HACLUSTER_USER: 'Ozae2doipi1Nahzi' 26 | IP_ZAB_CLUSTER_NIC: 'eth1' 27 | 28 | #balancer 29 | IP_BALANCER_NIC: 'eth1' 30 | PASS_FOR_KEEPALIVED: 'ieSietoco1Aith6e' 31 | PASS_FOR_HAPROXY: 'balancer' 32 | USER_HAPROXY: 'balancer' 33 | 34 | #consul 35 | CONSUL_VERSION: '1.6.1' 36 | CONSUL_KEY: 'sgIjP/24ugFcKfJ5DJ1Ob29dB5jgzwZbXY0lED3RM9w=' 37 | CONSUL_DATACENTER: 'HL-OTUS' 38 | 39 | #domain 40 | FAKE_DOMAIN: 'otus' 41 | 42 | ########## hosts ########## 43 | HOST_NAME_BALANCER_VIP: 'hl-balancer-vip' 44 | HOST_IP_BALANCER_VIP: '10.51.21.50' 45 | 46 | HOST_NAME_BALANCER_01: 'hl-balancer01' 47 | HOST_IP_BALANCER_01: '10.51.21.51' 48 | 49 | HOST_NAME_BALANCER_02: 'hl-balancer02' 50 | HOST_IP_BALANCER_02: '10.51.21.52' 51 | 52 | HOST_NAME_PG_CON_POOL_VIP: 'hl-pg-conpool-vip' 53 | HOST_IP_PG_CON_POOL_VIP: '10.51.21.53' 54 | 55 | HOST_NAME_PG_CON_POOL_01: 'hl-pg-conpool01' 56 | HOST_IP_PG_CON_POOL_01: '10.51.21.54' 57 | 58 | HOST_NAME_PG_CON_POOL_02: 'hl-pg-conpool02' 59 | HOST_IP_PG_CON_POOL_02: '10.51.21.55' 60 | 61 | HOST_NAME_WEB_VIP: 'hl-zabbix-vip' 62 | HOST_IP_WEB_VIP: '10.51.21.56' 63 | 64 | HOST_NAME_WEB01: 'hl-zabbix01' 65 | HOST_IP_WEB01: '10.51.21.57' 66 | 67 | HOST_NAME_WEB02: 'hl-zabbix02' 68 | HOST_IP_WEB02: '10.51.21.58' 69 | 70 | 71 | 72 | #free 10.51.21.59 73 | 74 | #free 10.51.21.60 75 | 76 | 77 | 78 | HOST_NAME_DCS_01: 'hl-dcs01' 79 | HOST_IP_DCS_01: '10.51.21.61' 80 | 81 | HOST_NAME_DCS_02: 'hl-dcs02' 82 | HOST_IP_DCS_02: '10.51.21.62' 83 | 84 | HOST_NAME_DCS_03: 'hl-dcs03' 85 | HOST_IP_DCS_03: '10.51.21.63' 86 | 87 | 88 | 89 | #free 10.51.21.64 90 | 91 | 92 | 93 | HOST_NAME_PG01: 'hl-pg01' 94 | HOST_IP_PG01: '10.51.21.65' 95 | 96 | HOST_NAME_PG02: 'hl-pg02' 97 | HOST_IP_PG02: '10.51.21.66' 98 | 99 | HOST_NAME_PG03: 'hl-pg03' 100 | HOST_IP_PG03: '10.51.21.67' 101 | 102 | HOST_NAME_PG_VIP: 'hl-pg-vip' 103 | HOST_IP_PG_VIP: '10.51.21.68' 104 | IP_VIPMANAGER_NIC: 'eth1' 105 | 106 | 107 | 108 | #free 10.51.21.69 109 | 110 | 111 | 112 | HOST_NAME_HL_CLIENT: 'hl-client' 113 | HOST_IP_HL_CLIENT: '10.51.21.70' 114 | -------------------------------------------------------------------------------- /provisioning_proxmox/variables: -------------------------------------------------------------------------------- 1 | --- 2 | #TIMEZONE_OS: 'Asia/Novosibirsk' 3 | TIMEZONE_OS: 'Europe/Moscow' 4 | 5 | #HOSTS_FILE: '/etc/hosts' 6 | HOSTS_FILE: '/etc/cloud/templates/hosts.redhat.tmpl' 7 | 8 | #zabbix 9 | IP_ZAB_CLUSTER: '10.51.21.56' 10 | PASS_ZAB_FOR_DB: 'zabb_xPas5' 11 | PORT_FOR_DB_CLIENT: '5000' 12 | USER_FOR_WEB_GUI_ZABBIX: 'Admin' 13 | PASS_FOR_WEB_GUI_ZABBIX: 'zabbix' 14 | 15 | #postgres 16 | PASS_POSTGRES_FOR_DB: 'gfhjkm' 17 | NET_POSTGRES_ALLOW: '10.51.21.0/24' 18 | 19 | #odyssey-keepalived 20 | PASS_ODYSSEY_USER: 'odyssey' 21 | IP_PG_CON_POOL_NIC: 'eth0' 22 | PASS_FOR_PG_CON_POOL_KEEPALIVED: 'och4Ohc8kaequej6' 23 | 24 | #pacemaker 25 | PASS_HACLUSTER_USER: 'Ozae2doipi1Nahzi' 26 | IP_ZAB_CLUSTER_NIC: 'eth0' 27 | 28 | #balancer 29 | IP_BALANCER_NIC: 'eth0' 30 | PASS_FOR_KEEPALIVED: 'ieSietoco1Aith6e' 31 | PASS_FOR_HAPROXY: 'balancer' 32 | USER_HAPROXY: 'balancer' 33 | 34 | #consul 35 | CONSUL_VERSION: '1.6.1' 36 | CONSUL_KEY: 'sgIjP/24ugFcKfJ5DJ1Ob29dB5jgzwZbXY0lED3RM9w=' 37 | CONSUL_DATACENTER: 'HL-OTUS' 38 | 39 | #domain 40 | FAKE_DOMAIN: 'otus' 41 | 42 | ########## hosts ########## 43 | HOST_NAME_BALANCER_VIP: 'hl-balancer-vip' 44 | HOST_IP_BALANCER_VIP: '10.51.21.50' 45 | 46 | HOST_NAME_BALANCER_01: 'hl-balancer01' 47 | HOST_IP_BALANCER_01: '10.51.21.51' 48 | 49 | HOST_NAME_BALANCER_02: 'hl-balancer02' 50 | HOST_IP_BALANCER_02: '10.51.21.52' 51 | 52 | HOST_NAME_PG_CON_POOL_VIP: 'hl-pg-conpool-vip' 53 | HOST_IP_PG_CON_POOL_VIP: '10.51.21.53' 54 | 55 | HOST_NAME_PG_CON_POOL_01: 'hl-pg-conpool01' 56 | HOST_IP_PG_CON_POOL_01: '10.51.21.54' 57 | 58 | HOST_NAME_PG_CON_POOL_02: 'hl-pg-conpool02' 59 | HOST_IP_PG_CON_POOL_02: '10.51.21.55' 60 | 61 | HOST_NAME_WEB_VIP: 'hl-zabbix-vip' 62 | HOST_IP_WEB_VIP: '10.51.21.56' 63 | 64 | HOST_NAME_WEB01: 'hl-zabbix01' 65 | HOST_IP_WEB01: '10.51.21.57' 66 | 67 | HOST_NAME_WEB02: 'hl-zabbix02' 68 | HOST_IP_WEB02: '10.51.21.58' 69 | 70 | 71 | 72 | #free 10.51.21.59 73 | 74 | #free 10.51.21.60 75 | 76 | 77 | 78 | HOST_NAME_DCS_01: 'hl-dcs01' 79 | HOST_IP_DCS_01: '10.51.21.61' 80 | 81 | HOST_NAME_DCS_02: 'hl-dcs02' 82 | HOST_IP_DCS_02: '10.51.21.62' 83 | 84 | HOST_NAME_DCS_03: 'hl-dcs03' 85 | HOST_IP_DCS_03: '10.51.21.63' 86 | 87 | 88 | 89 | #free 10.51.21.64 90 | 91 | 92 | 93 | HOST_NAME_PG01: 'hl-pg01' 94 | HOST_IP_PG01: '10.51.21.65' 95 | 96 | HOST_NAME_PG02: 'hl-pg02' 97 | HOST_IP_PG02: '10.51.21.66' 98 | 99 | HOST_NAME_PG03: 'hl-pg03' 100 | HOST_IP_PG03: '10.51.21.67' 101 | 102 | HOST_NAME_PG_VIP: 'hl-pg-vip' 103 | HOST_IP_PG_VIP: '10.51.21.68' 104 | IP_VIPMANAGER_NIC: 'eth0' 105 | 106 | 107 | 108 | #free 10.51.21.69 109 | 110 | 111 | 112 | HOST_NAME_HL_CLIENT: 'hl-client' 113 | HOST_IP_HL_CLIENT: '10.51.21.70' 114 | --------------------------------------------------------------------------------
/provisioning/roles/09_mamonsu/03_mamonsu_zabbix-postgres/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: export template.xml and add host to zabbix-server 3 | shell: | 4 | mamonsu zabbix template export /usr/share/mamonsu/template.xml --url=http://{{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }}:8080/zabbix --user={{ USER_FOR_WEB_GUI_ZABBIX }} --password={{ PASS_FOR_WEB_GUI_ZABBIX }} 5 | mamonsu zabbix host create \ 6 | hl-pgMASTER \ 7 | $(mamonsu zabbix hostgroup id "Discovered hosts" --url=http://{{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }}:8080/zabbix --user={{ USER_FOR_WEB_GUI_ZABBIX }} --password={{ PASS_FOR_WEB_GUI_ZABBIX }}) \ 8 | $(mamonsu zabbix template id PostgresPro-Linux2 --url=http://{{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }}:8080/zabbix --user={{ USER_FOR_WEB_GUI_ZABBIX }} --password={{ PASS_FOR_WEB_GUI_ZABBIX }}) \ 9 | 127.0.0.1 \ 10 | --url=http://{{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }}:8080/zabbix --user={{ USER_FOR_WEB_GUI_ZABBIX }} --password={{ PASS_FOR_WEB_GUI_ZABBIX }} 11 | 12 | - name: enable the pg_stat_statements extension in the database and restart the patroni cluster 13 | shell: | 14 | patronictl -c /etc/patroni.yml edit-config --pg pg_stat_statements.track=all --pg track_activity_query_size=2048 --pg shared_preload_libraries=pg_stat_statements --force 15 | sleep 6 16 | patronictl -c /etc/patroni.yml restart otus --force 17 | delegate_to: "{{ item }}" 18 | with_items: 19 | - "{{ groups['database1'] }}" 20 | 21 | - name: wait 10 seconds for the patroni cluster to restart 22 | wait_for: 23 | timeout: 10 24 | 25 | ########## When working through odyssey, the postgres user 26 | ########## has no access to the public schema of another database. 27 | ########## That is why we connect here to the VIP address of the patroni cluster 28 | ########## rather than to the odyssey VIP address. 29 | ########## Filed an issue with the developers.
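########## (Illustrative sketch, not part of the original role: after the task
########## below has run, the result can be double-checked over the same patroni
########## VIP with the postgresql_query module shipped with Ansible >= 2.8;
########## the task name and the registered variable are arbitrary.)
#- name: verify installed extensions via the patroni VIP
#  postgresql_query:
#    login_host: "{{ HOST_NAME_PG_VIP }}.{{ FAKE_DOMAIN }}"
#    login_user: postgres
#    login_password: "{{ PASS_POSTGRES_FOR_DB }}"
#    port: '5432'
#    db: zabbix
#    query: SELECT extname FROM pg_extension
#  register: zabbix_extensions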
30 | - name: add extensions to the other databases (with cascade) 31 | postgresql_ext: 32 | #login_host: "{{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }}" 33 | login_host: "{{ HOST_NAME_PG_VIP }}.{{ FAKE_DOMAIN }}" 34 | login_user: postgres 35 | login_password: "{{ PASS_POSTGRES_FOR_DB }}" 36 | #port: "{{ PORT_FOR_DB_CLIENT }}" 37 | port: '5432' 38 | db: "{{ item.db }}" 39 | name: "{{ item.exten }}" 40 | cascade: true 41 | state: present 42 | with_items: 43 | - { db: "template1", exten: "pg_buffercache" } 44 | - { db: "template1", exten: "pg_stat_statements" } 45 | - { db: "postgres", exten: "pg_buffercache" } 46 | - { db: "postgres", exten: "pg_stat_statements" } 47 | - { db: "zabbix", exten: "pg_buffercache" } 48 | - { db: "zabbix", exten: "pg_stat_statements" } 49 | 50 | - name: waiting 10 seconds for patroni to apply the configuration 51 | wait_for: 52 | timeout: 10 53 | 54 | - name: patroni restart 55 | systemd: 56 | name: patroni 57 | state: restarted 58 | enabled: yes 59 | delegate_to: "{{ item }}" 60 | with_items: 61 | - "{{ groups['database'] }}" 62 | -------------------------------------------------------------------------------- /provisioning/hosts_vagrant: -------------------------------------------------------------------------------- 1 | [balancer] 2 | HLbalancer01 ansible_host=127.0.0.1 ansible_port=2231 ansible_private_key_file=.vagrant/machines/HLbalancer01/virtualbox/private_key 3 | HLbalancer02 ansible_host=127.0.0.1 ansible_port=2241 ansible_private_key_file=.vagrant/machines/HLbalancer02/virtualbox/private_key 4 | 5 | [pg_conpool] 6 | #HLpgHaproxy ansible_host=127.0.0.1 ansible_port=2321 ansible_private_key_file=.vagrant/machines/HLpgHaproxy/virtualbox/private_key 7 | HLpgConpool01 ansible_host=127.0.0.1 ansible_port=2233 ansible_private_key_file=.vagrant/machines/HLpgConpool01/virtualbox/private_key 8 | HLpgConpool02 ansible_host=127.0.0.1 ansible_port=2234 ansible_private_key_file=.vagrant/machines/HLpgConpool02/virtualbox/private_key 9 | 10 | [pg_conpool1] 11 | HLpgConpool01 ansible_host=127.0.0.1 ansible_port=2233 ansible_private_key_file=.vagrant/machines/HLpgConpool01/virtualbox/private_key 12 | 13 | [dcs] 14 | #HLetcd ansible_host=127.0.0.1 ansible_port=2421 ansible_private_key_file=.vagrant/machines/HLetcd/virtualbox/private_key 15 | HLdcs01 ansible_host=127.0.0.1 ansible_port=2251 ansible_private_key_file=.vagrant/machines/HLdcs01/virtualbox/private_key 16 | HLdcs02 ansible_host=127.0.0.1 ansible_port=2252 ansible_private_key_file=.vagrant/machines/HLdcs02/virtualbox/private_key 17 | HLdcs03 ansible_host=127.0.0.1 ansible_port=2253 ansible_private_key_file=.vagrant/machines/HLdcs03/virtualbox/private_key 18 | 19 | [database] 20 | HLpg01 ansible_host=127.0.0.1 ansible_port=2521 ansible_private_key_file=.vagrant/machines/HLpg01/virtualbox/private_key 21 | HLpg02 ansible_host=127.0.0.1 ansible_port=2621 ansible_private_key_file=.vagrant/machines/HLpg02/virtualbox/private_key 22 | HLpg03 ansible_host=127.0.0.1 ansible_port=2721 ansible_private_key_file=.vagrant/machines/HLpg03/virtualbox/private_key 23 | 24 | [database1] 25 | HLpg01 ansible_host=127.0.0.1 ansible_port=2521 ansible_private_key_file=.vagrant/machines/HLpg01/virtualbox/private_key 26 | 27 | [web] 28 | HLzabbix01 ansible_host=127.0.0.1 ansible_port=2821 ansible_private_key_file=.vagrant/machines/HLzabbix01/virtualbox/private_key 29 | HLzabbix02 ansible_host=127.0.0.1 ansible_port=2921 ansible_private_key_file=.vagrant/machines/HLzabbix02/virtualbox/private_key 30 | 31 | [web1] 32 | HLzabbix01
ansible_host=127.0.0.1 ansible_port=2821 ansible_private_key_file=.vagrant/machines/HLzabbix01/virtualbox/private_key 33 | 34 | [web2] 35 | HLzabbix02 ansible_host=127.0.0.1 ansible_port=2921 ansible_private_key_file=.vagrant/machines/HLzabbix02/virtualbox/private_key 36 | 37 | #[pg_conpool_dcs] 38 | #HLpgHaproxy ansible_host=127.0.0.1 ansible_port=2321 ansible_private_key_file=.vagrant/machines/HLpgHaproxy/virtualbox/private_key 39 | #HLetcd ansible_host=127.0.0.1 ansible_port=2421 ansible_private_key_file=.vagrant/machines/HLetcd/virtualbox/private_key 40 | 41 | [client] 42 | HLclient ansible_host=127.0.0.1 ansible_port=2232 ansible_private_key_file=.vagrant/machines/HLclient/virtualbox/private_key 43 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/09_mamonsu/03_mamonsu_zabbix-postgres/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: export template.xml and add host to zabbix-server 3 | shell: | 4 | mamonsu zabbix template export /usr/share/mamonsu/template.xml --url=http://{{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }}:8080/zabbix --user={{ USER_FOR_WEB_GUI_ZABBIX }} --password={{ PASS_FOR_WEB_GUI_ZABBIX }} 5 | mamonsu zabbix host create \ 6 | hl-pgMASTER \ 7 | $(mamonsu zabbix hostgroup id "Discovered hosts" --url=http://{{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }}:8080/zabbix --user={{ USER_FOR_WEB_GUI_ZABBIX }} --password={{ PASS_FOR_WEB_GUI_ZABBIX }}) \ 8 | $(mamonsu zabbix template id PostgresPro-Linux2 --url=http://{{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }}:8080/zabbix --user={{ USER_FOR_WEB_GUI_ZABBIX }} --password={{ PASS_FOR_WEB_GUI_ZABBIX }}) \ 9 | 127.0.0.1 \ 10 | --url=http://{{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }}:8080/zabbix --user={{ USER_FOR_WEB_GUI_ZABBIX }} --password={{ PASS_FOR_WEB_GUI_ZABBIX }} 11 | 12 | - name: enable the pg_stat_statements extension in the database and restart the patroni cluster 13 | shell: | 14 | patronictl -c /etc/patroni.yml edit-config --pg pg_stat_statements.track=all --pg track_activity_query_size=2048 --pg shared_preload_libraries=pg_stat_statements --force 15 | sleep 6 16 | patronictl -c /etc/patroni.yml restart otus --force 17 | delegate_to: "{{ item }}" 18 | with_items: 19 | - "{{ groups['database1'] }}" 20 | 21 | - name: waiting 10 seconds for the patroni cluster to restart 22 | wait_for: 23 | timeout: 10 24 | 25 | ########## When working through odyssey, the postgres user 26 | ########## has no access to the public schema of another database. 27 | ########## That is why we connect to the VIP address of the patroni cluster here, 28 | ########## not to the odyssey VIP address. 29 | ########## An issue has been filed with the developers.
30 | - name: add extensions to the other databases (with cascade) 31 | postgresql_ext: 32 | #login_host: "{{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }}" 33 | login_host: "{{ HOST_NAME_PG_VIP }}.{{ FAKE_DOMAIN }}" 34 | login_user: postgres 35 | login_password: "{{ PASS_POSTGRES_FOR_DB }}" 36 | #port: "{{ PORT_FOR_DB_CLIENT }}" 37 | port: '5432' 38 | db: "{{ item.db }}" 39 | name: "{{ item.exten }}" 40 | cascade: true 41 | state: present 42 | with_items: 43 | - { db: "template1", exten: "pg_buffercache" } 44 | - { db: "template1", exten: "pg_stat_statements" } 45 | - { db: "postgres", exten: "pg_buffercache" } 46 | - { db: "postgres", exten: "pg_stat_statements" } 47 | - { db: "zabbix", exten: "pg_buffercache" } 48 | - { db: "zabbix", exten: "pg_stat_statements" } 49 | 50 | - name: waiting 10 seconds for patroni to apply the configuration 51 | wait_for: 52 | timeout: 10 53 | 54 | - name: patroni restart 55 | systemd: 56 | name: patroni 57 | state: restarted 58 | enabled: yes 59 | delegate_to: "{{ item }}" 60 | with_items: 61 | - "{{ groups['database'] }}" 62 | -------------------------------------------------------------------------------- /provisioning/roles/09_mamonsu/02_mamonsu_install/templates/mamonsu_agent.conf.j2: -------------------------------------------------------------------------------- 1 | ### required section 2 | [postgres] 3 | enabled = True 4 | host = {{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }} 5 | user = postgres 6 | password = {{ PASS_POSTGRES_FOR_DB }} 7 | database = postgres 8 | port = {{ PORT_FOR_DB_CLIENT }} 9 | query_timeout = 10 10 | application_name = mamonsu 11 | 12 | ### required section 13 | [zabbix] 14 | enabled = True 15 | ; zabbix server address 16 | address = {{ HOST_NAME_WEB_VIP }}.{{ FAKE_DOMAIN }} 17 | ; configured 'Host name' of client in zabbix 18 | client = hl-pgMASTER 19 | port = 10051 20 | 21 | [sender] 22 | queue = 2048 23 | 24 | ### required section 25 | [system] 26 | enabled = False 27 | ;enabled = True 28 | 29 | ### required section 30 | [log] 31 | file = /var/log/mamonsu/agent.log 32 | level = INFO 33 | format = [%(levelname)s] %(asctime)s - %(name)s - %(message)s 34 | 35 | [agent] 36 | enabled = True 37 | host = 127.0.0.1 38 | ;host = hl-zabbix-vip 39 | port = 10050 40 | 41 | [plugins] 42 | ;enabled = False 43 | enabled = True 44 | directory = /etc/mamonsu/plugins 45 | 46 | ;[biggest_tables] 47 | ;enabled = True 48 | 49 | [metric_log] 50 | enabled = False 51 | directory = /var/log/mamonsu 52 | max_size_mb = 1024 53 | 54 | ### individual plugin sections 55 | [health] 56 | max_memory_usage = 41943040 57 | interval = 60 58 | 59 | [bgwriter] 60 | interval = 60 61 | 62 | [connections] 63 | percent_connections_tr = 90 64 | interval = 60 65 | 66 | [databases] 67 | bloat_scale = 0.2 68 | min_rows = 50 69 | interval = 300 70 | 71 | [pghealth] 72 | uptime = 600 73 | cache = 80 74 | interval = 60 75 | 76 | [instance] 77 | interval = 60 78 | 79 | [xlog] 80 | enabled = False 81 | lag_more_then_in_sec = 300 82 | interval = 60 83 | 84 | [pgstatstatement] 85 | interval = 60 86 | 87 | [pgbuffercache] 88 | interval = 60 89 | 90 | [pgwaitsampling] 91 | enabled = False 92 | interval = 60 93 | 94 | [checkpoint] 95 | max_checkpoint_by_wal_in_hour = 12 96 | interval = 300 97 | 98 | [oldest] 99 | max_transaction_time = 18000 100 | max_xid_age = 18000000 101 | interval = 60 102 | 103 | [pglocks] 104 | interval = 60 105 | 106 | [cfs] 107 | enabled = False 108 | force_enable = False 109 | interval = 60 110 | 111 | [archivecommand] 112 | enabled = False 113 |
max_count_files = 2 114 | interval = 60 115 | 116 | [procstat] 117 | interval = 60 118 | 119 | [diskstats] 120 | interval = 60 121 | 122 | [disksizes] 123 | vfs_percent_free = 10 124 | vfs_inode_percent_free = 10 125 | interval = 60 126 | 127 | [memory] 128 | interval = 60 129 | 130 | [systemuptime] 131 | up_time = 300 132 | interval = 60 133 | 134 | [openfiles] 135 | interval = 60 136 | 137 | [net] 138 | interval = 60 139 | 140 | [la] 141 | interval = 60 142 | 143 | [zbxsender] 144 | interval = 10 145 | 146 | [logsender] 147 | interval = 2 148 | 149 | [agentapi] 150 | interval = 60 151 | 152 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/09_mamonsu/02_mamonsu_install/templates/mamonsu_agent.conf.j2: -------------------------------------------------------------------------------- 1 | ### required section 2 | [postgres] 3 | enabled = True 4 | host = {{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }} 5 | user = postgres 6 | password = {{ PASS_POSTGRES_FOR_DB }} 7 | database = postgres 8 | port = {{ PORT_FOR_DB_CLIENT }} 9 | query_timeout = 10 10 | application_name = mamonsu 11 | 12 | ### required section 13 | [zabbix] 14 | enabled = True 15 | ; zabbix server address 16 | address = {{ HOST_NAME_WEB_VIP }}.{{ FAKE_DOMAIN }} 17 | ; configured 'Host name' of client in zabbix 18 | client = hl-pgMASTER 19 | port = 10051 20 | 21 | [sender] 22 | queue = 2048 23 | 24 | ### required section 25 | [system] 26 | enabled = False 27 | ;enabled = True 28 | 29 | ### required section 30 | [log] 31 | file = /var/log/mamonsu/agent.log 32 | level = INFO 33 | format = [%(levelname)s] %(asctime)s - %(name)s - %(message)s 34 | 35 | [agent] 36 | enabled = True 37 | host = 127.0.0.1 38 | ;host = hl-zabbix-vip 39 | port = 10050 40 | 41 | [plugins] 42 | ;enabled = False 43 | enabled = True 44 | directory = /etc/mamonsu/plugins 45 | 46 | ;[biggest_tables] 47 | ;enabled = True 48 | 49 | [metric_log] 50 | enabled = False 51 | directory = /var/log/mamonsu 52 | max_size_mb = 1024 53 | 54 | ### individual plugin sections 55 | [health] 56 | max_memory_usage = 41943040 57 | interval = 60 58 | 59 | [bgwriter] 60 | interval = 60 61 | 62 | [connections] 63 | percent_connections_tr = 90 64 | interval = 60 65 | 66 | [databases] 67 | bloat_scale = 0.2 68 | min_rows = 50 69 | interval = 300 70 | 71 | [pghealth] 72 | uptime = 600 73 | cache = 80 74 | interval = 60 75 | 76 | [instance] 77 | interval = 60 78 | 79 | [xlog] 80 | enabled = False 81 | lag_more_then_in_sec = 300 82 | interval = 60 83 | 84 | [pgstatstatement] 85 | interval = 60 86 | 87 | [pgbuffercache] 88 | interval = 60 89 | 90 | [pgwaitsampling] 91 | enabled = False 92 | interval = 60 93 | 94 | [checkpoint] 95 | max_checkpoint_by_wal_in_hour = 12 96 | interval = 300 97 | 98 | [oldest] 99 | max_transaction_time = 18000 100 | max_xid_age = 18000000 101 | interval = 60 102 | 103 | [pglocks] 104 | interval = 60 105 | 106 | [cfs] 107 | enabled = False 108 | force_enable = False 109 | interval = 60 110 | 111 | [archivecommand] 112 | enabled = False 113 | max_count_files = 2 114 | interval = 60 115 | 116 | [procstat] 117 | interval = 60 118 | 119 | [diskstats] 120 | interval = 60 121 | 122 | [disksizes] 123 | vfs_percent_free = 10 124 | vfs_inode_percent_free = 10 125 | interval = 60 126 | 127 | [memory] 128 | interval = 60 129 | 130 | [systemuptime] 131 | up_time = 300 132 | interval = 60 133 | 134 | [openfiles] 135 | interval = 60 136 | 137 | [net] 138 | interval = 60 139 | 140 | [la] 141 | interval = 60 142 | 143 | 
[zbxsender] 144 | interval = 10 145 | 146 | [logsender] 147 | interval = 2 148 | 149 | [agentapi] 150 | interval = 60 151 | 152 | -------------------------------------------------------------------------------- /provisioning/roles/08_zabbix/04_zabbix_createDB/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ####### WARNING!!! This drops the zabbix database and user ####### 3 | ################# Disable this block if it is not needed! ################# 4 | - name: drop zabbix database 5 | postgresql_db: 6 | login_host: "{{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }}" 7 | login_user: postgres 8 | login_password: "{{ PASS_POSTGRES_FOR_DB }}" 9 | port: "{{ PORT_FOR_DB_CLIENT }}" 10 | name: zabbix 11 | state: absent 12 | 13 | - name: remove zabbix user 14 | postgresql_user: 15 | login_host: "{{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }}" 16 | login_user: postgres 17 | login_password: "{{ PASS_POSTGRES_FOR_DB }}" 18 | port: "{{ PORT_FOR_DB_CLIENT }}" 19 | name: zabbix 20 | state: absent 21 | ############################################################# 22 | 23 | - name: create zabbix user 24 | postgresql_user: 25 | login_host: "{{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }}" 26 | login_user: postgres 27 | login_password: "{{ PASS_POSTGRES_FOR_DB }}" 28 | port: "{{ PORT_FOR_DB_CLIENT }}" 29 | #db: template1 30 | name: zabbix 31 | password: "{{ PASS_ZAB_FOR_DB }}" 32 | encrypted: true 33 | state: present 34 | 35 | - name: create zabbix database 36 | postgresql_db: 37 | login_host: "{{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }}" 38 | login_user: postgres 39 | login_password: "{{ PASS_POSTGRES_FOR_DB }}" 40 | port: "{{ PORT_FOR_DB_CLIENT }}" 41 | name: zabbix 42 | owner: zabbix 43 | #encoding: UTF-8 44 | template: template1 45 | state: present 46 | 47 | ########## For some reason the database gets restored in a corrupted state ########## 48 | #- name: restore zabbix database from create.sql.gz 49 | # postgresql_db: 50 | # login_host: "{{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }}" 51 | # login_user: postgres 52 | # login_password: "{{ PASS_POSTGRES_FOR_DB }}" 53 | # port: "{{ PORT_FOR_DB_CLIENT }}" 54 | # name: zabbix 55 | # owner: zabbix 56 | # state: restore 57 | # #target: "{{ zabbix_sql_gz }}" 58 | # target: /usr/share/doc/zabbix-server-pgsql-4.2.7/create.sql.gz 59 | 60 | - name: restore zabbix database from create.sql.gz 61 | shell: 62 | zcat /usr/share/doc/zabbix-server-pgsql*/create.sql.gz | psql -U zabbix -h {{ HOST_NAME_PG_CON_POOL_VIP }} -p {{ PORT_FOR_DB_CLIENT }} zabbix 63 | 64 | ########## When working through odyssey, the postgres user 65 | ########## has no access to the public schema of another database. 66 | ########## That is why the query here is executed as the zabbix user, 67 | ########## not as the postgres user. 68 | ########## An issue has been filed with the developers.
69 | - name: setting a password for the Admin user (real username "Zabbix") to access the web interface 70 | postgresql_query: 71 | login_host: "{{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }}" 72 | # login_user: postgres 73 | # login_password: "{{ PASS_POSTGRES_FOR_DB }}" 74 | login_user: zabbix 75 | login_password: "{{ PASS_ZAB_FOR_DB }}" 76 | port: "{{ PORT_FOR_DB_CLIENT }}" 77 | db: zabbix 78 | query: update users set passwd=md5('{{ PASS_FOR_WEB_GUI_ZABBIX }}') where alias='Admin' 79 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/08_zabbix/04_zabbix_createDB/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ####### WARNING!!! This drops the zabbix database and user ####### 3 | ################# Disable this block if it is not needed! ################# 4 | - name: drop zabbix database 5 | postgresql_db: 6 | login_host: "{{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }}" 7 | login_user: postgres 8 | login_password: "{{ PASS_POSTGRES_FOR_DB }}" 9 | port: "{{ PORT_FOR_DB_CLIENT }}" 10 | name: zabbix 11 | state: absent 12 | 13 | - name: remove zabbix user 14 | postgresql_user: 15 | login_host: "{{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }}" 16 | login_user: postgres 17 | login_password: "{{ PASS_POSTGRES_FOR_DB }}" 18 | port: "{{ PORT_FOR_DB_CLIENT }}" 19 | name: zabbix 20 | state: absent 21 | ############################################################# 22 | 23 | - name: create zabbix user 24 | postgresql_user: 25 | login_host: "{{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }}" 26 | login_user: postgres 27 | login_password: "{{ PASS_POSTGRES_FOR_DB }}" 28 | port: "{{ PORT_FOR_DB_CLIENT }}" 29 | #db: template1 30 | name: zabbix 31 | password: "{{ PASS_ZAB_FOR_DB }}" 32 | encrypted: true 33 | state: present 34 | 35 | - name: create zabbix database 36 | postgresql_db: 37 | login_host: "{{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }}" 38 | login_user: postgres 39 | login_password: "{{ PASS_POSTGRES_FOR_DB }}" 40 | port: "{{ PORT_FOR_DB_CLIENT }}" 41 | name: zabbix 42 | owner: zabbix 43 | #encoding: UTF-8 44 | template: template1 45 | state: present 46 | 47 | ########## For some reason the database gets restored in a corrupted state ########## 48 | #- name: restore zabbix database from create.sql.gz 49 | # postgresql_db: 50 | # login_host: "{{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }}" 51 | # login_user: postgres 52 | # login_password: "{{ PASS_POSTGRES_FOR_DB }}" 53 | # port: "{{ PORT_FOR_DB_CLIENT }}" 54 | # name: zabbix 55 | # owner: zabbix 56 | # state: restore 57 | # #target: "{{ zabbix_sql_gz }}" 58 | # target: /usr/share/doc/zabbix-server-pgsql-4.2.7/create.sql.gz 59 | 60 | - name: restore zabbix database from create.sql.gz 61 | shell: 62 | zcat /usr/share/doc/zabbix-server-pgsql*/create.sql.gz | psql -U zabbix -h {{ HOST_NAME_PG_CON_POOL_VIP }} -p {{ PORT_FOR_DB_CLIENT }} zabbix 63 | 64 | ########## When working through odyssey, the postgres user 65 | ########## has no access to the public schema of another database. 66 | ########## That is why the query here is executed as the zabbix user, 67 | ########## not as the postgres user. 68 | ########## An issue has been filed with the developers.
69 | - name: setting a password for the Admin user (real username "Zabbix") to access the web interface 70 | postgresql_query: 71 | login_host: "{{ HOST_NAME_PG_CON_POOL_VIP }}.{{ FAKE_DOMAIN }}" 72 | # login_user: postgres 73 | # login_password: "{{ PASS_POSTGRES_FOR_DB }}" 74 | login_user: zabbix 75 | login_password: "{{ PASS_ZAB_FOR_DB }}" 76 | port: "{{ PORT_FOR_DB_CLIENT }}" 77 | db: zabbix 78 | query: update users set passwd=md5('{{ PASS_FOR_WEB_GUI_ZABBIX }}') where alias='Admin' 79 | -------------------------------------------------------------------------------- /tests/tank/02_web02.md: -------------------------------------------------------------------------------- 1 | # Web testing - web HA cluster with HTTP balancing 2 | 3 | The tests were aimed at the VIP address of the HTTP balancers, so traffic was distributed across the two already optimized web servers. Since the rate of queries to the database would grow, the database was obviously going to be the bottleneck in the test results, just as it was at the [previous stage](web01.md). 4 | 5 | The web servers have 1 CPU core and 2 GB of RAM. 6 | The database servers have 2 CPU cores and 3 GB of RAM. 7 | 8 | The database configuration is the default one. 9 | 10 | ## Tests 11 | 12 | ### optimized web + HTTP balancing 13 | 14 | ```yandex.tank > haproxy > 2 x web > haproxy > postgresql``` 15 | 16 | [https://overload.yandex.net/221060](https://overload.yandex.net/221060) 17 | 18 | and the load on the VMs 19 | 20 | ![02_web02_1.png](files/02_web02_1.png) 21 | 22 | The graphs show that, as expected, the performance of the stand ran into the database. In particular, iowait on the database server reaches 33.9%, and the number of database connections hit the maximum allowed by the current database configuration, which the logs confirm: 23 | 24 | ```bash 25 | [root@hl-pg01 log]# tail -f -n 3 postgresql-Mon.log 26 | 2019-10-21 17:45:03.162 MSK [27963] FATAL: remaining connection slots are reserved for non-replication superuser connections 27 | 2019-10-21 17:45:03.177 MSK [27964] FATAL: remaining connection slots are reserved for non-replication superuser connections 28 | 2019-10-21 17:45:03.195 MSK [27965] FATAL: remaining connection slots are reserved for non-replication superuser connections 29 | ``` 30 | 31 | ### optimized web + HTTP balancing + pgbouncer 32 | 33 | pgbouncer pool settings 34 | 35 | ```ini 36 | zabbix = host=hl-pg01.otus port=5432 pool_size=100 37 | pool_mode = transaction 38 | ``` 39 | 40 | ```yandex.tank > haproxy > 2 x web > pgbouncer > postgresql``` 41 | 42 | [https://overload.yandex.net/221490](https://overload.yandex.net/221490) 43 | 44 | Two tests were run a short time apart, which is why the graphs look like this: 45 | 46 | ![02_web02_2.png](files/02_web02_2.png) 47 | 48 | ### optimized web + HTTP balancing + initial postgresql tuning 49 | 50 | (without pgbouncer) 51 | 52 |
PostgreSQL configuration (click to open)

53 | 54 | ```bash 55 | # DB Version: 11 56 | # OS Type: linux 57 | # DB Type: dw 58 | # Total Memory (RAM): 4 GB 59 | # CPUs num: 2 60 | # Connections num: 200 61 | # Data Storage: hdd 62 | 63 | max_connections = 200 64 | shared_buffers = 1GB 65 | effective_cache_size = 3GB 66 | maintenance_work_mem = 512MB 67 | checkpoint_completion_target = 0.9 68 | wal_buffers = 16MB 69 | default_statistics_target = 500 70 | random_page_cost = 4 71 | effective_io_concurrency = 2 72 | work_mem = 2621kB 73 | min_wal_size = 4GB 74 | max_wal_size = 8GB 75 | max_worker_processes = 2 76 | max_parallel_workers_per_gather = 1 77 | max_parallel_workers = 2 78 | ``` 79 | 80 |

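For reference, a minimal sketch (not part of the original test run) of how a configuration like the one above could be applied on a standalone PostgreSQL 11 node; on this stand the same parameters are set through `patronictl edit-config`, as the provisioning roles do:

```bash
# Hypothetical example: apply a few of the settings listed above via ALTER SYSTEM;
# max_connections and shared_buffers only take effect after a restart.
psql -U postgres -c "ALTER SYSTEM SET max_connections = 200;"
psql -U postgres -c "ALTER SYSTEM SET shared_buffers = '1GB';"
psql -U postgres -c "ALTER SYSTEM SET effective_cache_size = '3GB';"
systemctl restart postgresql-11
```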
81 | 82 | [https://overload.yandex.net/222008](https://overload.yandex.net/222008) 83 | 84 | ## CONCLUSION 85 | 86 | Fine-tuning the database comes first. At this stage the pgbouncer connection pooler clearly brings no meaningful benefit, since it keeps holding connections to the database longer than the database itself does. 87 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/05_consul-cluster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: edit /etc/hosts (comment the 127.0.0.1 line) 3 | replace: 4 | path: "{{ HOSTS_FILE }}" 5 | #regexp: '(^(127\.0\.0\.1)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)$)' 6 | regexp: '(^(127\.0\.0\.1)(\s*|\t*)\{\{fqdn\}\}(\s*|\t*)(\{\{hostname\}\})(\s*|\t*)$)' 7 | replace: '#\1' 8 | tags: 9 | - update_hosts 10 | - change_name_HOST_NAME_PG_CON_POOL_VIP 11 | 12 | - name: edit /etc/hosts (comment the ::1 line) 13 | replace: 14 | path: "{{ HOSTS_FILE }}" 15 | #regexp: '(^(127\.0\.0\.1)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)$)' 16 | regexp: '(^(::1)(\s*|\t*)\{\{fqdn\}\}(\s*|\t*)(\{\{hostname\}\})(\s*|\t*)$)' 17 | replace: '#\1' 18 | tags: 19 | - update_hosts 20 | - change_name_HOST_NAME_PG_CON_POOL_VIP 21 | 22 | - name: cloud-init restart 23 | systemd: 24 | name: cloud-init 25 | state: restarted 26 | tags: 27 | - update_hosts 28 | - change_name_HOST_NAME_PG_CON_POOL_VIP 29 | 30 | - name: install jq 31 | yum: 32 | name: jq 33 | state: latest 34 | 35 | - name: download consul 36 | get_url: 37 | url: https://releases.hashicorp.com/consul/{{ CONSUL_VERSION }}/consul_{{ CONSUL_VERSION }}_linux_amd64.zip 38 | dest: /tmp/consul_{{ CONSUL_VERSION }}_linux_amd64.zip 39 | mode: '0600' 40 | 41 | - name: extract consul zip-archive 42 | unarchive: 43 | remote_src: yes 44 | src: /tmp/consul_{{ CONSUL_VERSION }}_linux_amd64.zip 45 | dest: /usr/local/bin/ 46 | 47 | - name: create system group "consul" 48 | group: 49 | name: consul 50 | system: yes 51 | state: present 52 | 53 | - name: add system user "consul" 54 | user: 55 | name: consul 56 | group: consul 57 | shell: /sbin/nologin 58 | home: /var/lib/consul 59 | system: yes 60 | 61 | - name: set permissions for consul data directories 62 | file: 63 | path: "{{ item }}" 64 | state: directory 65 | owner: consul 66 | group: consul 67 | mode: '0775' 68 | with_items: 69 | - /var/lib/consul 70 | - /etc/consul.d 71 | 72 | #- name: create directory for consul config files 73 | # file: 74 | # path: /etc/consul.d 75 | # state: directory 76 | # owner: consul 77 | # group: consul 78 | # mode: '0775' 79 | 80 | - name: put consul-server.service template 81 | template: 82 | src: consul-server.service.j2 83 | dest: /etc/systemd/system/consul-server.service 84 | owner: root 85 | group: root 86 | mode: '0644' 87 | 88 | #generate encryption key that will be used as the "encrypt" entry of ALL CONSUL NODES 89 | #CONSUL_KEY=$(consul keygen); echo $CONSUL_KEY 90 | #sgIjP/24ugFcKfJ5DJ1Ob29dB5jgzwZbXY0lED3RM9w= 91 | #see ansible variable CONSUL_KEY 92 | 93 | - name: put bootstrap consul configuration template 94 | template: 95 | src: consul-server.json.j2 96 | dest: /etc/consul.d/consul-server.json 97 | owner: root 98 | group: root 99 | mode: '0644' 100 | 101 | - name: consul-server restarted 102 | systemd: 103 | daemon_reload: yes 104 | name: consul-server 105 | state: restarted 106 | enabled: yes 107 | tags: 108 | - change_name_HOST_NAME_PG_CON_POOL_VIP 109 |
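#(Hypothetical smoke test, not part of the role: once consul-server is running
# on all three DCS nodes, cluster health can be checked from any of them
# with the stock consul CLI:
#   consul members                     # hl-dcs01..03 should be listed as alive servers
#   consul operator raft list-peers    # expect one leader and two followers)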
-------------------------------------------------------------------------------- /provisioning_proxmox/roles/06_pgsql-patroni/01_consul-client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: edit /etc/hosts (comment the 127.0.0.1 line) 3 | replace: 4 | path: "{{ HOSTS_FILE }}" 5 | #regexp: '(^(127\.0\.0\.1)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)$)' 6 | regexp: '(^(127\.0\.0\.1)(\s*|\t*)\{\{fqdn\}\}(\s*|\t*)(\{\{hostname\}\})(\s*|\t*)$)' 7 | replace: '#\1' 8 | tags: 9 | - update_hosts 10 | - change_name_HOST_NAME_PG_CON_POOL_VIP 11 | 12 | - name: edit /etc/hosts (comment the ::1 line) 13 | replace: 14 | path: "{{ HOSTS_FILE }}" 15 | #regexp: '(^(127\.0\.0\.1)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)(hl-[a-zA-Z]*[0-9]*)(\s*|\t*)$)' 16 | regexp: '(^(::1)(\s*|\t*)\{\{fqdn\}\}(\s*|\t*)(\{\{hostname\}\})(\s*|\t*)$)' 17 | replace: '#\1' 18 | tags: 19 | - update_hosts 20 | - change_name_HOST_NAME_PG_CON_POOL_VIP 21 | 22 | - name: cloud-init restart 23 | systemd: 24 | name: cloud-init 25 | state: restarted 26 | tags: 27 | - update_hosts 28 | - change_name_HOST_NAME_PG_CON_POOL_VIP 29 | 30 | - name: install jq 31 | yum: 32 | name: jq 33 | state: latest 34 | 35 | - name: download consul 36 | get_url: 37 | url: https://releases.hashicorp.com/consul/{{ CONSUL_VERSION }}/consul_{{ CONSUL_VERSION }}_linux_amd64.zip 38 | dest: /tmp/consul_{{ CONSUL_VERSION }}_linux_amd64.zip 39 | mode: '0600' 40 | 41 | - name: extract consul zip-archive 42 | unarchive: 43 | remote_src: yes 44 | src: /tmp/consul_{{ CONSUL_VERSION }}_linux_amd64.zip 45 | dest: /usr/local/bin/ 46 | 47 | - name: create system group "consul" 48 | group: 49 | name: consul 50 | system: yes 51 | state: present 52 | 53 | - name: add system user "consul" 54 | user: 55 | name: consul 56 | group: consul 57 | shell: /sbin/nologin 58 | home: /var/lib/consul 59 | system: yes 60 | 61 | - name: set permissions for consul data directories 62 | file: 63 | path: "{{ item }}" 64 | state: directory 65 | owner: consul 66 | group: consul 67 | mode: '0775' 68 | with_items: 69 | - /var/lib/consul 70 | - /etc/consul.d 71 | 72 | #- name: create directory for consul config files 73 | # file: 74 | # path: /etc/consul.d 75 | # state: directory 76 | # owner: consul 77 | # group: consul 78 | # mode: '0775' 79 | 80 | - name: copy consul-client.service 81 | copy: 82 | src: consul-client.service 83 | dest: /etc/systemd/system/consul-client.service 84 | owner: root 85 | group: root 86 | mode: '0644' 87 | 88 | #generate encryption key that will be used as the "encrypt" entry of ALL CONSUL NODES 89 | #CONSUL_KEY=$(consul keygen); echo $CONSUL_KEY 90 | #sgIjP/24ugFcKfJ5DJ1Ob29dB5jgzwZbXY0lED3RM9w= 91 | #see ansible variable CONSUL_KEY 92 | 93 | - name: put bootstrap consul configuration template 94 | template: 95 | src: consul-client.json.j2 96 | dest: /etc/consul.d/consul-client.json 97 | owner: root 98 | group: root 99 | mode: '0644' 100 | 101 | - name: consul-client restarted 102 | systemd: 103 | daemon_reload: yes 104 | name: consul-client 105 | state: restarted 106 | enabled: yes 107 | tags: 108 | - change_name_HOST_NAME_PG_CON_POOL_VIP 109 | -------------------------------------------------------------------------------- /provisioning/roles/03_keepalived-haproxy/templates/haproxy.cfg.j2: -------------------------------------------------------------------------------- 1 | #--------------------------------------------------------------------- 2 | # Global settings 3 |
#--------------------------------------------------------------------- 4 | global 5 | log 127.0.0.1 local0 6 | log 127.0.0.1 local1 notice 7 | #log loghost local0 info 8 | 9 | maxconn 4096 10 | maxsessrate 4096 11 | #chroot /usr/share/haproxy 12 | chroot /var/lib/haproxy 13 | pidfile /var/run/haproxy.pid 14 | 15 | user haproxy 16 | group haproxy 17 | 18 | daemon 19 | 20 | #debug 21 | #quiet 22 | 23 | # turn on stats unix socket 24 | stats socket /var/lib/haproxy/stats 25 | 26 | #--------------------------------------------------------------------- 27 | # common defaults that all the 'listen' and 'backend' sections will 28 | # use if not designated in their block 29 | #--------------------------------------------------------------------- 30 | defaults 31 | log global 32 | mode http 33 | option httplog 34 | option dontlognull 35 | option http-server-close 36 | option forwardfor except 127.0.0.0/8 37 | retries 3 38 | option redispatch 39 | timeout http-request 10s 40 | timeout queue 1m 41 | timeout connect 10s 42 | timeout client 1m 43 | timeout server 1m 44 | timeout http-keep-alive 10s 45 | timeout check 10s 46 | maxconn 10000 47 | #maxconn 3000 48 | #contimeout 5000 49 | #clitimeout 50000 50 | #srvtimeout 50000 51 | 52 | #--------------------------------------------------------------------- 53 | #HAProxy Monitoring Config 54 | #--------------------------------------------------------------------- 55 | #HAProxy monitoring runs on port 8080 56 | listen haproxy-stat *:8080 57 | mode http 58 | option forwardfor 59 | option httpclose 60 | stats enable 61 | stats show-legends 62 | stats refresh 15s 63 | 64 | #URL for HAProxy monitoring 65 | stats uri /stats 66 | stats realm Haproxy\ Statistics 67 | 68 | #User and Password for login to the monitoring dashboard 69 | stats auth {{ USER_HAPROXY }}:{{ PASS_FOR_HAPROXY }} 70 | stats admin if TRUE 71 | 72 | #This is optional, for monitoring the backend 73 | default_backend hl-zabbix 74 | 75 | #--------------------------------------------------------------------- 76 | # FrontEnd Configuration 77 | #--------------------------------------------------------------------- 78 | frontend main 79 | bind *:80 80 | option http-server-close 81 | option forwardfor 82 | default_backend hl-zabbix 83 | 84 | #--------------------------------------------------------------------- 85 | # BackEnd roundrobin as balance algorithm 86 | #--------------------------------------------------------------------- 87 | backend hl-zabbix 88 | mode http 89 | balance roundrobin 90 | option httpclose 91 | option forwardfor 92 | cookie SERVERNAME insert indirect nocache 93 | server {{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }} {{ HOST_IP_WEB01 }}:8080 maxconn 5000 cookie s1 check 94 | server {{ HOST_NAME_WEB02 }}.{{ FAKE_DOMAIN }} {{ HOST_IP_WEB02 }}:8080 maxconn 5000 cookie s2 check 95 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/03_keepalived-haproxy/templates/haproxy.cfg.j2: -------------------------------------------------------------------------------- 1 | #--------------------------------------------------------------------- 2 | # Global settings 3 | #--------------------------------------------------------------------- 4 | global 5 | log 127.0.0.1 local0 6 | log 127.0.0.1 local1 notice 7 | #log loghost local0 info 8 | 9 | maxconn 4096 10 | maxsessrate 4096 11 | #chroot /usr/share/haproxy 12 | chroot /var/lib/haproxy 13 | pidfile /var/run/haproxy.pid 14 | 15 | user haproxy 16 | group haproxy 17 | 18 | daemon 19 | 20 | #debug 21 | #quiet
22 | 23 | # turn on stats unix socket 24 | stats socket /var/lib/haproxy/stats 25 | 26 | #--------------------------------------------------------------------- 27 | # common defaults that all the 'listen' and 'backend' sections will 28 | # use if not designated in their block 29 | #--------------------------------------------------------------------- 30 | defaults 31 | log global 32 | mode http 33 | option httplog 34 | option dontlognull 35 | option http-server-close 36 | option forwardfor except 127.0.0.0/8 37 | retries 3 38 | option redispatch 39 | timeout http-request 10s 40 | timeout queue 1m 41 | timeout connect 10s 42 | timeout client 1m 43 | timeout server 1m 44 | timeout http-keep-alive 10s 45 | timeout check 10s 46 | maxconn 10000 47 | #maxconn 3000 48 | #contimeout 5000 49 | #clitimeout 50000 50 | #srvtimeout 50000 51 | 52 | #--------------------------------------------------------------------- 53 | #HAProxy Monitoring Config 54 | #--------------------------------------------------------------------- 55 | #HAProxy monitoring runs on port 8080 56 | listen haproxy-stat *:8080 57 | mode http 58 | option forwardfor 59 | option httpclose 60 | stats enable 61 | stats show-legends 62 | stats refresh 15s 63 | 64 | #URL for HAProxy monitoring 65 | stats uri /stats 66 | stats realm Haproxy\ Statistics 67 | 68 | #User and Password for login to the monitoring dashboard 69 | stats auth {{ USER_HAPROXY }}:{{ PASS_FOR_HAPROXY }} 70 | stats admin if TRUE 71 | 72 | #This is optional, for monitoring the backend 73 | default_backend hl-zabbix 74 | 75 | #--------------------------------------------------------------------- 76 | # FrontEnd Configuration 77 | #--------------------------------------------------------------------- 78 | frontend main 79 | bind *:80 80 | option http-server-close 81 | option forwardfor 82 | default_backend hl-zabbix 83 | 84 | #--------------------------------------------------------------------- 85 | # BackEnd roundrobin as balance algorithm 86 | #--------------------------------------------------------------------- 87 | backend hl-zabbix 88 | mode http 89 | balance roundrobin 90 | option httpclose 91 | option forwardfor 92 | cookie SERVERNAME insert indirect nocache 93 | server {{ HOST_NAME_WEB01 }}.{{ FAKE_DOMAIN }} {{ HOST_IP_WEB01 }}:8080 maxconn 5000 cookie s1 check 94 | server {{ HOST_NAME_WEB02 }}.{{ FAKE_DOMAIN }} {{ HOST_IP_WEB02 }}:8080 maxconn 5000 cookie s2 check 95 | -------------------------------------------------------------------------------- /provisioning/roles/01_tuning_OS/01_tuning_OS/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install epel 3 | yum: 4 | name: 5 | - epel-release 6 | 7 | - name: install packages 8 | yum: 9 | name: 10 | - chrony 11 | - libselinux-python 12 | - vim 13 | - vim-enhanced 14 | - mc 15 | - screen 16 | - ccze 17 | # - lnav 18 | - redhat-lsb-core 19 | - wget 20 | - yum-utils 21 | - htop 22 | - sudo 23 | - iftop 24 | - net-tools 25 | - elinks 26 | - lynx 27 | - bind-utils 28 | - deltarpm 29 | - lsof 30 | - tree 31 | - traceroute 32 | - tcpdump 33 | - nmap 34 | - unzip 35 | # - iperf3 36 | - lbzip2 37 | - fuse-sshfs 38 | - bash-completion 39 | state: latest 40 | notify: 41 | - chronyd start and enable 42 | 43 | - name: copy .screenrc to root user 44 | copy: 45 | src: screenrc 46 | dest: /root/.screenrc 47 | owner: root 48 | group: root 49 | mode: '0600' 50 | 51 | - name: edit bashrc, vimrc 52 | shell: 53 | echo "alias vi='vim'" >> /root/.bashrc && echo "colorscheme desert" >> /etc/vimrc
"colorscheme desert" >> /etc/vimrc 54 | 55 | - name: set timezone 56 | timezone: 57 | name: "{{ TIMEZONE_OS }}" 58 | 59 | - name: put SELinux in permissive mode 60 | selinux: 61 | policy: targeted 62 | state: permissive 63 | 64 | - name: add mappings to /etc/hosts 65 | blockinfile: 66 | path: "{{ HOSTS_FILE }}" 67 | block: | 68 | {{ item.ip }} {{ item.name }}.{{ FAKE_DOMAIN }} {{ item.name }} 69 | marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item.name }}" 70 | with_items: 71 | - { name: "{{ HOST_NAME_BALANCER_VIP }}", ip: "{{ HOST_IP_BALANCER_VIP }} "} 72 | - { name: "{{ HOST_NAME_BALANCER_01 }}", ip: "{{ HOST_IP_BALANCER_01 }} "} 73 | - { name: "{{ HOST_NAME_BALANCER_02 }}", ip: "{{ HOST_IP_BALANCER_02 }} "} 74 | #- { name: "{{ HOST_NAME_PG_HAPROXY }}", ip: "{{ HOST_IP_PG_HAPROXY }}" } 75 | - { name: "{{ HOST_NAME_PG_CON_POOL_VIP }}", ip: "{{ HOST_IP_PG_CON_POOL_VIP }}" } 76 | - { name: "{{ HOST_NAME_PG_CON_POOL_01 }}", ip: "{{ HOST_IP_PG_CON_POOL_01 }}" } 77 | - { name: "{{ HOST_NAME_PG_CON_POOL_02 }}", ip: "{{ HOST_IP_PG_CON_POOL_02 }}" } 78 | - { name: "{{ HOST_NAME_DCS_01 }}", ip: "{{ HOST_IP_DCS_01 }}" } 79 | - { name: "{{ HOST_NAME_DCS_02 }}", ip: "{{ HOST_IP_DCS_02 }}" } 80 | - { name: "{{ HOST_NAME_DCS_03 }}", ip: "{{ HOST_IP_DCS_03 }}" } 81 | - { name: "{{ HOST_NAME_PG01 }}", ip: "{{ HOST_IP_PG01 }}" } 82 | - { name: "{{ HOST_NAME_PG02 }}", ip: "{{ HOST_IP_PG02 }}" } 83 | - { name: "{{ HOST_NAME_PG03 }}", ip: "{{ HOST_IP_PG03 }}" } 84 | - { name: "{{ HOST_NAME_PG_VIP }}", ip: "{{ HOST_IP_PG_VIP }}" } 85 | #- { name: "{{ HOST_NAME_DCS }}", ip: "{{ HOST_IP_DCS }}" } 86 | - { name: "{{ HOST_NAME_WEB_VIP }}", ip: "{{ HOST_IP_WEB_VIP }}" } 87 | - { name: "{{ HOST_NAME_WEB01 }}", ip: "{{ HOST_IP_WEB01 }}" } 88 | - { name: "{{ HOST_NAME_WEB02 }}", ip: "{{ HOST_IP_WEB02 }}" } 89 | - { name: "{{ HOST_NAME_HL_CLIENT }}", ip: "{{ HOST_IP_HL_CLIENT }}" } 90 | tags: 91 | - update_hosts 92 | 93 | - name: firewalld disable 94 | service: 95 | name: firewalld 96 | state: stopped 97 | enabled: no 98 | 99 | - name: set the russian locale on database and zabbix servers 100 | shell: | 101 | localedef -i ru_RU -f UTF-8 ru_RU.UTF-8 102 | localectl set-locale LANG=ru_RU.UTF-8 103 | notify: 104 | - system restart 105 | # when: host in groups['database'] 106 | when: ('hl-pg0' in ansible_hostname) or ('hl-zabbix0' in ansible_hostname) 107 | -------------------------------------------------------------------------------- /tests/tank/files/03_db02_vacuum.md: -------------------------------------------------------------------------------- 1 | # VACUUM FULL 2 | 3 | При проведении дальнейших тестов в БД участились взаимные блокировки. 
Because of this, the bloated tables and indexes were compacted and the physical size of the database files was reduced: 4 | 5 | ```bash 6 | psql -U postgres -h /tmp -d zabbix -c "VACUUM FULL VERBOSE ANALYZE;" 7 | psql -U postgres -h /tmp -d zabbix -c "REINDEX DATABASE zabbix;" 8 | ``` 9 | 10 | The same can be done without the locks taken by VACUUM FULL, for example with the [pgcompacttable](https://github.com/dataegret/pgcompacttable) utility 11 | 12 | ```bash 13 | psql -U postgres -h /tmp -d zabbix -c "create extension if not exists pgstattuple;" 14 | pgcompacttable -h /tmp --all --force --verbose 15 | ``` 16 | 17 | ## Before compaction 18 | 19 | size of the zabbix database 20 | 21 | ```sql 22 | zabbix=# SELECT pg_size_pretty( pg_database_size( 'zabbix' ) ); 23 | pg_size_pretty 24 | ---------------- 25 | 1607 MB 26 | (1 row) 27 | ``` 28 | 29 | size of the 20 largest tables 30 | 31 | ```sql 32 | SELECT nspname || '.' || relname AS "relation", 33 | pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size" 34 | FROM pg_class C 35 | LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) 36 | WHERE nspname NOT IN ('pg_catalog', 'information_schema') 37 | AND C.relkind <> 'i' 38 | AND nspname !~ '^pg_toast' 39 | ORDER BY pg_total_relation_size(C.oid) DESC 40 | LIMIT 20; 41 | relation | total_size 42 | ---------------------------+------------ 43 | public.history | 593 MB 44 | public.history_uint | 447 MB 45 | public.sessions | 238 MB 46 | public.auditlog | 208 MB 47 | public.trends_uint | 48 MB 48 | public.trends | 45 MB 49 | public.events | 2592 kB 50 | public.items | 2280 kB 51 | public.history_str | 1728 kB 52 | public.images | 1184 kB 53 | public.triggers | 672 kB 54 | public.items_applications | 576 kB 55 | public.history_text | 560 kB 56 | public.problem | 400 kB 57 | public.profiles | 400 kB 58 | public.functions | 376 kB 59 | public.event_recovery | 368 kB 60 | public.item_discovery | 304 kB 61 | public.graphs_items | 288 kB 62 | public.graphs | 272 kB 63 | (20 rows) 64 | ``` 65 | 66 | ## After compaction 67 | 68 | size of the zabbix database 69 | 70 | ```sql 71 | zabbix=# SELECT pg_size_pretty( pg_database_size( 'zabbix' ) ); 72 | pg_size_pretty 73 | ---------------- 74 | 1019 MB 75 | (1 row) 76 | ``` 77 | 78 | size of the 20 largest tables 79 | 80 | ```sql 81 | SELECT nspname || '.' || relname AS "relation",
82 | pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size" 83 | FROM pg_class C 84 | LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) 85 | WHERE nspname NOT IN ('pg_catalog', 'information_schema') 86 | AND C.relkind <> 'i' 87 | AND nspname !~ '^pg_toast' 88 | ORDER BY pg_total_relation_size(C.oid) DESC 89 | LIMIT 20; 90 | relation | total_size 91 | ---------------------------+------------ 92 | public.history | 329 MB 93 | public.history_uint | 226 MB 94 | public.sessions | 194 MB 95 | public.auditlog | 188 MB 96 | public.trends_uint | 31 MB 97 | public.trends | 29 MB 98 | public.items | 1952 kB 99 | public.events | 1232 kB 100 | public.images | 1168 kB 101 | public.history_str | 992 kB 102 | public.triggers | 504 kB 103 | public.items_applications | 464 kB 104 | public.history_text | 336 kB 105 | public.functions | 312 kB 106 | public.graphs_items | 248 kB 107 | public.event_recovery | 240 kB 108 | public.graphs | 224 kB 109 | public.item_discovery | 216 kB 110 | public.hosts | 144 kB 111 | public.item_preproc | 136 kB 112 | (20 rows) 113 | ``` 114 | -------------------------------------------------------------------------------- /provisioning/roles/04_pgconpool/05_install_odyssey/templates/odyssey.conf.j2: -------------------------------------------------------------------------------- 1 | ### 2 | ### SERVICE 3 | ### 4 | 5 | #daemonize no 6 | #priority -10 7 | # pid_file "/var/run/odyssey.pid" 8 | 9 | unix_socket_dir "/tmp" 10 | unix_socket_mode "0644" 11 | 12 | ### 13 | ### LOGGING 14 | ### 15 | 16 | #log_file "/var/log/odyssey.log" 17 | 18 | log_format "%p %t %l [%i %s] [user - %u, db - %d] (%c) %m\n" 19 | log_config yes 20 | #log_debug yes 21 | #log_session yes 22 | #log_query yes 23 | #log_stats yes 24 | log_debug no 25 | log_session no 26 | log_query no 27 | log_stats no 28 | 29 | #stats_interval 60 30 | stats_interval 300 31 | 32 | ### 33 | ### PERFORMANCE 34 | ### 35 | 36 | #workers 1 37 | #resolvers 1 38 | readahead 8192 39 | cache_coroutine 210 40 | 41 | nodelay yes 42 | keepalive 7200 43 | 44 | ### 45 | ### GLOBAL LIMITS 46 | ### 47 | 48 | #client_max 2000 49 | client_max_routing 32 50 | 51 | ### 52 | ### LISTEN 53 | ### 54 | 55 | listen { 56 | tls "disable" 57 | host "*" 58 | # port 6432 59 | port 5000 60 | } 61 | 62 | ### 63 | ### ROUTING 64 | ### 65 | 66 | storage "local" { 67 | type "local" 68 | #tls "disable" 69 | } 70 | 71 | database "console" { 72 | user "odyssey" { 73 | #authentication "none" 74 | authentication "md5" 75 | password "{{ PASS_ODYSSEY_USER }}" 76 | pool "session" 77 | storage "local" 78 | } 79 | } 80 | 81 | storage "postgres_server" { 82 | type "remote" 83 | tls "disable" 84 | host "{{ HOST_NAME_PG_VIP }}.{{ FAKE_DOMAIN }}" 85 | port 5432 86 | } 87 | 88 | database "postgres" { 89 | user "postgres" { 90 | authentication "md5" 91 | password "{{ PASS_POSTGRES_FOR_DB }}" 92 | storage "postgres_server" 93 | storage_user "postgres" 94 | storage_password "{{ PASS_POSTGRES_FOR_DB }}" 95 | pool "session" 96 | client_max 4 97 | pool_size 0 98 | pool_timeout 0 99 | pool_ttl 60 100 | pool_cancel yes 101 | pool_discard yes 102 | pool_rollback yes 103 | client_fwd_error yes 104 | log_debug no 105 | } 106 | } 107 | 108 | database "template1" { 109 | user "postgres" { 110 | authentication "md5" 111 | password "{{ PASS_POSTGRES_FOR_DB }}" 112 | storage "postgres_server" 113 | storage_user "postgres" 114 | storage_password "{{ PASS_POSTGRES_FOR_DB }}" 115 | pool "session" 116 | client_max 4 117 | pool_size 0 118 | pool_timeout 0 119 |
pool_ttl 60 120 | pool_cancel yes 121 | pool_discard yes 122 | pool_rollback yes 123 | client_fwd_error yes 124 | log_debug no 125 | } 126 | } 127 | 128 | database "zabbix" { 129 | user "postgres" { 130 | authentication "md5" 131 | password "{{ PASS_POSTGRES_FOR_DB }}" 132 | storage "postgres_server" 133 | storage_user "postgres" 134 | storage_db "postgres" 135 | storage_password "{{ PASS_POSTGRES_FOR_DB }}" 136 | pool "session" 137 | client_max 10 138 | pool_size 0 139 | pool_timeout 0 140 | pool_ttl 60 141 | pool_cancel yes 142 | #pool_discard no 143 | pool_discard yes 144 | pool_rollback yes 145 | client_fwd_error yes 146 | log_debug no 147 | } 148 | user "zabbix" { 149 | authentication "md5" 150 | password "{{ PASS_ZAB_FOR_DB }}" 151 | storage "postgres_server" 152 | storage_user "zabbix" 153 | storage_password "{{ PASS_ZAB_FOR_DB }}" 154 | #pool "transaction" 155 | pool "session" 156 | client_max 2000 157 | pool_size 185 158 | pool_timeout 0 159 | pool_ttl 60 160 | pool_cancel yes 161 | #pool_discard no 162 | pool_discard yes 163 | pool_rollback yes 164 | client_fwd_error yes 165 | log_debug no 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /provisioning_proxmox/roles/04_pgconpool/05_install_odyssey/templates/odyssey.conf.j2: -------------------------------------------------------------------------------- 1 | ### 2 | ### SERVICE 3 | ### 4 | 5 | #daemonize no 6 | #priority -10 7 | # pid_file "/var/run/odyssey.pid" 8 | 9 | unix_socket_dir "/tmp" 10 | unix_socket_mode "0644" 11 | 12 | ### 13 | ### LOGGING 14 | ### 15 | 16 | #log_file "/var/log/odyssey.log" 17 | 18 | log_format "%p %t %l [%i %s] [user - %u, db - %d] (%c) %m\n" 19 | log_config yes 20 | #log_debug yes 21 | #log_session yes 22 | #log_query yes 23 | #log_stats yes 24 | log_debug no 25 | log_session no 26 | log_query no 27 | log_stats no 28 | 29 | #stats_interval 60 30 | stats_interval 300 31 | 32 | ### 33 | ### PERFORMANCE 34 | ### 35 | 36 | #workers 1 37 | #resolvers 1 38 | readahead 8192 39 | cache_coroutine 210 40 | 41 | nodelay yes 42 | keepalive 7200 43 | 44 | ### 45 | ### GLOBAL LIMITS 46 | ### 47 | 48 | #client_max 2000 49 | client_max_routing 32 50 | 51 | ### 52 | ### LISTEN 53 | ### 54 | 55 | listen { 56 | tls "disable" 57 | host "*" 58 | # port 6432 59 | port 5000 60 | } 61 | 62 | ### 63 | ### ROUTING 64 | ### 65 | 66 | storage "local" { 67 | type "local" 68 | #tls "disable" 69 | } 70 | 71 | database "console" { 72 | user "odyssey" { 73 | #authentication "none" 74 | authentication "md5" 75 | password "{{ PASS_ODYSSEY_USER }}" 76 | pool "session" 77 | storage "local" 78 | } 79 | } 80 | 81 | storage "postgres_server" { 82 | type "remote" 83 | tls "disable" 84 | host "{{ HOST_NAME_PG_VIP }}.{{ FAKE_DOMAIN }}" 85 | port 5432 86 | } 87 | 88 | database "postgres" { 89 | user "postgres" { 90 | authentication "md5" 91 | password "{{ PASS_POSTGRES_FOR_DB }}" 92 | storage "postgres_server" 93 | storage_user "postgres" 94 | storage_password "{{ PASS_POSTGRES_FOR_DB }}" 95 | pool "session" 96 | client_max 4 97 | pool_size 0 98 | pool_timeout 0 99 | pool_ttl 60 100 | pool_cancel yes 101 | pool_discard yes 102 | pool_rollback yes 103 | client_fwd_error yes 104 | log_debug no 105 | } 106 | } 107 | 108 | database "template1" { 109 | user "postgres" { 110 | authentication "md5" 111 | password "{{ PASS_POSTGRES_FOR_DB }}" 112 | storage "postgres_server" 113 | storage_user "postgres" 114 | storage_password "{{ PASS_POSTGRES_FOR_DB }}" 115 | pool "session" 116 | client_max 4 117 | 
pool_size 0 118 | pool_timeout 0 119 | pool_ttl 60 120 | pool_cancel yes 121 | pool_discard yes 122 | pool_rollback yes 123 | client_fwd_error yes 124 | log_debug no 125 | } 126 | } 127 | 128 | database "zabbix" { 129 | user "postgres" { 130 | authentication "md5" 131 | password "{{ PASS_POSTGRES_FOR_DB }}" 132 | storage "postgres_server" 133 | storage_user "postgres" 134 | storage_db "postgres" 135 | storage_password "{{ PASS_POSTGRES_FOR_DB }}" 136 | pool "session" 137 | client_max 10 138 | pool_size 0 139 | pool_timeout 0 140 | pool_ttl 60 141 | pool_cancel yes 142 | #pool_discard no 143 | pool_discard yes 144 | pool_rollback yes 145 | client_fwd_error yes 146 | log_debug no 147 | } 148 | user "zabbix" { 149 | authentication "md5" 150 | password "{{ PASS_ZAB_FOR_DB }}" 151 | storage "postgres_server" 152 | storage_user "zabbix" 153 | storage_password "{{ PASS_ZAB_FOR_DB }}" 154 | #pool "transaction" 155 | pool "session" 156 | client_max 2000 157 | pool_size 185 158 | pool_timeout 0 159 | pool_ttl 60 160 | pool_cancel yes 161 | #pool_discard no 162 | pool_discard yes 163 | pool_rollback yes 164 | client_fwd_error yes 165 | log_debug no 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /provisioning/roles/06_pgsql-patroni/03_pgsql_optimization/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: setting optimized postgresql parameters and restarting the current patroni node 3 | shell: | 4 | patronictl -c /etc/patroni.yml edit-config \ 5 | --pg max_connections=200 \ 6 | --pg shared_buffers=128MB \ 7 | --pg effective_cache_size=384MB \ 8 | --pg maintenance_work_mem=32MB \ 9 | --pg checkpoint_completion_target=0.9 \ 10 | --pg wal_buffers=3932kB \ 11 | --pg default_statistics_target=100 \ 12 | --pg random_page_cost=4 \ 13 | --pg effective_io_concurrency=2 \ 14 | --pg work_mem=327kB \ 15 | --pg min_wal_size=2GB \ 16 | --pg max_wal_size=4GB \ 17 | --pg checkpoint_timeout=5min \ 18 | --pg synchronous_commit=off \ 19 | --pg wal_compression=on \ 20 | --force 21 | sleep 5 22 | patronictl -c /etc/patroni.yml restart {{ FAKE_DOMAIN }} {{ ansible_hostname }} --force 23 | tags: patroni_edit-config 24 | 25 | - name: waiting until postgresql is up and running 26 | wait_for: 27 | port: 5432 28 | host: "{{ ansible_hostname }}" 29 | delay: 10 30 | timeout: 120 31 | 32 | - name: patroni stop 33 | systemd: 34 | name: patroni 35 | state: stopped 36 | 37 | - name: create a cron entry "disabling transparent_hugepage on reboot" 38 | cron: 39 | name: "disabling transparent_hugepage on reboot" 40 | special_time: reboot 41 | job: "/usr/bin/echo never > /sys/kernel/mm/transparent_hugepage/enabled" 42 | 43 | - name: create a cron entry "disabling defrag transparent_hugepage on reboot" 44 | cron: 45 | name: "disabling defrag transparent_hugepage on reboot" 46 | special_time: reboot 47 | job: "/usr/bin/echo never > /sys/kernel/mm/transparent_hugepage/defrag" 48 | 49 | - name: disabling transparent huge pages 50 | shell: | 51 | echo never > /sys/kernel/mm/transparent_hugepage/enabled 52 | echo never > /sys/kernel/mm/transparent_hugepage/defrag 53 | 54 | - name: get uid postgres user 55 | shell: id -u postgres 56 | register: uid_postgres 57 | 58 | #- name: debug registered var 59 | # debug: var=uid_postgres.stdout 60 | #- name: test shell for registered var 61 | # shell: echo {{ uid_postgres.stdout }} > /tmp/testfile 62 | 63 | - name: get gid postgres user 64 | shell: id -g postgres 65 |
register: gid_postgres 66 | 67 | - name: edit fstab - mounting the pg_stat_tmp directory in RAM 68 | mount: 69 | path: /var/lib/pgsql/11/data/pg_stat_tmp 70 | src: tmpfs 71 | fstype: tmpfs 72 | opts: noatime,nodiratime,defaults,size=16M,mode=700,uid={{ uid_postgres.stdout }},gid={{ gid_postgres.stdout }} 73 | state: mounted 74 | 75 | - name: edit fstab - add noatime mount option to root partition 76 | lineinfile: 77 | path: /etc/fstab 78 | backup: yes 79 | backrefs: yes 80 | regexp: '^(UUID=[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}\s+\/\s+xfs\s+)(defaults)(\s+0\s+0)$' 81 | #regexp: '^(\/dev\/mapper\/centos-root\s+\/\s+xfs\s+)(defaults)(\s+0\s+0)$' 82 | line: '\1\2,noatime\3' 83 | register: fstab 84 | tags: update_fstab_to_pg0X 85 | 86 | #- name: if /etc/fstab changed, remount root partition 87 | # shell: mount / -v -o remount 88 | # when: fstab.changed 89 | # tags: update_fstab_to_pg0X 90 | 91 | - name: if /etc/fstab changed, remount root partition 92 | mount: 93 | path: / 94 | state: remounted 95 | when: fstab.changed 96 | tags: update_fstab_to_pg0X 97 | 98 | - name: create /etc/sysctl.d/30-postgresql.conf 99 | file: 100 | path: /etc/sysctl.d/30-postgresql.conf 101 | owner: root 102 | group: root 103 | mode: '0644' 104 | state: touch 105 | 106 | - name: edit /etc/sysctl.d/30-postgresql.conf 107 | sysctl: 108 | name: "{{ item.key }}" 109 | value: "{{ item.value }}" 110 | sysctl_file: /etc/sysctl.d/30-postgresql.conf 111 | sysctl_set: yes 112 | state: present 113 | reload: yes 114 | with_items: 115 | - { key: "vm.nr_hugepages", value: "196" } 116 | - { key: "vm.hugetlb_shm_group", value: "{{ gid_postgres.stdout }}" } 117 | - { key: "kernel.sched_migration_cost_ns", value: "5000000" } 118 | 119 | - name: patroni start 120 | systemd: 121 | name: patroni 122 | state: started 123 | --------------------------------------------------------------------------------
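Since the role above finishes by starting patroni again, a quick hypothetical check (not part of the role) that the optimizations took effect on a node could look like this:

```bash
# Hypothetical verification, using the paths and values from the role above:
cat /sys/kernel/mm/transparent_hugepage/enabled   # expect "[never]" to be selected
mount | grep pg_stat_tmp                          # the tmpfs mount should be present
sysctl vm.nr_hugepages                            # expect 196
patronictl -c /etc/patroni.yml list               # cluster members and their roles
```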