├── test ├── .gitignore ├── simple │ ├── certs │ │ ├── .gitignore │ │ ├── setup_for_test.sh │ │ ├── recreate_root_cert.sh │ │ ├── recreate_server_certs.sh │ │ ├── gen_root_cert.sh │ │ └── gen_server_cert.sh │ ├── setup_for_test.sh │ ├── README.md │ ├── Vagrantfile │ ├── run.sh │ ├── inventory_12 │ │ ├── hosts.yml │ │ └── group_vars │ │ │ └── all │ │ │ └── test_all.yml │ ├── inventory_13 │ │ ├── hosts.yml │ │ └── group_vars │ │ │ └── all │ │ │ └── test_all.yml │ ├── inventory_12_1.5 │ │ ├── hosts.yml │ │ └── group_vars │ │ │ └── all │ │ │ └── test_all.yml │ ├── inventory_13_1.6 │ │ ├── hosts.yml │ │ └── group_vars │ │ │ └── all │ │ │ └── test_all.yml │ └── inventory_14_1.6 │ │ ├── hosts.yml │ │ └── group_vars │ │ └── all │ │ └── test_all.yml └── upgrade_1.3_to_1.4 │ ├── certs │ ├── .gitignore │ ├── setup_for_test.sh │ ├── recreate_root_cert.sh │ ├── recreate_server_certs.sh │ ├── gen_root_cert.sh │ └── gen_server_cert.sh │ ├── setup_for_test.sh │ ├── README.md │ ├── Vagrantfile │ ├── run.sh │ └── inventory │ ├── hosts.yml │ └── group_vars │ └── all │ └── test_all.yml ├── files └── .gitignore ├── tools └── health_monitor │ ├── create_venv.sh │ ├── .dockerignore │ ├── runDev.sh │ ├── requirements.txt │ ├── Dockerfile │ ├── build.sh │ ├── config.py │ ├── run.sh │ ├── LICENSE │ ├── monitor.py │ ├── README.md │ └── .gitignore ├── certs └── pg_auto_failover │ ├── .gitignore │ ├── recreate_root_cert.sh │ ├── recreate_server_certs.sh │ ├── gen_root_cert.sh │ └── gen_server_cert.sh ├── success.png ├── roles ├── postgres-cluster-data-initialize-new │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── register_node.yml │ │ ├── setup_service.yml │ │ ├── main.yml │ │ ├── register_node_1.3.yml │ │ ├── register_node_1.4.yml │ │ ├── register_node_1.5.yml │ │ └── register_node_1.6.yml ├── postgres-cluster-data-initialize-existing │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── existing_node.yml │ │ └── main.yml ├── postgres-cluster-install │ ├── vars │ │ ├── Ubuntu-18.yml │ │ └── Ubuntu-20.yml │ ├── templates │ │ ├── postgres.sh.j2 │ │ └── pgdg.preferences.j2 │ ├── tasks │ │ ├── initialize.yml │ │ ├── main.yml │ │ └── setup-Debian.yml │ ├── defaults │ │ └── main.yml │ └── LICENSE ├── copy-ssl-certs │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── postgres-cluster-configure │ ├── tasks │ │ ├── main.yml │ │ ├── restart_postgres.yml │ │ └── configure.yml │ ├── templates │ │ └── postgres.sh.j2 │ ├── defaults │ │ └── main.yml │ └── LICENSE ├── postgres-cluster-backup-setup │ ├── files │ │ └── start_dump.sh │ ├── templates │ │ └── dump_all.sh.j2 │ └── tasks │ │ └── main.yml ├── postgres-pre-setup │ ├── tasks │ │ └── main.yml │ └── files │ │ └── postgres12.asc ├── essential-software-setup │ ├── subtasks │ │ └── molly-guard.yml │ └── tasks │ │ └── main.yml ├── postgres-cluster-pgbouncer-client-setup │ ├── templates │ │ ├── userlist.txt.j2 │ │ └── pgbouncer.ini.j2 │ └── tasks │ │ └── main.yml ├── postgres-cluster-xinetd │ ├── templates │ │ ├── pgsqlchck_xinetd.j2 │ │ └── pgsqlchck.j2 │ └── tasks │ │ └── main.yml ├── postgres-cluster-hba-config │ ├── templates │ │ ├── pg_hba.conf.j2 │ │ └── pg_ident.conf.j2 │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── postgres-cluster-monitor-initialize │ └── tasks │ │ ├── monitor_init_1.3.yml │ │ ├── monitor_init_1.4.yml │ │ ├── monitor_init_1.5.yml │ │ ├── monitor_init_1.6.yml │ │ └── main.yml ├── postgres-cluster-databases │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── add_schemas.yml │ │ └── main.yml ├── postgres-cluster-groups │ ├── 
defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── postgres-cluster-users │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── postgres-cluster-pg-auto-failover-upgrade-pre-1.4 │ ├── tasks │ │ ├── main.yml │ │ └── upgrade.yml │ └── files │ │ └── deb.sh ├── postgres-cluster-pgbouncer-setup │ └── tasks │ │ └── main.yml ├── postgres-cluster-user-setup │ └── tasks │ │ └── main.yml ├── force-reconnect │ └── tasks │ │ └── main.yml ├── postgres-cluster-pg-auto-failover-install │ ├── tasks │ │ └── main.yml │ └── files │ │ └── deb.sh ├── postgres-cluster-load-vars │ ├── vars │ │ ├── Ubuntu-18.yml │ │ └── Ubuntu-20.yml │ └── tasks │ │ └── main.yml └── user-setup │ ├── tasks │ ├── main.yml │ └── subtasks │ │ └── setup-single-user.yml │ └── files │ └── .bashrc ├── connection_strings.png ├── postgres_cluster_backup_setup.yml ├── postgres_cluster_upgrade_pre_1.4.yml ├── postgres_cluster_database_users.yml ├── postgres_cluster_pgbouncer.yml ├── postgres_cluster_xinetd.yml ├── LICENSE ├── base_setup.yml ├── inventories └── pg_auto_failover │ ├── hosts.yml │ └── group_vars │ └── all │ └── pg_auto_failover_test_all_config.yml ├── postgres_cluster_servers.yml └── README.md /test/.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant 2 | *.log -------------------------------------------------------------------------------- /files/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | ** 3 | !.gitignore 4 | -------------------------------------------------------------------------------- /test/simple/certs/.gitignore: -------------------------------------------------------------------------------- 1 | root_ca 2 | test 3 | -------------------------------------------------------------------------------- /tools/health_monitor/create_venv.sh: -------------------------------------------------------------------------------- 1 | python3 -m venv venv -------------------------------------------------------------------------------- /test/upgrade_1.3_to_1.4/certs/.gitignore: -------------------------------------------------------------------------------- 1 | root_ca 2 | test 3 | -------------------------------------------------------------------------------- /tools/health_monitor/.dockerignore: -------------------------------------------------------------------------------- 1 | config.py 2 | venv 3 | __pycache__ -------------------------------------------------------------------------------- /tools/health_monitor/runDev.sh: -------------------------------------------------------------------------------- 1 | FLASK_APP=monitor python3 -m flask run -------------------------------------------------------------------------------- /tools/health_monitor/requirements.txt: -------------------------------------------------------------------------------- 1 | psycopg2==2.9.2 2 | Flask==2.0.3 3 | gunicorn==20.1.0 -------------------------------------------------------------------------------- /certs/pg_auto_failover/.gitignore: -------------------------------------------------------------------------------- 1 | af-database-01 2 | af-database-02 3 | af-monitor-01 4 | root_ca -------------------------------------------------------------------------------- /success.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuroforgede/pg_auto_failover_ansible/HEAD/success.png -------------------------------------------------------------------------------- 
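Editor's note: the tools/health_monitor entries above (create_venv.sh, .dockerignore, runDev.sh, requirements.txt) each cover one step of the local development workflow, but nothing in the repository chains them together. The following is a minimal illustrative sketch, not a file from the repository, assuming it is run from tools/health_monitor, that the venv directory created by create_venv.sh is ./venv, and that the committed config.py (shown further below, pointing at the 10.0.0.x test IPs from test/simple) is the configuration in use.

#!/bin/bash
# Illustrative local bootstrap for tools/health_monitor (editor's sketch, not part of the repo):
# create the virtualenv via the provided create_venv.sh, install the pinned requirements
# into it, then start the Flask dev server the same way runDev.sh does.
set -euo pipefail

bash create_venv.sh              # runs "python3 -m venv venv"
source venv/bin/activate         # use the freshly created virtualenv
pip install -r requirements.txt  # psycopg2, Flask, gunicorn as pinned in requirements.txt
FLASK_APP=monitor python3 -m flask run   # same invocation as runDev.sh

In the container image the equivalent production entrypoint is run.sh (gunicorn on 0.0.0.0:8080), as shown later in this dump; the sketch above only covers the local, non-Docker path.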
/roles/postgres-cluster-data-initialize-new/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | postgresql_cluster_user: "postgres" -------------------------------------------------------------------------------- /roles/postgres-cluster-data-initialize-existing/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | postgresql_cluster_user: "postgres" -------------------------------------------------------------------------------- /connection_strings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuroforgede/pg_auto_failover_ansible/HEAD/connection_strings.png -------------------------------------------------------------------------------- /roles/postgres-cluster-install/vars/Ubuntu-18.yml: -------------------------------------------------------------------------------- 1 | --- 2 | __postgresql_cluster_python_library: python-psycopg2 3 | -------------------------------------------------------------------------------- /roles/postgres-cluster-install/vars/Ubuntu-20.yml: -------------------------------------------------------------------------------- 1 | --- 2 | __postgresql_cluster_python_library: python3-psycopg2 3 | -------------------------------------------------------------------------------- /test/simple/certs/setup_for_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | bash recreate_root_cert.sh 3 | bash recreate_server_certs.sh 4 | -------------------------------------------------------------------------------- /test/simple/setup_for_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd certs 3 | bash setup_for_test.sh 4 | 5 | vagrant destroy 6 | vagrant up -------------------------------------------------------------------------------- /roles/copy-ssl-certs/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ssl_certs_base_dir: "{{playbook_dir}}/files/certs/{{inventory_hostname}}" -------------------------------------------------------------------------------- /test/simple/certs/recreate_root_cert.sh: -------------------------------------------------------------------------------- 1 | rm -rf root_ca 2 | 3 | bash gen_root_cert.sh root_ca N9QaJuxAzQZWeMw6u2G96Wha2eB4GTn9 -------------------------------------------------------------------------------- /test/upgrade_1.3_to_1.4/certs/setup_for_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | bash recreate_root_cert.sh 3 | bash recreate_server_certs.sh 4 | -------------------------------------------------------------------------------- /certs/pg_auto_failover/recreate_root_cert.sh: -------------------------------------------------------------------------------- 1 | rm -rf root_ca 2 | 3 | bash gen_root_cert.sh root_ca N9QaJuxAzQZWeMw6u2G96Wha2eB4GTn9 -------------------------------------------------------------------------------- /test/upgrade_1.3_to_1.4/setup_for_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd certs 3 | bash setup_for_test.sh 4 | 5 | vagrant destroy 6 | vagrant up -------------------------------------------------------------------------------- /test/simple/README.md: 
-------------------------------------------------------------------------------- 1 | # How to test 2 | 3 | 1. run `bash setup_for_test.sh` 4 | 2. run `bash run.sh ` -------------------------------------------------------------------------------- /test/upgrade_1.3_to_1.4/certs/recreate_root_cert.sh: -------------------------------------------------------------------------------- 1 | rm -rf root_ca 2 | 3 | bash gen_root_cert.sh root_ca N9QaJuxAzQZWeMw6u2G96Wha2eB4GTn9 -------------------------------------------------------------------------------- /test/upgrade_1.3_to_1.4/README.md: -------------------------------------------------------------------------------- 1 | # How to test 2 | 3 | 1. run `bash setup_for_test.sh` 4 | 2. run `bash run.sh ` -------------------------------------------------------------------------------- /roles/postgres-cluster-configure/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: configure.yml 3 | 4 | # TODO: smart deletion of default cluster, we dont need it -------------------------------------------------------------------------------- /roles/postgres-cluster-data-initialize-new/tasks/register_node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: "register_node_{{ postgresql_pg_auto_failover_version }}.yml" -------------------------------------------------------------------------------- /roles/postgres-cluster-backup-setup/files/start_dump.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | /usr/bin/flock -n /tmp/postgres-dump.lck bash /data/ansible/pg_dump/dump_all.sh -------------------------------------------------------------------------------- /roles/postgres-cluster-install/templates/postgres.sh.j2: -------------------------------------------------------------------------------- 1 | export PGDATA={{ postgresql_cluster_data_dir }} 2 | export PATH=$PATH:{{ postgresql_cluster_bin_path }} 3 | -------------------------------------------------------------------------------- /roles/postgres-cluster-configure/templates/postgres.sh.j2: -------------------------------------------------------------------------------- 1 | export PGDATA={{ postgresql_cluster_data_dir }} 2 | export PATH=$PATH:{{ postgresql_cluster_bin_path }} 3 | -------------------------------------------------------------------------------- /postgres_cluster_backup_setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: postgres_cluster 4 | become: true 5 | roles: 6 | - name: postgres-cluster-load-vars 7 | - name: postgres-cluster-backup-setup 8 | -------------------------------------------------------------------------------- /roles/postgres-cluster-install/tasks/initialize.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set PostgreSQL environment variables. 
3 | template: 4 | src: postgres.sh.j2 5 | dest: /etc/profile.d/postgres.sh 6 | mode: 0644 7 | -------------------------------------------------------------------------------- /postgres_cluster_upgrade_pre_1.4.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: postgres_cluster 4 | become: true 5 | any_errors_fatal: true 6 | roles: 7 | - name: postgres-cluster-load-vars 8 | # make sure we have the latest hba config 9 | - role: postgres-cluster-pg-auto-failover-upgrade-pre-1.4 -------------------------------------------------------------------------------- /roles/postgres-cluster-backup-setup/templates/dump_all.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DATE=`date '+%Y_%m_%d_%H_%M_%S'` 4 | 5 | su postgres -c "pg_basebackup --write-recovery-conf -X stream -F tar --gzip -p {{ postgresql_cluster_port | default('5433') }} -D /data/ansible/pg_dumps/pg_basebackup_${DATE}" -------------------------------------------------------------------------------- /tools/health_monitor/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10 2 | 3 | RUN apt-get update && apt-get install -y \ 4 | python3-setuptools 5 | 6 | RUN pip install --upgrade pip==21.2.4 7 | 8 | WORKDIR /monitor 9 | 10 | COPY . . 11 | 12 | RUN chmod +x run.sh 13 | 14 | RUN pip3 install -r requirements.txt 15 | 16 | CMD ["/monitor/run.sh"] -------------------------------------------------------------------------------- /tools/health_monitor/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | docker build -f Dockerfile \ 3 | -t neuroforgede/pg_auto_failover_health_monitor:latest \ 4 | -t neuroforgede/pg_auto_failover_health_monitor:0.1 \ 5 | . 6 | 7 | docker push neuroforgede/pg_auto_failover_health_monitor:latest 8 | docker push neuroforgede/pg_auto_failover_health_monitor:0.1 -------------------------------------------------------------------------------- /test/simple/certs/recreate_server_certs.sh: -------------------------------------------------------------------------------- 1 | rm -r test 2 | 3 | bash gen_server_cert.sh root_ca test/monitor/postgres_server 10.0.0.10 yJpYDPpTaX7kK6kjpz7Gvu7SV5DeWBj6 4 | bash gen_server_cert.sh root_ca test/node1/postgres_server 10.0.0.11 yJpYDPpTaX7kK6kjpz7Gvu7SV5DeWBj6 5 | bash gen_server_cert.sh root_ca test/node2/postgres_server 10.0.0.12 yJpYDPpTaX7kK6kjpz7Gvu7SV5DeWBj6 -------------------------------------------------------------------------------- /roles/postgres-cluster-install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Include OS-specific variables (Debian). 
4 | include_vars: "{{ ansible_distribution }}-{{ ansible_distribution_version.split('.')[0] }}.yml" 5 | when: ansible_os_family == 'Debian' 6 | 7 | - include_tasks: setup-Debian.yml 8 | when: ansible_os_family == 'Debian' 9 | 10 | - include_tasks: initialize.yml 11 | -------------------------------------------------------------------------------- /test/upgrade_1.3_to_1.4/certs/recreate_server_certs.sh: -------------------------------------------------------------------------------- 1 | rm -r test 2 | 3 | bash gen_server_cert.sh root_ca test/monitor/postgres_server 10.0.0.20 yJpYDPpTaX7kK6kjpz7Gvu7SV5DeWBj6 4 | bash gen_server_cert.sh root_ca test/node1/postgres_server 10.0.0.21 yJpYDPpTaX7kK6kjpz7Gvu7SV5DeWBj6 5 | bash gen_server_cert.sh root_ca test/node2/postgres_server 10.0.0.22 yJpYDPpTaX7kK6kjpz7Gvu7SV5DeWBj6 -------------------------------------------------------------------------------- /roles/postgres-pre-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | 4 | - name: Add Postgres packages repo apt key 5 | become: yes 6 | apt_key: 7 | data: "{{ lookup('file', 'postgres12.asc') }}" 8 | state: present 9 | 10 | - name: Add postgres repo http://apt.postgresql.org/pub/repos/apt/ 11 | become: yes 12 | apt_repository: 13 | repo: "{{ postgresql_repository }}" 14 | state: present -------------------------------------------------------------------------------- /roles/postgres-cluster-install/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | postgresql_cluster_python_library: "{{ __postgresql_cluster_python_library }}" 4 | postgresql_cluster_user: postgres 5 | 6 | postgresql_cluster_unix_socket_directories: 7 | - /var/run/postgresql 8 | 9 | # Debian only. Used to generate the locales used by PostgreSQL databases. 
10 | postgresql_cluster_locales: 11 | - 'en_US.UTF-8' 12 | -------------------------------------------------------------------------------- /roles/postgres-cluster-data-initialize-existing/tasks/existing_node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_role: 3 | name: postgres-cluster-hba-config 4 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 5 | 6 | - include_role: 7 | name: postgres-cluster-configure 8 | vars: 9 | postgresql_cluster_skip_restart: true 10 | when: not (postgresql_cluster_is_monitor | default('False') | bool) -------------------------------------------------------------------------------- /roles/postgres-cluster-install/templates/pgdg.preferences.j2: -------------------------------------------------------------------------------- 1 | Package: postgresql* 2 | Pin: version {{ postgresql_cluster_version }}* 3 | Pin-Priority: 1001 4 | 5 | Package: postgresql-{{ postgresql_cluster_version }}* 6 | Pin: version {{ postgresql_cluster_version }}* 7 | Pin-Priority: 1001 8 | 9 | Package: postgresql-{{ postgresql_cluster_version }}* 10 | Pin: version {{ postgresql_cluster_version }}* 11 | Pin-Priority: 1001 -------------------------------------------------------------------------------- /roles/essential-software-setup/subtasks/molly-guard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install molly-guard 3 | apt: 4 | name: molly-guard 5 | state: present 6 | update_cache: "{{ apt_update_cache | default('True') }}" 7 | become: yes 8 | 9 | - name: Enable molly-guard for screen/ssh-sessions 10 | lineinfile: 11 | dest: /etc/molly-guard/rc 12 | regexp: '^#ALWAYS_QUERY_HOSTNAME=true' 13 | line: 'ALWAYS_QUERY_HOSTNAME=true' 14 | become: yes -------------------------------------------------------------------------------- /postgres_cluster_database_users.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: postgres_cluster 4 | any_errors_fatal: true 5 | become: true 6 | roles: 7 | - name: postgres-cluster-load-vars 8 | - role: postgres-cluster-users 9 | - role: postgres-cluster-groups 10 | - role: postgres-cluster-databases 11 | - role: postgres-cluster-hba-config 12 | - role: postgres-cluster-pgbouncer-client-setup 13 | when: postgresql_cluster_pg_bouncer_include | default('False') | bool 14 | -------------------------------------------------------------------------------- /roles/postgres-cluster-configure/tasks/restart_postgres.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "reload cluster config" 3 | become_user: postgres 4 | shell: > 5 | PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_ctl reload --pgdata {{ postgresql_cluster_data_dir }} 6 | 7 | - name: "restart pg_auto_failover service" 8 | service: 9 | name: "{{ postgresql_cluster_daemon }}" 10 | state: "{{ postgresql_cluster_restarted_state }}" 11 | when: not (postgresql_cluster_skip_restart | default('False') | bool) -------------------------------------------------------------------------------- /roles/postgres-cluster-pgbouncer-client-setup/templates/userlist.txt.j2: -------------------------------------------------------------------------------- 1 | "{{ postgresql_cluster_pg_bouncer_root_user | default('pgbounceradmin') }}" "{{ postgresql_cluster_pg_bouncer_root_password_hashed | default('md5' + ((postgresql_cluster_pg_bouncer_root_password + 
(postgresql_cluster_pg_bouncer_root_user | default('pgbounceradmin'))) | hash('md5'))) }}" 2 | {% for user in postgresql_cluster_users | default([]) %} 3 | "{{ user.name }}" "{{ user.hashed_password | default('md5' + ((user.password + user.name) | hash('md5'))) }}" 4 | {% endfor %} -------------------------------------------------------------------------------- /roles/postgres-cluster-xinetd/templates/pgsqlchck_xinetd.j2: -------------------------------------------------------------------------------- 1 | service pgsqlchk 2 | { 3 | flags = REUSE 4 | socket_type = stream 5 | port = {{ postgresql_cluster_xinetd_port | default('23267') }} 6 | wait = no 7 | user = {{ postgresql_cluster_xinetd_user | default('postgres_xinetd') }} 8 | server = /opt/pgsqlchk 9 | log_on_failure += USERID 10 | disable = no 11 | only_from = 0.0.0.0/0 12 | per_source = UNLIMITED 13 | } -------------------------------------------------------------------------------- /postgres_cluster_pgbouncer.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # this assumes that the main playbook has alredy been run and is only intended as an upgrade playbook 4 | 5 | - hosts: postgres_cluster 6 | any_errors_fatal: true 7 | become: true 8 | roles: 9 | - name: postgres-cluster-load-vars 10 | - role: postgres-cluster-hba-config 11 | - role: postgres-cluster-pgbouncer-setup 12 | when: postgresql_cluster_pg_bouncer_include | default('False') | bool 13 | - role: postgres-cluster-pgbouncer-client-setup 14 | when: postgresql_cluster_pg_bouncer_include | default('False') | bool 15 | -------------------------------------------------------------------------------- /roles/postgres-cluster-hba-config/templates/pg_hba.conf.j2: -------------------------------------------------------------------------------- 1 | {{ ansible_managed | comment }} 2 | # PostgreSQL Client Authentication Configuration File 3 | # =================================================== 4 | # 5 | # See: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html 6 | 7 | {% for client in computed_postgresql_cluster_pg_hba_entries %} 8 | {{ client.type }} {{ client.database }} {{ client.user }} {{ client.address|default('') }} {{ client.ip_address|default('') }} {{ client.ip_mask|default('') }} {{ client.auth_method }} {{ client.auth_options|default("") }} 9 | {% endfor %} 10 | -------------------------------------------------------------------------------- /roles/postgres-cluster-configure/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Set postgresql state when configuration changes are made. Recommended values: 4 | # `restarted` or `reloaded` 5 | postgresql_cluster_restarted_state: "restarted" 6 | 7 | postgresql_cluster_user: postgres 8 | postgresql_cluster_group: postgres 9 | 10 | postgresql_cluster_unix_socket_directories: 11 | - /var/run/postgresql 12 | 13 | # Global configuration options that will be set in postgresql.conf. 
14 | postgresql_cluster_global_config_options: 15 | - option: unix_socket_directories 16 | value: '{{ postgresql_cluster_unix_socket_directories | join(",") }}' -------------------------------------------------------------------------------- /tools/health_monitor/config.py: -------------------------------------------------------------------------------- 1 | servers = { 2 | "monitor": { 3 | "display_name": "monitor", 4 | "dsn": "postgresql://healthuser:healthuser@10.0.0.10:5433/pg_auto_failover?connect_timeout=1", 5 | "healthcheck": "SELECT MIN(health) FROM pgautofailover.node" 6 | }, 7 | "node01": { 8 | "display_name": "node01", 9 | "dsn": "postgresql://testuser:password1@10.0.0.11:5433/testdb?connect_timeout=1", 10 | "healthcheck": "SELECT 1" 11 | }, 12 | "node02": { 13 | "display_name": "node01", 14 | "dsn": "postgresql://testuser:password1@10.0.0.12:5433/testdb?connect_timeout=1", 15 | "healthcheck": "SELECT 1" 16 | }, 17 | } -------------------------------------------------------------------------------- /tools/health_monitor/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | log_level="warning" 4 | worker_timeout="30" 5 | gunicorn_access_log_format='%(h)s %(l)s %({x-forwarded-for}i)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"' 6 | worker_count="3" 7 | gunicorn_limit_request_line="4094" 8 | bind_ip="0.0.0.0" 9 | 10 | echo "starting gunicorn..." 11 | exec gunicorn \ 12 | --timeout "${worker_timeout}" \ 13 | --capture-output \ 14 | --access-logfile - \ 15 | --error-logfile - \ 16 | --log-file - \ 17 | --log-level $log_level \ 18 | --access-logformat "$gunicorn_access_log_format" \ 19 | --workers=$worker_count \ 20 | --limit-request-line "$gunicorn_limit_request_line" \ 21 | -b $bind_ip:8080 \ 22 | monitor:app -------------------------------------------------------------------------------- /certs/pg_auto_failover/recreate_server_certs.sh: -------------------------------------------------------------------------------- 1 | rm -r af-monitor-01 2 | rm -r af-database-01 3 | rm -r af-database-02 4 | 5 | bash gen_server_cert.sh root_ca af-monitor-01/postgres_server 116.203.218.49 yJpYDPpTaX7kK6kjpz7Gvu7SV5DeWBj6 6 | bash gen_server_cert.sh root_ca af-database-01/postgres_server 116.203.218.75 4wNxA9Uaut5BtMTR3tdExU5xGGMRs4p4 7 | bash gen_server_cert.sh root_ca af-database-02/postgres_server 49.12.9.15 zkF8SU8Q2uNz3bBsJQ94F6Gx5DbcEgJ5 8 | 9 | mkdir -p ../../files/certs 10 | 11 | rm -rf ../../files/certs/af-monitor-01 12 | rm -rf ../../files/certs/af-database-01 13 | rm -rf ../../files/certs/af-database-02 14 | 15 | cp -r af-monitor-01 ../../files/certs 16 | cp -r af-database-01 ../../files/certs 17 | cp -r af-database-02 ../../files/certs -------------------------------------------------------------------------------- /roles/postgres-cluster-monitor-initialize/tasks/monitor_init_1.3.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "run pg_autoctl create monitor" 3 | become_user: "{{ postgresql_cluster_user }}" 4 | shell: > 5 | PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_autoctl create monitor \ 6 | --pgdata "{{ postgresql_cluster_data_dir }}" \ 7 | --skip-pg-hba \ 8 | --ssl-ca-file "{{ postgresql_cluster_ssl_ca_file | default('/data/ansible/certs/postgres_server/rootCA.crt') }}" \ 9 | --server-key "{{ postgresql_cluster_server_key | default('/data/ansible/certs/postgres_server/server.key') }}" \ 10 | --server-cert "{{ postgresql_cluster_server_cert | 
default('/data/ansible/certs/postgres_server/server.crt') }}" \ 11 | --nodename "{{ host_ip }}" \ 12 | --pgport "{{ postgresql_cluster_port | default('5433') }}" -------------------------------------------------------------------------------- /roles/postgres-cluster-monitor-initialize/tasks/monitor_init_1.4.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "run pg_autoctl create monitor" 3 | become_user: "{{ postgresql_cluster_user }}" 4 | shell: > 5 | PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_autoctl create monitor \ 6 | --pgdata "{{ postgresql_cluster_data_dir }}" \ 7 | --skip-pg-hba \ 8 | --ssl-ca-file "{{ postgresql_cluster_ssl_ca_file | default('/data/ansible/certs/postgres_server/rootCA.crt') }}" \ 9 | --server-key "{{ postgresql_cluster_server_key | default('/data/ansible/certs/postgres_server/server.key') }}" \ 10 | --server-cert "{{ postgresql_cluster_server_cert | default('/data/ansible/certs/postgres_server/server.crt') }}" \ 11 | --hostname "{{ host_ip }}" \ 12 | --pgport "{{ postgresql_cluster_port | default('5433') }}" -------------------------------------------------------------------------------- /roles/postgres-cluster-monitor-initialize/tasks/monitor_init_1.5.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "run pg_autoctl create monitor" 3 | become_user: "{{ postgresql_cluster_user }}" 4 | shell: > 5 | PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_autoctl create monitor \ 6 | --pgdata "{{ postgresql_cluster_data_dir }}" \ 7 | --skip-pg-hba \ 8 | --ssl-ca-file "{{ postgresql_cluster_ssl_ca_file | default('/data/ansible/certs/postgres_server/rootCA.crt') }}" \ 9 | --server-key "{{ postgresql_cluster_server_key | default('/data/ansible/certs/postgres_server/server.key') }}" \ 10 | --server-cert "{{ postgresql_cluster_server_cert | default('/data/ansible/certs/postgres_server/server.crt') }}" \ 11 | --hostname "{{ host_ip }}" \ 12 | --pgport "{{ postgresql_cluster_port | default('5433') }}" -------------------------------------------------------------------------------- /roles/postgres-cluster-monitor-initialize/tasks/monitor_init_1.6.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "run pg_autoctl create monitor" 3 | become_user: "{{ postgresql_cluster_user }}" 4 | shell: > 5 | PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_autoctl create monitor \ 6 | --pgdata "{{ postgresql_cluster_data_dir }}" \ 7 | --skip-pg-hba \ 8 | --ssl-ca-file "{{ postgresql_cluster_ssl_ca_file | default('/data/ansible/certs/postgres_server/rootCA.crt') }}" \ 9 | --server-key "{{ postgresql_cluster_server_key | default('/data/ansible/certs/postgres_server/server.key') }}" \ 10 | --server-cert "{{ postgresql_cluster_server_cert | default('/data/ansible/certs/postgres_server/server.crt') }}" \ 11 | --hostname "{{ host_ip }}" \ 12 | --pgport "{{ postgresql_cluster_port | default('5433') }}" -------------------------------------------------------------------------------- /roles/postgres-cluster-configure/tasks/configure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure global settings. 
3 | lineinfile: 4 | dest: "{{ postgresql_cluster_config_path }}/postgresql.conf" 5 | regexp: "^#?{{ item.option }}.+$" 6 | line: "{{ item.option }} = '{{ item.value }}'" 7 | state: "{{ item.state | default('present') }}" 8 | with_items: "{{ postgresql_cluster_global_config_options }}" 9 | 10 | - include_tasks: "restart_postgres.yml" 11 | 12 | - name: Ensure PostgreSQL unix socket dirs exist. 13 | file: 14 | path: "{{ item }}" 15 | state: directory 16 | owner: "{{ postgresql_cluster_user }}" 17 | group: "{{ postgresql_cluster_group }}" 18 | mode: "{{ postgresql_cluster_unix_socket_directories_mode }}" 19 | with_items: "{{ postgresql_cluster_unix_socket_directories }}" 20 | -------------------------------------------------------------------------------- /postgres_cluster_xinetd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: postgres_cluster 4 | any_errors_fatal: true 5 | become: true 6 | vars: 7 | setup_additional_groups: 8 | - name: "{{ postgresql_cluster_xinetd_group | default('postgres_xinetd') }}" 9 | requires_root_password: True 10 | setup_additional_users: 11 | - name: "{{ postgresql_cluster_xinetd_user | default('postgres_xinetd') }}" 12 | group: "{{ postgresql_cluster_xinetd_group | default('postgres_xinetd') }}" 13 | system: True 14 | is_sudo: False 15 | roles: 16 | - role: user-setup 17 | 18 | - hosts: postgres_cluster 19 | any_errors_fatal: true 20 | become: true 21 | roles: 22 | - name: postgres-cluster-load-vars 23 | - role: postgres-cluster-hba-config 24 | - role: postgres-cluster-xinetd 25 | -------------------------------------------------------------------------------- /test/simple/Vagrantfile: -------------------------------------------------------------------------------- 1 | Vagrant.configure("2") do |config| 2 | config.vm.define :monitor do |monitor| 3 | monitor.vm.box = "ubuntu/bionic64" 4 | monitor.vm.network :private_network, ip: "10.0.0.10" 5 | monitor.vm.hostname = "monitor" 6 | monitor.vm.network :forwarded_port, guest: 22, host: 2200, id: 'ssh' 7 | end 8 | 9 | config.vm.define :node1 do |node1| 10 | node1.vm.box = "ubuntu/bionic64" 11 | node1.vm.network :private_network, ip: "10.0.0.11" 12 | node1.vm.hostname = "node1" 13 | node1.vm.network :forwarded_port, guest: 22, host: 2201, id: 'ssh' 14 | end 15 | 16 | config.vm.define :node2 do |node2| 17 | node2.vm.box = "ubuntu/bionic64" 18 | node2.vm.network :private_network, ip: "10.0.0.12" 19 | node2.vm.hostname = "node2" 20 | node2.vm.network :forwarded_port, guest: 22, host: 2202, id: 'ssh' 21 | end 22 | end -------------------------------------------------------------------------------- /test/upgrade_1.3_to_1.4/Vagrantfile: -------------------------------------------------------------------------------- 1 | Vagrant.configure("2") do |config| 2 | config.vm.define :monitor do |monitor| 3 | monitor.vm.box = "ubuntu/bionic64" 4 | monitor.vm.network :private_network, ip: "10.0.0.20" 5 | monitor.vm.hostname = "monitor" 6 | monitor.vm.network :forwarded_port, guest: 22, host: 2210, id: 'ssh' 7 | end 8 | 9 | config.vm.define :node1 do |node1| 10 | node1.vm.box = "ubuntu/bionic64" 11 | node1.vm.network :private_network, ip: "10.0.0.21" 12 | node1.vm.hostname = "node1" 13 | node1.vm.network :forwarded_port, guest: 22, host: 2211, id: 'ssh' 14 | end 15 | 16 | config.vm.define :node2 do |node2| 17 | node2.vm.box = "ubuntu/bionic64" 18 | node2.vm.network :private_network, ip: "10.0.0.22" 19 | node2.vm.hostname = "node2" 20 | node2.vm.network :forwarded_port, guest: 
22, host: 2212, id: 'ssh' 21 | end 22 | end -------------------------------------------------------------------------------- /roles/postgres-cluster-data-initialize-existing/tasks/main.yml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | 4 | - name: Check if PostgreSQL database is initialized. 5 | stat: 6 | path: "{{ postgresql_cluster_data_dir }}/PG_VERSION" 7 | register: pgdata_dir_version 8 | 9 | - name: check if pg_autoctl already knows about this postgres cluster 10 | stat: 11 | path: "/home/postgres/.local/share/pg_autoctl/{{ postgresql_cluster_data_dir }}" 12 | register: pg_autoctl_dir 13 | 14 | - name: Check if service is already set up 15 | stat: 16 | path: "/etc/systemd/system/{{ postgresql_cluster_daemon }}.service" 17 | register: postgresql_cluster_daemon_service_file 18 | 19 | - include_tasks: existing_node.yml 20 | when: (pg_autoctl_dir.stat.exists) and (pgdata_dir_version.stat.exists) and (postgresql_cluster_daemon_service_file.stat.exists) and not (postgresql_cluster_is_monitor | default('False') | bool) 21 | -------------------------------------------------------------------------------- /roles/postgres-cluster-databases/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | postgresql_cluster_user: postgres 3 | postgresql_cluster_group: postgres 4 | 5 | postgresql_cluster_unix_socket_directories: 6 | - /var/run/postgresql 7 | 8 | # Databases to ensure exist. 9 | postgresql_cluster_databases: [] 10 | # - name: exampledb # required; the rest are optional 11 | # lc_collate: # defaults to 'en_US.UTF-8' 12 | # lc_ctype: # defaults to 'en_US.UTF-8' 13 | # encoding: # defaults to 'UTF-8' 14 | # template: # defaults to 'template0' 15 | # login_host: # defaults to 'localhost' 16 | # login_password: # defaults to not set 17 | # login_user: # defaults to '{{ postgresql_cluster_user }}' 18 | # login_unix_socket: # defaults to 1st of postgresql_cluster_unix_socket_directories 19 | # port: # defaults to not set 20 | # owner: # defaults to postgresql_cluster_user 21 | # state: # defaults to 'present' -------------------------------------------------------------------------------- /roles/postgres-cluster-groups/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | postgresql_cluster_user: postgres 4 | 5 | postgresql_cluster_unix_socket_directories: 6 | - /var/run/postgresql 7 | 8 | # Users to ensure exist. 9 | postgresql_cluster_users: [] 10 | # - name: jdoe #required; the rest are optional 11 | # password: # defaults to not set 12 | # encrypted: # defaults to not set 13 | # priv: # defaults to not set 14 | # role_attr_flags: # defaults to not set 15 | # db: # defaults to not set 16 | # login_host: # defaults to 'localhost' 17 | # login_password: # defaults to not set 18 | # login_user: # defaults to '{{ postgresql_cluster_user }}' 19 | # login_unix_socket: # defaults to 1st of postgresql_cluster_unix_socket_directories 20 | # port: # defaults to not set 21 | # state: # defaults to 'present' 22 | 23 | # Whether to output user data when managing users. 
24 | postgres_users_no_log: true 25 | -------------------------------------------------------------------------------- /roles/postgres-cluster-users/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | postgresql_cluster_user: postgres 4 | 5 | postgresql_cluster_unix_socket_directories: 6 | - /var/run/postgresql 7 | 8 | # Users to ensure exist. 9 | postgresql_cluster_users: [] 10 | # - name: jdoe #required; the rest are optional 11 | # password: # defaults to not set 12 | # encrypted: # defaults to not set 13 | # priv: # defaults to not set 14 | # role_attr_flags: # defaults to not set 15 | # db: # defaults to not set 16 | # login_host: # defaults to 'localhost' 17 | # login_password: # defaults to not set 18 | # login_user: # defaults to '{{ postgresql_cluster_user }}' 19 | # login_unix_socket: # defaults to 1st of postgresql_cluster_unix_socket_directories 20 | # port: # defaults to not set 21 | # state: # defaults to 'present' 22 | 23 | # Whether to output user data when managing users. 24 | postgres_users_no_log: true 25 | -------------------------------------------------------------------------------- /roles/postgres-cluster-pg-auto-failover-upgrade-pre-1.4/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "fail if version is not supported by this upgrade script" 4 | fail: 5 | msg: "upgrading from version {{ postgresql_pg_auto_failover_version }} is not supported. Are you sure you called the right playbook?" 6 | when: postgresql_pg_auto_failover_version >= "1.4" 7 | 8 | - name: Check if service is already set up 9 | stat: 10 | path: "/etc/systemd/system/{{ postgresql_cluster_daemon }}.service" 11 | register: postgresql_cluster_daemon_service_file 12 | 13 | - name: "fail if monitor does not have a service setup already" 14 | fail: 15 | msg: "monitor is not properly set up, aborting..." 
16 | when: not postgresql_cluster_daemon_service_file.stat.exists and (postgresql_cluster_is_monitor | default('False') | bool) 17 | 18 | - name: "upgrade (only if service exists already)" 19 | include_tasks: "upgrade.yml" 20 | when: postgresql_cluster_daemon_service_file.stat.exists 21 | -------------------------------------------------------------------------------- /roles/postgres-cluster-data-initialize-new/tasks/setup_service.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "create systemd config for cluster" 3 | shell: > 4 | su -c 'PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_autoctl -q show systemd --pgdata "{{ postgresql_cluster_data_dir }}"' postgres \ 5 | | tee /etc/systemd/system/{{ postgresql_cluster_daemon }}.service 6 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 7 | 8 | - name: "make sure database cluster service is started and enabled at boot" 9 | systemd: 10 | state: started 11 | enabled: true 12 | daemon_reload: yes 13 | name: "{{ postgresql_cluster_daemon }}" 14 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 15 | 16 | - include_role: 17 | name: postgres-cluster-hba-config 18 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 19 | 20 | - include_role: 21 | name: postgres-cluster-configure 22 | when: not (postgresql_cluster_is_monitor | default('False') | bool) -------------------------------------------------------------------------------- /roles/postgres-cluster-hba-config/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | postgresql_cluster_user: postgres 4 | postgresql_cluster_group: postgres 5 | 6 | postgresql_cluster_unix_socket_directories: 7 | - /var/run/postgresql 8 | 9 | # Host based authentication (hba) entries to be added to the pg_hba.conf. This 10 | # variable's defaults reflect the defaults that come with a fresh installation with slight modifications 11 | _postgresql_cluster_minimum_hba_entries: 12 | - {type: local, database: all, user: postgres, auth_method: peer} 13 | - {type: local, database: replication, user: postgres, auth_method: peer} 14 | - {type: local, database: all, user: "{{ postgresql_cluster_xinetd_user | default('postgres_xinetd') }}", auth_method: peer} 15 | - {type: local, database: all, user: all, auth_method: md5} 16 | - {type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: md5} 17 | - {type: host, database: all, user: all, address: '::1/128', auth_method: md5} 18 | 19 | # Whether to output user data when managing users. 20 | postgres_users_no_log: true 21 | -------------------------------------------------------------------------------- /roles/postgres-cluster-install/tasks/setup-Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "pin postgres packages" 3 | template: 4 | src: "pgdg.preferences.j2" 5 | dest: "/etc/apt/preferences.d/pgdg" 6 | owner: root 7 | group: root 8 | 9 | - name: Ensure PostgreSQL Python libraries are installed. 
10 | apt: 11 | name: "{{ postgresql_cluster_python_library }}" 12 | state: present 13 | when: postgresql_cluster_python_library is defined 14 | 15 | - name: Ensure PostgreSQL Python libraries are installed via pip 16 | pip: 17 | name: "{{ item }}" 18 | state: present 19 | executable: pip3 20 | with_items: "{{ postgresql_cluster_python_pip_libraries }}" 21 | when: postgresql_cluster_python_pip_libraries is defined 22 | 23 | - name: Ensure PostgreSQL packages are installed. 24 | apt: 25 | name: "{{ postgresql_cluster_packages }}" 26 | state: present 27 | 28 | - name: Ensure all configured locales are present. 29 | locale_gen: "name={{ item }} state=present" 30 | with_items: "{{ postgresql_cluster_locales }}" 31 | register: locale_gen_result -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | pg_auto_failover_ansible 2 | 3 | Copyright 2020-2022 NeuroForge GmbH & Co. KG 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /tools/health_monitor/LICENSE: -------------------------------------------------------------------------------- 1 | pg_auto_failover_health_monitor 2 | 3 | Copyright 2020-2022 NeuroForge GmbH & Co. KG 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- /roles/postgres-cluster-pgbouncer-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure that the pgbouncer packages are installed 4 | become: true 5 | apt: 6 | name: "pgbouncer" 7 | state: present 8 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 9 | 10 | - name: Ensure that pgbouncer is started and enabled on boot 11 | tags: pgbouncer 12 | become: true 13 | service: 14 | name: "pgbouncer" 15 | state: started 16 | enabled: yes 17 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 18 | 19 | - name: Template pgbouncer.ini into /etc/pgbouncer/pgbouncer.ini 20 | template: 21 | src: pgbouncer.ini.j2 22 | dest: /etc/pgbouncer/pgbouncer.ini 23 | owner: "{{ postgresql_cluster_user | default('postgres') }}" 24 | group: "{{ postgresql_cluster_group | default('postgres') }}" 25 | mode: 0600 26 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 27 | 28 | - name: "restart pgbouncer" 29 | become: true 30 | service: 31 | name: "pgbouncer" 32 | state: "restarted" 33 | enabled: yes 34 | when: not (postgresql_cluster_is_monitor | default('False') | bool) -------------------------------------------------------------------------------- /roles/postgres-cluster-databases/tasks/add_schemas.yml: -------------------------------------------------------------------------------- 1 | - name: Ensure PostgreSQL database schema is present. 2 | postgresql_schema: 3 | name: "{{ schema_def.name }}" 4 | database: "{{ database.name }}" 5 | login_host: "{{ database.login_host | default('localhost') }}" 6 | login_password: "{{ database.login_password | default(omit) }}" 7 | login_user: "{{ database.login_user | default(postgresql_cluster_user) }}" 8 | login_unix_socket: "{{ database.login_unix_socket | default(postgresql_cluster_unix_socket_directories[0]) }}" 9 | port: "{{ database.port | default(postgresql_cluster_port) }}" 10 | owner: "{{ schema_def.owner | default(database.owner | default(postgresql_cluster_user)) }}" 11 | state: "{{ schema_def.state | default('present') }}" 12 | become: true 13 | become_user: "{{ postgresql_cluster_user }}" 14 | # See: https://github.com/ansible/ansible/issues/16048#issuecomment-229012509 15 | vars: 16 | ansible_ssh_pipelining: true 17 | when: (not (postgresql_cluster_is_monitor | default('False') | bool)) and schema_def.name is defined and pg_is_in_recovery.stdout_lines[0] == 'f' 18 | loop_control: 19 | loop_var: "schema_def" 20 | with_items: "{{ database.schemas | default([]) }}" -------------------------------------------------------------------------------- /roles/postgres-cluster-user-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "ensure /home/postgres/.postgresql exists" 3 | file: 4 | path: /home/postgres/.postgresql 5 | state: directory 6 | owner: "postgres" 7 | group: "postgres" 8 | mode: 0700 9 | 10 | - name: "copy key and cert to postgres user home" 11 | copy: 12 | src: "{{ postgresql_cluster_server_key | default('/data/ansible/certs/postgres_server/server.crt') }}" 13 | dest: "/home/postgres/.postgresql/postgresql.cert" 14 | owner: "postgres" 15 | group: "users" 16 | mode: 0600 17 | remote_src: yes 18 | 19 | - name: "copy key and cert to postgres user home" 20 | copy: 21 | src: "{{ postgresql_cluster_server_key | default('/data/ansible/certs/postgres_server/server.crt') }}" 22 | dest: 
"/home/postgres/.postgresql/postgresql.crt" 23 | owner: "postgres" 24 | group: "users" 25 | mode: 0600 26 | remote_src: yes 27 | 28 | - name: "copy key and cert to postgres user home" 29 | copy: 30 | src: "{{ postgresql_cluster_server_key | default('/data/ansible/certs/postgres_server/server.key') }}" 31 | dest: "/home/postgres/.postgresql/postgresql.key" 32 | owner: "postgres" 33 | group: "users" 34 | mode: 0600 35 | remote_src: yes -------------------------------------------------------------------------------- /roles/postgres-cluster-pgbouncer-client-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Template userlist.txt into /etc/pgbouncer/userlist.txt 4 | template: 5 | src: userlist.txt.j2 6 | dest: /etc/pgbouncer/userlist.txt 7 | owner: "{{ postgresql_cluster_user | default('postgres') }}" 8 | group: "{{ postgresql_cluster_group | default('postgres') }}" 9 | mode: 0600 10 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 11 | 12 | - name: "Allow incoming access to the pgbouncer port {{ postgresql_cluster_pg_bouncer_listen_port | default('6432') }} for all cluster clients" 13 | ufw: 14 | rule: allow 15 | direction: in 16 | src: "{{ item.ip }}/{{ item.subnet_mask | default('32') }}" 17 | to_port: "{{ postgresql_cluster_pg_bouncer_listen_port | default('6432') }}" 18 | comment: "pgbouncer {{ postgresql_cluster_name }} - client: {{ item.name }}" 19 | with_items: "{{ postgresql_cluster_allowed_clients | default([]) }}" 20 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 21 | 22 | - name: "reload pgbouncer" 23 | become: true 24 | service: 25 | name: "pgbouncer" 26 | state: "reloaded" 27 | enabled: yes 28 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 29 | -------------------------------------------------------------------------------- /base_setup.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | become: true 3 | gather_facts: False 4 | tasks: 5 | # ansible only requires only a minimal installation of python 2.x on the managed machines 6 | - name: "ansible required: install python" 7 | raw: python3 -c "import simplejson" || (DEBIAN_FRONTEND=noninteractive apt-get update -y && DEBIAN_FRONTEND=noninteractive apt-get install python3-minimal -y && DEBIAN_FRONTEND=noninteractive apt-get install python3-simplejson -y) 8 | 9 | - hosts: all 10 | become: true 11 | tasks: 12 | - name: "set up bashrc for root user" 13 | copy: 14 | src: "{{ playbook_dir }}/roles/user-setup/files/.bashrc" 15 | dest: "/root/.bashrc" 16 | owner: "0" 17 | group: "0" 18 | mode: 0600 19 | 20 | - hosts: all 21 | become: true 22 | tasks: 23 | - name: Ensure the en_US locale exists 24 | locale_gen: 25 | name: en_US.UTF-8 26 | state: present 27 | - name: set en_US as default locale 28 | command: update-locale set-locale LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 29 | 30 | - hosts: all 31 | become: true 32 | vars: 33 | apt_update_cache: True 34 | apt_restart_after_dist_upgrade: True 35 | roles: 36 | - role: essential-software-setup 37 | -------------------------------------------------------------------------------- /roles/force-reconnect/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated 
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 10 | --- 11 | 12 | - name: reset ssh connection to allow user changes to be applied 13 | meta: reset_connection 14 | -------------------------------------------------------------------------------- /roles/postgres-cluster-pg-auto-failover-install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check if PostgreSQL database is initialized. 4 | stat: 5 | path: "{{ postgresql_cluster_data_dir }}/PG_VERSION" 6 | register: pgdata_dir_version 7 | 8 | - name: "copy deb.sh to /tmp/pg_auto_failover_deb.sh" 9 | copy: 10 | src: "deb.sh" 11 | dest: "/tmp/pg_auto_failover_deb.sh" 12 | owner: root 13 | group: root 14 | mode: 0700 15 | when: not pgdata_dir_version.stat.exists 16 | 17 | - name: "run /tmp/pg_auto_failover_deb.sh" 18 | command: "bash /tmp/pg_auto_failover_deb.sh" 19 | when: not pgdata_dir_version.stat.exists 20 | 21 | - name: "install pg-auto-failover-cli-{{ postgresql_pg_auto_failover_version }}" 22 | apt: 23 | name: "pg-auto-failover-cli-{{ postgresql_pg_auto_failover_version }}" 24 | state: present 25 | update_cache: "{{ apt_update_cache | default('True') }}" 26 | when: not pgdata_dir_version.stat.exists 27 | 28 | - name: "install postgresql-{{ postgresql_cluster_version }}-auto-failover-{{ postgresql_pg_auto_failover_version }}" 29 | apt: 30 | name: "postgresql-{{ postgresql_cluster_version }}-auto-failover-{{ postgresql_pg_auto_failover_version }}" 31 | state: present 32 | update_cache: "{{ apt_update_cache | default('True') }}" 33 | when: not pgdata_dir_version.stat.exists -------------------------------------------------------------------------------- /roles/postgres-cluster-groups/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "check if server is standby (pg_is_in_recovery)" 3 | shell: 4 | psql -p "{{ postgresql_cluster_port }}" -t -X -A -c 'select pg_is_in_recovery()' 5 | become_user: "{{ postgresql_cluster_user }}" 6 | register: pg_is_in_recovery 7 | 8 | - name: Ensure PostgreSQL groups are assigned. 
9 | postgresql_membership: 10 | group: "{{ item.name }}" 11 | target_roles: "{{ item.members }}" 12 | db: "{{ item.db | default(omit) }}" 13 | login_host: "{{ item.login_host | default('localhost') }}" 14 | login_password: "{{ item.login_password | default(omit) }}" 15 | login_user: "{{ item.login_user | default(postgresql_cluster_user) }}" 16 | login_unix_socket: "{{ item.login_unix_socket | default(postgresql_cluster_unix_socket_directories[0]) }}" 17 | port: "{{ item.port | default(postgresql_cluster_port) }}" 18 | state: "{{ item.state | default('present') }}" 19 | with_items: "{{ postgresql_cluster_groups | default([]) }}" 20 | no_log: "{{ postgres_users_no_log }}" 21 | become: true 22 | become_user: "{{ postgresql_cluster_user }}" 23 | when: not (postgresql_cluster_is_monitor | default('False') | bool) and pg_is_in_recovery.stdout_lines[0] == 'f' 24 | # See: https://github.com/ansible/ansible/issues/16048#issuecomment-229012509 25 | vars: 26 | ansible_ssh_pipelining: true -------------------------------------------------------------------------------- /tools/health_monitor/monitor.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | from psycopg2 import Error 3 | import psycopg2 4 | import logging 5 | from config import servers 6 | 7 | 8 | app = Flask(__name__) 9 | logger = logging.getLogger(__name__) 10 | 11 | def check_alive(server): 12 | connection = None 13 | try: 14 | # Connect to an existing database 15 | connection = psycopg2.connect(server['dsn']) 16 | 17 | # Create a cursor to perform database operations 18 | cursor = connection.cursor() 19 | 20 | cursor.execute(server['healthcheck']) 21 | # Fetch result 22 | record = cursor.fetchone() 23 | 24 | if record[0] == 1: 25 | return True 26 | 27 | except (Exception, Error) as error: 28 | logger.error("Error while checking health for " + server["display_name"], exc_info=True) 29 | finally: 30 | if connection is not None: 31 | try: 32 | cursor.close() 33 | finally: 34 | connection.close() 35 | return False 36 | 37 | 38 | @app.route("/api/v1/pg/health") 39 | @app.errorhandler(500) 40 | def health(): 41 | ret = {} 42 | for key, server in servers.items(): 43 | ret[key] = check_alive(server) 44 | all_healthy = all(ret.values()) 45 | if not all_healthy: 46 | return ret, 503 47 | return ret -------------------------------------------------------------------------------- /roles/postgres-cluster-load-vars/vars/Ubuntu-18.yml: -------------------------------------------------------------------------------- 1 | --- 2 | __postgresql_cluster_restarted_state: "restarted" 3 | __postgresql_cluster_user: postgres 4 | __postgresql_cluster_name: "main_cluster" 5 | __postgresql_cluster_port: "5433" 6 | __postgresql_cluster_version: "12" 7 | __postgresql_pg_auto_failover_version: "1.6" 8 | __postgresql_repository: "deb [ arch=amd64 ] http://apt.postgresql.org/pub/repos/apt/ bionic-pgdg main" 9 | __postgresql_cluster_data_dir: "/var/lib/postgresql/{{ postgresql_cluster_version | default(__postgresql_cluster_version) }}/{{ postgresql_cluster_name | default(__postgresql_cluster_name) }}" 10 | __postgresql_cluster_bin_path: "/usr/lib/postgresql/{{ postgresql_cluster_version | default(__postgresql_cluster_version) }}/bin" 11 | __postgresql_cluster_config_path: "/var/lib/postgresql/{{ postgresql_cluster_version | default(__postgresql_cluster_version) }}/{{ postgresql_cluster_name | default(__postgresql_cluster_name) }}" 12 | __postgresql_cluster_daemon: "postgresql_{{ postgresql_cluster_name | 
default(__postgresql_cluster_name) }}" 13 | __postgresql_cluster_packages: 14 | - "postgresql-{{ postgresql_cluster_version | default(__postgresql_cluster_version) }}" 15 | - "postgresql-contrib-{{ postgresql_cluster_version | default(__postgresql_cluster_version) }}" 16 | - libpq-dev 17 | postgresql_cluster_python_pip_library: 18 | - setuptools 19 | - psycopg2 20 | -------------------------------------------------------------------------------- /roles/postgres-cluster-load-vars/vars/Ubuntu-20.yml: -------------------------------------------------------------------------------- 1 | --- 2 | __postgresql_cluster_restarted_state: "restarted" 3 | __postgresql_cluster_user: postgres 4 | __postgresql_cluster_name: "main_cluster" 5 | __postgresql_cluster_port: "5433" 6 | __postgresql_cluster_version: "12" 7 | __postgresql_pg_auto_failover_version: "1.6" 8 | __postgresql_repository: "deb [ arch=amd64 ] http://apt.postgresql.org/pub/repos/apt/ focal-pgdg main" 9 | __postgresql_cluster_data_dir: "/var/lib/postgresql/{{ postgresql_cluster_version | default(__postgresql_cluster_version) }}/{{ postgresql_cluster_name | default(__postgresql_cluster_name) }}" 10 | __postgresql_cluster_bin_path: "/usr/lib/postgresql/{{ postgresql_cluster_version | default(__postgresql_cluster_version) }}/bin" 11 | __postgresql_cluster_config_path: "/var/lib/postgresql/{{ postgresql_cluster_version | default(__postgresql_cluster_version) }}/{{ postgresql_cluster_name | default(__postgresql_cluster_name) }}" 12 | __postgresql_cluster_daemon: "postgresql_{{ postgresql_cluster_name | default(__postgresql_cluster_name) }}" 13 | __postgresql_cluster_packages: 14 | - "postgresql-{{ postgresql_cluster_version | default(__postgresql_cluster_version) }}" 15 | - "postgresql-contrib-{{ postgresql_cluster_version | default(__postgresql_cluster_version) }}" 16 | - libpq-dev 17 | postgresql_cluster_python_pip_library: 18 | - setuptools 19 | - psycopg2 20 | -------------------------------------------------------------------------------- /test/simple/certs/gen_root_cert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BASEDIR="$1" 4 | LOGGING_PREFIX="gen_root_cert.sh >> " 5 | 6 | PASSKEY="$2" 7 | 8 | mkdir -p $BASEDIR 9 | 10 | rm -f ${BASEDIR}/rootCA.crt 11 | rm -f ${BASEDIR}/rootCA.csr 12 | rm -f ${BASEDIR}/rootCA.key 13 | rm -f ${BASEDIR}/rootCA.srl 14 | 15 | echo "$PASSKEY" > ${BASEDIR}/passkey.txt 16 | 17 | # generate a key for our root CA certificate 18 | echo "${LOGGING_PREFIX} Generating key for root CA certificate" 19 | openssl genrsa -des3 -passout pass:${PASSKEY} -out ${BASEDIR}/rootCA.pass.key 4096 20 | openssl rsa -passin pass:${PASSKEY} -in ${BASEDIR}/rootCA.pass.key -out ${BASEDIR}/rootCA.key 21 | rm ${BASEDIR}/rootCA.pass.key 22 | echo 23 | 24 | # create and self sign the root CA certificate 25 | echo 26 | echo "${LOGGING_PREFIX} Creating self-signed root CA certificate" 27 | openssl req -x509 -new -nodes -key ${BASEDIR}/rootCA.key -sha512 -days 365000 -out ${BASEDIR}/rootCA.crt -subj "/emailAddress=kontakt@your-company.de/C=DE/ST=Bavaria/L=Bayreuth/O=your-company/OU=your-company GmbH & Co. 
KG/CN=your-company-nf-kube01-ca" -extensions v3_ca 28 | echo "${LOGGING_PREFIX} Self-signed root CA certificate (${BASEDIR}/rootCA.crt) is:" 29 | openssl x509 -in ${BASEDIR}/rootCA.crt -text -noout 30 | echo 31 | 32 | cat ${BASEDIR}/rootCA.crt > ${BASEDIR}/rootCA-verification.pem 33 | cat ${BASEDIR}/rootCA.key ${BASEDIR}/rootCA.crt > ${BASEDIR}/rootCA-complete.pem 34 | -------------------------------------------------------------------------------- /certs/pg_auto_failover/gen_root_cert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BASEDIR="$1" 4 | LOGGING_PREFIX="gen_root_cert.sh >> " 5 | 6 | PASSKEY="$2" 7 | 8 | mkdir -p $BASEDIR 9 | 10 | rm -f ${BASEDIR}/rootCA.crt 11 | rm -f ${BASEDIR}/rootCA.csr 12 | rm -f ${BASEDIR}/rootCA.key 13 | rm -f ${BASEDIR}/rootCA.srl 14 | 15 | echo "$PASSKEY" > ${BASEDIR}/passkey.txt 16 | 17 | # generate a key for our root CA certificate 18 | echo "${LOGGING_PREFIX} Generating key for root CA certificate" 19 | openssl genrsa -des3 -passout pass:${PASSKEY} -out ${BASEDIR}/rootCA.pass.key 4096 20 | openssl rsa -passin pass:${PASSKEY} -in ${BASEDIR}/rootCA.pass.key -out ${BASEDIR}/rootCA.key 21 | rm ${BASEDIR}/rootCA.pass.key 22 | echo 23 | 24 | # create and self sign the root CA certificate 25 | echo 26 | echo "${LOGGING_PREFIX} Creating self-signed root CA certificate" 27 | openssl req -x509 -new -nodes -key ${BASEDIR}/rootCA.key -sha512 -days 365000 -out ${BASEDIR}/rootCA.crt -subj "/emailAddress=kontakt@your-company.de/C=DE/ST=Bavaria/L=Bayreuth/O=your-company/OU=your-company GmbH & Co. KG/CN=your-company-nf-kube01-ca" -extensions v3_ca 28 | echo "${LOGGING_PREFIX} Self-signed root CA certificate (${BASEDIR}/rootCA.crt) is:" 29 | openssl x509 -in ${BASEDIR}/rootCA.crt -text -noout 30 | echo 31 | 32 | cat ${BASEDIR}/rootCA.crt > ${BASEDIR}/rootCA-verification.pem 33 | cat ${BASEDIR}/rootCA.key ${BASEDIR}/rootCA.crt > ${BASEDIR}/rootCA-complete.pem 34 | -------------------------------------------------------------------------------- /test/upgrade_1.3_to_1.4/certs/gen_root_cert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BASEDIR="$1" 4 | LOGGING_PREFIX="gen_root_cert.sh >> " 5 | 6 | PASSKEY="$2" 7 | 8 | mkdir -p $BASEDIR 9 | 10 | rm -f ${BASEDIR}/rootCA.crt 11 | rm -f ${BASEDIR}/rootCA.csr 12 | rm -f ${BASEDIR}/rootCA.key 13 | rm -f ${BASEDIR}/rootCA.srl 14 | 15 | echo "$PASSKEY" > ${BASEDIR}/passkey.txt 16 | 17 | # generate a key for our root CA certificate 18 | echo "${LOGGING_PREFIX} Generating key for root CA certificate" 19 | openssl genrsa -des3 -passout pass:${PASSKEY} -out ${BASEDIR}/rootCA.pass.key 4096 20 | openssl rsa -passin pass:${PASSKEY} -in ${BASEDIR}/rootCA.pass.key -out ${BASEDIR}/rootCA.key 21 | rm ${BASEDIR}/rootCA.pass.key 22 | echo 23 | 24 | # create and self sign the root CA certificate 25 | echo 26 | echo "${LOGGING_PREFIX} Creating self-signed root CA certificate" 27 | openssl req -x509 -new -nodes -key ${BASEDIR}/rootCA.key -sha512 -days 365000 -out ${BASEDIR}/rootCA.crt -subj "/emailAddress=kontakt@your-company.de/C=DE/ST=Bavaria/L=Bayreuth/O=your-company/OU=your-company GmbH & Co. 
KG/CN=your-company-nf-kube01-ca" -extensions v3_ca 28 | echo "${LOGGING_PREFIX} Self-signed root CA certificate (${BASEDIR}/rootCA.crt) is:" 29 | openssl x509 -in ${BASEDIR}/rootCA.crt -text -noout 30 | echo 31 | 32 | cat ${BASEDIR}/rootCA.crt > ${BASEDIR}/rootCA-verification.pem 33 | cat ${BASEDIR}/rootCA.key ${BASEDIR}/rootCA.crt > ${BASEDIR}/rootCA-complete.pem 34 | -------------------------------------------------------------------------------- /roles/user-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 10 | --- 11 | - name: construct list of all user infos 12 | set_fact: 13 | all_user_infos: "{{ setup_additional_users | default([]) }}" 14 | 15 | - name: set up users 16 | include: subtasks/setup-single-user.yml user="{{ item }}" 17 | with_items: "{{ all_user_infos }}" -------------------------------------------------------------------------------- /roles/postgres-cluster-configure/LICENSE: -------------------------------------------------------------------------------- 1 | This role is based on Jeff Geerling's (geerlingguy) ansible-role-postgresql. 2 | In contrast to the original role, this only _installs_ postgresql 3 | in the correct versions as specified, and also installs pg_auto_failover 4 | so that the database can be configured by other roles. 5 | 6 | The MIT License (MIT) 7 | 8 | Copyright (c) 2017 Jeff Geerling 9 | 10 | Permission is hereby granted, free of charge, to any person obtaining a copy of 11 | this software and associated documentation files (the "Software"), to deal in 12 | the Software without restriction, including without limitation the rights to 13 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 14 | the Software, and to permit persons to whom the Software is furnished to do so, 15 | subject to the following conditions: 16 | 17 | The above copyright notice and this permission notice shall be included in all 18 | copies or substantial portions of the Software. 19 | 20 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 22 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR 23 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 24 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 25 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /roles/postgres-cluster-install/LICENSE: -------------------------------------------------------------------------------- 1 | This role is based on Jeff Geerling's (geerlingguy) ansible-role-postgresql. 2 | In contrast to the original role, this only _installs_ postgresql 3 | in the correct versions as specified, and also installs pg_auto_failover 4 | so that the database can be configured by other roles. 5 | 6 | The MIT License (MIT) 7 | 8 | Copyright (c) 2017 Jeff Geerling 9 | 10 | Permission is hereby granted, free of charge, to any person obtaining a copy of 11 | this software and associated documentation files (the "Software"), to deal in 12 | the Software without restriction, including without limitation the rights to 13 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 14 | the Software, and to permit persons to whom the Software is furnished to do so, 15 | subject to the following conditions: 16 | 17 | The above copyright notice and this permission notice shall be included in all 18 | copies or substantial portions of the Software. 19 | 20 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 22 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 23 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 24 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 25 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
26 | -------------------------------------------------------------------------------- /roles/postgres-cluster-backup-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "ensure /data/ansible/pg_dump exists" 3 | file: 4 | path: /data/ansible/pg_dump 5 | state: directory 6 | owner: "root" 7 | group: "root" 8 | # pg_dump apparently wants to cd back to the directory 9 | mode: 0711 10 | 11 | - stat: 12 | path: /data/ansible/pg_dumps 13 | register: pg_dumps_link 14 | 15 | - name: "ensure /data/ansible/pg_dumps exists" 16 | file: 17 | path: /data/ansible/pg_dumps 18 | state: directory 19 | owner: "postgres" 20 | group: "postgres" 21 | mode: 0700 22 | when: not (pg_dumps_link.stat.islnk is defined and pg_dumps_link.stat.islnk) 23 | 24 | - name: copy dump_all.sh script 25 | template: 26 | src: dump_all.sh.j2 27 | dest: /data/ansible/pg_dump/dump_all.sh 28 | owner: "root" 29 | group: "root" 30 | mode: 0700 31 | 32 | - name: copy start_dump.sh script 33 | copy: 34 | src: start_dump.sh 35 | dest: /data/ansible/pg_dump/start_dump.sh 36 | owner: "root" 37 | group: "root" 38 | mode: 0700 39 | 40 | - name: Remove cronjob for postgres dump 41 | when: "not (postgres_backup_enabled | default(False) | bool)" 42 | cron: 43 | name: "start automatic postgres dump" 44 | user: "root" 45 | state: absent 46 | 47 | - name: Create cronjob for postgres dump 48 | when: "postgres_backup_enabled | default(False) | bool" 49 | cron: 50 | name: "start automatic postgres dump" 51 | user: "root" 52 | minute: "0" 53 | hour: "2" 54 | job: "bash /data/ansible/pg_dump/start_dump.sh" 55 | -------------------------------------------------------------------------------- /test/upgrade_1.3_to_1.4/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | check_setup() { 4 | node_1_check_recovery=`PGPASSWORD=password1 psql -h 10.0.0.21 -p 5433 -t -X -A -c 'select pg_is_in_recovery()' --username testuser -d testdb` 5 | node_2_check_recovery=`PGPASSWORD=password1 psql -h 10.0.0.22 -p 5433 -t -X -A -c 'select pg_is_in_recovery()' --username testuser -d testdb` 6 | 7 | sorted=`echo -e "$node_2_check_recovery\n$node_1_check_recovery" | sort` 8 | 9 | f_count=`echo $sorted | grep -o f | wc -l` 10 | t_count=`echo $sorted | grep -o t | wc -l` 11 | 12 | if [ $t_count == "1" ]; then 13 | echo "Found exactly one primary. OK" 14 | else 15 | echo "Did not find exactly one primary. ERROR" 16 | exit 1 17 | fi 18 | 19 | if [ $f_count == "1" ]; then 20 | echo "Found exactly one secondary. OK" 21 | else 22 | echo "Did not find exactly one secondary. ERROR" 23 | exit 1 24 | fi 25 | } 26 | 27 | # we are testing, ignore host key checking 28 | ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/hosts.yml ../../base_setup.yml 29 | ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/hosts.yml ../../postgres_cluster_servers.yml 30 | 31 | check_setup 32 | 33 | # TODO: check if version is == 1.3 34 | 35 | echo "Setup with 1.3 successful, upgrading to 1.5 now..." 36 | 37 | ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/hosts.yml ../../postgres_cluster_upgrade_pre_1.4.yml --extra-vars='{"postgresql_new_pg_auto_failover_version": "1.5"}' 38 | 39 | check_setup 40 | 41 | # TODO: check if version is == 1.5 42 | 43 | echo "" 44 | echo "Finished tests.
OK" -------------------------------------------------------------------------------- /inventories/pg_auto_failover/hosts.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 10 | 11 | all: 12 | children: 13 | postgres_cluster: 14 | hosts: 15 | af-monitor-01: 16 | ansible_host: 116.203.218.49 17 | host_ip: "{{ ansible_host }}" 18 | postgresql_cluster_is_monitor: True 19 | af-database-01: 20 | ansible_host: 116.203.218.75 21 | host_ip: "{{ ansible_host }}" 22 | af-database-02: 23 | ansible_host: 49.12.9.15 24 | host_ip: "{{ ansible_host }}" -------------------------------------------------------------------------------- /roles/postgres-cluster-data-initialize-new/tasks/main.yml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | 4 | - name: Check if PostgreSQL database is initialized at target location 5 | stat: 6 | path: "{{ postgresql_cluster_data_dir }}/PG_VERSION" 7 | register: pgdata_dir_version 8 | 9 | - name: check if pg_autoctl already knows about this postgres cluster 10 | stat: 11 | path: "/home/postgres/.local/share/pg_autoctl/{{ postgresql_cluster_data_dir }}" 12 | register: pg_autoctl_dir 13 | 14 | - include_tasks: register_node.yml 15 | when: (not pg_autoctl_dir.stat.exists) and (not pgdata_dir_version.stat.exists) and not (postgresql_cluster_is_monitor | default('False') | bool) 16 | 17 | - name: Check if PostgreSQL database is initialized now at target location 18 | stat: 19 | path: "{{ postgresql_cluster_data_dir }}/PG_VERSION" 20 | register: pgdata_dir_version 21 | 22 | - name: check if pg_autoctl now knows about this postgres cluster 23 | stat: 24 | path: "/home/postgres/.local/share/pg_autoctl/{{ postgresql_cluster_data_dir }}" 25 | register: pg_autoctl_dir 26 | 27 | - fail: 28 | msg: "EMERG: could not reliably determine if pg_autoctl already knows about this cluster even after running the setup!" 
29 | when: not (pg_autoctl_dir.stat.exists) 30 | 31 | - name: Check if service is already set up 32 | stat: 33 | path: "/etc/systemd/system/{{ postgresql_cluster_daemon }}.service" 34 | register: postgresql_cluster_daemon_service_file 35 | 36 | - include_tasks: setup_service.yml 37 | when: (pg_autoctl_dir.stat.exists) and (pgdata_dir_version.stat.exists) and (not postgresql_cluster_daemon_service_file.stat.exists) and not (postgresql_cluster_is_monitor | default('False') | bool) -------------------------------------------------------------------------------- /roles/postgres-cluster-databases/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "check if server is standby (pg_is_in_recovery)" 3 | shell: 4 | psql -p "{{ postgresql_cluster_port }}" -t -X -A -c 'select pg_is_in_recovery()' 5 | become_user: "{{ postgresql_cluster_user }}" 6 | register: pg_is_in_recovery 7 | 8 | - name: Ensure PostgreSQL databases are present. 9 | postgresql_db: 10 | name: "{{ item.name }}" 11 | lc_collate: "{{ item.lc_collate | default('en_US.UTF-8') }}" 12 | lc_ctype: "{{ item.lc_ctype | default('en_US.UTF-8') }}" 13 | encoding: "{{ item.encoding | default('UTF-8') }}" 14 | template: "{{ item.template | default('template0') }}" 15 | login_host: "{{ item.login_host | default('localhost') }}" 16 | login_password: "{{ item.login_password | default(omit) }}" 17 | login_user: "{{ item.login_user | default(postgresql_cluster_user) }}" 18 | login_unix_socket: "{{ item.login_unix_socket | default(postgresql_cluster_unix_socket_directories[0]) }}" 19 | port: "{{ item.port | default(postgresql_cluster_port) }}" 20 | owner: "{{ item.owner | default(postgresql_cluster_user) }}" 21 | state: "{{ item.state | default('present') }}" 22 | with_items: "{{ postgresql_cluster_databases | default([]) }}" 23 | become: true 24 | become_user: "{{ postgresql_cluster_user }}" 25 | # See: https://github.com/ansible/ansible/issues/16048#issuecomment-229012509 26 | vars: 27 | ansible_ssh_pipelining: true 28 | when: not (postgresql_cluster_is_monitor | default('False') | bool) and pg_is_in_recovery.stdout_lines[0] == 'f' 29 | 30 | 31 | - include_tasks: add_schemas.yml 32 | vars: 33 | database: "{{ item }}" 34 | with_items: "{{ postgresql_cluster_databases | default([]) }}" -------------------------------------------------------------------------------- /tools/health_monitor/README.md: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_health_monitor 2 | Simple service to keep track of the current pg_auto_failover health so that you can use tools like uptime kuma to monitor. 
3 | 4 | 5 | ## Usage 6 | 7 | Create a `config.py` in your working directory similar to this: 8 | 9 | ``` 10 | servers = { 11 | "monitor": { 12 | "display_name": "monitor", 13 | "dsn": "postgresql://healthuser:healthuser@10.0.0.10:5433/pg_auto_failover?connect_timeout=1", 14 | "healthcheck": "SELECT MIN(health) FROM pgautofailover.node" 15 | }, 16 | "node01": { 17 | "display_name": "node01", 18 | "dsn": "postgresql://testuser:password1@10.0.0.11:5433/testdb?connect_timeout=1", 19 | "healthcheck": "SELECT 1" 20 | }, 21 | "node02": { 22 | "display_name": "node02", 23 | "dsn": "postgresql://testuser:password1@10.0.0.12:5433/testdb?connect_timeout=1", 24 | "healthcheck": "SELECT 1" 25 | }, 26 | } 27 | ``` 28 | 29 | Then run the Docker container like this: 30 | 31 | ``` 32 | docker run -v $(pwd)/config.py:/monitor/config.py:ro -p 8080:8080 --rm neuroforgede/pg_auto_failover_health_monitor:0.1 33 | ``` 34 | 35 | Then you will be able to check health like this: 36 | 37 | ``` 38 | curl --fail http://localhost:8080/api/v1/pg/health 39 | ``` 40 | 41 | If everything is fine, you will get a response (HTTP status 200) like this: 42 | 43 | ``` 44 | {"monitor":true,"node01":true,"node02":true} 45 | ``` 46 | 47 | If the service is unavailable, you will get this output: 48 | 49 | ``` 50 | curl: (22) The requested URL returned error: 503 SERVICE UNAVAILABLE 51 | ``` 52 | 53 | If you leave out `--fail`, you get output like this on a failure: 54 | 55 | ``` 56 | {"monitor":false,"node01":true,"node02":true} 57 | ``` -------------------------------------------------------------------------------- /roles/postgres-cluster-hba-config/templates/pg_ident.conf.j2: -------------------------------------------------------------------------------- 1 | {{ ansible_managed | comment }} 2 | # If SYSTEM-USERNAME starts with a slash (/), it will be treated as a 3 | # regular expression. Optionally this can contain a capture (a 4 | # parenthesized subexpression). The substring matching the capture 5 | # will be substituted for \1 (backslash-one) if present in 6 | # PG-USERNAME. 7 | # 8 | # Multiple maps may be specified in this file and used by pg_hba.conf. 9 | # 10 | # No map names are defined in the default configuration. If all 11 | # system user names and PostgreSQL user names are the same, you don't 12 | # need anything in this file. 13 | # 14 | # This file is read on server startup and when the postmaster receives 15 | # a SIGHUP signal. If you edit the file on a running system, you have 16 | # to SIGHUP the postmaster for the changes to take effect. You can 17 | # use "pg_ctl reload" to do that.
18 | 19 | # Put your actual configuration here 20 | # ---------------------------------- 21 | 22 | # MAPNAME SYSTEM-USERNAME PG-USERNAME 23 | autoctl_node_peer postgres autoctl_node 24 | {% for item in groups['postgres_cluster'] %} 25 | postgres_node_remote {{ hostvars[item]['host_ssl_cn'] | default(hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host'])) }} postgres 26 | autoctl_node_remote {{ hostvars[item]['host_ssl_cn'] | default(hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host'])) }} autoctl_node 27 | pgautofailover_replicator_remote {{ hostvars[item]['host_ssl_cn'] | default(hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host'])) }} pgautofailover_replicator 28 | pgautofailover_monitor_remote {{ hostvars[item]['host_ssl_cn'] | default(hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host'])) }} pgautofailover_monitor 29 | {% endfor %} -------------------------------------------------------------------------------- /roles/user-setup/tasks/subtasks/setup-single-user.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | --- 11 | # inspired by https://medium.com/sallesslice-com/visudo-with-ansible-746f83547bb3 12 | - name: user setup 13 | become: yes 14 | remote_user: root 15 | block: 16 | - name: "create user {{ user.name }}" 17 | user: 18 | name: "{{ user.name }}" 19 | append: yes 20 | shell: /bin/bash 21 | create_home: yes 22 | system: "{{ user.system | default('False') | bool }}" 23 | 24 | - name: "set up bashrc for user {{ user.name }}" 25 | copy: 26 | src: .bashrc 27 | dest: "/home/{{ user.name }}/.bashrc" 28 | owner: "{{ user.name }}" 29 | group: "{{ user.group }}" 30 | -------------------------------------------------------------------------------- /test/simple/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # we are testing, ignore host key checking 4 | ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory_$1/hosts.yml ../../base_setup.yml 5 | ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory_$1/hosts.yml ../../postgres_cluster_servers.yml 6 | 7 | node_1_check_recovery=`PGPASSWORD=password1 psql -h 10.0.0.11 -p 5433 -t -X -A -c 'select pg_is_in_recovery()' --username testuser -d testdb` 8 | node_2_check_recovery=`PGPASSWORD=password1 psql -h 10.0.0.12 -p 5433 -t -X -A -c 'select pg_is_in_recovery()' --username testuser -d testdb` 9 | 10 | sorted=`echo -e "$node_2_check_recovery\n$node_1_check_recovery" | sort` 11 | 12 | f_count=`echo $sorted | grep -o f | wc -l` 13 | t_count=`echo $sorted | grep -o t | wc -l` 14 | 15 | if [ $t_count == "1" ]; then 16 | echo "Found exactly one primary. OK" 17 | else 18 | echo "Did not find exactly one primary. ERROR" 19 | exit 1 20 | fi 21 | 22 | if [ $f_count == "1" ]; then 23 | echo "Found exactly one secondary. OK" 24 | else 25 | echo "Did not find exactly one secondary. ERROR" 26 | exit 1 27 | fi 28 | 29 | node_1_pgbouncer_test=`PGPASSWORD=password1 psql "port=6432 user=testuser dbname=testdb host=10.0.0.11 sslmode=allow" -t -X -A -c 'select 1'` 30 | node_2_pgbouncer_test=`PGPASSWORD=password1 psql "port=6432 user=testuser dbname=testdb host=10.0.0.12 sslmode=allow" -t -X -A -c 'select 1'` 31 | 32 | if [ "$node_1_pgbouncer_test" == "1" ]; then 33 | echo "Could authenticate against pgbouncer for node1. OK" 34 | else 35 | echo "Could not authenticate against pgbouncer for node1. ERROR" 36 | exit 1 37 | fi 38 | 39 | if [ "$node_2_pgbouncer_test" == "1" ]; then 40 | echo "Could authenticate against pgbouncer for node2. OK" 41 | else 42 | echo "Could not authenticate against pgbouncer for node2. ERROR" 43 | exit 1 44 | fi 45 | 46 | echo "" 47 | echo "Finished tests. OK" -------------------------------------------------------------------------------- /roles/postgres-cluster-monitor-initialize/tasks/main.yml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | 4 | - name: Check if PostgreSQL database is initialized.
5 | stat: 6 | path: "{{ postgresql_cluster_data_dir }}/PG_VERSION" 7 | register: pgdata_dir_version 8 | 9 | 10 | - name: check if pg_autoctl already knows about this postgres cluster 11 | stat: 12 | path: "/home/postgres/.local/share/pg_autoctl/{{ postgresql_cluster_data_dir }}" 13 | register: pg_autoctl_dir 14 | 15 | # explicitly do not remove the data dir here, we do not want to delete database clusters by accident 16 | 17 | # TODO: add support for crl files 18 | - include_tasks: "monitor_init_{{ postgresql_pg_auto_failover_version }}.yml" 19 | when: (not pg_autoctl_dir.stat.exists) and (not pgdata_dir_version.stat.exists) and (postgresql_cluster_is_monitor | default('False') | bool) 20 | 21 | - name: "create systemd config for cluster" 22 | shell: > 23 | su -c 'PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_autoctl -q show systemd --pgdata "{{ postgresql_cluster_data_dir }}"' postgres \ 24 | | tee /etc/systemd/system/{{ postgresql_cluster_daemon }}.service 25 | when: (not pgdata_dir_version.stat.exists) and (postgresql_cluster_is_monitor | default('False') | bool) 26 | 27 | - name: "make sure database cluster service is started and enabled at boot" 28 | systemd: 29 | state: started 30 | enabled: true 31 | daemon_reload: yes 32 | name: "{{ postgresql_cluster_daemon }}" 33 | when: (not pgdata_dir_version.stat.exists) and (postgresql_cluster_is_monitor | default('False') | bool) 34 | 35 | - include_role: 36 | name: postgres-cluster-hba-config 37 | when: (postgresql_cluster_is_monitor | default('False') | bool) 38 | 39 | - include_role: 40 | name: postgres-cluster-configure 41 | when: (postgresql_cluster_is_monitor | default('False') | bool) 42 | 43 | 44 | #PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_ctl stop --pgdata $(pwd) -------------------------------------------------------------------------------- /roles/postgres-cluster-data-initialize-new/tasks/register_node_1.3.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "compute monitor_nodes_list" 3 | vars: 4 | monitor_node_ip : "{{ hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host']) }}" 5 | set_fact: 6 | postgresql_cluster_monitor_node_ips: "{{ (postgresql_cluster_monitor_node_ips | default([])) + [monitor_node_ip] }}" 7 | with_items: "{{ groups['postgres_cluster'] | default([]) }}" 8 | 9 | 10 | # TODO: add support for crl files 11 | # FIXME: monitor port may be wrong if its overridden on a per host basis !!! 
12 | #--group {{ postgresql_cluster_group | default('0') }} \ 13 | 14 | - name: "run pg_autoctl create postgres" 15 | become_user: "{{ postgresql_cluster_user }}" 16 | # by default allow up to 5 hours for this command until it times out 17 | async: "{{ postgresql_cluster_new_node_creation_timeout | default(18000) | int }}" 18 | # poll every 10 seconds to see if we are finished 19 | poll: 10 20 | vars: 21 | first_monitor_node: "{{ postgresql_cluster_monitor_node_ips | first }}" 22 | shell: > 23 | PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_autoctl create postgres \ 24 | --pgdata "{{ postgresql_cluster_data_dir }}" \ 25 | --skip-pg-hba \ 26 | --formation {{ postgresql_cluster_formation | default('default') }} \ 27 | --ssl-ca-file "{{ postgresql_cluster_ssl_ca_file | default('/data/ansible/certs/postgres_server/rootCA.crt') }}" \ 28 | --server-key "{{ postgresql_cluster_server_key | default('/data/ansible/certs/postgres_server/server.key') }}" \ 29 | --server-cert "{{ postgresql_cluster_server_cert | default('/data/ansible/certs/postgres_server/server.crt') }}" \ 30 | --nodename "{{ host_ip }}" \ 31 | --pgport "{{ postgresql_cluster_port | default('5433') }}" \ 32 | --monitor postgres://autoctl_node@{{ first_monitor_node }}:{{ postgresql_cluster_port | default('5433') }}/pg_auto_failover 33 | when: not (postgresql_cluster_is_monitor | default('False') | bool) -------------------------------------------------------------------------------- /roles/postgres-cluster-data-initialize-new/tasks/register_node_1.4.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "compute monitor_nodes_list" 3 | vars: 4 | monitor_node_ip : "{{ hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host']) }}" 5 | set_fact: 6 | postgresql_cluster_monitor_node_ips: "{{ (postgresql_cluster_monitor_node_ips | default([])) + [monitor_node_ip] }}" 7 | with_items: "{{ groups['postgres_cluster'] | default([]) }}" 8 | 9 | 10 | # TODO: add support for crl files 11 | # FIXME: monitor port may be wrong if its overridden on a per host basis !!! 
12 | #--group {{ postgresql_cluster_group | default('0') }} \ 13 | 14 | - name: "run pg_autoctl create postgres" 15 | become_user: "{{ postgresql_cluster_user }}" 16 | # by default allow up to 5 hours for this command until it times out 17 | async: "{{ postgresql_cluster_new_node_creation_timeout | default(18000) | int }}" 18 | # poll every 10 seconds to see if we are finished 19 | poll: 10 20 | vars: 21 | first_monitor_node: "{{ postgresql_cluster_monitor_node_ips | first }}" 22 | shell: > 23 | PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_autoctl create postgres \ 24 | --pgdata "{{ postgresql_cluster_data_dir }}" \ 25 | --skip-pg-hba \ 26 | --formation {{ postgresql_cluster_formation | default('default') }} \ 27 | --ssl-ca-file "{{ postgresql_cluster_ssl_ca_file | default('/data/ansible/certs/postgres_server/rootCA.crt') }}" \ 28 | --server-key "{{ postgresql_cluster_server_key | default('/data/ansible/certs/postgres_server/server.key') }}" \ 29 | --server-cert "{{ postgresql_cluster_server_cert | default('/data/ansible/certs/postgres_server/server.crt') }}" \ 30 | --name "{{ host_ip }}" \ 31 | --hostname "{{ host_ip }}" \ 32 | --pgport "{{ postgresql_cluster_port | default('5433') }}" \ 33 | --monitor postgres://autoctl_node@{{ first_monitor_node }}:{{ postgresql_cluster_port | default('5433') }}/pg_auto_failover 34 | when: not (postgresql_cluster_is_monitor | default('False') | bool) -------------------------------------------------------------------------------- /roles/postgres-cluster-data-initialize-new/tasks/register_node_1.5.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "compute monitor_nodes_list" 3 | vars: 4 | monitor_node_ip : "{{ hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host']) }}" 5 | set_fact: 6 | postgresql_cluster_monitor_node_ips: "{{ (postgresql_cluster_monitor_node_ips | default([])) + [monitor_node_ip] }}" 7 | with_items: "{{ groups['postgres_cluster'] | default([]) }}" 8 | 9 | 10 | # TODO: add support for crl files 11 | # FIXME: monitor port may be wrong if its overridden on a per host basis !!! 
12 | #--group {{ postgresql_cluster_group | default('0') }} \ 13 | 14 | - name: "run pg_autoctl create postgres" 15 | become_user: "{{ postgresql_cluster_user }}" 16 | # by default allow up to 5 hours for this command until it times out 17 | async: "{{ postgresql_cluster_new_node_creation_timeout | default(18000) | int }}" 18 | # poll every 10 seconds to see if we are finished 19 | poll: 10 20 | vars: 21 | first_monitor_node: "{{ postgresql_cluster_monitor_node_ips | first }}" 22 | shell: > 23 | PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_autoctl create postgres \ 24 | --pgdata "{{ postgresql_cluster_data_dir }}" \ 25 | --skip-pg-hba \ 26 | --formation {{ postgresql_cluster_formation | default('default') }} \ 27 | --ssl-ca-file "{{ postgresql_cluster_ssl_ca_file | default('/data/ansible/certs/postgres_server/rootCA.crt') }}" \ 28 | --server-key "{{ postgresql_cluster_server_key | default('/data/ansible/certs/postgres_server/server.key') }}" \ 29 | --server-cert "{{ postgresql_cluster_server_cert | default('/data/ansible/certs/postgres_server/server.crt') }}" \ 30 | --name "{{ host_ip }}" \ 31 | --hostname "{{ host_ip }}" \ 32 | --pgport "{{ postgresql_cluster_port | default('5433') }}" \ 33 | --monitor postgres://autoctl_node@{{ first_monitor_node }}:{{ postgresql_cluster_port | default('5433') }}/pg_auto_failover 34 | when: not (postgresql_cluster_is_monitor | default('False') | bool) -------------------------------------------------------------------------------- /roles/postgres-cluster-data-initialize-new/tasks/register_node_1.6.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "compute monitor_nodes_list" 3 | vars: 4 | monitor_node_ip : "{{ hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host']) }}" 5 | set_fact: 6 | postgresql_cluster_monitor_node_ips: "{{ (postgresql_cluster_monitor_node_ips | default([])) + [monitor_node_ip] }}" 7 | with_items: "{{ groups['postgres_cluster'] | default([]) }}" 8 | 9 | 10 | # TODO: add support for crl files 11 | # FIXME: monitor port may be wrong if its overridden on a per host basis !!! 
12 | #--group {{ postgresql_cluster_group | default('0') }} \ 13 | 14 | - name: "run pg_autoctl create postgres" 15 | become_user: "{{ postgresql_cluster_user }}" 16 | # by default allow up to 5 hours for this command until it times out 17 | async: "{{ postgresql_cluster_new_node_creation_timeout | default(18000) | int }}" 18 | # poll every 10 seconds to see if we are finished 19 | poll: 10 20 | vars: 21 | first_monitor_node: "{{ postgresql_cluster_monitor_node_ips | first }}" 22 | shell: > 23 | PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_autoctl create postgres \ 24 | --pgdata "{{ postgresql_cluster_data_dir }}" \ 25 | --skip-pg-hba \ 26 | --formation {{ postgresql_cluster_formation | default('default') }} \ 27 | --ssl-ca-file "{{ postgresql_cluster_ssl_ca_file | default('/data/ansible/certs/postgres_server/rootCA.crt') }}" \ 28 | --server-key "{{ postgresql_cluster_server_key | default('/data/ansible/certs/postgres_server/server.key') }}" \ 29 | --server-cert "{{ postgresql_cluster_server_cert | default('/data/ansible/certs/postgres_server/server.crt') }}" \ 30 | --name "{{ host_ip }}" \ 31 | --hostname "{{ host_ip }}" \ 32 | --pgport "{{ postgresql_cluster_port | default('5433') }}" \ 33 | --monitor postgres://autoctl_node@{{ first_monitor_node }}:{{ postgresql_cluster_port | default('5433') }}/pg_auto_failover 34 | when: not (postgresql_cluster_is_monitor | default('False') | bool) -------------------------------------------------------------------------------- /inventories/pg_auto_failover/group_vars/all/pg_auto_failover_test_all_config.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | --- 11 | 12 | ansible_ssh_private_key_file: "{{playbook_dir}}/ssh_keys/root_rsa" 13 | ansible_user: "root" 14 | 15 | postgresql_cluster_name: "main_cluster" 16 | postgresql_cluster_port: "5433" 17 | 18 | postgresql_cluster_version: "14" 19 | postgresql_pg_auto_failover_version: "1.6" 20 | 21 | postgresql_cluster_hba_entries: [] 22 | #- { type: local, database: all, user: postgres, auth_method: peer } 23 | # add your entries for your manually created databases here 24 | 25 | postgresql_cluster_allowed_clients: [] 26 | #- name: "descriptive name" 27 | # ip: "ip of allowed client" 28 | 29 | postgresql_cluster_users: [] 30 | #- name: "user" 31 | # password: "password" 32 | # encrypted: true 33 | # state: present 34 | # role_attr_flags: "SUPERUSER" 35 | 36 | postgresql_cluster_databases: [] 37 | #- name: "dbname" 38 | # lc_collate: 'en_US.UTF-8' 39 | # lc_ctype: 'en_US.UTF-8' 40 | # encoding: 'UTF-8' 41 | # owner: "dbowner" 42 | # state: present 43 | # schemas: 44 | # - name: "private" 45 | -------------------------------------------------------------------------------- /roles/copy-ssl-certs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | --- 11 | - name: "ensure /data/ansible exists" 12 | file: 13 | path: /data/ansible 14 | state: directory 15 | 16 | - name: "ensure /data/ansible/certs exists" 17 | file: 18 | path: /data/ansible/certs 19 | state: directory 20 | 21 | - name: "copy ssl certs to {{ item.directory }}" 22 | copy: 23 | src: "{{ssl_certs_base_dir}}/{{ item.directory }}" 24 | dest: "/data/ansible/certs" 25 | mode: 0700 26 | owner: "{{ item.owner }}" 27 | group: "{{ item.group }}" 28 | when: not item.use_ids | default('False') | bool 29 | with_items: "{{ ssl_cert_dirs }}" 30 | 31 | - name: "copy ssl certs to {{ item.directory }}" 32 | copy: 33 | src: "{{ssl_certs_base_dir}}/{{ item.directory }}" 34 | dest: "/data/ansible/certs" 35 | mode: 0700 36 | owner: "{{ item.uid }}" 37 | group: "{{ item.gid }}" 38 | when: item.use_ids | default('False') | bool 39 | with_items: "{{ ssl_cert_dirs }}" 40 | 41 | - name: "Ensure /data/ansible/certs/{{ item.directory }} is 0700" 42 | command: chmod 700 /data/ansible/certs/{{ item.directory }} 43 | with_items: "{{ ssl_cert_dirs }}" 44 | 45 | - name: "Ensure files in /data/ansible/certs/{{ item.directory }} 0400" 46 | command: find /data/ansible/certs/{{ item.directory }} -type f -exec chmod 0400 {} \; 47 | with_items: "{{ ssl_cert_dirs }}" -------------------------------------------------------------------------------- /roles/postgres-cluster-xinetd/templates/pgsqlchck.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # some parts of this are based on code from https://github.com/rglaue/xinetd_bash_http_service/blob/master/xinetdhttpservice.sh 4 | # 5 | # This script checks if a postgres server is healthy running on localhost. It will return: 6 | # "HTTP/1.x 200 OK\r" (if postgres is running smoothly) 7 | # - OR - 8 | # "HTTP/1.x 503 Service Unavailable\r" (else) 9 | 10 | {% raw %} 11 | OPT_HTTP_STATUS=1 12 | # 13 | # The HTTP response. This will return a HTTP response with the provided HTTP 14 | # code and a descriptive message. 15 | # Example: 16 | # http_response 301 "You accessed something that does not exist" 17 | # http_response 200 '{ "status": "success" }' 18 | # 19 | http_response () { 20 | HTTP_CODE=$1 21 | MESSAGE=${2:-Message Undefined} 22 | length=${#MESSAGE} 23 | if [ "$OPT_HTTP_STATUS" -eq 1 ]; then 24 | if [ "$HTTP_CODE" -eq 503 ]; then 25 | echo -en "HTTP/1.1 503 Service Unavailable\r\n" 26 | elif [ "$HTTP_CODE" -eq 301 ]; then 27 | echo -en "HTTP/1.1 301 Not Found\r\n" 28 | elif [ "$HTTP_CODE" -eq 200 ]; then 29 | echo -en "HTTP/1.1 200 OK\r\n" 30 | elif [ "$HTTP_CODE" -eq 206 ]; then 31 | echo -en "HTTP/1.1 206 Partial Content\r\n" 32 | else 33 | echo -en "HTTP/1.1 ${HTTP_CODE} UNKNOWN\r\n" 34 | fi 35 | echo -en "Content-Type: text/plain\r\n" 36 | echo -en "Connection: close\r\n" 37 | echo -en "Content-Length: ${length}\r\n" 38 | echo -en "\r\n" 39 | echo -en "$MESSAGE" 40 | echo -en "\r\n" 41 | sleep 0.1 42 | exit 0 43 | fi 44 | } 45 | {% endraw %} 46 | 47 | VALUE=`psql -t -p 5433 -c "select pg_is_in_recovery()" -X -A -U {{ postgresql_cluster_xinetd_group | default('postgres_xinetd') }} -d template1 2> /dev/null` 48 | # Check the output. If it is not empty then everything is fine and we return something. Else, we just do not return anything. 
49 | 50 | 51 | if [ "$VALUE" == "t" ] 52 | then 53 | http_response {{ postgresql_cluster_xinetd_standby_http_code | default('206') }} "{{ postgresql_cluster_xinetd_standby_http_response | default('Standby') }}" 54 | elif [ "$VALUE" == "f" ] 55 | then 56 | http_response {{ postgresql_cluster_xinetd_primary_http_code | default('200') }} "{{ postgresql_cluster_xinetd_primary_http_response | default('Primary') }}" 57 | else 58 | http_response {{ postgresql_cluster_xinetd_down_http_code | default('503') }} "{{ postgresql_cluster_xinetd_down_http_response | default('DB Down') }}" 59 | fi -------------------------------------------------------------------------------- /tools/health_monitor/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /test/simple/certs/gen_server_cert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CA_DIR="$1" 4 | BASEDIR="$2" 5 | COMMON_NAME="$3" 6 | PASSKEY="$4" 7 | 8 | LOGGING_PREFIX="gen_cert.sh >> " 9 | 10 | mkdir -p $BASEDIR 11 | 12 | rm -f ${BASEDIR}/server.crt 13 | rm -f ${BASEDIR}/server.csr 14 | rm -f ${BASEDIR}/server.key 15 | 16 | echo "$PASSKEY" > ${BASEDIR}/passkey.txt 17 | 18 | # generate a key for our server certificate 19 | echo 20 | echo "${LOGGING_PREFIX} Generating key for server certificate" 21 | openssl genrsa -des3 -passout pass:${PASSKEY} -out ${BASEDIR}/server.pass.key 4096 22 | openssl rsa -passin pass:${PASSKEY} -in ${BASEDIR}/server.pass.key -out ${BASEDIR}/server.key 23 | rm ${BASEDIR}/server.pass.key 24 | echo 25 | 26 | # create a certificate request for our server. This includes a subject alternative name so either aios-localhost, localhost or postgres_ssl can be used to address it 27 | echo 28 | echo "${LOGGING_PREFIX} Creating server certificate" 29 | openssl req -new -key ${BASEDIR}/server.key -out ${BASEDIR}/server.csr -subj "/emailAddress=kontakt@your-company.de/C=DE/ST=Bavaria/L=Bayreuth/O=your-company/OU=your-company GmbH & Co. 
KG/CN=${COMMON_NAME}" 30 | echo "${LOGGING_PREFIX} Server certificate signing request (${BASEDIR}/server.csr) is:" 31 | openssl req -verify -in ${BASEDIR}/server.csr -text -noout 32 | echo 33 | 34 | # use our CA certificate and key to create a signed version of the server certificate 35 | echo 36 | echo "${LOGGING_PREFIX} Signing server certificate using our root CA certificate and key" 37 | openssl x509 -req -sha512 -days 365000 -in ${BASEDIR}/server.csr -CA ${CA_DIR}/rootCA.crt -CAkey ${CA_DIR}/rootCA.key -CAcreateserial -out ${BASEDIR}/server.crt 38 | chmod og-rwx ${BASEDIR}/server.key 39 | echo "${LOGGING_PREFIX} Server certificate signed with our root CA certificate (${BASEDIR}/server.crt) is:" 40 | openssl x509 -in ${BASEDIR}/server.crt -text -noout 41 | echo 42 | 43 | # done output the base64 encoded version of the root CA certificate which should be added to trust stores 44 | echo 45 | echo "${LOGGING_PREFIX} Use the following CA certificate variables:" 46 | B64_CA_CERT=`cat ${CA_DIR}/rootCA.crt | base64` 47 | echo "POSTGRES_SSL_CA_CERT=${B64_CA_CERT}" 48 | 49 | cp ${CA_DIR}/rootCA.crt ${BASEDIR}/rootCA.crt 50 | 51 | openssl x509 -outform der -in ${BASEDIR}/rootCA.crt -out ${BASEDIR}/rootCA.crt.der 52 | openssl pkcs8 -topk8 -inform PEM -outform DER -in ${BASEDIR}/server.key -out ${BASEDIR}/server.key.pk8 -nocrypt 53 | 54 | cat ${BASEDIR}/server.key ${BASEDIR}/server.crt > ${BASEDIR}/server-complete.pem 55 | 56 | cp ${CA_DIR}/rootCA.crt ${BASEDIR}/rootCA.crt 57 | cat ${CA_DIR}/rootCA-verification.pem > ${BASEDIR}/rootCA-verification.pem 58 | -------------------------------------------------------------------------------- /certs/pg_auto_failover/gen_server_cert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CA_DIR="$1" 4 | BASEDIR="$2" 5 | COMMON_NAME="$3" 6 | PASSKEY="$4" 7 | 8 | LOGGING_PREFIX="gen_cert.sh >> " 9 | 10 | mkdir -p $BASEDIR 11 | 12 | rm -f ${BASEDIR}/server.crt 13 | rm -f ${BASEDIR}/server.csr 14 | rm -f ${BASEDIR}/server.key 15 | 16 | echo "$PASSKEY" > ${BASEDIR}/passkey.txt 17 | 18 | # generate a key for our server certificate 19 | echo 20 | echo "${LOGGING_PREFIX} Generating key for server certificate" 21 | openssl genrsa -des3 -passout pass:${PASSKEY} -out ${BASEDIR}/server.pass.key 4096 22 | openssl rsa -passin pass:${PASSKEY} -in ${BASEDIR}/server.pass.key -out ${BASEDIR}/server.key 23 | rm ${BASEDIR}/server.pass.key 24 | echo 25 | 26 | # create a certificate request for our server. This includes a subject alternative name so either aios-localhost, localhost or postgres_ssl can be used to address it 27 | echo 28 | echo "${LOGGING_PREFIX} Creating server certificate" 29 | openssl req -new -key ${BASEDIR}/server.key -out ${BASEDIR}/server.csr -subj "/emailAddress=kontakt@your-company.de/C=DE/ST=Bavaria/L=Bayreuth/O=your-company/OU=your-company GmbH & Co. 
KG/CN=${COMMON_NAME}" 30 | echo "${LOGGING_PREFIX} Server certificate signing request (${BASEDIR}/server.csr) is:" 31 | openssl req -verify -in ${BASEDIR}/server.csr -text -noout 32 | echo 33 | 34 | # use our CA certificate and key to create a signed version of the server certificate 35 | echo 36 | echo "${LOGGING_PREFIX} Signing server certificate using our root CA certificate and key" 37 | openssl x509 -req -sha512 -days 365000 -in ${BASEDIR}/server.csr -CA ${CA_DIR}/rootCA.crt -CAkey ${CA_DIR}/rootCA.key -CAcreateserial -out ${BASEDIR}/server.crt 38 | chmod og-rwx ${BASEDIR}/server.key 39 | echo "${LOGGING_PREFIX} Server certificate signed with our root CA certificate (${BASEDIR}/server.crt) is:" 40 | openssl x509 -in ${BASEDIR}/server.crt -text -noout 41 | echo 42 | 43 | # done output the base64 encoded version of the root CA certificate which should be added to trust stores 44 | echo 45 | echo "${LOGGING_PREFIX} Use the following CA certificate variables:" 46 | B64_CA_CERT=`cat ${CA_DIR}/rootCA.crt | base64` 47 | echo "POSTGRES_SSL_CA_CERT=${B64_CA_CERT}" 48 | 49 | cp ${CA_DIR}/rootCA.crt ${BASEDIR}/rootCA.crt 50 | 51 | openssl x509 -outform der -in ${BASEDIR}/rootCA.crt -out ${BASEDIR}/rootCA.crt.der 52 | openssl pkcs8 -topk8 -inform PEM -outform DER -in ${BASEDIR}/server.key -out ${BASEDIR}/server.key.pk8 -nocrypt 53 | 54 | cat ${BASEDIR}/server.key ${BASEDIR}/server.crt > ${BASEDIR}/server-complete.pem 55 | 56 | cp ${CA_DIR}/rootCA.crt ${BASEDIR}/rootCA.crt 57 | cat ${CA_DIR}/rootCA-verification.pem > ${BASEDIR}/rootCA-verification.pem 58 | -------------------------------------------------------------------------------- /test/upgrade_1.3_to_1.4/certs/gen_server_cert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CA_DIR="$1" 4 | BASEDIR="$2" 5 | COMMON_NAME="$3" 6 | PASSKEY="$4" 7 | 8 | LOGGING_PREFIX="gen_cert.sh >> " 9 | 10 | mkdir -p $BASEDIR 11 | 12 | rm -f ${BASEDIR}/server.crt 13 | rm -f ${BASEDIR}/server.csr 14 | rm -f ${BASEDIR}/server.key 15 | 16 | echo "$PASSKEY" > ${BASEDIR}/passkey.txt 17 | 18 | # generate a key for our server certificate 19 | echo 20 | echo "${LOGGING_PREFIX} Generating key for server certificate" 21 | openssl genrsa -des3 -passout pass:${PASSKEY} -out ${BASEDIR}/server.pass.key 4096 22 | openssl rsa -passin pass:${PASSKEY} -in ${BASEDIR}/server.pass.key -out ${BASEDIR}/server.key 23 | rm ${BASEDIR}/server.pass.key 24 | echo 25 | 26 | # create a certificate request for our server. This includes a subject alternative name so either aios-localhost, localhost or postgres_ssl can be used to address it 27 | echo 28 | echo "${LOGGING_PREFIX} Creating server certificate" 29 | openssl req -new -key ${BASEDIR}/server.key -out ${BASEDIR}/server.csr -subj "/emailAddress=kontakt@your-company.de/C=DE/ST=Bavaria/L=Bayreuth/O=your-company/OU=your-company GmbH & Co. 
KG/CN=${COMMON_NAME}" 30 | echo "${LOGGING_PREFIX} Server certificate signing request (${BASEDIR}/server.csr) is:" 31 | openssl req -verify -in ${BASEDIR}/server.csr -text -noout 32 | echo 33 | 34 | # use our CA certificate and key to create a signed version of the server certificate 35 | echo 36 | echo "${LOGGING_PREFIX} Signing server certificate using our root CA certificate and key" 37 | openssl x509 -req -sha512 -days 365000 -in ${BASEDIR}/server.csr -CA ${CA_DIR}/rootCA.crt -CAkey ${CA_DIR}/rootCA.key -CAcreateserial -out ${BASEDIR}/server.crt 38 | chmod og-rwx ${BASEDIR}/server.key 39 | echo "${LOGGING_PREFIX} Server certificate signed with our root CA certificate (${BASEDIR}/server.crt) is:" 40 | openssl x509 -in ${BASEDIR}/server.crt -text -noout 41 | echo 42 | 43 | # done output the base64 encoded version of the root CA certificate which should be added to trust stores 44 | echo 45 | echo "${LOGGING_PREFIX} Use the following CA certificate variables:" 46 | B64_CA_CERT=`cat ${CA_DIR}/rootCA.crt | base64` 47 | echo "POSTGRES_SSL_CA_CERT=${B64_CA_CERT}" 48 | 49 | cp ${CA_DIR}/rootCA.crt ${BASEDIR}/rootCA.crt 50 | 51 | openssl x509 -outform der -in ${BASEDIR}/rootCA.crt -out ${BASEDIR}/rootCA.crt.der 52 | openssl pkcs8 -topk8 -inform PEM -outform DER -in ${BASEDIR}/server.key -out ${BASEDIR}/server.key.pk8 -nocrypt 53 | 54 | cat ${BASEDIR}/server.key ${BASEDIR}/server.crt > ${BASEDIR}/server-complete.pem 55 | 56 | cp ${CA_DIR}/rootCA.crt ${BASEDIR}/rootCA.crt 57 | cat ${CA_DIR}/rootCA-verification.pem > ${BASEDIR}/rootCA-verification.pem 58 | -------------------------------------------------------------------------------- /test/simple/inventory_12/hosts.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | 11 | all: 12 | children: 13 | postgres_cluster: 14 | hosts: 15 | monitor: 16 | ansible_ssh_host: "127.0.0.1" 17 | ansible_ssh_port: "2200" 18 | ansible_ssh_user: "vagrant" 19 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/monitor/virtualbox/private_key" 20 | 21 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 22 | 23 | ansible_host: 10.0.0.10 24 | host_ip: "{{ ansible_host }}" 25 | postgresql_cluster_is_monitor: True 26 | node1: 27 | ansible_ssh_host: "127.0.0.1" 28 | ansible_ssh_port: "2201" 29 | ansible_ssh_user: "vagrant" 30 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/node1/virtualbox/private_key" 31 | 32 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 33 | 34 | ansible_host: 10.0.0.11 35 | host_ip: "{{ ansible_host }}" 36 | node2: 37 | ansible_ssh_host: "127.0.0.1" 38 | ansible_ssh_port: "2202" 39 | ansible_ssh_user: "vagrant" 40 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/node2/virtualbox/private_key" 41 | 42 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 43 | 44 | ansible_host: 10.0.0.12 45 | host_ip: "{{ ansible_host }}" -------------------------------------------------------------------------------- /test/simple/inventory_13/hosts.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | 11 | all: 12 | children: 13 | postgres_cluster: 14 | hosts: 15 | monitor: 16 | ansible_ssh_host: "127.0.0.1" 17 | ansible_ssh_port: "2200" 18 | ansible_ssh_user: "vagrant" 19 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/monitor/virtualbox/private_key" 20 | 21 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 22 | 23 | ansible_host: 10.0.0.10 24 | host_ip: "{{ ansible_host }}" 25 | postgresql_cluster_is_monitor: True 26 | node1: 27 | ansible_ssh_host: "127.0.0.1" 28 | ansible_ssh_port: "2201" 29 | ansible_ssh_user: "vagrant" 30 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/node1/virtualbox/private_key" 31 | 32 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 33 | 34 | ansible_host: 10.0.0.11 35 | host_ip: "{{ ansible_host }}" 36 | node2: 37 | ansible_ssh_host: "127.0.0.1" 38 | ansible_ssh_port: "2202" 39 | ansible_ssh_user: "vagrant" 40 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/node2/virtualbox/private_key" 41 | 42 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 43 | 44 | ansible_host: 10.0.0.12 45 | host_ip: "{{ ansible_host }}" -------------------------------------------------------------------------------- /test/simple/inventory_12_1.5/hosts.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | 11 | all: 12 | children: 13 | postgres_cluster: 14 | hosts: 15 | monitor: 16 | ansible_ssh_host: "127.0.0.1" 17 | ansible_ssh_port: "2200" 18 | ansible_ssh_user: "vagrant" 19 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/monitor/virtualbox/private_key" 20 | 21 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 22 | 23 | ansible_host: 10.0.0.10 24 | host_ip: "{{ ansible_host }}" 25 | postgresql_cluster_is_monitor: True 26 | node1: 27 | ansible_ssh_host: "127.0.0.1" 28 | ansible_ssh_port: "2201" 29 | ansible_ssh_user: "vagrant" 30 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/node1/virtualbox/private_key" 31 | 32 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 33 | 34 | ansible_host: 10.0.0.11 35 | host_ip: "{{ ansible_host }}" 36 | node2: 37 | ansible_ssh_host: "127.0.0.1" 38 | ansible_ssh_port: "2202" 39 | ansible_ssh_user: "vagrant" 40 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/node2/virtualbox/private_key" 41 | 42 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 43 | 44 | ansible_host: 10.0.0.12 45 | host_ip: "{{ ansible_host }}" -------------------------------------------------------------------------------- /test/simple/inventory_13_1.6/hosts.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | 11 | all: 12 | children: 13 | postgres_cluster: 14 | hosts: 15 | monitor: 16 | ansible_ssh_host: "127.0.0.1" 17 | ansible_ssh_port: "2200" 18 | ansible_ssh_user: "vagrant" 19 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/monitor/virtualbox/private_key" 20 | 21 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 22 | 23 | ansible_host: 10.0.0.10 24 | host_ip: "{{ ansible_host }}" 25 | postgresql_cluster_is_monitor: True 26 | node1: 27 | ansible_ssh_host: "127.0.0.1" 28 | ansible_ssh_port: "2201" 29 | ansible_ssh_user: "vagrant" 30 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/node1/virtualbox/private_key" 31 | 32 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 33 | 34 | ansible_host: 10.0.0.11 35 | host_ip: "{{ ansible_host }}" 36 | node2: 37 | ansible_ssh_host: "127.0.0.1" 38 | ansible_ssh_port: "2202" 39 | ansible_ssh_user: "vagrant" 40 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/node2/virtualbox/private_key" 41 | 42 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 43 | 44 | ansible_host: 10.0.0.12 45 | host_ip: "{{ ansible_host }}" -------------------------------------------------------------------------------- /test/simple/inventory_14_1.6/hosts.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | 11 | all: 12 | children: 13 | postgres_cluster: 14 | hosts: 15 | monitor: 16 | ansible_ssh_host: "127.0.0.1" 17 | ansible_ssh_port: "2200" 18 | ansible_ssh_user: "vagrant" 19 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/monitor/virtualbox/private_key" 20 | 21 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 22 | 23 | ansible_host: 10.0.0.10 24 | host_ip: "{{ ansible_host }}" 25 | postgresql_cluster_is_monitor: True 26 | node1: 27 | ansible_ssh_host: "127.0.0.1" 28 | ansible_ssh_port: "2201" 29 | ansible_ssh_user: "vagrant" 30 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/node1/virtualbox/private_key" 31 | 32 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 33 | 34 | ansible_host: 10.0.0.11 35 | host_ip: "{{ ansible_host }}" 36 | node2: 37 | ansible_ssh_host: "127.0.0.1" 38 | ansible_ssh_port: "2202" 39 | ansible_ssh_user: "vagrant" 40 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/simple/.vagrant/machines/node2/virtualbox/private_key" 41 | 42 | ssl_certs_base_dir: "{{ playbook_dir }}/test/simple/certs/test/{{inventory_hostname}}" 43 | 44 | ansible_host: 10.0.0.12 45 | host_ip: "{{ ansible_host }}" -------------------------------------------------------------------------------- /postgres_cluster_servers.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: postgres_cluster 3 | any_errors_fatal: true 4 | become: true 5 | vars: 6 | setup_additional_groups: 7 | - name: "{{ postgresql_cluster_group | default('postgres') }}" 8 | requires_root_password: True 9 | - name: "{{ postgresql_cluster_xinetd_group | default('postgres_xinetd') }}" 10 | requires_root_password: True 11 | setup_additional_users: 12 | - name: "{{ postgresql_cluster_user | default('postgres') }}" 13 | group: "{{ postgresql_cluster_group | default('postgres') }}" 14 | system: True 15 | is_sudo: False 16 | - name: "{{ postgresql_cluster_xinetd_user | default('postgres_xinetd') }}" 17 | group: "{{ postgresql_cluster_xinetd_group | default('postgres_xinetd') }}" 18 | system: True 19 | is_sudo: False 20 | ssl_cert_dirs: 21 | - directory: "postgres_server" 22 | owner: "{{ postgresql_cluster_user | default('postgres') }}" 23 | group: "{{ postgresql_cluster_group | default('postgres') }}" 24 | roles: 25 | - role: user-setup 26 | - role: force-reconnect 27 | - role: copy-ssl-certs 28 | 29 | - hosts: postgres_cluster 30 | any_errors_fatal: true 31 | become: true 32 | roles: 33 | - name: postgres-cluster-load-vars 34 | - role: postgres-pre-setup 35 | - role: postgres-cluster-install 36 | - role: postgres-cluster-user-setup 37 | - role: postgres-cluster-pg-auto-failover-install 38 | - role: postgres-cluster-monitor-initialize 39 | # update all pg_hba configs for all already initialized nodes 40 | - role: postgres-cluster-data-initialize-existing 41 | 42 | 43 | - hosts: postgres_cluster 44 | any_errors_fatal: true 45 | become: true 46 | serial: true 47 | roles: 48 | - name: postgres-cluster-load-vars 49 | # one by one add all new data nodes 50 | - role: postgres-cluster-data-initialize-new 51 | 52 | # final setup run, configure databases, hba-config, xinetd 53 | - hosts: postgres_cluster 54 | any_errors_fatal: true 55 | become: true 56 | roles: 57 | - name: postgres-cluster-load-vars 58 | - role: postgres-cluster-users 59 | - role: postgres-cluster-databases 60 | # not really needed as this is already done during setup, but 
does not hurt 61 | - role: postgres-cluster-hba-config 62 | # setup relevant extra services running on the db servers, namely pgbouncer and xinetd for healtchecks 63 | - role: postgres-cluster-pgbouncer-setup 64 | when: postgresql_cluster_pg_bouncer_include | default('False') | bool 65 | - role: postgres-cluster-pgbouncer-client-setup 66 | when: postgresql_cluster_pg_bouncer_include | default('False') | bool 67 | - role: postgres-cluster-xinetd 68 | -------------------------------------------------------------------------------- /test/upgrade_1.3_to_1.4/inventory/hosts.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 10 | 11 | all: 12 | children: 13 | postgres_cluster: 14 | hosts: 15 | monitor: 16 | ansible_ssh_host: "127.0.0.1" 17 | ansible_ssh_port: "2210" 18 | ansible_ssh_user: "vagrant" 19 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/upgrade_1.3_to_1.4/.vagrant/machines/monitor/virtualbox/private_key" 20 | 21 | ssl_certs_base_dir: "{{ playbook_dir }}/test/upgrade_1.3_to_1.4/certs/test/{{inventory_hostname}}" 22 | 23 | ansible_host: 10.0.0.20 24 | host_ip: "{{ ansible_host }}" 25 | postgresql_cluster_is_monitor: True 26 | node1: 27 | ansible_ssh_host: "127.0.0.1" 28 | ansible_ssh_port: "2211" 29 | ansible_ssh_user: "vagrant" 30 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/upgrade_1.3_to_1.4/.vagrant/machines/node1/virtualbox/private_key" 31 | 32 | ssl_certs_base_dir: "{{ playbook_dir }}/test/upgrade_1.3_to_1.4/certs/test/{{inventory_hostname}}" 33 | 34 | ansible_host: 10.0.0.21 35 | host_ip: "{{ ansible_host }}" 36 | node2: 37 | ansible_ssh_host: "127.0.0.1" 38 | ansible_ssh_port: "2212" 39 | ansible_ssh_user: "vagrant" 40 | ansible_ssh_private_key_file: "{{ playbook_dir }}/test/upgrade_1.3_to_1.4/.vagrant/machines/node2/virtualbox/private_key" 41 | 42 | ssl_certs_base_dir: "{{ playbook_dir }}/test/upgrade_1.3_to_1.4/certs/test/{{inventory_hostname}}" 43 | 44 | ansible_host: 10.0.0.22 45 | host_ip: "{{ ansible_host }}" -------------------------------------------------------------------------------- /roles/postgres-cluster-load-vars/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Variable configuration. 3 | - name: Include OS-specific variables (Debian). 
4 | include_vars: "{{ ansible_distribution }}-{{ ansible_distribution_version.split('.')[0] }}.yml" 5 | when: ansible_os_family == 'Debian' 6 | 7 | - name: Define postgresql_cluster_packages. 8 | set_fact: 9 | postgresql_cluster_packages: "{{ __postgresql_cluster_packages | list }}" 10 | when: postgresql_cluster_packages is not defined 11 | 12 | - name: Define postgresql_cluster_version. 13 | set_fact: 14 | postgresql_cluster_version: "{{ __postgresql_cluster_version }}" 15 | when: postgresql_cluster_version is not defined 16 | 17 | - name: "Define postgresql_pg_auto_failover_version." 18 | set_fact: 19 | postgresql_pg_auto_failover_version: "{{ __postgresql_pg_auto_failover_version }}" 20 | when: postgresql_pg_auto_failover_version is not defined 21 | 22 | - name: "Define postgresql_cluster_user." 23 | set_fact: 24 | postgresql_cluster_user: "{{ __postgresql_cluster_user }}" 25 | when: postgresql_cluster_user is not defined 26 | 27 | - name: "Define postgresql_cluster_restarted_state." 28 | set_fact: 29 | postgresql_cluster_restarted_state: "{{ __postgresql_cluster_restarted_state }}" 30 | when: postgresql_cluster_restarted_state is not defined 31 | 32 | - name: "Define postgresql_repository." 33 | set_fact: 34 | postgresql_repository: "{{ __postgresql_repository }}" 35 | when: postgresql_repository is not defined 36 | 37 | - name: Define postgresql_cluster_daemon. 38 | set_fact: 39 | postgresql_cluster_daemon: "{{ __postgresql_cluster_daemon }}" 40 | when: postgresql_cluster_daemon is not defined 41 | 42 | - name: Define postgresql_cluster_data_dir. 43 | set_fact: 44 | postgresql_cluster_data_dir: "{{ __postgresql_cluster_data_dir }}" 45 | when: postgresql_cluster_data_dir is not defined 46 | 47 | - name: Define postgresql_cluster_port. 48 | set_fact: 49 | postgresql_cluster_port: "{{ __postgresql_cluster_port }}" 50 | when: postgresql_cluster_port is not defined 51 | 52 | - name: Define postgresql_cluster_bin_path. 53 | set_fact: 54 | postgresql_cluster_bin_path: "{{ __postgresql_cluster_bin_path }}" 55 | when: postgresql_cluster_bin_path is not defined 56 | 57 | - name: Define postgresql_cluster_config_path. 58 | set_fact: 59 | postgresql_cluster_config_path: "{{ __postgresql_cluster_config_path }}" 60 | when: postgresql_cluster_config_path is not defined 61 | 62 | - name: Define postgresql_cluster_unix_socket_directories_mode. 63 | set_fact: 64 | postgresql_cluster_unix_socket_directories_mode: >- 65 | {{ __postgresql_cluster_unix_socket_directories_mode | default('02775') }} 66 | when: postgresql_cluster_unix_socket_directories_mode is not defined 67 | -------------------------------------------------------------------------------- /roles/postgres-cluster-users/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "check if server is standby (pg_is_in_recovery)" 3 | shell: 4 | psql -p "{{ postgresql_cluster_port }}" -t -X -A -c 'select pg_is_in_recovery()' 5 | become_user: "{{ postgresql_cluster_user }}" 6 | register: pg_is_in_recovery 7 | 8 | - name: Ensure PostgreSQL users are present. 
9 | postgresql_user: 10 | name: "{{ item.name }}" 11 | password: "{{ item.password | default(omit) }}" 12 | encrypted: "{{ item.encrypted | default(omit) }}" 13 | priv: "{{ item.priv | default(omit) }}" 14 | role_attr_flags: "{{ item.role_attr_flags | default(omit) }}" 15 | db: "{{ item.db | default(omit) }}" 16 | login_host: "{{ item.login_host | default('localhost') }}" 17 | login_password: "{{ item.login_password | default(omit) }}" 18 | login_user: "{{ item.login_user | default(postgresql_cluster_user) }}" 19 | login_unix_socket: "{{ item.login_unix_socket | default(postgresql_cluster_unix_socket_directories[0]) }}" 20 | port: "{{ item.port | default(postgresql_cluster_port) }}" 21 | state: "{{ item.state | default('present') }}" 22 | with_items: "{{ postgresql_cluster_users | default([]) }}" 23 | no_log: "{{ postgres_users_no_log }}" 24 | become: true 25 | become_user: "{{ postgresql_cluster_user }}" 26 | when: not (postgresql_cluster_is_monitor | default('False') | bool) and pg_is_in_recovery.stdout_lines[0] == 'f' 27 | # See: https://github.com/ansible/ansible/issues/16048#issuecomment-229012509 28 | vars: 29 | ansible_ssh_pipelining: true 30 | 31 | 32 | - name: Ensure PostgreSQL cluster monitor users are present. 33 | postgresql_user: 34 | name: "{{ item.name }}" 35 | password: "{{ item.password | default(omit) }}" 36 | encrypted: "{{ item.encrypted | default(omit) }}" 37 | priv: "{{ item.priv | default(omit) }}" 38 | role_attr_flags: "{{ item.role_attr_flags | default(omit) }}" 39 | db: "{{ item.db | default(omit) }}" 40 | login_host: "{{ item.login_host | default('localhost') }}" 41 | login_password: "{{ item.login_password | default(omit) }}" 42 | login_user: "{{ item.login_user | default(postgresql_cluster_user) }}" 43 | login_unix_socket: "{{ item.login_unix_socket | default(postgresql_cluster_unix_socket_directories[0]) }}" 44 | port: "{{ item.port | default(postgresql_cluster_port) }}" 45 | state: "{{ item.state | default('present') }}" 46 | with_items: "{{ postgresql_monitor_users | default([]) }}" 47 | no_log: "{{ postgres_users_no_log }}" 48 | become: true 49 | become_user: "{{ postgresql_cluster_user }}" 50 | when: (postgresql_cluster_is_monitor | default('False') | bool) and pg_is_in_recovery.stdout_lines[0] == 'f' 51 | # See: https://github.com/ansible/ansible/issues/16048#issuecomment-229012509 52 | vars: 53 | ansible_ssh_pipelining: true -------------------------------------------------------------------------------- /roles/postgres-cluster-xinetd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "check if server is standby (pg_is_in_recovery)" 3 | shell: 4 | psql -p "{{ postgresql_cluster_port }}" -t -X -A -c 'select pg_is_in_recovery()' 5 | become_user: "{{ postgresql_cluster_user }}" 6 | register: pg_is_in_recovery 7 | 8 | - name: Ensure PostgreSQL xinetd user is present on the cluster. 
9 | postgresql_user: 10 | name: "{{ postgresql_cluster_xinetd_user | default('postgres_xinetd') }}" 11 | login_host: "localhost" 12 | login_user: "{{ postgresql_cluster_user }}" 13 | login_unix_socket: "{{ postgresql_cluster_unix_socket_directories[0] }}" 14 | port: "{{ postgresql_cluster_port }}" 15 | state: "present" 16 | no_log: false #"{{ postgres_users_no_log }}" 17 | become: true 18 | become_user: "{{ postgresql_cluster_user }}" 19 | when: not (postgresql_cluster_is_monitor | default('False') | bool) and pg_is_in_recovery.stdout_lines[0] == 'f' 20 | # See: https://github.com/ansible/ansible/issues/16048#issuecomment-229012509 21 | vars: 22 | ansible_ssh_pipelining: true 23 | 24 | - name: Ensure that the xinetd packages are installed 25 | become: true 26 | package: 27 | name: "xinetd" 28 | state: present 29 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 30 | 31 | - name: Ensure that xinetd is started and enabled on boot 32 | tags: xinetd 33 | become: true 34 | service: 35 | name: "xinetd" 36 | state: started 37 | enabled: yes 38 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 39 | 40 | - name: Template pgsqlchck script in place 41 | template: 42 | src: pgsqlchck.j2 43 | dest: /opt/pgsqlchk 44 | owner: "root" 45 | group: "root" 46 | mode: 0755 47 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 48 | 49 | - name: Template pgsqlchck service into /etc/xinet.d 50 | template: 51 | src: pgsqlchck_xinetd.j2 52 | dest: /etc/xinetd.d/pgsqlchk 53 | owner: "root" 54 | group: "root" 55 | mode: 0644 56 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 57 | 58 | - name: "add pgsqlchck line to /etc/services" 59 | lineinfile: 60 | path: /etc/services 61 | line: 'pgsqlchk 23267/tcp # Ansible Managed: pgsqlchk' 62 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 63 | 64 | - name: "reload xinetd" 65 | become: true 66 | service: 67 | name: "xinetd" 68 | state: "reloaded" 69 | enabled: yes 70 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 71 | 72 | - name: "Allow incoming access to the xinetd postgres port {{ postgresql_cluster_xinetd_port | default('23267') }} for all cluster clients" 73 | ufw: 74 | rule: allow 75 | direction: in 76 | src: "{{ item.ip }}/{{ item.subnet_mask | default('32') }}" 77 | to_port: "{{ postgresql_cluster_xinetd_port | default('23267') }}" 78 | comment: "pgsqlchk xinetd {{ postgresql_cluster_name }} - client: {{ item.name }}" 79 | with_items: "{{ postgresql_cluster_allowed_clients | default([]) }}" 80 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 81 | -------------------------------------------------------------------------------- /test/simple/inventory_12/group_vars/all/test_all.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 10 | --- 11 | postgresql_cluster_name: "main_cluster" 12 | postgresql_cluster_port: "5433" 13 | 14 | postgresql_cluster_version: "12" 15 | 16 | postgresql_cluster_group: "1" 17 | postgresql_cluster_formation: "default" # FIXME: should be work with non default 18 | 19 | postgresql_cluster_pg_bouncer_client_tls_sslmode: "require" 20 | postgresql_cluster_pg_bouncer_root_password: "pgbouncerpassword" 21 | postgresql_cluster_pg_bouncer_include: True 22 | 23 | postgresql_cluster_hba_entries: 24 | - { type: hostssl, database: "testdb", address: "10.0.0.0/24", user: "testuser", auth_method: md5, auth_options: "clientcert=0" } 25 | - { type: hostssl, database: "testdb2", address: "10.0.0.0/24", user: "testuser", auth_method: md5, auth_options: "clientcert=0" } 26 | #- { type: local, database: all, user: postgres, auth_method: peer } 27 | # add your entries for your manually created databases here 28 | 29 | postgresql_cluster_allowed_clients: 30 | - name: "vagrant host" 31 | ip: "10.0.0.1" 32 | 33 | postgresql_cluster_users: 34 | - name: "testuser" 35 | password: "password1" 36 | encrypted: "true" 37 | state: present 38 | role_attr_flags: "SUPERUSER" 39 | - name: "testuser2" 40 | password: "password2" 41 | encrypted: "true" 42 | state: present 43 | role_attr_flags: "SUPERUSER" 44 | 45 | postgresql_cluster_databases: 46 | - name: "testdb" 47 | lc_collate: 'en_US.UTF-8' 48 | lc_ctype: 'en_US.UTF-8' 49 | encoding: 'UTF-8' 50 | owner: "testuser" 51 | state: present 52 | schemas: 53 | - name: "private" 54 | - name: "testdb2" 55 | lc_collate: 'en_US.UTF-8' 56 | lc_ctype: 'en_US.UTF-8' 57 | encoding: 'UTF-8' 58 | owner: "testuser" 59 | state: present 60 | schemas: 61 | - name: "private" 62 | - name: "testdb3" 63 | lc_collate: 'en_US.UTF-8' 64 | lc_ctype: 'en_US.UTF-8' 65 | encoding: 'UTF-8' 66 | owner: "testuser2" 67 | state: present 68 | schemas: 69 | - name: "private" 70 | -------------------------------------------------------------------------------- /test/simple/inventory_13/group_vars/all/test_all.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 10 | --- 11 | postgresql_cluster_name: "main_cluster" 12 | postgresql_cluster_port: "5433" 13 | 14 | postgresql_cluster_version: "13" 15 | 16 | postgresql_cluster_group: "1" 17 | postgresql_cluster_formation: "default" # FIXME: should be work with non default 18 | 19 | postgresql_cluster_pg_bouncer_client_tls_sslmode: "require" 20 | postgresql_cluster_pg_bouncer_root_password: "pgbouncerpassword" 21 | postgresql_cluster_pg_bouncer_include: True 22 | 23 | postgresql_cluster_hba_entries: 24 | - { type: hostssl, database: "testdb", address: "10.0.0.0/24", user: "testuser", auth_method: md5, auth_options: "clientcert=0" } 25 | - { type: hostssl, database: "testdb2", address: "10.0.0.0/24", user: "testuser", auth_method: md5, auth_options: "clientcert=0" } 26 | #- { type: local, database: all, user: postgres, auth_method: peer } 27 | # add your entries for your manually created databases here 28 | 29 | postgresql_cluster_allowed_clients: 30 | - name: "vagrant host" 31 | ip: "10.0.0.1" 32 | 33 | postgresql_cluster_users: 34 | - name: "testuser" 35 | password: "password1" 36 | encrypted: "true" 37 | state: present 38 | role_attr_flags: "SUPERUSER" 39 | - name: "testuser2" 40 | password: "password2" 41 | encrypted: "true" 42 | state: present 43 | role_attr_flags: "SUPERUSER" 44 | 45 | postgresql_cluster_databases: 46 | - name: "testdb" 47 | lc_collate: 'en_US.UTF-8' 48 | lc_ctype: 'en_US.UTF-8' 49 | encoding: 'UTF-8' 50 | owner: "testuser" 51 | state: present 52 | schemas: 53 | - name: "private" 54 | - name: "testdb2" 55 | lc_collate: 'en_US.UTF-8' 56 | lc_ctype: 'en_US.UTF-8' 57 | encoding: 'UTF-8' 58 | owner: "testuser" 59 | state: present 60 | schemas: 61 | - name: "private" 62 | - name: "testdb3" 63 | lc_collate: 'en_US.UTF-8' 64 | lc_ctype: 'en_US.UTF-8' 65 | encoding: 'UTF-8' 66 | owner: "testuser2" 67 | state: present 68 | schemas: 69 | - name: "private" 70 | -------------------------------------------------------------------------------- /test/upgrade_1.3_to_1.4/inventory/group_vars/all/test_all.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | --- 11 | postgresql_cluster_name: "main_cluster" 12 | postgresql_cluster_port: "5433" 13 | 14 | postgresql_cluster_version: "12" 15 | postgresql_pg_auto_failover_version: "1.3" 16 | 17 | postgresql_cluster_group: "1" 18 | postgresql_cluster_formation: "default" # FIXME: should be work with non default 19 | 20 | postgresql_cluster_pg_bouncer_client_tls_sslmode: "require" 21 | postgresql_cluster_pg_bouncer_root_password: "pgbouncerpassword" 22 | 23 | postgresql_cluster_hba_entries: 24 | - { type: hostssl, database: "testdb", address: "10.0.0.0/24", user: "testuser", auth_method: md5, auth_options: "clientcert=0" } 25 | - { type: hostssl, database: "testdb2", address: "10.0.0.0/24", user: "testuser", auth_method: md5, auth_options: "clientcert=0" } 26 | #- { type: local, database: all, user: postgres, auth_method: peer } 27 | # add your entries for your manually created databases here 28 | 29 | postgresql_cluster_allowed_clients: 30 | - name: "vagrant host" 31 | ip: "10.0.0.1" 32 | 33 | postgresql_cluster_users: 34 | - name: "testuser" 35 | password: "password1" 36 | encrypted: "true" 37 | state: present 38 | role_attr_flags: "SUPERUSER" 39 | - name: "testuser2" 40 | password: "password2" 41 | encrypted: "true" 42 | state: present 43 | role_attr_flags: "SUPERUSER" 44 | 45 | postgresql_cluster_databases: 46 | - name: "testdb" 47 | lc_collate: 'en_US.UTF-8' 48 | lc_ctype: 'en_US.UTF-8' 49 | encoding: 'UTF-8' 50 | owner: "testuser" 51 | state: present 52 | schemas: 53 | - name: "private" 54 | - name: "testdb2" 55 | lc_collate: 'en_US.UTF-8' 56 | lc_ctype: 'en_US.UTF-8' 57 | encoding: 'UTF-8' 58 | owner: "testuser" 59 | state: present 60 | schemas: 61 | - name: "private" 62 | - name: "testdb3" 63 | lc_collate: 'en_US.UTF-8' 64 | lc_ctype: 'en_US.UTF-8' 65 | encoding: 'UTF-8' 66 | owner: "testuser2" 67 | state: present 68 | schemas: 69 | - name: "private" 70 | -------------------------------------------------------------------------------- /test/simple/inventory_12_1.5/group_vars/all/test_all.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | --- 11 | postgresql_cluster_name: "main_cluster" 12 | postgresql_cluster_port: "5433" 13 | 14 | postgresql_cluster_version: "12" 15 | 16 | postgresql_cluster_group: "1" 17 | postgresql_cluster_formation: "default" # FIXME: should be work with non default 18 | postgresql_pg_auto_failover_version: "1.5" 19 | 20 | postgresql_cluster_pg_bouncer_client_tls_sslmode: "require" 21 | postgresql_cluster_pg_bouncer_root_password: "pgbouncerpassword" 22 | postgresql_cluster_pg_bouncer_include: True 23 | 24 | postgresql_cluster_hba_entries: 25 | - { type: hostssl, database: "testdb", address: "10.0.0.0/24", user: "testuser", auth_method: md5, auth_options: "clientcert=0" } 26 | - { type: hostssl, database: "testdb2", address: "10.0.0.0/24", user: "testuser", auth_method: md5, auth_options: "clientcert=0" } 27 | #- { type: local, database: all, user: postgres, auth_method: peer } 28 | # add your entries for your manually created databases here 29 | 30 | postgresql_cluster_allowed_clients: 31 | - name: "vagrant host" 32 | ip: "10.0.0.1" 33 | 34 | postgresql_cluster_users: 35 | - name: "testuser" 36 | password: "password1" 37 | encrypted: "true" 38 | state: present 39 | role_attr_flags: "SUPERUSER" 40 | - name: "testuser2" 41 | password: "password2" 42 | encrypted: "true" 43 | state: present 44 | role_attr_flags: "SUPERUSER" 45 | 46 | postgresql_cluster_databases: 47 | - name: "testdb" 48 | lc_collate: 'en_US.UTF-8' 49 | lc_ctype: 'en_US.UTF-8' 50 | encoding: 'UTF-8' 51 | owner: "testuser" 52 | state: present 53 | schemas: 54 | - name: "private" 55 | - name: "testdb2" 56 | lc_collate: 'en_US.UTF-8' 57 | lc_ctype: 'en_US.UTF-8' 58 | encoding: 'UTF-8' 59 | owner: "testuser" 60 | state: present 61 | schemas: 62 | - name: "private" 63 | - name: "testdb3" 64 | lc_collate: 'en_US.UTF-8' 65 | lc_ctype: 'en_US.UTF-8' 66 | encoding: 'UTF-8' 67 | owner: "testuser2" 68 | state: present 69 | schemas: 70 | - name: "private" 71 | -------------------------------------------------------------------------------- /roles/essential-software-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | --- 11 | # careful: this is executed on the management node as well, so this should 12 | # really only include _essential_ software 13 | - name: Install essential packages 14 | apt: 15 | name: "{{ packages }}" 16 | update_cache: "{{ apt_update_cache | default('True') }}" 17 | vars: 18 | packages: 19 | - vim 20 | - ufw 21 | - sudo 22 | - python-setuptools 23 | - python-passlib 24 | - acl 25 | 26 | - name: Install essential packages 27 | apt: 28 | name: "{{ packages }}" 29 | update_cache: "{{ apt_update_cache | default('True') }}" 30 | vars: 31 | packages: 32 | - python-pip 33 | when: ansible_distribution != 'Ubuntu' or ansible_distribution_version is version('20.04', '<') 34 | 35 | - name: Install essential packages 36 | apt: 37 | name: "{{ packages }}" 38 | update_cache: "{{ apt_update_cache | default('True') }}" 39 | vars: 40 | packages: 41 | - python3-pip 42 | when: ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('20.04', '>=') 43 | 44 | - name: configure and enable ufw 45 | remote_user: root 46 | become: true 47 | block: 48 | - name: Allow ssh connections via ipv4 49 | ufw: 50 | rule: allow 51 | proto: tcp 52 | to: 0.0.0.0/0 53 | port: "22" 54 | 55 | - name: Allow ssh ipv6 connections via loopback 56 | ufw: 57 | rule: allow 58 | proto: tcp 59 | from: ::1 60 | to: ::1 61 | port: "22" 62 | 63 | - name: Allow ssh ipv6 connections to ipv6 floating ip if configured 64 | ufw: 65 | rule: allow 66 | proto: tcp 67 | to: "{{ floating_ipv6 }}" 68 | port: "22" 69 | when: floating_ipv6 is defined 70 | 71 | - name: Enable ufw 72 | ufw: 73 | state: enabled 74 | 75 | - name: molly guard 76 | include_tasks: subtasks/molly-guard.yml 77 | 78 | - set_fact: 79 | setup_disable_ipv6_val: "{{ setup_disable_ipv6 | default('False') }}" 80 | 81 | - name: Disable ipv6 82 | raw: echo 'Acquire::ForceIPv4 "true";' | sudo tee /etc/apt/apt.conf.d/99force-ipv4 83 | when: setup_disable_ipv6_val|bool 84 | -------------------------------------------------------------------------------- /test/simple/inventory_14_1.6/group_vars/all/test_all.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
10 | --- 11 | postgresql_cluster_name: "main_cluster" 12 | postgresql_cluster_port: "5433" 13 | 14 | postgresql_cluster_version: "14" 15 | postgresql_pg_auto_failover_version: "1.6" 16 | 17 | postgresql_cluster_group: "1" 18 | postgresql_cluster_formation: "default" # FIXME: should be work with non default 19 | 20 | postgresql_cluster_pg_bouncer_client_tls_sslmode: "require" 21 | postgresql_cluster_pg_bouncer_root_password: "pgbouncerpassword" 22 | postgresql_cluster_pg_bouncer_include: True 23 | 24 | postgresql_cluster_hba_entries: 25 | # just for testing, dont use this in production! 26 | - { type: host, database: "testdb", address: "10.0.0.0/24", user: "testuser", auth_method: md5 } 27 | - { type: host, database: "testdb2", address: "10.0.0.0/24", user: "testuser", auth_method: md5 } 28 | #- { type: local, database: all, user: postgres, auth_method: peer } 29 | # add your entries for your manually created databases here 30 | 31 | postgresql_monitor_hba_entries: 32 | # just for testing, dont use this in production! 33 | - { type: host, database: "pg_auto_failover", address: "10.0.0.0/24", user: "healthuser", auth_method: md5 } 34 | 35 | postgresql_monitor_users: 36 | - name: "healthuser" 37 | password: "healthuser" 38 | encrypted: "true" 39 | state: present 40 | role_attr_flags: "SUPERUSER" 41 | 42 | postgresql_cluster_allowed_clients: 43 | - name: "vagrant host" 44 | ip: "10.0.0.1" 45 | 46 | 47 | postgresql_cluster_users: 48 | - name: "testuser" 49 | password: "password1" 50 | encrypted: "true" 51 | state: present 52 | role_attr_flags: "SUPERUSER" 53 | - name: "testuser2" 54 | password: "password2" 55 | encrypted: "true" 56 | state: present 57 | role_attr_flags: "SUPERUSER" 58 | 59 | postgresql_cluster_databases: 60 | - name: "testdb" 61 | lc_collate: 'en_US.UTF-8' 62 | lc_ctype: 'en_US.UTF-8' 63 | encoding: 'UTF-8' 64 | owner: "testuser" 65 | state: present 66 | schemas: 67 | - name: "private" 68 | - name: "testdb2" 69 | lc_collate: 'en_US.UTF-8' 70 | lc_ctype: 'en_US.UTF-8' 71 | encoding: 'UTF-8' 72 | owner: "testuser" 73 | state: present 74 | schemas: 75 | - name: "private" 76 | - name: "testdb3" 77 | lc_collate: 'en_US.UTF-8' 78 | lc_ctype: 'en_US.UTF-8' 79 | encoding: 'UTF-8' 80 | owner: "testuser2" 81 | state: present 82 | schemas: 83 | - name: "private" 84 | -------------------------------------------------------------------------------- /test/simple/inventory_13_1.6/group_vars/all/test_all.yml: -------------------------------------------------------------------------------- 1 | # pg_auto_failover_ansible 2 | # 3 | # Copyright (C) 2020-2022 NeuroForge GmbH & Co.KG 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | # 7 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | # 9 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 10 | --- 11 | postgresql_cluster_name: "main_cluster" 12 | postgresql_cluster_port: "5433" 13 | 14 | postgresql_cluster_version: "13" 15 | postgresql_pg_auto_failover_version: "1.6" 16 | 17 | postgresql_cluster_group: "1" 18 | postgresql_cluster_formation: "default" # FIXME: should be work with non default 19 | 20 | postgresql_cluster_pg_bouncer_client_tls_sslmode: "require" 21 | postgresql_cluster_pg_bouncer_root_password: "pgbouncerpassword" 22 | postgresql_cluster_pg_bouncer_include: True 23 | 24 | postgresql_cluster_hba_entries: 25 | # just for testing, dont use this in production! 26 | - { type: host, database: "testdb", address: "10.0.0.0/24", user: "testuser", auth_method: md5 } 27 | - { type: host, database: "testdb2", address: "10.0.0.0/24", user: "testuser", auth_method: md5 } 28 | #- { type: local, database: all, user: postgres, auth_method: peer } 29 | # add your entries for your manually created databases here 30 | 31 | postgresql_monitor_hba_entries: 32 | # just for testing, dont use this in production! 33 | - { type: host, database: "pg_auto_failover", address: "10.0.0.0/24", user: "healthuser", auth_method: md5 } 34 | 35 | postgresql_monitor_users: 36 | - name: "healthuser" 37 | password: "healthuser" 38 | encrypted: "true" 39 | state: present 40 | role_attr_flags: "SUPERUSER" 41 | 42 | postgresql_cluster_allowed_clients: 43 | - name: "vagrant host" 44 | ip: "10.0.0.1" 45 | 46 | postgresql_monitor_allowed_clients: 47 | - name: "vagrant host (but on monitor)" 48 | ip: "10.0.0.1" 49 | 50 | 51 | postgresql_cluster_users: 52 | - name: "testuser" 53 | password: "password1" 54 | encrypted: "true" 55 | state: present 56 | role_attr_flags: "SUPERUSER" 57 | - name: "testuser2" 58 | password: "password2" 59 | encrypted: "true" 60 | state: present 61 | role_attr_flags: "SUPERUSER" 62 | 63 | postgresql_cluster_databases: 64 | - name: "testdb" 65 | lc_collate: 'en_US.UTF-8' 66 | lc_ctype: 'en_US.UTF-8' 67 | encoding: 'UTF-8' 68 | owner: "testuser" 69 | state: present 70 | schemas: 71 | - name: "private" 72 | - name: "testdb2" 73 | lc_collate: 'en_US.UTF-8' 74 | lc_ctype: 'en_US.UTF-8' 75 | encoding: 'UTF-8' 76 | owner: "testuser" 77 | state: present 78 | schemas: 79 | - name: "private" 80 | - name: "testdb3" 81 | lc_collate: 'en_US.UTF-8' 82 | lc_ctype: 'en_US.UTF-8' 83 | encoding: 'UTF-8' 84 | owner: "testuser2" 85 | state: present 86 | schemas: 87 | - name: "private" 88 | -------------------------------------------------------------------------------- /roles/user-setup/files/.bashrc: -------------------------------------------------------------------------------- 1 | # ~/.bashrc: executed by bash(1) for non-login shells. 2 | # see /usr/share/doc/bash/examples/startup-files (in the package bash-doc) 3 | # for examples 4 | 5 | # If not running interactively, don't do anything 6 | case $- in 7 | *i*) ;; 8 | *) return;; 9 | esac 10 | 11 | #HISTCONTROL=ignoreboth <--DISABLE!!! 12 | HISTFILESIZE=999999999 13 | HISTSIZE=999999999 14 | HISTTIMEFORMAT="[%F %T] " 15 | HISTFILE=~/.bash_eternal_history 16 | 17 | # append to the history file, don't overwrite it 18 | shopt -s histappend 19 | 20 | # check the window size after each command and, if necessary, 21 | # update the values of LINES and COLUMNS. 
22 | shopt -s checkwinsize 23 | 24 | # If set, the pattern "**" used in a pathname expansion context will 25 | # match all files and zero or more directories and subdirectories. 26 | #shopt -s globstar 27 | 28 | # make less more friendly for non-text input files, see lesspipe(1) 29 | [ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)" 30 | 31 | # set variable identifying the chroot you work in (used in the prompt below) 32 | if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then 33 | debian_chroot=$(cat /etc/debian_chroot) 34 | fi 35 | 36 | # set a fancy prompt (non-color, unless we know we "want" color) 37 | case "$TERM" in 38 | xterm-color|*-256color) color_prompt=yes;; 39 | esac 40 | 41 | # uncomment for a colored prompt, if the terminal has the capability; turned 42 | # off by default to not distract the user: the focus in a terminal window 43 | # should be on the output of commands, not on the prompt 44 | force_color_prompt=yes 45 | 46 | if [ -n "$force_color_prompt" ]; then 47 | if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then 48 | # We have color support; assume it's compliant with Ecma-48 49 | # (ISO/IEC-6429). (Lack of such support is extremely rare, and such 50 | # a case would tend to support setf rather than setaf.) 51 | color_prompt=yes 52 | else 53 | color_prompt= 54 | fi 55 | fi 56 | 57 | if [ "$color_prompt" = yes ]; then 58 | #PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' 59 | PS1='${debian_chroot:+($debian_chroot)}\[\033[01;31m\]\u\[\033[01;33m\]@\[\033[01;36m\]\h \[\033[01;33m\]\w \[\033[01;35m\]\$ \[\033[00m\]' 60 | else 61 | PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ ' 62 | fi 63 | unset color_prompt force_color_prompt 64 | 65 | # If this is an xterm set the title to user@host:dir 66 | case "$TERM" in 67 | xterm*|rxvt*) 68 | PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1" 69 | ;; 70 | *) 71 | ;; 72 | esac 73 | 74 | # enable color support of ls and also add handy aliases 75 | if [ -x /usr/bin/dircolors ]; then 76 | test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)" 77 | alias ls='ls --color=auto' 78 | #alias dir='dir --color=auto' 79 | #alias vdir='vdir --color=auto' 80 | 81 | alias grep='grep --color=auto' 82 | alias fgrep='fgrep --color=auto' 83 | alias egrep='egrep --color=auto' 84 | fi 85 | 86 | # colored GCC warnings and errors 87 | #export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01' 88 | 89 | # some more ls aliases 90 | alias ll='ls -alF' 91 | alias la='ls -A' 92 | alias l='ls -CF' 93 | 94 | # Add an "alert" alias for long running commands. Use like so: 95 | # sleep 10; alert 96 | alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"' 97 | 98 | # Alias definitions. 99 | # You may want to put all your additions into a separate file like 100 | # ~/.bash_aliases, instead of adding them here directly. 101 | # See /usr/share/doc/bash-doc/examples in the bash-doc package. 102 | 103 | if [ -f ~/.bash_aliases ]; then 104 | . ~/.bash_aliases 105 | fi 106 | 107 | # enable programmable completion features (you don't need to enable 108 | # this, if it's already enabled in /etc/bash.bashrc and /etc/profile 109 | # sources /etc/bash.bashrc). 110 | if ! shopt -oq posix; then 111 | if [ -f /usr/share/bash-completion/bash_completion ]; then 112 | . 
/usr/share/bash-completion/bash_completion 113 | elif [ -f /etc/bash_completion ]; then 114 | . /etc/bash_completion 115 | fi 116 | fi 117 | 118 | alias rm='rm -i' -------------------------------------------------------------------------------- /roles/postgres-pre-setup/files/postgres12.asc: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | 3 | mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja 4 | UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V 5 | G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4 6 | bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi 7 | c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC 8 | IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh 9 | hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U 10 | A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3 11 | RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj 12 | Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2 13 | AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB 14 | tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQJOBBMBCAA4AhsDBQsJCAcD 15 | BRUKCQgLBRYCAwEAAh4BAheAFiEEuXsK/KoaR/BE8kSgf8x9RqzMTPgFAlhtCD8A 16 | CgkQf8x9RqzMTPgECxAAk8uL+dwveTv6eH21tIHcltt8U3Ofajdo+D/ayO53LiYO 17 | xi27kdHD0zvFMUWXLGxQtWyeqqDRvDagfWglHucIcaLxoxNwL8+e+9hVFIEskQAY 18 | kVToBCKMXTQDLarz8/J030Pmcv3ihbwB+jhnykMuyyNmht4kq0CNgnlcMCdVz0d3 19 | z/09puryIHJrD+A8y3TD4RM74snQuwc9u5bsckvRtRJKbP3GX5JaFZAqUyZNRJRJ 20 | Tn2OQRBhCpxhlZ2afkAPFIq2aVnEt/Ie6tmeRCzsW3lOxEH2K7MQSfSu/kRz7ELf 21 | Cz3NJHj7rMzC+76Rhsas60t9CjmvMuGONEpctijDWONLCuch3Pdj6XpC+MVxpgBy 22 | 2VUdkunb48YhXNW0jgFGM/BFRj+dMQOUbY8PjJjsmVV0joDruWATQG/M4C7O8iU0 23 | B7o6yVv4m8LDEN9CiR6r7H17m4xZseT3f+0QpMe7iQjz6XxTUFRQxXqzmNnloA1T 24 | 7VjwPqIIzkj/u0V8nICG/ktLzp1OsCFatWXh7LbU+hwYl6gsFH/mFDqVxJ3+DKQi 25 | vyf1NatzEwl62foVjGUSpvh3ymtmtUQ4JUkNDsXiRBWczaiGSuzD9Qi0ONdkAX3b 26 | ewqmN4TfE+XIpCPxxHXwGq9Rv1IFjOdCX0iG436GHyTLC1tTUIKF5xV4Y0+cXIOI 27 | RgQQEQgABgUCTpdI7gAKCRDFr3dKWFELWqaPAKD1TtT5c3sZz92Fj97KYmqbNQZP 28 | +ACfSC6+hfvlj4GxmUjp1aepoVTo3weJAhwEEAEIAAYFAk6XSQsACgkQTFprqxLS 29 | p64F8Q//cCcutwrH50UoRFejg0EIZav6LUKejC6kpLeubbEtuaIH3r2zMblPGc4i 30 | +eMQKo/PqyQrceRXeNNlqO6/exHozYi2meudxa6IudhwJIOn1MQykJbNMSC2sGUp 31 | 1W5M1N5EYgt4hy+qhlfnD66LR4G+9t5FscTJSy84SdiOuqgCOpQmPkVRm1HX5X1+ 32 | dmnzMOCk5LHHQuiacV0qeGO7JcBCVEIDr+uhU1H2u5GPFNHm5u15n25tOxVivb94 33 | xg6NDjouECBH7cCVuW79YcExH/0X3/9G45rjdHlKPH1OIUJiiX47OTxdG3dAbB4Q 34 | fnViRJhjehFscFvYWSqXo3pgWqUsEvv9qJac2ZEMSz9x2mj0ekWxuM6/hGWxJdB+ 35 | +985rIelPmc7VRAXOjIxWknrXnPCZAMlPlDLu6+vZ5BhFX0Be3y38f7GNCxFkJzl 36 | hWZ4Cj3WojMj+0DaC1eKTj3rJ7OJlt9S9xnO7OOPEUTGyzgNIDAyCiu8F4huLPaT 37 | ape6RupxOMHZeoCVlqx3ouWctelB2oNXcxxiQ/8y+21aHfD4n/CiIFwDvIQjl7dg 38 | mT3u5Lr6yxuosR3QJx1P6rP5ZrDTP9khT30t+HZCbvs5Pq+v/9m6XDmi+NlU7Zuh 39 | Ehy97tL3uBDgoL4b/5BpFL5U9nruPlQzGq1P9jj40dxAaDAX/WKJAj0EEwEIACcC 40 | GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlB5KywFCQPDFt8ACgkQf8x9RqzM 41 | TPhuCQ//QAjRSAOCQ02qmUAikT+mTB6baOAakkYq6uHbEO7qPZkv4E/M+HPIJ4wd 42 | nBNeSQjfvdNcZBA/x0hr5EMcBneKKPDj4hJ0panOIRQmNSTThQw9OU351gm3YQct 43 | AMPRUu1fTJAL/AuZUQf9ESmhyVtWNlH/56HBfYjE4iVeaRkkNLJyX3vkWdJSMwC/ 44 | LO3Lw/0M3R8itDsm74F8w4xOdSQ52nSRFRh7PunFtREl+QzQ3EA/WB4AIj3VohIG 45 | kWDfPFCzV3cyZQiEnjAe9gG5pHsXHUWQsDFZ12t784JgkGyO5wT26pzTiuApWM3k 46 | /9V+o3HJSgH5hn7wuTi3TelEFwP1fNzI5iUUtZdtxbFOfWMnZAypEhaLmXNkg4zD 47 | 
kH44r0ss9fR0DAgUav1a25UnbOn4PgIEQy2fgHKHwRpCy20d6oCSlmgyWsR40EPP 48 | YvtGq49A2aK6ibXmdvvFT+Ts8Z+q2SkFpoYFX20mR2nsF0fbt1lfH65P64dukxeR 49 | GteWIeNakDD40bAAOH8+OaoTGVBJ2ACJfLVNM53PEoftavAwUYMrR910qvwYfd/4 50 | 6rh46g1Frr9SFMKYE9uvIJIgDsQB3QBp71houU4H55M5GD8XURYs+bfiQpJG1p7e 51 | B8e5jZx1SagNWc4XwL2FzQ9svrkbg1Y+359buUiP7T6QXX2zY++JAj0EEwEIACcC 52 | GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlEqbZUFCQg2wEEACgkQf8x9RqzM 53 | TPhFMQ//WxAfKMdpSIA9oIC/yPD/dJpY/+DyouOljpE6MucMy/ArBECjFTBwi/j9 54 | NYM4ynAk34IkhuNexc1i9/05f5RM6+riLCLgAOsADDbHD4miZzoSxiVr6GQ3YXMb 55 | OGld9kV9Sy6mGNjcUov7iFcf5Hy5w3AjPfKuR9zXswyfzIU1YXObiiZT38l55pp/ 56 | BSgvGVQsvbNjsff5CbEKXS7q3xW+WzN0QWF6YsfNVhFjRGj8hKtHvwKcA02wwjLe 57 | LXVTm6915ZUKhZXUFc0vM4Pj4EgNswH8Ojw9AJaKWJIZmLyW+aP+wpu6YwVCicxB 58 | Y59CzBO2pPJDfKFQzUtrErk9irXeuCCLesDyirxJhv8o0JAvmnMAKOLhNFUrSQ2m 59 | +3EnF7zhfz70gHW+EG8X8mL/EN3/dUM09j6TVrjtw43RLxBzwMDeariFF9yC+5bL 60 | tnGgxjsB9Ik6GV5v34/NEEGf1qBiAzFmDVFRZlrNDkq6gmpvGnA5hUWNr+y0i01L 61 | jGyaLSWHYjgw2UEQOqcUtTFK9MNzbZze4mVaHMEz9/aMfX25R6qbiNqCChveIm8m 62 | Yr5Ds2zdZx+G5bAKdzX7nx2IUAxFQJEE94VLSp3npAaTWv3sHr7dR8tSyUJ9poDw 63 | gw4W9BIcnAM7zvFYbLF5FNggg/26njHCCN70sHt8zGxKQINMc6SJAj0EEwEIACcC 64 | GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlLpFRkFCQ6EJy0ACgkQf8x9RqzM 65 | TPjOZA//Zp0e25pcvle7cLc0YuFr9pBv2JIkLzPm83nkcwKmxaWayUIG4Sv6pH6h 66 | m8+S/CHQij/yFCX+o3ngMw2J9HBUvafZ4bnbI0RGJ70GsAwraQ0VlkIfg7GUw3Tz 67 | voGYO42rZTru9S0K/6nFP6D1HUu+U+AsJONLeb6oypQgInfXQExPZyliUnHdipei 68 | 4WR1YFW6sjSkZT/5C3J1wkAvPl5lvOVthI9Zs6bZlJLZwusKxU0UM4Btgu1Sf3nn 69 | JcHmzisixwS9PMHE+AgPWIGSec/N27a0KmTTvImV6K6nEjXJey0K2+EYJuIBsYUN 70 | orOGBwDFIhfRk9qGlpgt0KRyguV+AP5qvgry95IrYtrOuE7307SidEbSnvO5ezNe 71 | mE7gT9Z1tM7IMPfmoKph4BfpNoH7aXiQh1Wo+ChdP92hZUtQrY2Nm13cmkxYjQ4Z 72 | gMWfYMC+DA/GooSgZM5i6hYqyyfAuUD9kwRN6BqTbuAUAp+hCWYeN4D88sLYpFh3 73 | paDYNKJ+Gf7Yyi6gThcV956RUFDH3ys5Dk0vDL9NiWwdebWfRFbzoRM3dyGP889a 74 | OyLzS3mh6nHzZrNGhW73kslSQek8tjKrB+56hXOnb4HaElTZGDvD5wmrrhN94kby 75 | Gtz3cydIohvNO9d90+29h0eGEDYti7j7maHkBKUAwlcPvMg5m3Y= 76 | =DA1T 77 | -----END PGP PUBLIC KEY BLOCK----- 78 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Proudly made by [NeuroForge](https://neuroforge.de/) in Bayreuth, Germany. 2 | 3 | # pg_auto_failover_ansible 4 | Ansible Playbook(s) to create a cluster of PostgreSQL nodes running in a Ubuntu cluster with [pg_auto_failover](https://github.com/citusdata/pg_auto_failover). 5 | 6 | ## Current features 7 | 8 | - Automatically setup a secure cluster with 1 monitor node and 2 database nodes with synchronous replication 9 | - Support for Ubuntu 18.04/Ubuntu 20.04 10 | - Support for PostgreSQL 12/13/14 11 | - Support for rudimentary pgbouncer setup (not enabled by default) 12 | 13 | ## Not features 14 | 15 | - The installation script for pg_auto_failover `roles/postgres-cluster-pg-auto-failover-install/files/deb.sh` was manually downloaded from the [citus homepage](https://install.citusdata.com/community/deb.sh). To update it, you have to replace it manually. 16 | - Removal of old nodes has to be done manually. New data nodes can be added to the cluster by just running all playbooks again. 17 | - NO automatic configuration of backups - Replication does NOT replace regular backups! 
18 | - By default this playbook leaves any default clusters that might be present on your machines after installation untouched and uses a different port than the default (`5433` - this is configurable)
19 | - HAProxy setup is out of scope of this project as it is common to run HAProxy on the application servers themselves. A sample config can be found in the [wiki](https://github.com/neuroforgede/pg_auto_failover_ansible/wiki/HAProxy)
20 |
21 | ## Configuration
22 |
23 | While we do not have a full tutorial on all configuration options, a good place to start is the [test directory](./test). There you can also find Vagrant-based tests to take
24 | the playbooks for a spin.
25 |
26 | ## Used Software
27 |
28 | The following roles are based on [geerlingguy's work](https://github.com/geerlingguy/ansible-role-postgresql) but heavily modified to work with pg_auto_failover:
29 |
30 | - postgres-cluster-configure
31 | - postgres-cluster-hba-config
32 | - postgres-cluster-install
33 |
34 | ## Upgrading pg_auto_failover (NOT postgres itself)
35 |
36 | When upgrading, please follow this procedure:
37 |
38 | When upgrading from a version <= 1.3:
39 |
40 | 1. run the `postgres_cluster_upgrade_pre_1.4.yml` playbook with `--extra-vars='{"postgresql_new_pg_auto_failover_version": "1.4"}'`, setting the value to the version you want to upgrade to
41 | 2. update the inventory to reflect the new pg_auto_failover version
42 |
43 | When upgrading from a version >= 1.4:
44 |
45 | - NOTE: starting with 1.4, pg_auto_failover uses a new upgrade scheme; we will provide a playbook to facilitate upgrades once a version > 1.4 is out. The procedure will be similar.
46 |
47 | ## Testing
48 |
49 | Requirements:
50 |
51 | - vagrant
52 | - virtualbox
53 | - psql
54 |
55 | To try out the functionality and/or run the tests for this project, go to one of the test directories under `test/` and run `bash setup_for_test.sh`, then `bash run.sh`.
56 |
57 | ## Usage
58 |
59 | ### Setup Inventory
60 |
61 | 1. Go to `inventories/pg_auto_failover/hosts.yml` and adjust the IPs of your hosts according to your requirements.
62 | 2. Put your root ssh key under `ssh_keys/root_rsa`.
63 |
64 | ### Setup certificates
65 |
66 | #### 0. fix all certificate information strings
67 |
68 | Update `certs/pg_auto_failover/gen_root_cert.sh` and `certs/pg_auto_failover/gen_server_cert.sh` to contain all the correct properties for your certificates. (Fix all occurrences of `your-company`).
69 |
70 | #### 1. create new root certificate
71 |
72 | Update the file `certs/pg_auto_failover/recreate_root_cert.sh` according to your requirements (a password-protected certificate is not used here, but it is a good idea to set a separate password for each deployment you use this in).
73 |
74 |
75 | Then, run:
76 |
77 | ```bash
78 | cd certs/pg_auto_failover
79 | bash recreate_root_cert.sh
80 | ```
81 |
82 | #### 2. generate server certificates
83 |
84 | Update the file `certs/pg_auto_failover/recreate_server_certs.sh`. Paths should correspond to the hostnames in your Ansible inventory.
85 |
86 | Then, run:
87 |
88 | ```bash
89 | cd certs/pg_auto_failover
90 | bash recreate_server_certs.sh
91 | ```
92 |
93 | This will copy all certificates into the correct place for them to be picked up by Ansible.
94 |
95 |
96 | ### Run base setup
97 |
98 | 0. remove the existing ssh keys of your machines from your known hosts (only run this if you have rebuilt your machines and the ssh key has changed):
99 |
100 | For each relevant node run:
101 |
102 | ```bash
103 | ssh-keygen -f ~/.ssh/known_hosts -R <host-ip>
104 | ```
105 |
106 | 1. put the ssh keys of your machines into your known hosts:
107 |
108 | For each node run:
109 |
110 | ```bash
111 | ssh-keyscan <host-ip> >> ~/.ssh/known_hosts
112 | ```
113 |
114 | 2. Run the actual base_setup playbook:
115 |
116 | ```bash
117 | ansible-playbook -i inventories/pg_auto_failover/hosts.yml base_setup.yml
118 | ```
119 | ### Run the actual cluster setup
120 |
121 | ```bash
122 | ansible-playbook -i inventories/pg_auto_failover/hosts.yml postgres_cluster_servers.yml
123 | ```
124 |
125 | ### Verify setup
126 |
127 | ```bash
128 | ssh -i ssh_keys/root_rsa root@<host-ip>
129 | sudo su postgres
130 | XDG_RUNTIME_DIR="" pg_autoctl show state --pgdata /var/lib/postgresql/12/main_cluster/
131 | ```
132 |
133 | If everything is okay, you should be greeted with the following:
134 |
135 | ![](success.png)
136 |
137 | ### Next steps
138 |
139 | - Set up the pg_hba config in your inventory configuration to allow connections to all the databases you will create/need
140 | - Create all databases
141 |
142 | ### Connection to the cluster
143 |
144 | Just ask the monitor node :)
145 |
146 | ```bash
147 | XDG_RUNTIME_DIR="" pg_autoctl show uri --pgdata /var/lib/postgresql/12/main_cluster/
148 | ```
149 |
150 | ![](connection_strings.png)
151 |
--------------------------------------------------------------------------------
/roles/postgres-cluster-pg-auto-failover-upgrade-pre-1.4/tasks/upgrade.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include_role:
3 | name: postgres-cluster-hba-config
4 |
5 | - name: "check state on each data node"
6 | become_user: "{{ postgresql_cluster_user }}"
7 | shell: >
8 | PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_autoctl show state \
9 | --pgdata "{{ postgresql_cluster_data_dir }}" \
10 | --json
11 | when: not (postgresql_cluster_is_monitor | default('False') | bool)
12 | register: pg_autoctl_state
13 |
14 | - name: Populate dict for state information
15 | set_fact:
16 | pg_autoctl_cluster_state: "{{ pg_autoctl_cluster_state|default({}) | combine( {item.nodename: item} ) }}"
17 | with_items:
18 | - "{{ pg_autoctl_state.stdout | from_json }}"
19 | when: not (postgresql_cluster_is_monitor | default('False') | bool)
20 |
21 | - name: "fail if any node has current_group_state != assigned_group_state"
22 | fail:
23 | msg: "assigned group state differs from current group state"
24 | when: not (postgresql_cluster_is_monitor | default('False') | bool)
25 | and (pg_autoctl_cluster_state[host_ip].assigned_group_state != pg_autoctl_cluster_state[host_ip].current_group_state)
26 |
27 | - name: "enable maintenance mode on secondaries"
28 | become_user: "{{ postgresql_cluster_user }}"
29 | shell: >
30 | PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_autoctl enable maintenance \
31 | --pgdata "{{ postgresql_cluster_data_dir }}"
32 | when: not (postgresql_cluster_is_monitor | default('False') | bool)
33 | and (pg_autoctl_cluster_state[host_ip].assigned_group_state == 'secondary')
34 |
35 | - name: "copy deb.sh to /tmp/pg_auto_failover_deb.sh"
36 | copy:
37 | src: "deb.sh"
38 | dest: "/tmp/pg_auto_failover_deb.sh"
39 | owner: root
40 | group: root
41 | mode: 0700
42 |
43 | - name: "run /tmp/pg_auto_failover_deb.sh"
44 | command: "bash /tmp/pg_auto_failover_deb.sh"
45 |
46 | - name: "uninstall postgresql-{{ postgresql_cluster_version }}-auto-failover-{{ postgresql_pg_auto_failover_version }}"
47 | apt:
48 | name: "postgresql-{{ postgresql_cluster_version }}-auto-failover-{{ postgresql_pg_auto_failover_version }}"
49 | state: absent
50 | update_cache:
"{{ apt_update_cache | default('True') }}" 51 | 52 | - name: "uninstall pg-auto-failover-cli-{{ postgresql_pg_auto_failover_version }}" 53 | apt: 54 | name: "pg-auto-failover-cli-{{ postgresql_pg_auto_failover_version }}" 55 | state: absent 56 | update_cache: "{{ apt_update_cache | default('True') }}" 57 | 58 | - name: "install postgresql-{{ postgresql_cluster_version }}-auto-failover-{{ postgresql_new_pg_auto_failover_version }}" 59 | apt: 60 | name: "postgresql-{{ postgresql_cluster_version }}-auto-failover-{{ postgresql_new_pg_auto_failover_version }}" 61 | state: present 62 | update_cache: "{{ apt_update_cache | default('True') }}" 63 | 64 | - name: "install pg-auto-failover-cli-{{ postgresql_new_pg_auto_failover_version }}" 65 | apt: 66 | name: "pg-auto-failover-cli-{{ postgresql_new_pg_auto_failover_version }}" 67 | state: present 68 | update_cache: "{{ apt_update_cache | default('True') }}" 69 | 70 | - name: "restart monitor pg_auto_failover service" 71 | service: 72 | name: "{{ postgresql_cluster_daemon }}" 73 | state: "{{ postgresql_cluster_restarted_state }}" 74 | when: (postgresql_cluster_is_monitor | default('False') | bool) 75 | 76 | - name: "restart monitor pg_auto_failover service once more" 77 | service: 78 | name: "{{ postgresql_cluster_daemon }}" 79 | state: "{{ postgresql_cluster_restarted_state }}" 80 | when: (postgresql_cluster_is_monitor | default('False') | bool) 81 | 82 | - name: "wait until the pg_auto_failover postgres extension has been upgraded on the master node" 83 | become: true 84 | async: "{{ postgresql_cluster_monitor_upgrade_timeout | default(300) | int }}" 85 | # poll every 5 seconds to see if we are finished 86 | poll: 5 87 | shell: | 88 | counter=1 89 | 90 | echo "restarting monitor..." 91 | echo "checking pgautofailover extension again..." 92 | current_version=$(su postgres -c "psql -p {{ postgresql_cluster_port }} -t -X -A -c \"select extversion from pg_extension where extname='pgautofailover';\" -d pg_auto_failover") 93 | echo "current_version: $current_version" 94 | 95 | until [ "$current_version" == "{{ postgresql_new_pg_auto_failover_version }}" ] 96 | do 97 | ((counter++)) 98 | sleep 1 99 | 100 | echo "restarting monitor..." 101 | echo "checking pgautofailover extension again..." 102 | current_version=$(su postgres -c "psql -p {{ postgresql_cluster_port }} -t -X -A -c \"select extversion from pg_extension where extname='pgautofailover';\" -d pg_auto_failover") 103 | echo "current_version: $current_version" 104 | done 105 | 106 | echo "pgautofailover was found to be at version {{ postgresql_new_pg_auto_failover_version }} after ${counter} tries." 
107 | args: 108 | executable: /bin/bash 109 | when: (postgresql_cluster_is_monitor | default('False') | bool) 110 | register: pg_autoctl_monitor_upgrade 111 | 112 | - debug: 113 | var: pg_autoctl_monitor_upgrade.stdout 114 | when: (postgresql_cluster_is_monitor | default('False') | bool) 115 | 116 | - name: "restart monitor pg_auto_failover service after upgrade was successful" 117 | service: 118 | name: "{{ postgresql_cluster_daemon }}" 119 | state: "{{ postgresql_cluster_restarted_state }}" 120 | when: (postgresql_cluster_is_monitor | default('False') | bool) 121 | 122 | - name: "restart pg_auto_failover service on all data nodes" 123 | service: 124 | name: "{{ postgresql_cluster_daemon }}" 125 | state: "{{ postgresql_cluster_restarted_state }}" 126 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 127 | 128 | - name: "check state on each data node" 129 | become_user: "{{ postgresql_cluster_user }}" 130 | shell: > 131 | PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_autoctl show state \ 132 | --pgdata "{{ postgresql_cluster_data_dir }}" \ 133 | --json 134 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 135 | register: pg_autoctl_state 136 | 137 | - debug: 138 | var: pg_autoctl_state 139 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 140 | 141 | - name: "ensure pg_auto_failover service is started on all nodes" 142 | service: 143 | name: "{{ postgresql_cluster_daemon }}" 144 | state: "{{ postgresql_cluster_restarted_state }}" 145 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 146 | 147 | - name: "disable maintenance mode on secondaries" 148 | become_user: "{{ postgresql_cluster_user }}" 149 | shell: > 150 | PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_autoctl disable maintenance \ 151 | --pgdata "{{ postgresql_cluster_data_dir }}" 152 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 153 | and (pg_autoctl_cluster_state[host_ip].assigned_group_state == 'secondary') 154 | 155 | -------------------------------------------------------------------------------- /roles/postgres-cluster-pg-auto-failover-upgrade-pre-1.4/files/deb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | unknown_os () 4 | { 5 | echo "Unfortunately, your operating system distribution and version are not supported by this script." 6 | echo 7 | echo "Please contact us via https://www.citusdata.com/about/contact_us with any issues." 8 | exit 1 9 | } 10 | 11 | arch_check () 12 | { 13 | if [ "$(uname -m)" != 'x86_64' ]; then 14 | echo "Unfortunately, the Citus repository does not contain packages for non-x86_64 architectures." 15 | echo 16 | echo "Please contact us via https://www.citusdata.com/about/contact_us with any issues." 17 | exit 1 18 | fi 19 | } 20 | 21 | gpg_check () 22 | { 23 | echo "Checking for gpg..." 24 | if command -v gpg > /dev/null; then 25 | echo "Detected gpg..." 26 | else 27 | echo -n "Installing gnupg for GPG verification... " 28 | apt-get install -y gnupg &> /dev/null 29 | if [ "$?" -ne "0" ]; then 30 | echo "Unable to install GPG! Your base system has a problem; please check your default OS's package repositories because GPG should work." 31 | echo "Repository installation aborted." 32 | exit 1 33 | else 34 | echo "done." 35 | fi 36 | fi 37 | } 38 | 39 | curl_check () 40 | { 41 | echo "Checking for curl..." 42 | if command -v curl > /dev/null; then 43 | echo "Detected curl..." 44 | else 45 | echo -n "Installing curl... 
" 46 | apt-get install -y --no-install-recommends curl &> /dev/null 47 | if [ "$?" -ne "0" ]; then 48 | echo "Unable to install curl! Your base system has a problem; please check your default OS's package repositories because curl should work." 49 | echo "Repository installation aborted." 50 | exit 1 51 | else 52 | echo "done." 53 | fi 54 | fi 55 | } 56 | 57 | pgdg_check () 58 | { 59 | echo "Checking for postgresql-12..." 60 | if apt-cache show postgresql-12 &> /dev/null; then 61 | echo "Detected postgresql-12..." 62 | else 63 | pgdg_list='/etc/apt/sources.list.d/pgdg.list' 64 | pgdg_source_path="deb http://apt.postgresql.org/pub/repos/apt/ ${codename}-pgdg main" 65 | pgdg_key_url='https://www.postgresql.org/media/keys/ACCC4CF8.asc' 66 | 67 | if [ -e $pgdg_list ]; then 68 | echo "Unable to install PostgreSQL Apt Repository" 69 | echo 70 | echo "The file ${pgdg_list} already exists." 71 | echo 72 | echo "Contact us via https://www.citusdata.com/about/contact_us with information about your system for help." 73 | exit 1 74 | fi 75 | 76 | echo -n "Installing ${pgdg_list}... " 77 | 78 | # create an apt config file for the PGDG repository 79 | echo "${pgdg_source_path}" > $pgdg_list 80 | echo "done." 81 | 82 | echo -n "Installing ca-certificates... " 83 | apt-get install -y --no-install-recommends ca-certificates &> /dev/null 84 | echo "done." 85 | 86 | echo -n "Importing PostgreSQL gpg key... " 87 | # import the gpg key 88 | curl -L "${pgdg_key_url}" 2> /dev/null | apt-key add - &>/dev/null 89 | echo "done." 90 | fi 91 | } 92 | 93 | install_debian_keyring () 94 | { 95 | if [ "${os}" = "debian" ]; then 96 | echo "Installing debian-archive-keyring which is needed for installing " 97 | echo "apt-transport-https on many Debian systems." 98 | apt-get install -y debian-archive-keyring &> /dev/null 99 | fi 100 | } 101 | 102 | 103 | detect_os () 104 | { 105 | if [[ ( -z "${os}" ) && ( -z "${dist}" ) ]]; then 106 | # some systems dont have lsb-release yet have the lsb_release binary and 107 | # vice-versa 108 | if [ -e /etc/lsb-release ]; then 109 | . /etc/lsb-release 110 | 111 | if [ "${ID}" = "raspbian" ]; then 112 | os=${ID} 113 | dist=`cut --delimiter='.' -f1 /etc/debian_version` 114 | else 115 | os=${DISTRIB_ID} 116 | dist=${DISTRIB_CODENAME} 117 | 118 | if [ -z "$dist" ]; then 119 | dist=${DISTRIB_RELEASE} 120 | fi 121 | fi 122 | 123 | elif [ `which lsb_release 2>/dev/null` ]; then 124 | dist=`lsb_release -c | cut -f2` 125 | os=`lsb_release -i | cut -f2 | awk '{ print tolower($1) }'` 126 | 127 | elif [ -e /etc/debian_version ]; then 128 | # some Debians have jessie/sid in their /etc/debian_version 129 | # while others have '6.0.7' 130 | os=`cat /etc/issue | head -1 | awk '{ print tolower($1) }'` 131 | if grep -q '/' /etc/debian_version; then 132 | dist=`cut --delimiter='/' -f1 /etc/debian_version` 133 | else 134 | dist=`cut --delimiter='.' -f1 /etc/debian_version` 135 | fi 136 | 137 | else 138 | unknown_os 139 | fi 140 | fi 141 | 142 | if [ -z "$dist" ]; then 143 | unknown_os 144 | fi 145 | 146 | # remove whitespace from OS and dist name 147 | os="${os// /}" 148 | dist="${dist// /}" 149 | 150 | echo "Detected operating system as $os/$dist." 
151 | } 152 | 153 | detect_codename () 154 | { 155 | if [ "${os}" = "debian" ]; then 156 | case "${dist}" in 157 | 7) 158 | codename='wheezy' 159 | ;; 160 | 8) 161 | codename='jessie' 162 | ;; 163 | 9) 164 | codename='stretch' 165 | ;; 166 | 10) 167 | codename='buster' 168 | ;; 169 | wheezy) 170 | codename="${dist}" 171 | ;; 172 | jessie) 173 | codename="${dist}" 174 | ;; 175 | stretch) 176 | codename="${dist}" 177 | ;; 178 | buster) 179 | codename="${dist}" 180 | ;; 181 | *) 182 | unknown_os 183 | ;; 184 | esac 185 | else 186 | codename=${dist} 187 | fi 188 | } 189 | 190 | main () 191 | { 192 | detect_os 193 | detect_codename 194 | 195 | # Need to first run apt-get update so that apt-transport-https can be 196 | # installed 197 | echo -n "Running apt-get update... " 198 | apt-get update &> /dev/null 199 | echo "done." 200 | 201 | arch_check 202 | curl_check 203 | gpg_check 204 | pgdg_check 205 | 206 | # Install the debian-archive-keyring package on debian systems so that 207 | # apt-transport-https can be installed next 208 | install_debian_keyring 209 | 210 | echo -n "Installing apt-transport-https... " 211 | apt-get install -y apt-transport-https &> /dev/null 212 | echo "done." 213 | 214 | 215 | gpg_key_url="https://repos.citusdata.com/community/gpgkey" 216 | apt_config_url="https://repos.citusdata.com/community/config_file.list?os=${os}&dist=${dist}&source=script" 217 | 218 | apt_source_path="/etc/apt/sources.list.d/citusdata_community.list" 219 | 220 | echo -n "Installing $apt_source_path... " 221 | 222 | # create an apt config file for this repository 223 | curl -sSf "${apt_config_url}" > $apt_source_path 224 | curl_exit_code=$? 225 | 226 | if [ "$curl_exit_code" = "22" ]; then 227 | echo 228 | echo 229 | echo -n "Unable to download repo config from: " 230 | echo "${apt_config_url}" 231 | echo 232 | echo "This usually happens if your operating system is not supported by " 233 | echo "Citus Data, or this script's OS detection failed." 234 | echo 235 | echo "If you are running a supported OS, please contact us via https://www.citusdata.com/about/contact_us and report this." 236 | [ -e $apt_source_path ] && rm $apt_source_path 237 | exit 1 238 | elif [ "$curl_exit_code" = "35" -o "$curl_exit_code" = "60" ]; then 239 | echo "curl is unable to connect to citusdata.com over TLS when running: " 240 | echo " curl ${apt_config_url}" 241 | echo "This is usually due to one of two things:" 242 | echo 243 | echo " 1.) Missing CA root certificates (make sure the ca-certificates package is installed)" 244 | echo " 2.) An old version of libssl. Try upgrading libssl on your system to a more recent version" 245 | echo 246 | echo "Contact us via https://www.citusdata.com/about/contact_us with information about your system for help." 247 | [ -e $apt_source_path ] && rm $apt_source_path 248 | exit 1 249 | elif [ "$curl_exit_code" -gt "0" ]; then 250 | echo 251 | echo "Unable to run: " 252 | echo " curl ${apt_config_url}" 253 | echo 254 | echo "Double check your curl installation and try again." 255 | [ -e $apt_source_path ] && rm $apt_source_path 256 | exit 1 257 | else 258 | sed -i 's#packagecloud.io/citusdata#repos.citusdata.com#g' "${apt_source_path}" 259 | echo "done." 260 | fi 261 | 262 | echo -n "Importing Citus Data gpg key... " 263 | # import the gpg key 264 | curl -L "${gpg_key_url}" 2> /dev/null | apt-key add - &>/dev/null 265 | echo "done." 266 | 267 | echo -n "Running apt-get update... " 268 | # update apt on this system 269 | apt-get update &> /dev/null 270 | echo "done." 
271 | 272 | echo 273 | echo "The repository is set up! You can now install packages." 274 | } 275 | 276 | main 277 | -------------------------------------------------------------------------------- /roles/postgres-cluster-pg-auto-failover-install/files/deb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | unknown_os () 4 | { 5 | echo "Unfortunately, your operating system distribution and version are not supported by this script." 6 | echo 7 | echo "Please contact us via https://www.citusdata.com/about/contact_us with any issues." 8 | exit 1 9 | } 10 | 11 | arch_check () 12 | { 13 | if [ "$(uname -m)" != 'x86_64' ]; then 14 | echo "Unfortunately, the Citus repository does not contain packages for non-x86_64 architectures." 15 | echo 16 | echo "Please contact us via https://www.citusdata.com/about/contact_us with any issues." 17 | exit 1 18 | fi 19 | } 20 | 21 | gpg_check () 22 | { 23 | echo "Checking for gpg..." 24 | if command -v gpg > /dev/null; then 25 | echo "Detected gpg..." 26 | else 27 | echo -n "Installing gnupg for GPG verification... " 28 | apt-get install -y gnupg &> /dev/null 29 | if [ "$?" -ne "0" ]; then 30 | echo "Unable to install GPG! Your base system has a problem; please check your default OS's package repositories because GPG should work." 31 | echo "Repository installation aborted." 32 | exit 1 33 | else 34 | echo "done." 35 | fi 36 | fi 37 | } 38 | 39 | curl_check () 40 | { 41 | echo "Checking for curl..." 42 | if command -v curl > /dev/null; then 43 | echo "Detected curl..." 44 | else 45 | echo -n "Installing curl... " 46 | apt-get install -y --no-install-recommends curl &> /dev/null 47 | if [ "$?" -ne "0" ]; then 48 | echo "Unable to install curl! Your base system has a problem; please check your default OS's package repositories because curl should work." 49 | echo "Repository installation aborted." 50 | exit 1 51 | else 52 | echo "done." 53 | fi 54 | fi 55 | } 56 | 57 | pgdg_check () 58 | { 59 | echo "Checking for postgresql-14..." 60 | if apt-cache show postgresql-14 &> /dev/null; then 61 | echo "Detected postgresql-14..." 62 | else 63 | pgdg_list='/etc/apt/sources.list.d/pgdg.list' 64 | pgdg_source_path="deb http://apt.postgresql.org/pub/repos/apt/ ${codename}-pgdg main" 65 | pgdg_key_url='https://www.postgresql.org/media/keys/ACCC4CF8.asc' 66 | 67 | if [ -e $pgdg_list ]; then 68 | echo "Overriding ${pgdg_list}" 69 | fi 70 | 71 | echo -n "Installing ${pgdg_list}... " 72 | 73 | # create an apt config file for the PGDG repository 74 | echo "${pgdg_source_path}" > $pgdg_list 75 | echo "done." 76 | 77 | echo -n "Installing ca-certificates... " 78 | apt-get install -y --no-install-recommends ca-certificates &> /dev/null 79 | echo "done." 80 | 81 | echo -n "Importing PostgreSQL gpg key... " 82 | # import the gpg key 83 | curl -L "${pgdg_key_url}" 2> /dev/null | apt-key add - &>/dev/null 84 | echo "done." 85 | 86 | echo -n "Running apt-get update... " 87 | apt-get update &> /dev/null 88 | echo "done." 89 | 90 | if ! apt-cache show postgresql-14 &> /dev/null; then 91 | echo "PGDG repositories don't have postgresql-14 package for your operating system" 92 | echo "Cannot install Citus, exiting." 93 | exit 1 94 | fi 95 | fi 96 | } 97 | 98 | install_debian_keyring () 99 | { 100 | if [ "${os}" = "debian" ]; then 101 | echo "Installing debian-archive-keyring which is needed for installing " 102 | echo "apt-transport-https on many Debian systems." 
103 | apt-get install -y debian-archive-keyring &> /dev/null 104 | fi 105 | } 106 | 107 | 108 | detect_os () 109 | { 110 | if [[ ( -z "${os}" ) && ( -z "${dist}" ) ]]; then 111 | # some systems dont have lsb-release yet have the lsb_release binary and 112 | # vice-versa 113 | if [ -e /etc/lsb-release ]; then 114 | . /etc/lsb-release 115 | 116 | if [ "${ID}" = "raspbian" ]; then 117 | os=${ID} 118 | dist=`cut --delimiter='.' -f1 /etc/debian_version` 119 | else 120 | os=${DISTRIB_ID} 121 | dist=${DISTRIB_CODENAME} 122 | 123 | if [ -z "$dist" ]; then 124 | dist=${DISTRIB_RELEASE} 125 | fi 126 | fi 127 | 128 | elif [ `which lsb_release 2>/dev/null` ]; then 129 | dist=`lsb_release -c | cut -f2` 130 | os=`lsb_release -i | cut -f2 | awk '{ print tolower($1) }'` 131 | 132 | elif [ -e /etc/debian_version ]; then 133 | # some Debians have jessie/sid in their /etc/debian_version 134 | # while others have '6.0.7' 135 | os=`cat /etc/issue | head -1 | awk '{ print tolower($1) }'` 136 | if grep -q '/' /etc/debian_version; then 137 | dist=`cut --delimiter='/' -f1 /etc/debian_version` 138 | else 139 | dist=`cut --delimiter='.' -f1 /etc/debian_version` 140 | fi 141 | 142 | else 143 | unknown_os 144 | fi 145 | fi 146 | 147 | if [ -z "$dist" ]; then 148 | unknown_os 149 | fi 150 | 151 | # remove whitespace from OS and dist name 152 | os="${os// /}" 153 | dist="${dist// /}" 154 | 155 | echo "Detected operating system as $os/$dist." 156 | } 157 | 158 | detect_codename () 159 | { 160 | if [ "${os}" = "debian" ]; then 161 | case "${dist}" in 162 | 7) 163 | codename='wheezy' 164 | ;; 165 | 8) 166 | codename='jessie' 167 | ;; 168 | 9) 169 | codename='stretch' 170 | ;; 171 | 10) 172 | codename='buster' 173 | ;; 174 | 11) 175 | codename='bullseye' 176 | ;; 177 | wheezy) 178 | codename="${dist}" 179 | ;; 180 | jessie) 181 | codename="${dist}" 182 | ;; 183 | stretch) 184 | codename="${dist}" 185 | ;; 186 | buster) 187 | codename="${dist}" 188 | ;; 189 | bullseye) 190 | codename="${dist}" 191 | ;; 192 | *) 193 | unknown_os 194 | ;; 195 | esac 196 | else 197 | codename=${dist} 198 | fi 199 | } 200 | 201 | main () 202 | { 203 | detect_os 204 | detect_codename 205 | 206 | # Need to first run apt-get update so that apt-transport-https can be 207 | # installed 208 | echo -n "Running apt-get update... " 209 | apt-get update &> /dev/null 210 | echo "done." 211 | 212 | arch_check 213 | curl_check 214 | gpg_check 215 | pgdg_check 216 | 217 | # Install the debian-archive-keyring package on debian systems so that 218 | # apt-transport-https can be installed next 219 | install_debian_keyring 220 | 221 | echo -n "Installing apt-transport-https... " 222 | apt-get install -y apt-transport-https &> /dev/null 223 | echo "done." 224 | 225 | 226 | gpg_key_url="https://repos.citusdata.com/community/gpgkey" 227 | apt_config_url="https://repos.citusdata.com/community/config_file.list?os=${os}&dist=${dist}&source=script" 228 | 229 | apt_source_path="/etc/apt/sources.list.d/citusdata_community.list" 230 | gpg_keyring_path="/usr/share/keyrings/citusdata_community-archive-keyring.gpg" 231 | 232 | echo -n "Installing $apt_source_path... " 233 | 234 | # create an apt config file for this repository 235 | curl -sSf "${apt_config_url}" > $apt_source_path 236 | curl_exit_code=$? 
237 | 238 | if [ "$curl_exit_code" = "22" ]; then 239 | echo 240 | echo 241 | echo -n "Unable to download repo config from: " 242 | echo "${apt_config_url}" 243 | echo 244 | echo "This usually happens if your operating system is not supported by " 245 | echo "Citus Data, or this script's OS detection failed." 246 | echo 247 | echo "If you are running a supported OS, please contact us via https://www.citusdata.com/about/contact_us and report this." 248 | [ -e $apt_source_path ] && rm $apt_source_path 249 | exit 1 250 | elif [ "$curl_exit_code" = "35" -o "$curl_exit_code" = "60" ]; then 251 | echo "curl is unable to connect to citusdata.com over TLS when running: " 252 | echo " curl ${apt_config_url}" 253 | echo "This is usually due to one of two things:" 254 | echo 255 | echo " 1.) Missing CA root certificates (make sure the ca-certificates package is installed)" 256 | echo " 2.) An old version of libssl. Try upgrading libssl on your system to a more recent version" 257 | echo 258 | echo "Contact us via https://www.citusdata.com/about/contact_us with information about your system for help." 259 | [ -e $apt_source_path ] && rm $apt_source_path 260 | exit 1 261 | elif [ "$curl_exit_code" -gt "0" ]; then 262 | echo 263 | echo "Unable to run: " 264 | echo " curl ${apt_config_url}" 265 | echo 266 | echo "Double check your curl installation and try again." 267 | [ -e $apt_source_path ] && rm $apt_source_path 268 | exit 1 269 | else 270 | sed -i 's#packagecloud.io/citusdata#repos.citusdata.com#g' "${apt_source_path}" 271 | echo "done." 272 | fi 273 | 274 | echo -n "Importing Citus Data Community gpg key... " 275 | # import the gpg key 276 | # below command decodes the ASCII armored gpg file (instead of binary file) 277 | # and adds the unarmored gpg key as keyring 278 | curl -fsSL "${gpg_key_url}" | gpg --dearmor > ${gpg_keyring_path} 279 | echo "done." 280 | 281 | echo -n "Running apt-get update... " 282 | # update apt on this system 283 | apt-get update &> /dev/null 284 | echo "done." 285 | 286 | echo 287 | echo "The repository is set up! You can now install packages." 288 | } 289 | 290 | main 291 | -------------------------------------------------------------------------------- /roles/postgres-cluster-hba-config/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "initialize computed_postgresql_cluster_pg_hba_entries" 3 | set_fact: 4 | computed_postgresql_cluster_pg_hba_entries: [] 5 | 6 | # TODO: this might be suboptimal if run on a secondary. we have not run into any issues so far though. 7 | - name: Ensure PostgreSQL pgautofailover_monitor user is present. 
8 | retries: 5 9 | delay: 3 10 | postgresql_user: 11 | name: "pgautofailover_monitor" 12 | login_host: "localhost" 13 | login_user: "{{ postgresql_cluster_user }}" 14 | login_unix_socket: "{{ postgresql_cluster_unix_socket_directories[0] }}" 15 | port: "{{ postgresql_cluster_port }}" 16 | state: "present" 17 | no_log: "{{ postgres_users_no_log }}" 18 | become: true 19 | register: pgautofailover_monitor_user_creation 20 | become_user: "{{ postgresql_cluster_user }}" 21 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 22 | until: pgautofailover_monitor_user_creation is not failed 23 | # See: https://github.com/ansible/ansible/issues/16048#issuecomment-229012509 24 | vars: 25 | ansible_ssh_pipelining: true 26 | 27 | 28 | - name: "add postgres user to pg_hba entries for each cluster member for postgres database" 29 | vars: 30 | new_entry: 31 | type: hostssl 32 | database: "postgres" 33 | address: "{{ hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host']) }}/32" 34 | user: "postgres" 35 | auth_method: cert 36 | auth_options: "map=postgres_node_remote" 37 | set_fact: 38 | computed_postgresql_cluster_pg_hba_entries: "{{ computed_postgresql_cluster_pg_hba_entries + [new_entry] }}" 39 | with_items: "{{ groups['postgres_cluster'] | default([]) }}" 40 | 41 | 42 | - name: "add postgres user to pg_hba entries for each cluster member for template1 database" 43 | vars: 44 | new_entry: 45 | type: hostssl 46 | database: "template1" 47 | address: "{{ hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host']) }}/32" 48 | user: "postgres" 49 | auth_method: cert 50 | auth_options: "map=postgres_node_remote" 51 | set_fact: 52 | computed_postgresql_cluster_pg_hba_entries: "{{ computed_postgresql_cluster_pg_hba_entries + [new_entry] }}" 53 | with_items: "{{ groups['postgres_cluster'] | default([]) }}" 54 | 55 | 56 | - name: "add autoctl_node user to pg_hba entries for each monitor node" 57 | vars: 58 | new_entry: 59 | type: local 60 | database: "template1" 61 | user: "autoctl_node" 62 | auth_method: peer 63 | auth_options: "map=autoctl_node_peer" 64 | set_fact: 65 | computed_postgresql_cluster_pg_hba_entries: "{{ computed_postgresql_cluster_pg_hba_entries + [new_entry] }}" 66 | when: postgresql_cluster_is_monitor | default('False') | bool 67 | 68 | 69 | - name: "add autoctl_node user to pg_hba entries for each monitor node" 70 | vars: 71 | new_entry: 72 | type: local 73 | database: "pg_auto_failover" 74 | user: "autoctl_node" 75 | auth_method: peer 76 | auth_options: "map=autoctl_node_peer" 77 | set_fact: 78 | computed_postgresql_cluster_pg_hba_entries: "{{ computed_postgresql_cluster_pg_hba_entries + [new_entry] }}" 79 | when: postgresql_cluster_is_monitor | default('False') | bool 80 | 81 | 82 | - name: "add autoctl_node user to pg_hba entries for each cluster member for autoctl_node database" 83 | vars: 84 | new_entry: 85 | type: hostssl 86 | database: "autoctl" 87 | address: "{{ hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host']) }}/32" 88 | user: "autoctl_node" 89 | auth_method: cert 90 | auth_options: "map=autoctl_node_remote" 91 | set_fact: 92 | computed_postgresql_cluster_pg_hba_entries: "{{ computed_postgresql_cluster_pg_hba_entries + [new_entry] }}" 93 | with_items: "{{ groups['postgres_cluster'] | default([]) }}" 94 | 95 | 96 | - name: "add autoctl_node user to pg_hba entries for each cluster member for template1 database" 97 | vars: 98 | new_entry: 99 | type: hostssl 100 | database: "template1" 101 | address: "{{ hostvars[item]['host_ip'] | 
default(hostvars[item]['ansible_host']) }}/32" 102 | user: "autoctl_node" 103 | auth_method: cert 104 | auth_options: "map=autoctl_node_remote" 105 | set_fact: 106 | computed_postgresql_cluster_pg_hba_entries: "{{ computed_postgresql_cluster_pg_hba_entries + [new_entry] }}" 107 | with_items: "{{ groups['postgres_cluster'] | default([]) }}" 108 | 109 | 110 | - name: "add autoctl_node user to pg_hba entries for each cluster member for pg_auto_failover database" 111 | vars: 112 | new_entry: 113 | type: hostssl 114 | database: "pg_auto_failover" 115 | address: "{{ hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host']) }}/32" 116 | user: "autoctl_node" 117 | auth_method: cert 118 | auth_options: "map=autoctl_node_remote" 119 | set_fact: 120 | computed_postgresql_cluster_pg_hba_entries: "{{ computed_postgresql_cluster_pg_hba_entries + [new_entry] }}" 121 | with_items: "{{ groups['postgres_cluster'] | default([]) }}" 122 | 123 | 124 | - name: "add pgautofailover_replicator user to pg_hba entries for each cluster member for pg_auto_failover database" 125 | vars: 126 | new_entry: 127 | type: hostssl 128 | database: "replication" 129 | address: "{{ hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host']) }}/32" 130 | user: "pgautofailover_replicator" 131 | auth_method: cert 132 | auth_options: "map=pgautofailover_replicator_remote" 133 | set_fact: 134 | computed_postgresql_cluster_pg_hba_entries: "{{ computed_postgresql_cluster_pg_hba_entries + [new_entry] }}" 135 | with_items: "{{ groups['postgres_cluster'] | default([]) }}" 136 | 137 | 138 | - name: "add pgautofailover_replicator user to pg_hba entries for each cluster member for pg_auto_failover database" 139 | vars: 140 | new_entry: 141 | type: hostssl 142 | database: "postgres" 143 | address: "{{ hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host']) }}/32" 144 | user: "pgautofailover_replicator" 145 | auth_method: cert 146 | auth_options: "map=pgautofailover_replicator_remote" 147 | set_fact: 148 | computed_postgresql_cluster_pg_hba_entries: "{{ computed_postgresql_cluster_pg_hba_entries + [new_entry] }}" 149 | with_items: "{{ groups['postgres_cluster'] | default([]) }}" 150 | 151 | - name: "add pgautofailover_monitor user to pg_hba entries for each cluster member for pg_auto_failover database" 152 | vars: 153 | new_entry: 154 | type: hostssl 155 | database: "postgres" 156 | address: "{{ hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host']) }}/32" 157 | user: "pgautofailover_monitor" 158 | auth_method: cert 159 | auth_options: "map=pgautofailover_monitor_remote" 160 | set_fact: 161 | computed_postgresql_cluster_pg_hba_entries: "{{ computed_postgresql_cluster_pg_hba_entries + [new_entry] }}" 162 | with_items: "{{ groups['postgres_cluster'] | default([]) }}" 163 | 164 | - name: "add default hba_entries to the end of computed_postgresql_cluster_pg_hba_entries" 165 | set_fact: 166 | computed_postgresql_cluster_pg_hba_entries: "{{ computed_postgresql_cluster_pg_hba_entries + (_postgresql_cluster_minimum_hba_entries | default([])) }}" 167 | 168 | - name: "add non system pg_hba entries for all data cluster members to pg_hba.conf" 169 | set_fact: 170 | computed_postgresql_cluster_pg_hba_entries: "{{ computed_postgresql_cluster_pg_hba_entries + (postgresql_cluster_hba_entries | default([])) }}" 171 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 172 | 173 | - name: "add non system pg_hba entries for monitor to the end of computed_postgresql_cluster_pg_hba_entries" 174 | set_fact: 
175 | computed_postgresql_cluster_pg_hba_entries: "{{ computed_postgresql_cluster_pg_hba_entries + (postgresql_monitor_hba_entries | default([])) }}" 176 | when: (postgresql_cluster_is_monitor | default('False') | bool) 177 | 178 | - name: "Configure pg_ident mapping for all cluster members and monitors" 179 | template: 180 | src: "pg_ident.conf.j2" 181 | dest: "{{ postgresql_cluster_config_path }}/pg_ident.conf" 182 | owner: "{{ postgresql_cluster_user }}" 183 | group: "{{ postgresql_cluster_group }}" 184 | mode: 0600 185 | backup: yes 186 | force: yes 187 | 188 | - name: "Configure host based authentication for all cluster members and monitors" 189 | template: 190 | src: "pg_hba.conf.j2" 191 | dest: "{{ postgresql_cluster_config_path }}/pg_hba.conf" 192 | owner: "{{ postgresql_cluster_user }}" 193 | group: "{{ postgresql_cluster_group }}" 194 | mode: 0600 195 | backup: yes 196 | force: yes 197 | when: computed_postgresql_cluster_pg_hba_entries | length > 0 198 | 199 | - name: "reload cluster config" 200 | become_user: postgres 201 | shell: > 202 | PATH="$PATH:{{ postgresql_cluster_bin_path }}" pg_ctl reload --pgdata {{ postgresql_cluster_data_dir }} 203 | 204 | # TODO: move this to a separate role? 205 | - name: "Allow incoming access to the postgres port {{ postgresql_cluster_port | default('5433') }} for all cluster members" 206 | ufw: 207 | rule: allow 208 | direction: in 209 | src: "{{ hostvars[item]['host_ip'] | default(hostvars[item]['ansible_host']) }}/32" 210 | to_port: "{{ postgresql_cluster_port | default('5433') }}" 211 | comment: "PostgreSQL {{ postgresql_cluster_name }} - host: {{ hostvars[item]['inventory_hostname'] }}" 212 | with_items: "{{ groups['postgres_cluster'] | default([]) }}" 213 | 214 | - name: "Allow incoming access to the postgres port {{ postgresql_cluster_port | default('5433') }} for all cluster clients" 215 | ufw: 216 | rule: allow 217 | direction: in 218 | src: "{{ item.ip }}/{{ item.subnet_mask | default('32') }}" 219 | to_port: "{{ postgresql_cluster_port | default('5433') }}" 220 | comment: "Client {{ postgresql_cluster_name }} - client: {{ item.name }}" 221 | with_items: "{{ postgresql_cluster_allowed_clients | default([]) }}" 222 | when: not (postgresql_cluster_is_monitor | default('False') | bool) 223 | 224 | - name: "Allow incoming access to the postgres port {{ postgresql_cluster_port | default('5433') }} for all cluster clients" 225 | ufw: 226 | rule: allow 227 | direction: in 228 | src: "{{ item.ip }}/{{ item.subnet_mask | default('32') }}" 229 | to_port: "{{ postgresql_cluster_port | default('5433') }}" 230 | comment: "Client {{ postgresql_cluster_name }} - client: {{ item.name }}" 231 | with_items: "{{ postgresql_monitor_allowed_clients | default([]) }}" 232 | when: (postgresql_cluster_is_monitor | default('False') | bool) 233 | -------------------------------------------------------------------------------- /roles/postgres-cluster-pgbouncer-client-setup/templates/pgbouncer.ini.j2: -------------------------------------------------------------------------------- 1 | ;;; Ansible Managed pgbouncer ini file 2 | 3 | ;;; 4 | ;;; PgBouncer configuration file 5 | ;;; 6 | 7 | ;; database name = connect string 8 | ;; 9 | ;; connect string params: 10 | ;; dbname= host= port= user= password= auth_user= 11 | ;; client_encoding= datestyle= timezone= 12 | ;; pool_size= reserve_pool= max_db_connections= 13 | ;; pool_mode= connect_query= application_name= 14 | [databases] 15 | 16 | {% for database in postgresql_cluster_pg_bouncer_databases | 
default(postgresql_cluster_databases | default([])) %} 17 | ;;; see http://www.pgbouncer.org/config.html#section-databases 18 | ;;; we only really need localhost settings here 19 | {{ database.name }} = host=localhost port={{ postgresql_cluster_port | default('5433') }} dbname={{ database.name }} 20 | 21 | {% endfor %} 22 | 23 | ;; foodb over Unix socket 24 | ;foodb = 25 | 26 | ;; redirect bardb to bazdb on localhost 27 | ;bardb = host=localhost dbname=bazdb 28 | 29 | ;; access to dest database will go with single user 30 | ;forcedb = host=localhost port=300 user=baz password=foo client_encoding=UNICODE datestyle=ISO connect_query='SELECT 1' 31 | 32 | ;; use custom pool sizes 33 | ;nondefaultdb = pool_size=50 reserve_pool=10 34 | 35 | ;; use auth_user with auth_query if user not present in auth_file 36 | ;; auth_user must exist in auth_file 37 | ; foodb = auth_user=bar 38 | 39 | ;; fallback connect string 40 | ;* = host=testserver 41 | 42 | ;; User-specific configuration 43 | [users] 44 | 45 | ;user1 = pool_mode=transaction max_user_connections=10 46 | 47 | ;; Configuration section 48 | [pgbouncer] 49 | 50 | ;;; 51 | ;;; Administrative settings 52 | ;;; 53 | 54 | logfile = {{ postgresql_cluster_pg_bouncer_logfile | default('/var/log/postgresql/pgbouncer.log') }} 55 | pidfile = {{ postgresql_cluster_pg_bouncer_pidfile | default('/var/run/postgresql/pgbouncer.pid') }} 56 | 57 | ;;; 58 | ;;; Where to wait for clients 59 | ;;; 60 | 61 | ;; IP address or * which means all IPs, we default to * and limit access via ufw 62 | listen_addr = {{ postgresql_cluster_pg_bouncer_listen_addr | default('*') }} 63 | listen_port = {{ postgresql_cluster_pg_bouncer_listen_port | default('6432') }} 64 | 65 | ;; Unix socket is also used for -R. 66 | ;; On Debian it should be /var/run/postgresql 67 | ;unix_socket_dir = /tmp 68 | ;unix_socket_mode = 0777 69 | ;unix_socket_group = 70 | unix_socket_dir = {{ postgresql_cluster_pg_bouncer_unix_socket_dir | default('/var/run/postgresql') }} 71 | 72 | ;;; 73 | ;;; TLS settings for accepting clients 74 | ;;; 75 | 76 | ;; disable, allow, require, verify-ca, verify-full 77 | ;; by default we are aggressive and require the client to have a valid ca 78 | client_tls_sslmode = {{ postgresql_cluster_pg_bouncer_client_tls_sslmode | default('verify-ca') }} 79 | 80 | ;; we simply use the settings from the cluster itself, we are running under the same user anyways 81 | ;; Path to file that contains trusted CA certs 82 | client_tls_ca_file = {{ postgresql_cluster_ssl_ca_file | default('/data/ansible/certs/postgres_server/rootCA.crt') }} 83 | 84 | ;; Private key and cert to present to clients. 85 | ;; Required for accepting TLS connections from clients. 
86 | client_tls_key_file = {{ postgresql_cluster_server_key | default('/data/ansible/certs/postgres_server/server.key') }} 87 | client_tls_cert_file = {{ postgresql_cluster_server_cert | default('/data/ansible/certs/postgres_server/server.crt') }} 88 | 89 | ;; fast, normal, secure, legacy, 90 | client_tls_ciphers = {{ postgresql_cluster_pg_bouncer_client_tls_ciphers | default('fast') }} 91 | 92 | ;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3 93 | client_tls_protocols = {{ postgresql_cluster_pg_bouncer_client_tls_protocols | default('secure') }} 94 | 95 | ;; none, auto, legacy 96 | ;client_tls_dheparams = auto 97 | 98 | ;; none, auto, 99 | ;client_tls_ecdhcurve = auto 100 | 101 | ;;; we dont need the following as this pgbouncer is running on the same host as the database and for these connections 102 | ;;; we do not require ssl certificates to be present 103 | 104 | ;;; 105 | ;;; TLS settings for connecting to backend databases 106 | ;;; 107 | 108 | ;; disable, allow, require, verify-ca, verify-full 109 | ;server_tls_sslmode = disable 110 | 111 | ;; Path to that contains trusted CA certs 112 | ;server_tls_ca_file = 113 | 114 | ;; Private key and cert to present to backend. 115 | ;; Needed only if backend server require client cert. 116 | ;server_tls_key_file = 117 | ;server_tls_cert_file = 118 | 119 | ;; all, secure, tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3 120 | ;server_tls_protocols = secure 121 | 122 | ;; fast, normal, secure, legacy, 123 | ;server_tls_ciphers = fast 124 | 125 | ;;; 126 | ;;; Authentication settings 127 | ;;; 128 | 129 | ;; any, trust, plain, md5, cert, hba, pam 130 | auth_type = {{ postgresql_cluster_pg_bouncer_auth_type | default('md5') }} 131 | auth_file = /etc/pgbouncer/userlist.txt 132 | 133 | ;; Path to HBA-style auth config 134 | ;auth_hba_file = 135 | 136 | ;; Query to use to fetch password from database. Result 137 | ;; must have 2 columns - username and password hash. 138 | ;auth_query = SELECT usename, passwd FROM pg_shadow WHERE usename=$1 139 | 140 | ;;; 141 | ;;; Users allowed into database 'pgbouncer' 142 | ;;; 143 | 144 | ;; comma-separated list of users who are allowed to change settings 145 | admin_users = {{ postgresql_cluster_pg_bouncer_admin_users | default('pgbounceradmin') }} 146 | 147 | ;; comma-separated list of users who are just allowed to use SHOW command 148 | stats_users = {{ postgresql_cluster_pg_bouncer_stats_users | default('pgbounceradmin') }} 149 | 150 | ;;; 151 | ;;; Pooler personality questions 152 | ;;; 153 | 154 | ;; When server connection is released back to pool: 155 | ;; session - after client disconnects (default) 156 | ;; transaction - after transaction finishes 157 | ;; statement - after statement finishes 158 | pool_mode = {{ postgresql_cluster_pg_bouncer_pool_mode | default('session') }} 159 | 160 | ;; Query for cleaning connection immediately after releasing from 161 | ;; client. No need to put ROLLBACK here, pgbouncer does not reuse 162 | ;; connections where transaction is left open. 163 | ;server_reset_query = DISCARD ALL 164 | 165 | ;; Whether server_reset_query should run in all pooling modes. If it 166 | ;; is off, server_reset_query is used only for session-pooling. 167 | ;server_reset_query_always = 0 168 | 169 | ;; Comma-separated list of parameters to ignore when given in startup 170 | ;; packet. Newer JDBC versions require the extra_float_digits here. 171 | ;ignore_startup_parameters = extra_float_digits 172 | 173 | ;; When taking idle server into use, this query is run first. 
174 | ;server_check_query = select 1 175 | 176 | ;; If server was used more recently that this many seconds ago, 177 | ; skip the check query. Value 0 may or may not run in immediately. 178 | ;server_check_delay = 30 179 | 180 | ;; Close servers in session pooling mode after a RECONNECT, RELOAD, 181 | ;; etc. when they are idle instead of at the end of the session. 182 | ;server_fast_close = 0 183 | 184 | ;; Use as application_name on server. 185 | ;application_name_add_host = 0 186 | 187 | ;; Period for updating aggregated stats. 188 | ;stats_period = 60 189 | 190 | ;;; 191 | ;;; Connection limits 192 | ;;; 193 | 194 | ;; Total number of clients that can connect 195 | max_client_conn = {{ postgresql_cluster_pg_bouncer_max_client_conn | default('100') }} 196 | 197 | ;; Default pool size. 20 is good number when transaction pooling 198 | ;; is in use, in session pooling it needs to be the number of 199 | ;; max clients you want to handle at any moment 200 | default_pool_size = {{ postgresql_cluster_pg_bouncer_default_pool_size | default('20') }} 201 | 202 | ;; Minimum number of server connections to keep in pool. 203 | ;min_pool_size = 0 204 | 205 | ; how many additional connection to allow in case of trouble 206 | ;reserve_pool_size = 0 207 | 208 | ;; If a clients needs to wait more than this many seconds, use reserve 209 | ;; pool. 210 | ;reserve_pool_timeout = 5 211 | 212 | ;; Maximum number of server connections for a database 213 | max_db_connections = {{ postgresql_cluster_pg_bouncer_max_db_connections | default('0') }} 214 | 215 | ;; Maximum number of server connections for a user 216 | max_user_connections = {{ postgresql_cluster_pg_bouncer_max_user_connections | default('0') }} 217 | 218 | ;; If off, then server connections are reused in LIFO manner 219 | ;server_round_robin = 0 220 | 221 | ;;; 222 | ;;; Logging 223 | ;;; 224 | 225 | ;; Syslog settings 226 | syslog = {{ postgresql_cluster_pg_bouncer_syslog | default('0') }} 227 | syslog_facility = {{ postgresql_cluster_pg_bouncer_syslog_facility | default('daemon') }} 228 | syslog_ident = {{ postgresql_cluster_pg_bouncer_syslog_ident | default('pgbouncer') }} 229 | 230 | ;; log if client connects or server connection is made 231 | ;log_connections = 1 232 | 233 | ;; log if and why connection was closed 234 | ;log_disconnections = 1 235 | 236 | ;; log error messages pooler sends to clients 237 | ;log_pooler_errors = 1 238 | 239 | ;; write aggregated stats into log 240 | ;log_stats = 1 241 | 242 | ;; Logging verbosity. Same as -v switch on command line. 243 | ;verbose = 0 244 | 245 | ;;; 246 | ;;; Timeouts 247 | ;;; 248 | 249 | ;; Close server connection if its been connected longer. 250 | ;server_lifetime = 3600 251 | 252 | ;; Close server connection if its not been used in this time. Allows 253 | ;; to clean unnecessary connections from pool after peak. 254 | ;server_idle_timeout = 600 255 | 256 | ;; Cancel connection attempt if server does not answer takes longer. 257 | ;server_connect_timeout = 15 258 | 259 | ;; If server login failed (server_connect_timeout or auth failure) 260 | ;; then wait this many second. 261 | ;server_login_retry = 15 262 | 263 | ;; Dangerous. Server connection is closed if query does not return in 264 | ;; this time. Should be used to survive network problems, _not_ as 265 | ;; statement_timeout. (default: 0) 266 | ;query_timeout = 0 267 | 268 | ;; Dangerous. Client connection is closed if the query is not 269 | ;; assigned to a server in this time. 
Should be used to limit the 270 | ;; number of queued queries in case of a database or network 271 | ;; failure. (default: 120) 272 | ;query_wait_timeout = 120 273 | 274 | ;; Dangerous. Client connection is closed if no activity in this 275 | ;; time. Should be used to survive network problems. (default: 0) 276 | ;client_idle_timeout = 0 277 | 278 | ;; Disconnect clients who have not managed to log in after connecting 279 | ;; in this many seconds. 280 | ;client_login_timeout = 60 281 | 282 | ;; Clean automatically created database entries (via "*") if they stay 283 | ;; unused in this many seconds. 284 | ; autodb_idle_timeout = 3600 285 | 286 | ;; Close connections which are in "IDLE in transaction" state longer 287 | ;; than this many seconds. 288 | ;idle_transaction_timeout = 0 289 | 290 | ;; How long SUSPEND/-R waits for buffer flush before closing 291 | ;; connection. 292 | ;suspend_timeout = 10 293 | 294 | ;;; 295 | ;;; Low-level tuning options 296 | ;;; 297 | 298 | ;; buffer for streaming packets 299 | ;pkt_buf = 4096 300 | 301 | ;; man 2 listen 302 | ;listen_backlog = 128 303 | 304 | ;; Max number pkt_buf to process in one event loop. 305 | ;sbuf_loopcnt = 5 306 | 307 | ;; Maximum PostgreSQL protocol packet size. 308 | ;max_packet_size = 2147483647 309 | 310 | ;; Set SO_REUSEPORT socket option 311 | ;so_reuseport = 0 312 | 313 | ;; networking options, for info: man 7 tcp 314 | 315 | ;; Linux: Notify program about new connection only if there is also 316 | ;; data received. (Seconds to wait.) On Linux the default is 45, on 317 | ;; other OS'es 0. 318 | ;tcp_defer_accept = 0 319 | 320 | ;; In-kernel buffer size (Linux default: 4096) 321 | ;tcp_socket_buffer = 0 322 | 323 | ;; whether tcp keepalive should be turned on (0/1) 324 | ;tcp_keepalive = 1 325 | 326 | ;; The following options are Linux-specific. They also require 327 | ;; tcp_keepalive=1. 328 | 329 | ;; Count of keepalive packets 330 | ;tcp_keepcnt = 0 331 | 332 | ;; How long the connection can be idle before sending keepalive 333 | ;; packets 334 | ;tcp_keepidle = 0 335 | 336 | ;; The time between individual keepalive probes 337 | ;tcp_keepintvl = 0 338 | 339 | ;; How long may transmitted data remain unacknowledged before TCP 340 | ;; connection is closed (in milliseconds) 341 | ;tcp_user_timeout = 0 342 | 343 | ;; DNS lookup caching time 344 | ;dns_max_ttl = 15 345 | 346 | ;; DNS zone SOA lookup period 347 | ;dns_zone_check_period = 0 348 | 349 | ;; DNS negative result caching time 350 | ;dns_nxdomain_ttl = 15 351 | 352 | ;; Custom resolv.conf file, to set custom DNS servers or other options 353 | ;; (default: empty = use OS settings) 354 | ;resolv_conf = /etc/pgbouncer/resolv.conf 355 | 356 | ;;; 357 | ;;; Random stuff 358 | ;;; 359 | 360 | ;; Hackish security feature. Helps against SQL injection: when PQexec 361 | ;; is disabled, multi-statement cannot be made. 362 | ;disable_pqexec = 0 363 | 364 | ;; Config file to use for next RELOAD/SIGHUP 365 | ;; By default contains config file from command line. 366 | ;conffile 367 | 368 | ;; Windows service name to register as. job_name is alias for 369 | ;; service_name, used by some Skytools scripts. 370 | ;service_name = pgbouncer 371 | ;job_name = pgbouncer 372 | 373 | ;; Read additional config from other file 374 | ;%include /etc/pgbouncer/pgbouncer-other.ini --------------------------------------------------------------------------------
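
Once pgbouncer has been rolled out with the template above, a quick way to sanity-check the client-side TLS setup is to connect through the bouncer port with `psql`. This is a minimal sketch, not part of the playbooks: it assumes the template's default listen port (`6432`), that pgbouncer runs on the data node as the template comments describe, the example `testdb`/`testuser` credentials from the test inventory, and placeholders for the node address and the path to the root CA that signed the server certificates.

```bash
# Minimal sketch: verify a client connection through pgbouncer over TLS.
# <data-node-ip> and <path-to-rootCA.crt> are placeholders; testdb/testuser
# are the example credentials from the test inventory - replace all of these
# for a real deployment.
PGSSLMODE=verify-ca \
PGSSLROOTCERT=<path-to-rootCA.crt> \
psql "host=<data-node-ip> port=6432 dbname=testdb user=testuser" -c 'SELECT 1;'
```

With the template's default `auth_type = md5`, psql will prompt for the user's password as listed in the generated `userlist.txt`.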