├── pki-dir
└── .gitkeep
├── tools
└── pg_cluster_backend
│   ├── psc
│   ├── README.md
│   ├── pgstatcommon
│   │   ├── __init__.py
│   │   └── __pycache__
│   │   │   ├── __init__.cpython-38.pyc
│   │   │   └── pg_stat_common.cpython-38.pyc
│   ├── pgstatlogger
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-38.pyc
│   │   │   └── pg_stat_logger.cpython-38.pyc
│   │   └── pg_stat_logger.py
│   ├── postgresql
│   │   ├── test
│   │   │   ├── __init__.py
│   │   │   ├── support.py
│   │   │   ├── testall.py
│   │   │   ├── test_bytea_codec.py
│   │   │   ├── test_exceptions.py
│   │   │   ├── perf_query_io.py
│   │   │   ├── perf_copy_io.py
│   │   │   ├── test_pgpassfile.py
│   │   │   ├── test_installation.py
│   │   │   ├── test_cluster.py
│   │   │   ├── cursor_integrity.py
│   │   │   ├── test_iri.py
│   │   │   ├── test_notifyman.py
│   │   │   └── test_alock.py
│   │   ├── documentation
│   │   │   ├── sphinx
│   │   │   │   ├── .gitignore
│   │   │   │   ├── changes-v1.2.rst
│   │   │   │   ├── build.sh
│   │   │   │   ├── changes-v1.1.rst
│   │   │   │   ├── admin.rst
│   │   │   │   ├── reference.rst
│   │   │   │   ├── index.rst
│   │   │   │   ├── changes-v1.0.rst
│   │   │   │   └── alock.rst
│   │   │   ├── __init__.py
│   │   │   ├── changes-v1.2.rst
│   │   │   ├── changes-v1.1.rst
│   │   │   ├── admin.rst
│   │   │   ├── reference.rst
│   │   │   ├── index.rst
│   │   │   ├── changes-v1.0.rst
│   │   │   └── alock.rst
│   │   ├── encodings
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-38.pyc
│   │   │   │   └── aliases.cpython-38.pyc
│   │   │   ├── aliases.py
│   │   │   └── bytea.py
│   │   ├── protocol
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── buffer.cpython-38.pyc
│   │   │   │   ├── client3.cpython-38.pyc
│   │   │   │   ├── pbuffer.cpython-38.pyc
│   │   │   │   ├── version.cpython-38.pyc
│   │   │   │   ├── xact3.cpython-38.pyc
│   │   │   │   ├── __init__.cpython-38.pyc
│   │   │   │   ├── element3.cpython-38.pyc
│   │   │   │   └── message_types.cpython-38.pyc
│   │   │   ├── buffer.py
│   │   │   ├── message_types.py
│   │   │   ├── version.py
│   │   │   └── pbuffer.py
│   │   ├── port
│   │   │   ├── _optimized
│   │   │   │   ├── README
│   │   │   │   └── module.c
│   │   │   ├── __pycache__
│   │   │   │   └── __init__.cpython-38.pyc
│   │   │   ├── __init__.py
│   │   │   └── signal1_msw.py
│   │   ├── resolved
│   │   │   ├── __init__.py
│   │   │   └── __pycache__
│   │   │   │   ├── crypt.cpython-38.pyc
│   │   │   │   ├── riparse.cpython-38.pyc
│   │   │   │   └── __init__.cpython-38.pyc
│   │   ├── python
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── socket.cpython-38.pyc
│   │   │   │   ├── string.cpython-38.pyc
│   │   │   │   ├── __init__.cpython-38.pyc
│   │   │   │   ├── decorlib.cpython-38.pyc
│   │   │   │   ├── element.cpython-38.pyc
│   │   │   │   ├── functools.cpython-38.pyc
│   │   │   │   ├── itertools.cpython-38.pyc
│   │   │   │   └── structlib.cpython-38.pyc
│   │   │   ├── string.py
│   │   │   ├── msw.py
│   │   │   ├── doc.py
│   │   │   ├── os.py
│   │   │   ├── itertools.py
│   │   │   ├── decorlib.py
│   │   │   ├── datetime.py
│   │   │   ├── functools.py
│   │   │   ├── structlib.py
│   │   │   └── socket.py
│   │   ├── release
│   │   │   └── __init__.py
│   │   ├── __pycache__
│   │   │   ├── api.cpython-38.pyc
│   │   │   ├── iri.cpython-38.pyc
│   │   │   ├── sys.cpython-38.pyc
│   │   │   ├── string.cpython-38.pyc
│   │   │   ├── __init__.cpython-38.pyc
│   │   │   ├── message.cpython-38.pyc
│   │   │   ├── project.cpython-38.pyc
│   │   │   ├── exceptions.cpython-38.pyc
│   │   │   ├── notifyman.cpython-38.pyc
│   │   │   ├── pgpassfile.cpython-38.pyc
│   │   │   ├── versionstring.cpython-38.pyc
│   │   │   └── clientparameters.cpython-38.pyc
│   │   ├── driver
│   │   │   ├── __pycache__
│   │   │   │   ├── pq3.cpython-38.pyc
│   │   │   │   └── __init__.cpython-38.pyc
│   │   │   └── __init__.py
│   │   ├── lib
│   │   │   └── __pycache__
│   │   │   │   └── __init__.cpython-38.pyc
│   │   ├── types
│   │   │   ├── io
│   │   │   │   ├── __pycache__
│   │   │   │   │   ├── lib.cpython-38.pyc
│   │   │   │   │   ├── __init__.cpython-38.pyc
│   │   │   │   │   ├── builtins.cpython-38.pyc
│   │   │   │   │   ├── pg_system.cpython-38.pyc
│   │   │   │   │   └── stdlib_decimal.cpython-38.pyc
│   │   │   │   ├── pg_system.py
│   │   │   │   ├── stdlib_uuid.py
│   │   │   │   ├── pg_bitwise.py
│   │   │   │   ├── stdlib_jsonb.py
│   │   │   │   ├── pg_network.py
│   │   │   │   ├── contrib_hstore.py
│   │   │   │   ├── pg_geometry.py
│   │   │   │   ├── builtins.py
│   │   │   │   ├── stdlib_xml_etree.py
│   │   │   │   └── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   └── __init__.cpython-38.pyc
│   │   │   ├── namedtuple.py
│   │   │   └── bitwise.py
│   │   ├── bin
│   │   │   ├── __init__.py
│   │   │   ├── pg_dotconf.py
│   │   │   └── pg_python.py
│   │   ├── project.py
│   │   ├── pgpassfile.py
│   │   ├── sys.py
│   │   ├── __init__.py
│   │   ├── versionstring.py
│   │   └── message.py
│   ├── __init__.py
│   └── __pycache__
│   │   └── __init__.cpython-38.pyc
│   ├── log
│   └── pg_cluster_backend.log
│   └── conf
│   └── pg_cluster_backend.conf
├── roles
├── certificates
│   └── vars
│   │   └── main.yml
├── prepare_nodes
│   ├── handlers
│   │   └── main.yml
│   └── tasks
│   │   ├── rhel.yml
│   │   ├── debian.yml
│   │   └── main.yml
├── haproxy
│   ├── handlers
│   │   └── main.yml
│   └── tasks
│   │   └── main.yml
├── pgbouncer
│   ├── handlers
│   │   └── main.yml
│   ├── sql
│   │   └── pgbouncer_prepare.sql
│   └── templates
│   │   ├── pgbouncer.service.j2
│   │   └── pgbouncer.ini.j2
├── keepalived
│   ├── handlers
│   │   └── main.yml
│   ├── meta
│   │   └── argument_specs.yml
│   ├── check_scripts
│   │   └── chk_patroni_leader.sh
│   └── templates
│   │   └── keepalived.conf.j2
├── postgres_tantordb
│   └── tasks
│   │   └── main.yml
├── patroni
│   ├── handlers
│   │   └── main.yml
│   └── templates
│   │   ├── patroni-watchdog.service.j2
│   │   ├── patroni-tantor.service.j2
│   │   ├── walg.json.j2
│   │   └── patroni_custom_bootstrap_script.sh.j2
└── etcd
│   ├── templates
│   ├── etcd-tantor.service.j2
│   └── etcd.conf.j2
│   ├── tasks
│   ├── pki.yml
│   ├── cluster_state.yml
│   ├── cluster_discovery.yml
│   ├── cluster_manage.yml
│   └── main.yml
│   └── handlers
│   └── main.yml
├── inventory
├── group_vars
│   ├── tantordb.yml
│   ├── keepalived.yml
│   ├── postgres_classic.yml
│   ├── haproxy.yml
│   ├── pgbouncer.yml
│   ├── prepare_nodes.yml
│   └── etcd.yml
└── my_inventory
├── pg_cluster.odg
├── synchronous_commit.png
├── pg_cluster_architechture.png
├── files
└── tantor.sh
├── ansible.cfg
└── pg-cluster.yaml

/pki-dir/.gitkeep:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/README.md:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/roles/certificates/vars/main.yml:
--------------------------------------------------------------------------------
---
--------------------------------------------------------------------------------
/tools/pg_cluster_backend/log/pg_cluster_backend.log:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/pgstatcommon/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/pgstatlogger/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/postgresql/test/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/__init__.py:
--------------------------------------------------------------------------------
PSC_DEBUG = False
--------------------------------------------------------------------------------
/inventory/group_vars/tantordb.yml:
--------------------------------------------------------------------------------
---

major_version: 16
edition: "be"
--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/postgresql/documentation/sphinx/.gitignore:
--------------------------------------------------------------------------------
*.txt
--------------------------------------------------------------------------------
/pg_cluster.odg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/pg_cluster.odg
--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/postgresql/encodings/__init__.py:
--------------------------------------------------------------------------------
##
# .encodings
##
--------------------------------------------------------------------------------
/synchronous_commit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/synchronous_commit.png
--------------------------------------------------------------------------------
/pg_cluster_architechture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/pg_cluster_architechture.png
--------------------------------------------------------------------------------
/files/tantor.sh:
--------------------------------------------------------------------------------
# Prepend the Tantor paths when patronictl is not already resolvable.
if ! which patronictl >/dev/null; then
  export PATH="/opt/tantor/usr/bin:/opt/tantor/usr/sbin:${PATH}"
fi
--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/postgresql/protocol/__init__.py:
--------------------------------------------------------------------------------
##
# .protocol
##
"""
PQ protocol facilities
"""
--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/postgresql/port/_optimized/README:
--------------------------------------------------------------------------------
These are the C ports of the more performance-critical parts of py-postgresql.
--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/postgresql/resolved/__init__.py:
--------------------------------------------------------------------------------
"""
Modules and packages resolved to avoid user dependency resolution.
"""
--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/postgresql/python/__init__.py:
--------------------------------------------------------------------------------
"""
Python tools package.

Various extensions to the standard library.
"""
5 | """ 6 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/release/__init__.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .release 3 | ## 4 | """ 5 | Release management code and project meta-data. 6 | """ 7 | -------------------------------------------------------------------------------- /roles/prepare_nodes/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart chronyd 3 | ansible.builtin.systemd_service: 4 | name: chronyd 5 | state: restarted 6 | enabled: true 7 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/tools/pg_cluster_backend/psc/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/__pycache__/api.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/tools/pg_cluster_backend/psc/postgresql/__pycache__/api.cpython-38.pyc -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/__pycache__/iri.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/tools/pg_cluster_backend/psc/postgresql/__pycache__/iri.cpython-38.pyc -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/__pycache__/sys.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/tools/pg_cluster_backend/psc/postgresql/__pycache__/sys.cpython-38.pyc -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/__init__.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .documentation 3 | ## 4 | r""" 5 | See: `postgresql.documentation.index` 6 | """ 7 | __docformat__ = 'reStructuredText' 8 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/__pycache__/string.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/tools/pg_cluster_backend/psc/postgresql/__pycache__/string.cpython-38.pyc -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/tools/pg_cluster_backend/psc/postgresql/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/__pycache__/message.cpython-38.pyc: -------------------------------------------------------------------------------- 
/ansible.cfg:
--------------------------------------------------------------------------------
[persistent_connection]
command_timeout = 60

[defaults]
timeout = 60
log_path = ansible.log
# disable SSH host key (fingerprint) checking
host_key_checking = False
fail_on_error = true
--------------------------------------------------------------------------------
/roles/haproxy/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart HAproxy
  ansible.builtin.systemd_service:
    name: haproxy-tantor
    daemon_reload: true
    state: restarted
    enabled: true
--------------------------------------------------------------------------------
/inventory/group_vars/keepalived.yml:
--------------------------------------------------------------------------------
---
keepalived_package_version: ""

# Cluster variables
cluster_vip_1: ""
vip_interface: "{{ ansible_default_ipv4.interface }}"  # interface name (ex. "ens32")
--------------------------------------------------------------------------------
"ens32") 7 | -------------------------------------------------------------------------------- /roles/pgbouncer/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart pgbouncer-tantor 3 | ansible.builtin.systemd_service: 4 | name: pgbouncer-tantor 5 | state: restarted 6 | daemon_reload: true 7 | enabled: true 8 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/pgstatcommon/__pycache__/pg_stat_common.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/tools/pg_cluster_backend/psc/pgstatcommon/__pycache__/pg_stat_common.cpython-38.pyc -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/pgstatlogger/__pycache__/pg_stat_logger.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/tools/pg_cluster_backend/psc/pgstatlogger/__pycache__/pg_stat_logger.cpython-38.pyc -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/__pycache__/clientparameters.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/tools/pg_cluster_backend/psc/postgresql/__pycache__/clientparameters.cpython-38.pyc -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/driver/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/tools/pg_cluster_backend/psc/postgresql/driver/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/protocol/__pycache__/buffer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/tools/pg_cluster_backend/psc/postgresql/protocol/__pycache__/buffer.cpython-38.pyc -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/protocol/__pycache__/client3.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/tools/pg_cluster_backend/psc/postgresql/protocol/__pycache__/client3.cpython-38.pyc -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/protocol/__pycache__/pbuffer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/tools/pg_cluster_backend/psc/postgresql/protocol/__pycache__/pbuffer.cpython-38.pyc -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/protocol/__pycache__/version.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TantorLabs/pg_cluster/HEAD/tools/pg_cluster_backend/psc/postgresql/protocol/__pycache__/version.cpython-38.pyc 
/inventory/group_vars/postgres_classic.yml:
--------------------------------------------------------------------------------
---

config_system_locale: 'ru_RU.UTF-8'
config_system_language: 'en_US.UTF-8'
postgresql_debian_gpg_key: "https://www.postgresql.org/media/keys/ACCC4CF8.asc"
major_version: 16
--------------------------------------------------------------------------------
/roles/keepalived/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart keepalived-tantor service
  ansible.builtin.systemd_service:
    daemon_reload: true
    name: keepalived-tantor
    enabled: true
    state: restarted
  listen: "restart keepalived"
--------------------------------------------------------------------------------
/roles/postgres_tantordb/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: Install tantor-server
  ansible.builtin.package:
    name: "tantor-{{ edition | d('be', true) }}-server-{{ major_version | d('16', true) }}"
    state: present
  when: postgresql_vendor == 'tantordb'
--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/postgresql/bin/__init__.py:
--------------------------------------------------------------------------------
"""
Console-script collection package.

Contents:

 pg_python
  Python console with a PostgreSQL connection bound to `db`.

 pg_dotconf
  Modify a PostgreSQL configuration file.
"""
11 | """ 12 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/python/string.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .python.string 3 | ## 4 | import os 5 | 6 | def indent(s, level = 2, char = ' '): 7 | ind = char * level 8 | r = "" 9 | for x in s.splitlines(): 10 | r += ((ind + x).rstrip() + os.linesep) 11 | return r 12 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/changes-v1.2.rst: -------------------------------------------------------------------------------- 1 | Changes in v1.2 2 | =============== 3 | 4 | 1.2.0 released on 2016-06-23 5 | ---------------------------- 6 | 7 | * PostgreSQL 9.3 compatibility fixes (Elvis) 8 | * Python 3.5 compatibility fixes (Elvis) 9 | * Add support for JSONB type (Elvis) 10 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/sphinx/changes-v1.2.rst: -------------------------------------------------------------------------------- 1 | Changes in v1.2 2 | =============== 3 | 4 | 1.2.0 released on 2016-06-23 5 | ---------------------------- 6 | 7 | * PostgreSQL 9.3 compatibility fixes (Elvis) 8 | * Python 3.5 compatibility fixes (Elvis) 9 | * Add support for JSONB type (Elvis) 10 | -------------------------------------------------------------------------------- /roles/keepalived/meta/argument_specs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | argument_specs: 4 | main: 5 | short_description: Validating pg_cluster variables for keepalived role 6 | description: Checks presence and types of all required variables for pg_cluster deployment 7 | options: 8 | cluster_vip_1: 9 | type: "str" 10 | required: true 11 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/project.py: -------------------------------------------------------------------------------- 1 | """ 2 | Project information. 3 | """ 4 | 5 | name = 'py-postgresql' 6 | identity = 'http://github.com/python-postgres/fe' 7 | 8 | meaculpa = 'Python+Postgres' 9 | abstract = 'Driver and tools library for PostgreSQL' 10 | 11 | version_info = (1, 2, 1) 12 | version = '.'.join(map(str, version_info)) 13 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/port/__init__.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .port 3 | ## 4 | """ 5 | Platform specific modules. 6 | 7 | The subject of each module should be the feature and the target platform. 8 | This is done to keep modules small and descriptive. 9 | 10 | These modules are for internal use only. 11 | """ 12 | __docformat__ = 'reStructuredText' 13 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/python/msw.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .python.msw 3 | ## 4 | """ 5 | Additional Microsoft Windows tools. 6 | """ 7 | 8 | # for Popen(), not supported on windows 9 | close_fds = False 10 | 11 | def platform_exe(name): 12 | """ 13 | Append '.exe' if it's not already there. 
14 | """ 15 | if name.endswith('.exe'): 16 | return name 17 | return name + '.exe' 18 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/types/io/pg_system.py: -------------------------------------------------------------------------------- 1 | from ...types import OIDOID, XIDOID, CIDOID, TIDOID 2 | from . import lib 3 | 4 | oid_to_io = { 5 | OIDOID : (lib.oid_pack, lib.oid_unpack), 6 | XIDOID : (lib.xid_pack, lib.xid_unpack), 7 | CIDOID : (lib.cid_pack, lib.cid_unpack), 8 | TIDOID : (lib.tid_pack, lib.tid_unpack), 9 | #ACLITEMOID : (aclitem_pack, aclitem_unpack), 10 | } 11 | -------------------------------------------------------------------------------- /inventory/group_vars/haproxy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | haproxy_package_version: "" 3 | 4 | haproxy_postgresql_port_rewritten: 15432 5 | haproxy_postgresql_port_readonly: 15433 6 | haproxy_pool_postgresql_port_rewritten: 16432 7 | haproxy_pool_postgresql_port_readonly: 16433 8 | haproxy_patroni_auth: "{{ patroni_restapi_username }}:{{ patroni_restapi_password }}" 9 | haproxy_stat_user: admin 10 | haproxy_stat_password: admin -------------------------------------------------------------------------------- /roles/patroni/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart patroni-watchdog 3 | ansible.builtin.systemd_service: 4 | name: patroni-watchdog 5 | state: restarted 6 | daemon_reload: true 7 | enabled: true 8 | 9 | - name: Restart patroni 10 | ansible.builtin.systemd_service: 11 | name: patroni-tantor.service 12 | state: restarted 13 | daemon_reload: true 14 | enabled: true 15 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/driver/__init__.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .driver package 3 | ## 4 | """ 5 | Driver package for providing an interface to a PostgreSQL database. 6 | """ 7 | __all__ = ['connect', 'default'] 8 | 9 | from .pq3 import Driver 10 | default = Driver() 11 | 12 | def connect(*args, **kw): 13 | 'Establish a connection using the default driver.' 
/tools/pg_cluster_backend/psc/postgresql/types/io/stdlib_uuid.py:
--------------------------------------------------------------------------------
import uuid
from ...types import UUIDOID

def uuid_pack(x, UUID = uuid.UUID, bytes = bytes):
    if isinstance(x, UUID):
        return bytes(x.bytes)
    return bytes(UUID(x).bytes)

def uuid_unpack(x, UUID = uuid.UUID):
    return UUID(bytes=x)

oid_to_io = {
    UUIDOID : (uuid_pack, uuid_unpack),
}
--------------------------------------------------------------------------------
/roles/patroni/templates/patroni-watchdog.service.j2:
--------------------------------------------------------------------------------
# "borrowed" from https://github.com/cybertec-postgresql/patroni-packaging/blob/master/RPM/patroni-watchdog.service

[Unit]
Description=Makes kernel watchdog device available for Patroni
Before=patroni-postgres.service

[Service]
Type=oneshot

ExecStart=/sbin/modprobe softdog

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/tools/pg_cluster_backend/conf/pg_cluster_backend.conf:
--------------------------------------------------------------------------------
[databases]
pg_cluster_db_conn_1 = pq://:@xxx.xxx.xxx.xxx:15432/postgres

[main]
application_name = pg_cluster_backend
threads_num = 10
conn_exception_sleep_interval = 3
reconnect_attempt = 3

[test]
accounts = 100

[log]
log_level = Info # Debug, Info, Error
log_sql = 1
file_maxmbytes = 50
file_backupcount = 5
--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/postgresql/python/doc.py:
--------------------------------------------------------------------------------
##
# .python.doc
##
"""
Documentation Tools.
"""
from operator import attrgetter

class Doc(object):
    """
    Simple object that sets the __doc__ attribute to the first parameter and
    initializes __annotations__ using keyword arguments.
    """
    def __init__(self, doc, **annotations):
        self.__doc__ = str(doc)
        self.__annotations__ = annotations

    __str__ = attrgetter('__doc__')
--------------------------------------------------------------------------------
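A quick usage sketch for the `Doc` class above: the instance carries only a docstring and annotations, and `str()` returns the docstring.

```python
from operator import attrgetter

# Restated from .python.doc above so the snippet runs standalone.
class Doc(object):
    def __init__(self, doc, **annotations):
        self.__doc__ = str(doc)
        self.__annotations__ = annotations
    __str__ = attrgetter('__doc__')

greet = Doc("Return a greeting for the given name.", name=str, returns=str)
assert str(greet) == "Return a greeting for the given name."
assert greet.__annotations__ == {'name': str, 'returns': str}
```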
13 | """ 14 | def __init__(self, doc, **annotations): 15 | self.__doc__ = str(doc) 16 | self.__annotations__ = annotations 17 | 18 | __str__ = attrgetter('__doc__') 19 | -------------------------------------------------------------------------------- /roles/etcd/templates/etcd-tantor.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Etcd Server 3 | After=network.target 4 | After=network-online.target 5 | Wants=network-online.target 6 | 7 | [Service] 8 | Type=notify 9 | WorkingDirectory=/opt/tantor/var/lib/etcd/ 10 | EnvironmentFile=-/opt/tantor/etc/etcd/etcd.conf 11 | User= {{ etcd_user }} 12 | # set GOMAXPROCS to number of processors 13 | ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/tantor/usr/bin/etcd" 14 | Restart=on-failure 15 | LimitNOFILE=65536 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | 20 | -------------------------------------------------------------------------------- /inventory/group_vars/pgbouncer.yml: -------------------------------------------------------------------------------- 1 | --- 2 | pgbouncer_package_version: "" 3 | 4 | pgbouncer_listen_port: 6432 5 | pgbouncer_log_file: "/opt/tantor/var/log/pgbouncer/pgbouncer.log" 6 | pgbouncer_conf_file: "/opt/tantor/etc/pgbouncer/pgbouncer.ini" 7 | pgbouncer_pid_file: "/var/run/pgbouncer-tantor/pgbouncer.pid" 8 | pgbouncer_auth_user: pgbouncer 9 | pgbouncer_auth_user_passw: pgbouncer819mQ 10 | pgbouncer_incoming_hosts: "{% for host in groups['inv_cluster'] %}{{ hostvars[host]['ansible_default_ipv4'].address }}{% if not loop.last %},{% endif %}{% endfor %}" 11 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/types/io/pg_bitwise.py: -------------------------------------------------------------------------------- 1 | from .. import BITOID, VARBITOID 2 | from ..bitwise import Varbit, Bit 3 | from . import lib 4 | 5 | def varbit_pack(x, pack = lib.varbit_pack): 6 | return pack((x.bits, x.data)) 7 | 8 | def varbit_unpack(x, unpack = lib.varbit_unpack): 9 | return Varbit.from_bits(*unpack(x)) 10 | 11 | oid_to_io = { 12 | BITOID : (varbit_pack, varbit_unpack, Bit), 13 | VARBITOID : (varbit_pack, varbit_unpack, Varbit), 14 | } 15 | 16 | oid_to_type = { 17 | BITOID : Bit, 18 | VARBITOID : Varbit, 19 | } 20 | -------------------------------------------------------------------------------- /roles/etcd/tasks/pki.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install keys/certs 3 | with_items: 4 | - f: '{{ etcd_pki_key_src }}' 5 | d: '{{ etcd_pki_key_dest }}' 6 | m: '0400' 7 | - f: '{{ etcd_pki_cert_src }}' 8 | d: '{{ etcd_pki_cert_dest }}' 9 | m: '0600' 10 | - f: '{{ etcd_pki_ca_cert_src }}' 11 | d: '{{ etcd_pki_ca_cert_dest }}' 12 | m: '0600' 13 | ansible.builtin.copy: 14 | src: '{{ item.f }}' 15 | dest: '{{ item.d }}' 16 | owner: '{{ etcd_user }}' 17 | group: '{{ etcd_group }}' 18 | mode: '{{ item.m }}' 19 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/test/support.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .test.support 3 | ## 4 | """ 5 | Executable module used by test_* modules to mimic a command. 
6 | """ 7 | import sys 8 | 9 | def pg_config(*args): 10 | data = """FOO=BaR 11 | FEH=YEAH 12 | version=NAY 13 | """ 14 | sys.stdout.write(data) 15 | 16 | if __name__ == '__main__': 17 | if sys.argv[1:]: 18 | cmd = sys.argv[1] 19 | if cmd in globals(): 20 | cmd = globals()[cmd] 21 | cmd(sys.argv[2:]) 22 | sys.exit(0) 23 | sys.stderr.write("no valid entry point referenced") 24 | sys.exit(1) 25 | -------------------------------------------------------------------------------- /roles/pgbouncer/sql/pgbouncer_prepare.sql: -------------------------------------------------------------------------------- 1 | DROP SCHEMA IF EXISTS pgbouncer CASCADE; 2 | CREATE SCHEMA pgbouncer AUTHORIZATION pgbouncer; 3 | 4 | CREATE OR REPLACE FUNCTION pgbouncer.get_auth(p_usename TEXT) 5 | RETURNS TABLE(username TEXT, password TEXT) AS 6 | $$ 7 | BEGIN 8 | RETURN QUERY 9 | SELECT usename::TEXT, passwd::TEXT FROM pg_catalog.pg_shadow 10 | WHERE usename = p_usename; 11 | END; 12 | $$ LANGUAGE plpgsql SECURITY DEFINER; 13 | 14 | REVOKE ALL ON FUNCTION pgbouncer.get_auth(p_usename TEXT) FROM PUBLIC; 15 | GRANT EXECUTE ON FUNCTION pgbouncer.get_auth(p_usename TEXT) TO pgbouncer 16 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/types/io/stdlib_jsonb.py: -------------------------------------------------------------------------------- 1 | from ...types import JSONBOID 2 | 3 | 4 | def jsonb_pack(x, typeio): 5 | jsonb = typeio.encode(x) 6 | return b'\x01' + jsonb 7 | 8 | 9 | def jsonb_unpack(x, typeio): 10 | if x[0] != 1: 11 | raise ValueError('unexpected JSONB format version: {!r}'.format(x[0])) 12 | return typeio.decode(x[1:]) 13 | 14 | 15 | def _jsonb_io_factory(oid, typeio): 16 | _pack = lambda x: jsonb_pack(x, typeio) 17 | _unpack = lambda x: jsonb_unpack(x, typeio) 18 | 19 | return (_pack, _unpack, str) 20 | 21 | 22 | oid_to_io = { 23 | JSONBOID: _jsonb_io_factory 24 | } 25 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/protocol/buffer.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .protocol.buffer 3 | ## 4 | """ 5 | This is an abstraction module that provides the working buffer implementation. 6 | If a C compiler is not available on the system that built the package, the slower 7 | `postgresql.protocol.pbuffer` module can be used in 8 | `postgresql.port.optimized.buffer`'s absence. 9 | 10 | This provides a convenient place to import the necessary module without 11 | concerning the local code with the details. 
12 | """ 13 | try: 14 | from ..port.optimized import pq_message_stream 15 | except ImportError: 16 | from .pbuffer import pq_message_stream 17 | -------------------------------------------------------------------------------- /roles/pgbouncer/templates/pgbouncer.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=A lightweight connection pooler for PostgreSQL 3 | Documentation=man:pgbouncer(1) 4 | After=syslog.target 5 | After=network.target 6 | 7 | [Service] 8 | RemainAfterExit=yes 9 | Type=notify 10 | 11 | User=postgres 12 | Group=postgres 13 | 14 | # Path to the init file 15 | Environment=BOUNCERCONF={{ pgbouncer_conf_file }} 16 | 17 | ExecStart=/opt/tantor/usr/bin/pgbouncer -q $BOUNCERCONF 18 | ExecReload=/opt/tantor/usr/bin/pgbouncer -R -q $BOUNCERCONF 19 | 20 | # Give a reasonable amount of time for the server to start up/shut down 21 | TimeoutSec=300 22 | 23 | [Install] 24 | WantedBy=multi-user.target 25 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/protocol/message_types.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .protocol.message_types 3 | ## 4 | """ 5 | Data module providing a sequence of bytes objects whose value corresponds to its 6 | index in the sequence. 7 | 8 | This provides resource for buffer objects to use common message type objects. 9 | 10 | WARNING: It's tempting to use the 'is' operator and in some circumstances that 11 | may be okay. However, it's possible (sys.modules.clear()) for the extension 12 | modules' copy of this to become inconsistent with what protocol.element3 and 13 | protocol.xact3 are using, so it's important to **not** use 'is'. 14 | """ 15 | message_types = tuple([bytes((x,)) for x in range(256)]) 16 | -------------------------------------------------------------------------------- /roles/keepalived/check_scripts/chk_patroni_leader.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | NODENAME=$(cat {{ patroni_config_dir }}/{{ inventory_hostname }}.yml | grep -E "^name:" | cut -d: -f2 | tr -d '[:blank:]') 4 | 5 | if [[ -z "$NODENAME" ]]; then 6 | echo "Nodename is blank!" 7 | exit 1 8 | fi 9 | 10 | PATRONICTL_OUT=$(/opt/tantor/usr/bin/patronictl -c {{ patroni_config_dir }}/{{ inventory_hostname }}.yml list --format json) 11 | 12 | if [[ -z "$PATRONICTL_OUT" ]]; then 13 | echo "No patronictl output!" 14 | exit 1 15 | fi 16 | 17 | LEADER=$(echo $PATRONICTL_OUT | jq --raw-output ".[] | select ((.Role == \"Leader\") and (.State == \"running\")) | .Member") 18 | 19 | if [[ "$NODENAME" == "$LEADER" ]]; then 20 | echo "Is leader!" 21 | exit 0 22 | else 23 | echo "Is not leader!" 
/roles/etcd/handlers/main.yml:
--------------------------------------------------------------------------------
---
# handlers file for etcd-cluster

# Restart masters one-by-one to keep quorum
- name: Restart etcd masters
  become: true
  become_user: root
  when: etcd_launch | bool
  ansible.builtin.service:
    name: etcd-tantor
    state: restarted
  run_once: true
  loop: '{{ groups[etcd_master_group_name] }}'
  delegate_to: '{{ item }}'

# Restart non-voting members
- name: Restart etcd members
  become: true
  become_user: root
  when:
    - etcd_launch | bool
    - inventory_hostname is not in groups[etcd_master_group_name]
  ansible.builtin.service:
    name: etcd-tantor
    state: restarted

- name: Restart etcd-tantor.service
  ansible.builtin.systemd_service:
    name: etcd-tantor
    daemon_reload: true
    state: restarted
    enabled: true
--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/postgresql/python/os.py:
--------------------------------------------------------------------------------
##
# .python.os
##
"""
General OS abstractions and information.
"""
import sys
import os

#: By default, close the FDs on subprocess.Popen().
close_fds = True

#: By default, there is no modification for executable references.
platform_exe = str

def find_file(basename, paths,
    join = os.path.join, exists = os.path.exists,
):
    """
    Find the file in the given paths. Return the first path
    that exists.
    """
    for x in paths:
        path = join(x, basename)
        if exists(path):
            return path

if sys.platform in ('win32','win64'):
    # replace variants for windows
    from .msw import close_fds, platform_exe

def find_executable(basename, pathsep = os.pathsep, platexe = platform_exe):
    paths = os.environ.get('PATH', '').split(pathsep)
    return find_file(platexe(basename), paths)
--------------------------------------------------------------------------------
/inventory/group_vars/prepare_nodes.yml:
--------------------------------------------------------------------------------
---
# vars file for pg_cluster

add_nexus_repo: "false"

nexus_key_url: "https://public.tantorlabs.ru/tantorlabs.ru.asc"
nexus_apt_astra_1_7: "deb [signed-by=/etc/apt/keyrings/tantor-nexus.gpg arch=amd64] https://nexus-public.tantorlabs.ru/repository/astra-smolensk-1.7/ smolensk main"
nexus_apt_astra_1_8: "deb [signed-by=/etc/apt/keyrings/tantor-nexus.gpg arch=amd64] https://nexus-public.tantorlabs.ru/repository/astra-1.8/ 1.8_x86-64 main"
nexus_apt_ubuntu_22_04: "deb [arch=amd64] https://nexus-public.tantorlabs.ru/repository/ubuntu-22.04 jammy main"
nexus_apt_ubuntu_20_04: "deb [arch=amd64] https://nexus-public.tantorlabs.ru/repository/ubuntu-20.04 focal main"
nexus_yum_redos_7_3: "https://nexus-public.tantorlabs.ru/repository/redos-7.3/"
nexus_yum_redos_8_0: "https://nexus-public.tantorlabs.ru/repository/redos-8/"
nexus_yum_altlinux_c10f2: "https://nexus-public.tantorlabs.ru/repository/altrepo_c10f2/"
--------------------------------------------------------------------------------
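A usage sketch for the `find_file()`/`find_executable()` helpers from the `.python.os` module above (restated minimally so the snippet runs standalone; the lookup returns `None` when nothing matches):

```python
import os

# Restated minimal versions of the helpers above.
def find_file(basename, paths):
    for x in paths:
        path = os.path.join(x, basename)
        if os.path.exists(path):
            return path

def find_executable(basename):
    return find_file(basename, os.environ.get('PATH', '').split(os.pathsep))

print(find_executable('psql') or 'psql not on PATH')
```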
/tools/pg_cluster_backend/psc/postgresql/documentation/sphinx/build.sh:
--------------------------------------------------------------------------------
#!/bin/sh
cd "$(dirname $0)"

# distutils doesn't make it straightforward to include an arbitrary
# directory in the package data, so manage .static and .templates here.
mkdir -p .static .templates
cat >.static/unsuck.css_t <.templates/layout.html <
{% endblock %}
EOF

mkdir -p ../html/doctrees
sphinx-build -c "$(pwd)" -E -b html -d ../html/doctrees .. ../html
cd ../html && pwd
--------------------------------------------------------------------------------
/roles/pgbouncer/templates/pgbouncer.ini.j2:
--------------------------------------------------------------------------------
[databases]
* = host=127.0.0.1 port={{ patroni_pg_port }} auth_user={{ pgbouncer_auth_user }}

[pgbouncer]
logfile = {{ pgbouncer_log_file }}
pidfile = {{ pgbouncer_pid_file }}

listen_addr = 127.0.0.1,{{ pgbouncer_incoming_hosts }}
listen_port = {{ pgbouncer_listen_port }}

auth_file = /opt/tantor/etc/pgbouncer/userlist.txt
auth_type = {% if major_version <= "13" %}md5{% elif major_version >= "14" %}scram-sha-256{%+ endif +%}
auth_query = SELECT * FROM pgbouncer.get_auth($1)

pool_mode = transaction
max_client_conn = 5000
default_pool_size = 200
reserve_pool_size = 50

ignore_startup_parameters = extra_float_digits,client_min_messages

admin_users = postgres
stats_users = stats, postgres

server_lifetime = 86400
server_idle_timeout = 10800
query_wait_timeout = 120

log_connections = 0
log_disconnections = 0
log_pooler_errors = 1

server_reset_query = DISCARD ALL;
--------------------------------------------------------------------------------
/roles/keepalived/templates/keepalived.conf.j2:
--------------------------------------------------------------------------------
global_defs {
    router_id ocp_vrrp
    enable_script_security
    script_user root
}

vrrp_script chk_patroni_leader {
    script "timeout 3 /opt/keepalived/scripts/chk_patroni_leader.sh"
    interval 5
    fall 1
    rise 1
}

vrrp_script haproxy_check {
    script "/opt/keepalived/scripts/haproxy_check.sh"
    interval 2
    weight 2
}

vrrp_instance pgcluster_1 {
    interface {{ vip_interface }}
    state BACKUP
    priority {{ ansible_default_ipv4.address.split('.').3 }}
    virtual_router_id 10
    authentication {
        auth_type PASS
        auth_pass password
    }
    virtual_ipaddress {
        {{ cluster_vip_1 }}
    }
    unicast_src_ip {{ ansible_host }}
    unicast_peer {
{% for host in groups['inv_keepalived'] %}
        {{ hostvars[host]['ansible_default_ipv4'].address }}
{% endfor %}
    }
    track_script {
        chk_patroni_leader
    }
}
--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/postgresql/types/io/pg_network.py:
--------------------------------------------------------------------------------
from .. import INETOID, CIDROID, MACADDROID
from . import lib
import ipaddress

oid_to_type = {
    MACADDROID : str,
    INETOID: ipaddress._IPAddressBase,
    CIDROID: ipaddress._BaseNetwork,
}

def inet_pack(ob, pack = lib.net_pack, Constructor = ipaddress.ip_address):
    a = Constructor(ob)
    return pack((a.version, None, a.packed))

def cidr_pack(ob, pack = lib.net_pack, Constructor = ipaddress.ip_network):
    a = Constructor(ob)
    return pack((a.version, a.prefixlen, a.network_address.packed))

def inet_unpack(data, unpack = lib.net_unpack, Constructor = ipaddress.ip_address):
    version, mask, data = unpack(data)
    return Constructor(data)

def cidr_unpack(data, unpack = lib.net_unpack, Constructor = ipaddress.ip_network):
    version, mask, data = unpack(data)
    return Constructor(data).supernet(new_prefix=mask)

oid_to_io = {
    MACADDROID : (lib.macaddr_pack, lib.macaddr_unpack, str),
    CIDROID : (cidr_pack, cidr_unpack, str),
    INETOID : (inet_pack, inet_unpack, str),
}
--------------------------------------------------------------------------------
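The `cidr_unpack()` logic above rebuilds a network from a packed network address plus a prefix length via `supernet(new_prefix=...)`. That step can be checked with the standard library alone:

```python
import ipaddress

network = ipaddress.ip_network('10.1.2.0/24')
packed = network.network_address.packed        # what the wire carries

# ip_network() over 4 raw bytes yields a /32; supernet() restores the mask.
rebuilt = ipaddress.ip_network(packed).supernet(new_prefix=24)
assert rebuilt == network
```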
/roles/patroni/templates/patroni-tantor.service.j2:
--------------------------------------------------------------------------------
# This is an example systemd config file for Patroni
# You can copy it to "/etc/systemd/system/patroni.service",

[Unit]
Description=Runners to orchestrate a high-availability PostgreSQL
After=syslog.target network.target

[Service]
Type=simple
LimitNOFILE=65536

User={{ patroni_system_user }}
Group={{ patroni_system_group }}

# Where to send early-startup messages from the server
# This is normally controlled by the global default set by systemd
# StandardOutput=syslog

ExecStartPre={{ patroni_exec_start_pre | default('') }}
ExecStart={{ patroni_bin_dir }}/patroni {{ patroni_config_dir }}/{{ patroni_name|default(inventory_hostname) }}.yml

# Only kill the patroni process, not its children, so it will gracefully stop postgres
KillMode=process

# Give a reasonable amount of time for the server to start up/shut down
TimeoutSec=600

# Do not restart the service if it crashes; we want to manually inspect the database on failure
Restart=no

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/tools/pg_cluster_backend/psc/postgresql/python/itertools.py:
--------------------------------------------------------------------------------
##
# .python.itertools
##
"""
itertools extensions
"""
from collections.abc import Iterable
from itertools import cycle, islice

def interlace(*iters, next = next) -> Iterable:
    """
    interlace(i1, i2, ..., in) -> (
        i1-0, i2-0, ..., in-0,
        i1-1, i2-1, ..., in-1,
        .
        .
        .
        i1-n, i2-n, ..., in-n,
    )
    """
    return map(next, cycle([iter(x) for x in iters]))

def chunk(iterable, chunksize = 256):
    """
    Given an iterable, return an iterable producing chunks of the objects
    produced by the given iterable.

    chunks([o1,o2,o3,o4], chunksize = 2) -> [
        [o1,o2],
        [o3,o4],
    ]
    """
    iterable = iter(iterable)
    last = ()
    lastsize = chunksize
    while lastsize == chunksize:
        last = list(islice(iterable, chunksize))
        lastsize = len(last)
        yield last

def find(iterable, selector):
    """
    Return the first item in the `iterable` that causes the `selector` to return
    `True`.
    """
    for x in iterable:
        if selector(x):
            return x
--------------------------------------------------------------------------------
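Behavior sketches for the two main helpers above (restated so the snippet runs standalone): a short trailing chunk is yielded as-is, and `interlace()` stops as soon as its first iterator is exhausted.

```python
from itertools import cycle, islice

# Restated from .python.itertools above so the snippet runs standalone.
def chunk(iterable, chunksize = 256):
    iterable = iter(iterable)
    lastsize = chunksize
    while lastsize == chunksize:
        last = list(islice(iterable, chunksize))
        lastsize = len(last)
        yield last

def interlace(*iters, next = next):
    return map(next, cycle([iter(x) for x in iters]))

assert list(chunk([1, 2, 3, 4, 5], chunksize=2)) == [[1, 2], [3, 4], [5]]
assert list(interlace('ab', 'cd')) == ['a', 'c', 'b', 'd']
```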
27 | 28 | chunk([o1,o2,o3,o4], chunksize = 2) -> [ 29 | [o1,o2], 30 | [o3,o4], 31 | ] 32 | """ 33 | iterable = iter(iterable) 34 | last = () 35 | lastsize = chunksize 36 | while lastsize == chunksize: 37 | last = list(islice(iterable, chunksize)) 38 | lastsize = len(last) 39 | yield last 40 | 41 | def find(iterable, selector): 42 | """ 43 | Return the first item in the `iterable` that causes the `selector` to return 44 | `True`. 45 | """ 46 | for x in iterable: 47 | if selector(x): 48 | return x 49 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/test/testall.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .test.testall 3 | ## 4 | import unittest 5 | from sys import stderr 6 | 7 | from ..installation import default 8 | 9 | from .test_exceptions import * 10 | from .test_bytea_codec import * 11 | from .test_iri import * 12 | from .test_protocol import * 13 | from .test_configfile import * 14 | from .test_pgpassfile import * 15 | from .test_python import * 16 | 17 | from .test_installation import * 18 | from .test_cluster import * 19 | 20 | # These two require custom cluster configurations. 21 | from .test_connect import * 22 | # No SSL? cluster initialization will fail. 23 | if default().ssl: 24 | from .test_ssl_connect import * 25 | else: 26 | stderr.write("NOTICE: installation doesn't support SSL\n") 27 | 28 | try: 29 | from .test_optimized import * 30 | except ImportError: 31 | stderr.write("NOTICE: port.optimized could not be imported\n") 32 | 33 | from .test_driver import * 34 | from .test_alock import * 35 | from .test_notifyman import * 36 | from .test_copyman import * 37 | from .test_lib import * 38 | from .test_dbapi20 import * 39 | from .test_types import * 40 | 41 | if __name__ == '__main__': 42 | unittest.main() 43 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/changes-v1.1.rst: -------------------------------------------------------------------------------- 1 | Changes in v1.1 2 | =============== 3 | 4 | 1.1.0 5 | ----- 6 | 7 | * Remove two-phase commit interfaces per deprecation in v1.0. 8 | For proper two phase commit use, a lock manager must be employed that 9 | the implementation did nothing to accommodate for. 10 | * Add support for unpacking anonymous records (Elvis) 11 | * Support PostgreSQL 9.2 (Elvis) 12 | * Python 3.3 Support (Elvis) 13 | * Add column execution method. (jwp) 14 | * Add one-shot statement interface. Connection.query.* (jwp) 15 | * Modify the inet/cidr support by relying on the ipaddress module introduced in Python 3.3 (Google's ipaddr project) 16 | The existing implementation relied on simple str() representation supported by the 17 | socket module. Unfortunately, MS Windows' socket library does not appear to support the 18 | necessary functionality, or Python's socket module does not expose it. ipaddress fixes 19 | the problem. 20 | 21 | .. note:: 22 | The `ipaddress` module is now required for local inet and cidr. While it is 23 | of "preliminary" status, the ipaddr project has been around for some time and 24 | well supported. ipaddress appears to be the safest way forward for native 25 | network types. 
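For reference, a minimal sketch of the stdlib round-trip these codecs build on, using only the documented ``ipaddress`` API (the addresses are illustrative)::

    import ipaddress
    # ip_address() parses presentation form; .packed yields network-order bytes.
    a = ipaddress.ip_address('192.0.2.1')
    assert (a.version, a.packed) == (4, b'\xc0\x00\x02\x01')
    # ip_network() retains the prefix length that cidr values carry.
    n = ipaddress.ip_network('192.0.2.0/24')
    assert (n.prefixlen, n.network_address.packed) == (24, b'\xc0\x00\x02\x00')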
26 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/sphinx/changes-v1.1.rst: -------------------------------------------------------------------------------- 1 | Changes in v1.1 2 | =============== 3 | 4 | 1.1.0 5 | ----- 6 | 7 | * Remove two-phase commit interfaces per deprecation in v1.0. 8 | For proper two phase commit use, a lock manager must be employed that 9 | the implementation did nothing to accommodate for. 10 | * Add support for unpacking anonymous records (Elvis) 11 | * Support PostgreSQL 9.2 (Elvis) 12 | * Python 3.3 Support (Elvis) 13 | * Add column execution method. (jwp) 14 | * Add one-shot statement interface. Connection.query.* (jwp) 15 | * Modify the inet/cidr support by relying on the ipaddress module introduced in Python 3.3 (Google's ipaddr project) 16 | The existing implementation relied on simple str() representation supported by the 17 | socket module. Unfortunately, MS Windows' socket library does not appear to support the 18 | necessary functionality, or Python's socket module does not expose it. ipaddress fixes 19 | the problem. 20 | 21 | .. note:: 22 | The `ipaddress` module is now required for local inet and cidr. While it is 23 | of "preliminary" status, the ipaddr project has been around for some time and 24 | well supported. ipaddress appears to be the safest way forward for native 25 | network types. 26 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/protocol/version.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .protocol.version 3 | ## 4 | """ 5 | PQ version class used by startup messages. 6 | """ 7 | from struct import Struct 8 | version_struct = Struct('!HH') 9 | 10 | class Version(tuple): 11 | """ 12 | Version((major, minor)) -> Version 13 | 14 | Version serializer and parser. 15 | """ 16 | major = property(fget = lambda s: s[0]) 17 | minor = property(fget = lambda s: s[1]) 18 | 19 | def __new__(subtype, major_minor): 20 | (major, minor) = major_minor 21 | major = int(major) 22 | minor = int(minor) 23 | # If it can't be packed like this, it's not a valid version. 
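# ('!HH' packs two big-endian unsigned 16-bit fields, so each part must fall in 0..65535.)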
24 | try: 25 | version_struct.pack(major, minor) 26 | except Exception as e: 27 | raise ValueError("unpackable major and minor") from e 28 | 29 | return tuple.__new__(subtype, (major, minor)) 30 | 31 | def __int__(self): 32 | return (self[0] << 16) | self[1] 33 | 34 | def bytes(self): 35 | return version_struct.pack(self[0], self[1]) 36 | 37 | def __repr__(self): 38 | return '%d.%d' %(self[0], self[1]) 39 | 40 | def parse(self, data): 41 | return self(version_struct.unpack(data)) 42 | parse = classmethod(parse) 43 | 44 | CancelRequestCode = Version((1234, 5678)) 45 | NegotiateSSLCode = Version((1234, 5679)) 46 | V2_0 = Version((2, 0)) 47 | V3_0 = Version((3, 0)) 48 | -------------------------------------------------------------------------------- /inventory/my_inventory: -------------------------------------------------------------------------------- 1 | [all:children] 2 | inv_cluster 3 | inv_etcd 4 | inv_pg 5 | inv_keepalived 6 | 7 | [inv_cluster] 8 | ansible_host= ansible_user=user_with_ssh_login_permissions 9 | ansible_host= ansible_user=user_with_ssh_login_permissions 10 | ansible_host= ansible_user=user_with_ssh_login_permissions 11 | 12 | [inv_etcd] 13 | ansible_host= ansible_user=user_with_ssh_login_permissions 14 | ansible_host= ansible_user=user_with_ssh_login_permissions 15 | ansible_host= ansible_user=user_with_ssh_login_permissions 16 | 17 | [inv_pg] 18 | ansible_host= ansible_user=user_with_ssh_login_permissions 19 | ansible_host= ansible_user=user_with_ssh_login_permissions 20 | ansible_host= ansible_user=user_with_ssh_login_permissions 21 | 22 | [inv_keepalived] 23 | ansible_host= ansible_user=user_with_ssh_login_permissions 24 | ansible_host= ansible_user=user_with_ssh_login_permissions 25 | ansible_host= ansible_user=user_with_ssh_login_permissions 26 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/admin.rst: -------------------------------------------------------------------------------- 1 | Administration 2 | ============== 3 | 4 | This chapter covers the administration of py-postgresql. This includes 5 | installation and other aspects of working with py-postgresql such as 6 | environment variables and configuration files. 7 | 8 | Installation 9 | ------------ 10 | 11 | py-postgresql uses Python's distutils package to manage the build and 12 | installation process of the package. The normal entry point for 13 | this is the ``setup.py`` script contained in the root project directory. 14 | 15 | After extracting the archive and changing into the project's directory, 16 | installation is normally as simple as:: 17 | 18 | $ python3 ./setup.py install 19 | 20 | However, if you need to install for use with a particular version of python, 21 | just use the path of the executable that should be used:: 22 | 23 | $ /usr/opt/bin/python3 ./setup.py install 24 | 25 | 26 | Environment 27 | ----------- 28 | 29 | These environment variables affect the operation of the package: 30 | 31 | ============== =============================================================================== 32 | PGINSTALLATION The path to the ``pg_config`` executable of the installation to use by default. 
33 | ============== =============================================================================== 34 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/sphinx/admin.rst: -------------------------------------------------------------------------------- 1 | Administration 2 | ============== 3 | 4 | This chapter covers the administration of py-postgresql. This includes 5 | installation and other aspects of working with py-postgresql such as 6 | environment variables and configuration files. 7 | 8 | Installation 9 | ------------ 10 | 11 | py-postgresql uses Python's distutils package to manage the build and 12 | installation process of the package. The normal entry point for 13 | this is the ``setup.py`` script contained in the root project directory. 14 | 15 | After extracting the archive and changing into the project's directory, 16 | installation is normally as simple as:: 17 | 18 | $ python3 ./setup.py install 19 | 20 | However, if you need to install for use with a particular version of python, 21 | just use the path of the executable that should be used:: 22 | 23 | $ /usr/opt/bin/python3 ./setup.py install 24 | 25 | 26 | Environment 27 | ----------- 28 | 29 | These environment variables affect the operation of the package: 30 | 31 | ============== =============================================================================== 32 | PGINSTALLATION The path to the ``pg_config`` executable of the installation to use by default. 33 | ============== =============================================================================== 34 | -------------------------------------------------------------------------------- /roles/etcd/tasks/cluster_state.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check existing etcd cluster members 3 | ansible.builtin.shell: | 4 | ETCDCTL_API=3 {{ etcd_bin_path }} \ 5 | --endpoints=https://{{ hostvars[item]['ansible_default_ipv4']['address'] }}:{{ etcd_port_client }} \ 6 | --cacert={{ etcd_conf_dir }}/ca.pem \ 7 | --cert={{ etcd_conf_dir }}/{{ hostvars[item]['unified_hostname'] }}.pem \ 8 | --key={{ etcd_conf_dir }}/{{ hostvars[item]['unified_hostname'] }}-key.pem \ 9 | member list --write-out=json 10 | register: etcd_member_list 11 | failed_when: false 12 | changed_when: false 13 | delegate_to: "{{ item }}" 14 | with_items: "{{ groups['inv_etcd'] }}" 15 | run_once: true 16 | 17 | - name: Parse existing cluster state 18 | ansible.builtin.set_fact: 19 | etcd_active_nodes: >- 20 | {{ 21 | etcd_member_list.results | 22 | selectattr('rc', 'equalto', 0) | 23 | map(attribute='item') | 24 | list 25 | }} 26 | etcd_cluster_exists: >- 27 | {{ 28 | etcd_member_list.results | 29 | selectattr('rc', 'equalto', 0) | 30 | list | length > 0 31 | }} 32 | run_once: true 33 | 34 | - name: Determine cluster leader for management operations 35 | ansible.builtin.set_fact: 36 | etcd_leader: "{{ etcd_active_nodes | first }}" 37 | when: etcd_active_nodes | length > 0 38 | run_once: true 39 | -------------------------------------------------------------------------------- /roles/haproxy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install haproxy-tantor-all (RHEL-like systems) 3 | when: ansible_os_family in ["RED", "Centos", "Rocky", "RedHat"] 4 | ansible.builtin.dnf: 5 | name: >- 6 | {{ 7 | 'haproxy-tantor-all-' + haproxy_package_version 8 | if haproxy_package_version | 
length > 0 9 | else 10 | 'haproxy-tantor-all' 11 | }} 12 | state: present 13 | allow_downgrade: true 14 | 15 | - name: Install haproxy-tantor-all (Alt Linux) 16 | when: ansible_os_family in ["Altlinux"] 17 | community.general.apt_rpm: 18 | name: >- 19 | {{ 20 | 'haproxy-tantor-all-' + haproxy_package_version 21 | if haproxy_package_version | length > 0 22 | else 23 | 'haproxy-tantor-all' 24 | }} 25 | state: present 26 | 27 | - name: Install haproxy-tantor-all (Debian-like systems) 28 | when: ansible_os_family in ["Astra Linux", "Debian"] 29 | ansible.builtin.apt: 30 | name: >- 31 | {{ 32 | 'haproxy-tantor-all=' + haproxy_package_version 33 | if haproxy_package_version | length > 0 34 | else 35 | 'haproxy-tantor-all' 36 | }} 37 | state: present 38 | allow_downgrade: true 39 | 40 | - name: Create config file for haproxy 41 | ansible.builtin.template: 42 | src: haproxy.cfg.j2 43 | dest: /opt/tantor/etc/haproxy/haproxy.cfg 44 | mode: "0644" 45 | notify: Restart HAproxy 46 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/python/decorlib.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .python.decorlib 3 | ## 4 | """ 5 | common decorators 6 | """ 7 | import os 8 | import types 9 | 10 | def propertydoc(ap): 11 | """ 12 | Helper function for extracting an `abstractproperty`'s real documentation. 13 | """ 14 | doc = "" 15 | rstr = "" 16 | if ap.fget: 17 | ret = ap.fget.__annotations__.get('return') 18 | if ret is not None: 19 | rstr = " -> " + repr(ret) 20 | if ap.fget.__doc__: 21 | doc += os.linesep*2 + "GET::" + (os.linesep + ' '*4) + (os.linesep + ' '*4).join( 22 | [x.strip() for x in ap.fget.__doc__.strip().split(os.linesep)] 23 | ) 24 | if ap.fset and ap.fset.__doc__: 25 | doc += os.linesep*2 + "SET::" + (os.linesep + ' '*4) + (os.linesep + ' '*4).join( 26 | [x.strip() for x in ap.fset.__doc__.strip().split(os.linesep)] 27 | ) 28 | if ap.fdel and ap.fdel.__doc__: 29 | doc += os.linesep*2 + "DELETE::" + (os.linesep + ' '*4) + (os.linesep + ' '*4).join( 30 | [x.strip() for x in ap.fdel.__doc__.strip().split(os.linesep)] 31 | ) 32 | ap.__doc__ = "" if not doc else ( 33 | "Abstract Property" + rstr + doc 34 | ) 35 | return ap 36 | 37 | class method(object): 38 | __slots__ = ('callable',) 39 | def __init__(self, callable): 40 | self.callable = callable 41 | def __get__(self, val, typ): 42 | if val is None: 43 | return self.callable 44 | return types.MethodType(self.callable, val) 45 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/types/io/contrib_hstore.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .types.io.contrib_hstore - I/O routines for binary hstore 3 | ## 4 | from ...python.structlib import split_sized_data, ulong_pack, ulong_unpack 5 | from ...python.itertools import chunk 6 | 7 | ## 8 | # Build the hstore I/O pair for a given typio. 9 | # It primarily needs typio for decode and encode. 
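# Wire format, as implemented below: a 4-byte pair count, then for each pair a
# 4-byte key length and the key bytes, followed by either a 4-byte value length
# and the value bytes, or b'\xFF\xFF\xFF\xFF' marking a NULL value.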
10 | def hstore_factory(oid, typio, 11 | unpack_err = "expected {0} items in hstore, but found {1}".format 12 | ): 13 | def pack_hstore(x, 14 | encode = typio.encode, 15 | len = len, 16 | ): 17 | if hasattr(x, 'items'): 18 | x = x.items() 19 | encoded = [ 20 | (encode(k), encode(v)) if v is not None else (encode(k), None) 21 | for k,v in x 22 | ] 23 | return ulong_pack(len(encoded)) + b''.join( 24 | ulong_pack(len(k)) + k + b'\xFF\xFF\xFF\xFF' 25 | if v is None else ulong_pack(len(k)) + k + ulong_pack(len(v)) + v 26 | for k,v in encoded 27 | ) 28 | 29 | def unpack_hstore(x, 30 | decode = typio.decode, 31 | split = split_sized_data, 32 | len = len 33 | ): 34 | view = memoryview(x)[4:] 35 | n = ulong_unpack(x) 36 | r = { 37 | decode(y[0]) : (decode(y[1]) if y[1] is not None else None) 38 | for y in chunk(split(view), 2) if y 39 | } 40 | if len(r) != n: 41 | raise ValueError(unpack_err(n, len(r))) 42 | return r 43 | 44 | return (pack_hstore, unpack_hstore) 45 | 46 | oid_to_io = { 47 | 'contrib_hstore' : hstore_factory, 48 | } 49 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/python/datetime.py: -------------------------------------------------------------------------------- 1 | ## 2 | # python.datetime - parts needed to use stdlib.datetime 3 | ## 4 | import datetime 5 | 6 | ## 7 | # stdlib.datetime representation of PostgreSQL 'infinity' and '-infinity'. 8 | infinity_datetime = datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) 9 | negative_infinity_datetime = datetime.datetime(datetime.MINYEAR, 1, 1, 0, 0, 0, 0) 10 | 11 | infinity_date = datetime.date(datetime.MAXYEAR, 12, 31) 12 | negative_infinity_date = datetime.date(datetime.MINYEAR, 1, 1) 13 | 14 | class FixedOffset(datetime.tzinfo): 15 | def __init__(self, offset_in_seconds, tzname = None): 16 | self._tzname = tzname 17 | self._offset = offset_in_seconds 18 | self._offset_in_mins = offset_in_seconds // 60 19 | self._td_offset = datetime.timedelta(0, self._offset_in_mins * 60) 20 | self._dst = datetime.timedelta(0) 21 | 22 | def utcoffset(self, offset_from): 23 | return self._td_offset 24 | 25 | def tzname(self, dt): 26 | return self._tzname 27 | 28 | def dst(self, arg): 29 | return self._dst 30 | 31 | def __repr__(self): 32 | return "{path}.{name}({off}{tzname})".format( 33 | path = type(self).__module__, 34 | name = type(self).__name__, 35 | off = repr(self._td_offset.days * 24 * 60 * 60 + self._td_offset.seconds), 36 | tzname = ( 37 | ", tzname = {tzname!r}".format(tzname = self._tzname) \ 38 | if self._tzname is not None else "" 39 | ) 40 | ) 41 | 42 | UTC = FixedOffset(0, tzname = 'UTC') 43 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/types/io/pg_geometry.py: -------------------------------------------------------------------------------- 1 | from .. import POINTOID, BOXOID, LSEGOID, CIRCLEOID 2 | from ..geometry import Point, Box, Lseg, Circle 3 | from ...python.functools import Composition as compose 4 | from . 
import lib 5 | 6 | oid_to_type = { 7 | POINTOID: Point, 8 | BOXOID: Box, 9 | LSEGOID: Lseg, 10 | CIRCLEOID: Circle, 11 | } 12 | 13 | # Make a pair of pairs out of a sequence of four objects 14 | def two_pair(x): 15 | return ((x[0], x[1]), (x[2], x[3])) 16 | 17 | point_pack = lib.point_pack 18 | point_unpack = compose((lib.point_unpack, Point)) 19 | 20 | def box_pack(x): 21 | return lib.box_pack((x[0][0], x[0][1], x[1][0], x[1][1])) 22 | box_unpack = compose((lib.box_unpack, two_pair, Box,)) 23 | 24 | def lseg_pack(x, pack = lib.lseg_pack): 25 | return pack((x[0][0], x[0][1], x[1][0], x[1][1])) 26 | lseg_unpack = compose((lib.lseg_unpack, two_pair, Lseg)) 27 | 28 | def circle_pack(x): 29 | return lib.circle_pack((x[0][0], x[0][1], x[1])) 30 | def circle_unpack(x, unpack = lib.circle_unpack, Circle = Circle): 31 | x = unpack(x) 32 | return Circle(((x[0], x[1]), x[2])) 33 | 34 | # Map type oids to a (pack, unpack) pair. 35 | oid_to_io = { 36 | POINTOID : (point_pack, point_unpack, Point), 37 | BOXOID : (box_pack, box_unpack, Box), 38 | LSEGOID : (lseg_pack, lseg_unpack, Lseg), 39 | CIRCLEOID : (circle_pack, circle_unpack, Circle), 40 | #PATHOID : (path_pack, path_unpack), 41 | #POLYGONOID : (polygon_pack, polygon_unpack), 42 | #LINEOID : (line_pack, line_unpack), 43 | } 44 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/types/io/builtins.py: -------------------------------------------------------------------------------- 1 | from .. import \ 2 | INT2OID, INT4OID, INT8OID, \ 3 | BOOLOID, BYTEAOID, CHAROID, \ 4 | ABSTIMEOID, FLOAT4OID, FLOAT8OID, \ 5 | TEXTOID, BPCHAROID, NAMEOID, VARCHAROID 6 | from . import lib 7 | 8 | bool_pack = {True:b'\x01', False:b'\x00'}.__getitem__ 9 | bool_unpack = {b'\x01':True, b'\x00':False}.__getitem__ 10 | 11 | int2_pack, int2_unpack = lib.short_pack, lib.short_unpack 12 | int4_pack, int4_unpack = lib.long_pack, lib.long_unpack 13 | int8_pack, int8_unpack = lib.longlong_pack, lib.longlong_unpack 14 | 15 | bytea_pack = bytes 16 | bytea_unpack = bytes 17 | char_pack = bytes 18 | char_unpack = bytes 19 | 20 | oid_to_io = { 21 | BOOLOID : (bool_pack, bool_unpack, bool), 22 | 23 | BYTEAOID : (bytea_pack, bytea_unpack, bytes), 24 | CHAROID : (char_pack, char_unpack, bytes), 25 | 26 | INT2OID : (int2_pack, int2_unpack, int), 27 | INT4OID : (int4_pack, int4_unpack, int), 28 | INT8OID : (int8_pack, int8_unpack, int), 29 | 30 | ABSTIMEOID : (lib.long_pack, lib.long_unpack, int), 31 | FLOAT4OID : (lib.float_pack, lib.float_unpack, float), 32 | FLOAT8OID : (lib.double_pack, lib.double_unpack, float), 33 | } 34 | 35 | # Python Representations of PostgreSQL Types 36 | oid_to_type = { 37 | BOOLOID: bool, 38 | 39 | VARCHAROID: str, 40 | TEXTOID: str, 41 | BPCHAROID: str, 42 | NAMEOID: str, 43 | 44 | # This is *not* bpchar, the SQL CHARACTER type. 
45 | CHAROID: bytes, 46 | BYTEAOID: bytes, 47 | 48 | INT2OID: int, 49 | INT4OID: int, 50 | INT8OID: int, 51 | 52 | FLOAT4OID: float, 53 | FLOAT8OID: float, 54 | } 55 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/test/test_bytea_codec.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .test.test_bytea_codec 3 | ## 4 | import unittest 5 | import struct 6 | from ..encodings import bytea 7 | 8 | byte = struct.Struct('B') 9 | 10 | class test_bytea_codec(unittest.TestCase): 11 | def testDecoding(self): 12 | for x in range(255): 13 | c = byte.pack(x) 14 | b = c.decode('bytea') 15 | # normalize into octal escapes 16 | if c == b'\\' and b == "\\\\": 17 | b = "\\" + oct(b'\\'[0])[2:] 18 | elif not b.startswith("\\"): 19 | b = "\\" + oct(ord(b))[2:] 20 | if int(b[1:], 8) != x: 21 | self.fail( 22 | "bytea encoding failed at %d; encoded %r to %r" %(x, c, b,) 23 | ) 24 | 25 | def testEncoding(self): 26 | self.assertEqual('bytea'.encode('bytea'), b'bytea') 27 | self.assertEqual('\\\\'.encode('bytea'), b'\\') 28 | self.assertRaises(ValueError, '\\'.encode, 'bytea') 29 | self.assertRaises(ValueError, 'foo\\'.encode, 'bytea') 30 | self.assertRaises(ValueError, r'foo\0'.encode, 'bytea') 31 | self.assertRaises(ValueError, r'foo\00'.encode, 'bytea') 32 | self.assertRaises(ValueError, r'\f'.encode, 'bytea') 33 | self.assertRaises(ValueError, r'\800'.encode, 'bytea') 34 | self.assertRaises(ValueError, r'\7f0'.encode, 'bytea') 35 | for x in range(255): 36 | seq = ('\\' + oct(x)[2:].lstrip('0').rjust(3, '0')) 37 | dx = ord(seq.encode('bytea')) 38 | if dx != x: 39 | self.fail( 40 | "generated sequence failed to map back; current is %d, " \ 41 | "rendered %r, transformed to %d" %(x, seq, dx) 42 | ) 43 | 44 | if __name__ == '__main__': 45 | unittest.main() 46 | -------------------------------------------------------------------------------- /roles/prepare_nodes/tasks/rhel.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install prerequisite packages (yum or dnf) 3 | when: 4 | - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf' 5 | ansible.builtin.package: 6 | name: 7 | - openssl 8 | - ca-certificates 9 | - gnupg 10 | - gpg 11 | - libpq 12 | state: present 13 | 14 | - name: Add Tantor repository key for YUM-based systems 15 | when: 16 | - add_nexus_repo == 'true' 17 | - ansible_pkg_mgr in ['yum', 'dnf'] 18 | ansible.builtin.get_url: 19 | url: "{{ nexus_key_url }}" 20 | dest: "/etc/pki/rpm-gpg/RPM-GPG-KEY-tantorlabs" 21 | mode: "0644" 22 | 23 | - name: Block for REDos 24 | when: add_nexus_repo == 'true' 25 | block: 26 | - name: Add Tantor repository for RedOS-7.3 27 | ansible.builtin.yum_repository: 28 | name: tantorlabs 29 | description: Tantorlabs repository for RedOS 7.3 30 | baseurl: "{{ nexus_yum_redos_7_3 }}" 31 | gpgcheck: true 32 | gpgkey: "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-tantorlabs" 33 | when: 34 | - ansible_os_family in ["RED", "Centos", "Rocky", "RedHat"] 35 | - ansible_distribution_major_version == '7' 36 | 37 | - name: Add Tantor repository for RedOS-8.0 38 | ansible.builtin.yum_repository: 39 | name: tantorlabs 40 | description: Tantorlabs repository for RedOS 8.0 41 | baseurl: "{{ nexus_yum_redos_8_0 }}" 42 | gpgcheck: true 43 | gpgkey: "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-tantorlabs" 44 | when: 45 | - ansible_os_family in ["RED", "Centos", "Rocky", "RedHat"] 46 | - ansible_distribution_major_version == '8' 47 | 
-------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/reference.rst: -------------------------------------------------------------------------------- 1 | Reference 2 | ========= 3 | 4 | :mod:`postgresql` 5 | ----------------- 6 | 7 | .. automodule:: postgresql 8 | .. autodata:: version 9 | .. autodata:: version_info 10 | .. autofunction:: open 11 | 12 | :mod:`postgresql.api` 13 | --------------------- 14 | 15 | .. automodule:: 16 | postgresql.api 17 | :members: 18 | :show-inheritance: 19 | 20 | :mod:`postgresql.sys` 21 | --------------------- 22 | 23 | .. automodule:: 24 | postgresql.sys 25 | :members: 26 | :show-inheritance: 27 | 28 | :mod:`postgresql.string` 29 | ------------------------ 30 | 31 | .. automodule:: 32 | postgresql.string 33 | :members: 34 | :show-inheritance: 35 | 36 | :mod:`postgresql.exceptions` 37 | ---------------------------- 38 | 39 | .. automodule:: 40 | postgresql.exceptions 41 | :members: 42 | :show-inheritance: 43 | 44 | :mod:`postgresql.temporal` 45 | -------------------------- 46 | 47 | .. automodule:: 48 | postgresql.temporal 49 | :members: 50 | :show-inheritance: 51 | 52 | :mod:`postgresql.installation` 53 | ------------------------------ 54 | 55 | .. automodule:: 56 | postgresql.installation 57 | :members: 58 | :show-inheritance: 59 | 60 | :mod:`postgresql.cluster` 61 | ------------------------- 62 | 63 | .. automodule:: 64 | postgresql.cluster 65 | :members: 66 | :show-inheritance: 67 | 68 | :mod:`postgresql.copyman` 69 | ------------------------- 70 | 71 | .. automodule:: 72 | postgresql.copyman 73 | :members: 74 | :show-inheritance: 75 | 76 | :mod:`postgresql.alock` 77 | ----------------------- 78 | 79 | .. automodule:: 80 | postgresql.alock 81 | :members: 82 | :show-inheritance: 83 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/sphinx/reference.rst: -------------------------------------------------------------------------------- 1 | Reference 2 | ========= 3 | 4 | :mod:`postgresql` 5 | ----------------- 6 | 7 | .. automodule:: postgresql 8 | .. autodata:: version 9 | .. autodata:: version_info 10 | .. autofunction:: open 11 | 12 | :mod:`postgresql.api` 13 | --------------------- 14 | 15 | .. automodule:: 16 | postgresql.api 17 | :members: 18 | :show-inheritance: 19 | 20 | :mod:`postgresql.sys` 21 | --------------------- 22 | 23 | .. automodule:: 24 | postgresql.sys 25 | :members: 26 | :show-inheritance: 27 | 28 | :mod:`postgresql.string` 29 | ------------------------ 30 | 31 | .. automodule:: 32 | postgresql.string 33 | :members: 34 | :show-inheritance: 35 | 36 | :mod:`postgresql.exceptions` 37 | ---------------------------- 38 | 39 | .. automodule:: 40 | postgresql.exceptions 41 | :members: 42 | :show-inheritance: 43 | 44 | :mod:`postgresql.temporal` 45 | -------------------------- 46 | 47 | .. automodule:: 48 | postgresql.temporal 49 | :members: 50 | :show-inheritance: 51 | 52 | :mod:`postgresql.installation` 53 | ------------------------------ 54 | 55 | .. automodule:: 56 | postgresql.installation 57 | :members: 58 | :show-inheritance: 59 | 60 | :mod:`postgresql.cluster` 61 | ------------------------- 62 | 63 | .. automodule:: 64 | postgresql.cluster 65 | :members: 66 | :show-inheritance: 67 | 68 | :mod:`postgresql.copyman` 69 | ------------------------- 70 | 71 | .. 
automodule:: 72 | postgresql.copyman 73 | :members: 74 | :show-inheritance: 75 | 76 | :mod:`postgresql.alock` 77 | ----------------------- 78 | 79 | .. automodule:: 80 | postgresql.alock 81 | :members: 82 | :show-inheritance: 83 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/test/test_exceptions.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .test.test_exceptions 3 | ## 4 | import unittest 5 | import postgresql.exceptions as pg_exc 6 | 7 | class test_exceptions(unittest.TestCase): 8 | def test_pg_code_lookup(self): 9 | # in 8.4, pg started using the SQL defined error code for limits 10 | # Users *will* get whatever code PG sends, but it's important 11 | # that they have some way to abstract it. many-to-one map ftw. 12 | self.assertEqual( 13 | pg_exc.ErrorLookup('22020'), pg_exc.LimitValueError 14 | ) 15 | 16 | def test_error_lookup(self): 17 | # An error code that doesn't exist yields pg_exc.Error 18 | self.assertEqual( 19 | pg_exc.ErrorLookup('00000'), pg_exc.Error 20 | ) 21 | 22 | self.assertEqual( 23 | pg_exc.ErrorLookup('XX000'), pg_exc.InternalError 24 | ) 25 | # check class fallback 26 | self.assertEqual( 27 | pg_exc.ErrorLookup('XX444'), pg_exc.InternalError 28 | ) 29 | 30 | # SEARV is a very large class, so there are many 31 | # sub-"codeclass" exceptions used to group the many 32 | # SEARV errors. Make sure looking up 42000 actually 33 | # gives the SEARVError 34 | self.assertEqual( 35 | pg_exc.ErrorLookup('42000'), pg_exc.SEARVError 36 | ) 37 | self.assertEqual( 38 | pg_exc.ErrorLookup('08P01'), pg_exc.ProtocolError 39 | ) 40 | 41 | def test_warning_lookup(self): 42 | self.assertEqual( 43 | pg_exc.WarningLookup('01000'), pg_exc.Warning 44 | ) 45 | self.assertEqual( 46 | pg_exc.WarningLookup('02000'), pg_exc.NoDataWarning 47 | ) 48 | self.assertEqual( 49 | pg_exc.WarningLookup('01P01'), pg_exc.DeprecationWarning 50 | ) 51 | self.assertEqual( 52 | pg_exc.WarningLookup('01888'), pg_exc.Warning 53 | ) 54 | 55 | if __name__ == '__main__': 56 | unittest.main() 57 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/python/functools.py: -------------------------------------------------------------------------------- 1 | ## 2 | # python.functools 3 | ## 4 | import sys 5 | from .decorlib import method 6 | 7 | def rsetattr(attr, val, ob): 8 | """ 9 | setattr() and return `ob`. Different order used to allow easier partial 10 | usage. 11 | """ 12 | setattr(ob, attr, val) 13 | return ob 14 | 15 | try: 16 | from ..port.optimized import rsetattr 17 | except ImportError: 18 | pass 19 | 20 | class Composition(tuple): 21 | def __call__(self, r): 22 | for x in self: 23 | r = x(r) 24 | return r 25 | 26 | try: 27 | from ..port.optimized import compose 28 | __call__ = method(compose) 29 | del compose 30 | except ImportError: 31 | pass 32 | 33 | try: 34 | # C implementation of the tuple processors. 35 | from ..port.optimized import process_tuple, process_chunk 36 | except ImportError: 37 | def process_tuple(procs, tup, exception_handler, len = len, tuple = tuple, cause = None): 38 | """ 39 | Call each item in `procs` with the corresponding 40 | item in `tup` returning the result as `type`. 41 | 42 | If an item in `tup` is `None`, don't process it. 43 | 44 | If a given transformation fails, call the given exception_handler. 
45 | """ 46 | i = len(procs) 47 | if len(tup) != i: 48 | raise TypeError( 49 | "inconsistent items, %d processors and %d items in row" %( 50 | i, len(tup) 51 | ) 52 | ) 53 | r = [None] * i 54 | try: 55 | for i in range(i): 56 | ob = tup[i] 57 | if ob is None: 58 | continue 59 | r[i] = procs[i](ob) 60 | except Exception: 61 | cause = sys.exc_info()[1] 62 | 63 | if cause is not None: 64 | exception_handler(cause, procs, tup, i) 65 | raise RuntimeError("process_tuple exception handler failed to raise") 66 | return tuple(r) 67 | 68 | def process_chunk(procs, tupc, fail, process_tuple = process_tuple): 69 | return [process_tuple(procs, x, fail) for x in tupc] 70 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/encodings/aliases.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .encodings.aliases 3 | ## 4 | """ 5 | Module for mapping PostgreSQL encoding names to Python encoding names. 6 | 7 | These are **not** installed in Python's aliases. Rather, `get_python_name` 8 | should be used directly. 9 | 10 | URLs of interest: 11 | * http://docs.python.org/library/codecs.html 12 | * http://git.postgresql.org/gitweb?p=postgresql.git;a=blob;f=src/backend/utils/mb/encnames.c 13 | """ 14 | 15 | ## 16 | #: Dictionary of Postgres encoding names to Python encoding names. 17 | #: This mapping only contains those encoding names that do not intersect. 18 | postgres_to_python = { 19 | 'unicode' : 'utf_8', 20 | 'sql_ascii' : 'ascii', 21 | 'euc_jp' : 'eucjp', 22 | 'euc_cn' : 'euccn', 23 | 'euc_kr' : 'euckr', 24 | 'shift_jis_2004' : 'euc_jis_2004', 25 | 'sjis' : 'shift_jis', 26 | 'alt' : 'cp866', # IBM866 27 | 'abc' : 'cp1258', 28 | 'vscii' : 'cp1258', 29 | 'koi8r' : 'koi8_r', 30 | 'koi8u' : 'koi8_u', 31 | 'tcvn' : 'cp1258', 32 | 'tcvn5712' : 'cp1258', 33 | # 'euc_tw' : None, # N/A 34 | # 'mule_internal' : None, # N/A 35 | } 36 | 37 | def get_python_name(encname): 38 | """ 39 | Lookup the name in the `postgres_to_python` dictionary. If no match is 40 | found, check for a 'win' or 'windows-' name and convert that to a 'cp###' 41 | name. 42 | 43 | Returns `None` if there is no alias for `encname`. 44 | 45 | The win[0-9]+ and windows-[0-9]+ entries are handled functionally. 
46 | """ 47 | # check the dictionary first 48 | localname = postgres_to_python.get(encname) 49 | if localname is not None: 50 | return localname 51 | # no explicit mapping, check for functional transformation 52 | if encname.startswith('win'): 53 | # handle win#### and windows-#### 54 | # remove the trailing CP number 55 | bare = encname.rstrip('0123456789') 56 | if bare.strip('_-') in ('win', 'windows'): 57 | return 'cp' + encname[len(bare):] 58 | return encname 59 | -------------------------------------------------------------------------------- /roles/etcd/tasks/cluster_discovery.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check existing etcd cluster members 3 | ansible.builtin.shell: | 4 | ETCDCTL_API=3 {{ etcd_bin_path }} \ 5 | --endpoints=https://{{ hostvars[item]['ansible_default_ipv4']['address'] }}:{{ etcd_port_client }} \ 6 | --cacert={{ etcd_conf_dir }}/ca.pem \ 7 | --cert={{ etcd_conf_dir }}/{{ hostvars[item]['unified_hostname'] }}.pem \ 8 | --key={{ etcd_conf_dir }}/{{ hostvars[item]['unified_hostname'] }}-key.pem \ 9 | member list --write-out=json 10 | register: etcd_probe_results 11 | failed_when: false 12 | changed_when: false 13 | delegate_to: "{{ item }}" 14 | loop: "{{ groups['inv_etcd'] }}" 15 | 16 | - name: Parse cluster state and set global facts 17 | ansible.builtin.set_fact: 18 | etcd_cluster_exists: "{{ etcd_probe_results.results | selectattr('rc', 'equalto', 0) | list | length > 0 }}" 19 | etcd_leader: "{{ (etcd_probe_results.results | selectattr('rc', 'equalto', 0) | map(attribute='item') | first) | default(groups['inv_etcd'] | first) }}" 20 | etcd_cluster_info: "{{ (etcd_probe_results.results | selectattr('rc', 'equalto', 0) | map(attribute='stdout') | map('from_json') | first) | default({}) }}" 21 | 22 | - name: Extract member names for easy reference 23 | ansible.builtin.set_fact: 24 | etcd_member_names: >- 25 | {{ 26 | etcd_cluster_info.members | default([]) | 27 | selectattr('name', 'defined') | 28 | selectattr('name', 'ne', '') | 29 | map(attribute='name') | list 30 | }} 31 | when: etcd_cluster_exists 32 | 33 | - name: Debug cluster state 34 | ansible.builtin.debug: 35 | msg: 36 | - "Cluster exists: {{ etcd_cluster_exists | default(false) }}" 37 | - "Leader/Admin node: {{ etcd_leader | default('none') }}" 38 | - "Current members: {{ etcd_member_names | default([]) }}" 39 | - "This node ({{ unified_hostname }}) in cluster: {{ unified_hostname in (etcd_member_names | default([])) }}" 40 | -------------------------------------------------------------------------------- /inventory/group_vars/etcd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | etcd_package_version: "" 3 | 4 | etcd_user: etcd 5 | etcd_group: etcd 6 | etcd_data_dir: /opt/tantor/var/lib/etcd 7 | 8 | etcd_master_group_name: inv_etcd 9 | 10 | etcd_secure: True 11 | etcd_bin_path: "/opt/tantor/usr/bin/etcdctl" 12 | etcd_conf_dir: "/opt/tantor/var/lib/etcd/pg-cluster.pki" 13 | etcd_pki_dir: "{{playbook_dir}}/pki-dir/{{ ansible_inventory_sources[0] | basename | regex_replace('\\.(ini|yml|yaml)$', '') }}" 14 | etcd_pki_key_suffix: -key.pem 15 | etcd_pki_cert_suffix: .pem 16 | 17 | etcd_use_ips: True 18 | etcd_iface_public: '{{ etcd_network_iface | default("all") }}' 19 | etcd_iface_cluster: '{{ etcd_network_iface | default("default") }}' 20 | etcd_port_client: 2379 21 | etcd_port_peer: 2380 22 | 23 | etcd_cluster_name: pg-cluster 24 | etcd_initial_cluster_token: 
d8bf8cc6-5158-11e6-8f13-3b32f4935bde 25 | 26 | etcd_init_system: systemd #** 27 | etcd_launch: True #** 28 | 29 | etcd_scheme: "{% if etcd_secure %}https{% else %}http{% endif %}://" 30 | etcd_cluster: "{% for host in groups[etcd_master_group_name] %}{{ hostvars[host]['unified_hostname'] }}={{ etcd_scheme }}{{ hostvars[host]['ansible_default_ipv4'].address }}:{{ etcd_port_peer }}{% if not loop.last %},{% endif %}{% endfor %}" 31 | 32 | etcd_cluster_data_dir: '{{ etcd_data_dir }}/{{ etcd_cluster_name }}.etcd' 33 | etcd_cluster_pki_dir: '{{ etcd_data_dir }}/{{ etcd_cluster_name }}.pki' 34 | 35 | etcd_pki_key_file: '{{ inventory_hostname }}{{ etcd_pki_key_suffix }}' 36 | etcd_pki_key_src: '{{ etcd_pki_dir }}/{{ etcd_pki_key_file }}' 37 | etcd_pki_key_dest: '{{ etcd_cluster_pki_dir }}/{{ etcd_pki_key_file }}' 38 | 39 | etcd_pki_cert_file: '{{ inventory_hostname }}{{ etcd_pki_cert_suffix }}' 40 | etcd_pki_cert_src: '{{ etcd_pki_dir }}/{{ etcd_pki_cert_file }}' 41 | etcd_pki_cert_dest: '{{ etcd_cluster_pki_dir }}/{{ etcd_pki_cert_file }}' 42 | 43 | etcd_pki_ca_file: 'ca{{ etcd_pki_cert_suffix }}' 44 | etcd_pki_ca_cert_src: '{{ etcd_pki_dir }}/{{ etcd_pki_ca_file }}' 45 | etcd_pki_ca_cert_dest: '{{ etcd_cluster_pki_dir }}/{{ etcd_pki_ca_file }}' 46 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/test/perf_query_io.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | ## 3 | # .test.perf_query_io 4 | ## 5 | # Statement I/O: Mass insert and select performance 6 | ## 7 | import os 8 | import time 9 | import sys 10 | import decimal 11 | import datetime 12 | 13 | def insertSamples(count, insert_records): 14 | recs = [ 15 | ( 16 | -3, 123, 0xfffffea023, 17 | decimal.Decimal("90900023123.40031"), 18 | decimal.Decimal("432.40031"), 19 | 'some_óäæ_thing', 'varying', 'æ', 20 | datetime.datetime(1982, 5, 18, 12, 0, 0, 100232) 21 | ) 22 | for x in range(count) 23 | ] 24 | gen = time.time() 25 | insert_records.load_rows(recs) 26 | fin = time.time() 27 | xacttime = fin - gen 28 | ats = count / xacttime 29 | sys.stderr.write( 30 | "INSERT Summary,\n " \ 31 | "inserted tuples: %d\n " \ 32 | "total time: %f\n " \ 33 | "average tuples per second: %f\n\n" %( 34 | count, xacttime, ats, 35 | ) 36 | ) 37 | 38 | def timeTupleRead(ps): 39 | loops = 0 40 | tuples = 0 41 | genesis = time.time() 42 | for x in ps.chunks(): 43 | loops += 1 44 | tuples += len(x) 45 | finalis = time.time() 46 | looptime = finalis - genesis 47 | ats = tuples / looptime 48 | sys.stderr.write( 49 | "SELECT Summary,\n " \ 50 | "looped: {looped}\n " \ 51 | "looptime: {looptime}\n " \ 52 | "tuples: {ntuples}\n " \ 53 | "average tuples per second: {tps}\n ".format( 54 | looped = loops, 55 | looptime = looptime, 56 | ntuples = tuples, 57 | tps = ats 58 | ) 59 | ) 60 | 61 | def main(count): 62 | sqlexec('CREATE TEMP TABLE samples ' 63 | '(i2 int2, i4 int4, i8 int8, n numeric, n2 numeric, t text, v varchar, c char(2), ts timestamp)') 64 | insert_records = prepare( 65 | "INSERT INTO samples VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)" 66 | ) 67 | select_records = prepare("SELECT * FROM samples") 68 | try: 69 | insertSamples(count, insert_records) 70 | timeTupleRead(select_records) 71 | finally: 72 | sqlexec("DROP TABLE samples") 73 | 74 | def command(args): 75 | main(int((args + [25000])[1])) 76 | 77 | if __name__ == '__main__': 78 | command(sys.argv) 79 | 
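The script above references prepare and sqlexec without defining them; they are expected as globals supplied by the executing environment (such as the bundled pg_python console). A minimal, hypothetical harness that provides them from an ordinary connection could look like the following — the import path and connection URL are placeholders, not confirmed by this repository:

    import postgresql
    from postgresql.test import perf_query_io  # hypothetical import of the script above

    db = postgresql.open('pq://user:password@host/name_of_database')  # placeholder DSN
    # Inject the globals the script expects, then run a small sample load.
    perf_query_io.prepare = db.prepare
    perf_query_io.sqlexec = db.execute
    perf_query_io.main(1000)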
-------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/index.rst: -------------------------------------------------------------------------------- 1 | py-postgresql 2 | ============= 3 | 4 | py-postgresql is a project dedicated to improving the Python client interfaces to PostgreSQL. 5 | 6 | At its core, py-postgresql provides a PG-API, `postgresql.api`, and 7 | DB-API 2.0 interface for using a PostgreSQL database. 8 | 9 | Contents 10 | -------- 11 | 12 | .. toctree:: 13 | :maxdepth: 2 14 | 15 | admin 16 | driver 17 | copyman 18 | notifyman 19 | alock 20 | cluster 21 | lib 22 | clientparameters 23 | gotchas 24 | 25 | Reference 26 | --------- 27 | 28 | .. toctree:: 29 | :maxdepth: 2 30 | 31 | bin 32 | reference 33 | 34 | Changes 35 | ------- 36 | 37 | .. toctree:: 38 | :maxdepth: 1 39 | 40 | changes-v1.2 41 | changes-v1.1 42 | changes-v1.0 43 | 44 | Sample Code 45 | ----------- 46 | 47 | Using `postgresql.driver`:: 48 | 49 | >>> import postgresql 50 | >>> db = postgresql.open("pq://user:password@host/name_of_database") 51 | >>> db.execute("CREATE TABLE emp (emp_name text PRIMARY KEY, emp_salary numeric)") 52 | >>> 53 | >>> # Create the statements. 54 | >>> make_emp = db.prepare("INSERT INTO emp VALUES ($1, $2)") 55 | >>> raise_emp = db.prepare("UPDATE emp SET emp_salary = emp_salary + $2 WHERE emp_name = $1") 56 | >>> get_emp_with_salary_lt = db.prepare("SELECT emp_name FROM emp WHERE emp_salary < $1") 57 | >>> 58 | >>> # Create some employees, but do it in a transaction--all or nothing. 59 | >>> with db.xact(): 60 | ... make_emp("John Doe", "150,000") 61 | ... make_emp("Jane Doe", "150,000") 62 | ... make_emp("Andrew Doe", "55,000") 63 | ... make_emp("Susan Doe", "60,000") 64 | >>> 65 | >>> # Give some raises 66 | >>> with db.xact(): 67 | ... for row in get_emp_with_salary_lt("125,000"): 68 | ... print(row["emp_name"]) 69 | ... raise_emp(row["emp_name"], "10,000") 70 | 71 | Of course, if DB-API 2.0 is desired, the module is located at 72 | `postgresql.driver.dbapi20`. DB-API extends PG-API, so the features 73 | illustrated above are available on DB-API connections. 74 | 75 | See :ref:`db_interface` for more information. 76 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/types/namedtuple.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .types.namedtuple - return rows as namedtuples 3 | ## 4 | """ 5 | Factories for namedtuple row representation. 6 | """ 7 | from collections import namedtuple 8 | 9 | #: Global namedtuple type cache. 10 | cache = {} 11 | 12 | # Build and cache the namedtuple's produced. 13 | def _factory(colnames : [str], namedtuple = namedtuple) -> tuple: 14 | global cache 15 | # Provide some normalization. 16 | # Anything beyond this can just get renamed. 17 | colnames = tuple([ 18 | x.replace(' ', '_') for x in colnames 19 | ]) 20 | try: 21 | return cache[colnames] 22 | except KeyError: 23 | NT = namedtuple('row', colnames, rename = True) 24 | cache[colnames] = NT 25 | return NT 26 | 27 | def NamedTupleFactory(attribute_map, composite_relid = None): 28 | """ 29 | Alternative db.typio.RowFactory for producing namedtuple's instead of 30 | postgresql.types.Row() instances. 31 | 32 | To install:: 33 | 34 | >>> from postgresql.types.namedtuple import NamedTupleFactory 35 | >>> import postgresql 36 | >>> db = postgresql.open(...) 
37 | >>> db.typio.RowTypeFactory(NamedTupleFactory) 38 | 39 | And **all** Rows produced by that connection will be namedtuple()'s. 40 | This includes composites. 41 | """ 42 | colnames = list(attribute_map.items()) 43 | colnames.sort(key = lambda x: x[1]) 44 | return lambda y: _factory((x[0] for x in colnames))(*y) 45 | 46 | from itertools import chain, starmap 47 | 48 | def namedtuples(stmt, from_iter = chain.from_iterable, map = starmap): 49 | """ 50 | Alternative to the .rows() execution method. 51 | 52 | Use:: 53 | 54 | >>> from postgresql.types.namedtuple import namedtuples 55 | >>> ps = namedtuples(db.prepare(...)) 56 | >>> for nt in ps(...): 57 | ... nt.a_column_name 58 | 59 | This effectively selects the execution method to be used with the statement. 60 | """ 61 | NT = _factory(stmt.column_names) 62 | # build the execution "method" 63 | chunks = stmt.chunks 64 | def rows_as_namedtuples(*args, **kw): 65 | return map(NT, from_iter(chunks(*args, **kw))) # starmap 66 | return rows_as_namedtuples 67 | 68 | del chain, starmap 69 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/bin/pg_dotconf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import sys 3 | import os 4 | from optparse import OptionParser 5 | from .. import configfile 6 | from .. import __version__ 7 | 8 | __all__ = ['command'] 9 | 10 | def command(args): 11 | """ 12 | pg_dotconf script entry point. 13 | """ 14 | op = OptionParser( 15 | "%prog [--stdout] [-f settings] postgresql.conf ([param=val]|[param])*", 16 | version = __version__ 17 | ) 18 | op.add_option( 19 | '-f', '--file', 20 | dest = 'settings', 21 | help = 'A file of settings to *apply* to the given "postgresql.conf"', 22 | default = [], 23 | action = 'append', 24 | ) 25 | op.add_option( 26 | '--stdout', 27 | dest = 'stdout', 28 | help = 'Redirect the product to standard output instead of writing back to the "postgresql.conf" file', 29 | action = 'store_true', 30 | default = False 31 | ) 32 | co, ca = op.parse_args(args[1:]) 33 | if not ca: 34 | return 0 35 | 36 | settings = {} 37 | for sfp in co.settings: 38 | with open(sfp) as sf: 39 | for line in sf: 40 | pl = configfile.parse_line(line) 41 | if pl is not None: 42 | if comment not in line[pl[0].start]: 43 | settings[line[pl[0]]] = unquote(line[pl[1]]) 44 | 45 | prev = None 46 | for p in ca[1:]: 47 | if '=' not in p: 48 | k = p 49 | v = None 50 | else: 51 | k, v = p.split('=', 1) 52 | k = k.strip() 53 | if not k: 54 | sys.stderr.write("ERROR: invalid setting, %r after %r%s" %( 55 | p, prev, os.linesep 56 | )) 57 | sys.stderr.write( 58 | "HINT: Settings must take the form 'setting=value' " \ 59 | "or 'setting_name_to_comment'. Settings must also be received " \ 60 | "as a single argument." 
+ os.linesep 61 | ) 62 | sys.exit(1) 63 | prev = p 64 | settings[k] = v 65 | 66 | fp = ca[0] 67 | with open(fp, 'r') as fr: 68 | lines = configfile.alter_config(settings, fr) 69 | 70 | if co.stdout or fp == '/dev/stdin': 71 | for l in lines: 72 | sys.stdout.write(l) 73 | else: 74 | with open(fp, 'w') as fw: 75 | for l in lines: 76 | fw.write(l) 77 | return 0 78 | 79 | if __name__ == '__main__': 80 | sys.exit(command(sys.argv)) 81 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/sphinx/index.rst: -------------------------------------------------------------------------------- 1 | py-postgresql 2 | ============= 3 | 4 | py-postgresql is a project dedicated to improving the Python client interfaces to PostgreSQL. 5 | 6 | At its core, py-postgresql provides a PG-API, `postgresql.api`, and 7 | DB-API 2.0 interface for using a PostgreSQL database. 8 | 9 | Contents 10 | -------- 11 | 12 | .. toctree:: 13 | :maxdepth: 2 14 | 15 | admin 16 | driver 17 | copyman 18 | notifyman 19 | alock 20 | cluster 21 | lib 22 | clientparameters 23 | gotchas 24 | 25 | Reference 26 | --------- 27 | 28 | .. toctree:: 29 | :maxdepth: 2 30 | 31 | bin 32 | reference 33 | 34 | Changes 35 | ------- 36 | 37 | .. toctree:: 38 | :maxdepth: 1 39 | 40 | changes-v1.2 41 | changes-v1.1 42 | changes-v1.0 43 | 44 | Sample Code 45 | ----------- 46 | 47 | Using `postgresql.driver`:: 48 | 49 | >>> import postgresql 50 | >>> db = postgresql.open("pq://user:password@host/name_of_database") 51 | >>> db.execute("CREATE TABLE emp (emp_name text PRIMARY KEY, emp_salary numeric)") 52 | >>> 53 | >>> # Create the statements. 54 | >>> make_emp = db.prepare("INSERT INTO emp VALUES ($1, $2)") 55 | >>> raise_emp = db.prepare("UPDATE emp SET emp_salary = emp_salary + $2 WHERE emp_name = $1") 56 | >>> get_emp_with_salary_lt = db.prepare("SELECT emp_name FROM emp WHERE emp_salary < $1") 57 | >>> 58 | >>> # Create some employees, but do it in a transaction--all or nothing. 59 | >>> with db.xact(): 60 | ... make_emp("John Doe", "150,000") 61 | ... make_emp("Jane Doe", "150,000") 62 | ... make_emp("Andrew Doe", "55,000") 63 | ... make_emp("Susan Doe", "60,000") 64 | >>> 65 | >>> # Give some raises 66 | >>> with db.xact(): 67 | ... for row in get_emp_with_salary_lt("125,000"): 68 | ... print(row["emp_name"]) 69 | ... raise_emp(row["emp_name"], "10,000") 70 | 71 | Of course, if DB-API 2.0 is desired, the module is located at 72 | `postgresql.driver.dbapi20`. DB-API extends PG-API, so the features 73 | illustrated above are available on DB-API connections. 74 | 75 | See :ref:`db_interface` for more information. 
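A corresponding sketch against the DB-API 2.0 layer, using only the PEP 249
basics (``connect``, ``cursor``, ``execute``, ``fetchall``); the connection
keywords shown are assumptions rather than parameters confirmed by this
document::

    from postgresql.driver import dbapi20

    # Connection keywords are illustrative; consult the driver chapter.
    con = dbapi20.connect(user='user', host='host', database='name_of_database')
    cur = con.cursor()
    cur.execute("SELECT emp_name FROM emp")
    for (name,) in cur.fetchall():
        print(name)
    con.close()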
76 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/encodings/bytea.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .encodings.bytea 3 | ## 4 | 'PostgreSQL bytea encoding and decoding functions' 5 | import codecs 6 | import struct 7 | import sys 8 | 9 | ord_to_seq = { 10 | i : \ 11 | "\\" + oct(i)[2:].rjust(3, '0') \ 12 | if not (32 < i < 126) else r'\\' \ 13 | if i == 92 else chr(i) 14 | for i in range(256) 15 | } 16 | 17 | if sys.version_info[:2] >= (3, 3): 18 | # Subscripting memory in 3.3 returns byte as an integer, not as a bytestring 19 | def decode(data): 20 | return ''.join(map(ord_to_seq.__getitem__, (data[x] for x in range(len(data))))) 21 | else: 22 | def decode(data): 23 | return ''.join(map(ord_to_seq.__getitem__, (data[x][0] for x in range(len(data))))) 24 | 25 | def encode(data): 26 | diter = ((data[i] for i in range(len(data)))) 27 | output = [] 28 | next = diter.__next__ 29 | for x in diter: 30 | if x == "\\": 31 | try: 32 | y = next() 33 | except StopIteration: 34 | raise ValueError("incomplete backslash sequence") 35 | if y == "\\": 36 | # It's a backslash, so let x(\) be appended. 37 | x = ord(x) 38 | elif y.isdigit(): 39 | try: 40 | os = ''.join((y, next(), next())) 41 | except StopIteration: 42 | # requires three digits 43 | raise ValueError("incomplete backslash sequence") 44 | try: 45 | x = int(os, base = 8) 46 | except ValueError: 47 | raise ValueError("invalid bytea octal sequence '%s'" %(os,)) 48 | else: 49 | raise ValueError("invalid backslash follow '%s'" %(y,)) 50 | else: 51 | x = ord(x) 52 | output.append(x) 53 | return struct.pack(str(len(output)) + 'B', *output) 54 | 55 | class Codec(codecs.Codec): 56 | 'bytea codec' 57 | def encode(data, errors = 'strict'): 58 | return (encode(data), len(data)) 59 | encode = staticmethod(encode) 60 | 61 | def decode(data, errors = 'strict'): 62 | return (decode(data), len(data)) 63 | decode = staticmethod(decode) 64 | 65 | class StreamWriter(Codec, codecs.StreamWriter): pass 66 | class StreamReader(Codec, codecs.StreamReader): pass 67 | 68 | bytea_codec = (Codec.encode, Codec.decode, StreamReader, StreamWriter) 69 | codecs.register(lambda x: x == 'bytea' and bytea_codec or None) 70 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/pgpassfile.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .pgpassfile - parse and lookup passwords in a pgpassfile 3 | ## 4 | 'Parse pgpass files and subsequently lookup a password.' 5 | import os.path 6 | 7 | def split(line, len = len): 8 | line = line.strip() 9 | if not line: 10 | return None 11 | r = [] 12 | continuation = False 13 | for x in line.split(':'): 14 | if continuation: 15 | # The colon was preceded by a backslash, it's part 16 | # of the last field. Substitute the trailing backslash 17 | # with the colon and append the next value. 18 | r[-1] = r[-1][:-1] + ':' + x.replace('\\\\', '\\') 19 | continuation = False 20 | else: 21 | # Even number of backslashes preceded the split. 22 | # Normal field. 23 | r.append(x.replace('\\\\', '\\')) 24 | # Determine if the next field is a continuation of this one. 25 | if (len(x) - len(x.rstrip('\\'))) % 2 == 1: 26 | continuation = True 27 | if len(r) != 5: 28 | # Too few or too many fields. 
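# (A valid pgpass line carries exactly five colon-separated fields:
# hostname:port:database:username:password.)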
29 | return None 30 | return r 31 | 32 | def parse(data): 33 | 'produce a list of [(word, (host,port,dbname,user))] from a pgpass file object' 34 | return [ 35 | (x[-1], x[0:4]) for x in [split(line) for line in data] if x 36 | ] 37 | 38 | def lookup_password(words, uhpd): 39 | """ 40 | lookup_password(words, (user, host, port, database)) -> password 41 | 42 | Where 'words' is the output from pgpass.parse() 43 | """ 44 | user, host, port, database = uhpd 45 | for word, (w_host, w_port, w_database, w_user) in words: 46 | if (w_user == '*' or w_user == user) and \ 47 | (w_host == '*' or w_host == host) and \ 48 | (w_port == '*' or w_port == port) and \ 49 | (w_database == '*' or w_database == database): 50 | return word 51 | 52 | def lookup_password_file(path, t): 53 | 'like lookup_password, but takes a file path' 54 | with open(path) as f: 55 | return lookup_password(parse(f), t) 56 | 57 | def lookup_pgpass(d, passfile, exists = os.path.exists): 58 | # If the password file exists, lookup the password 59 | # using the config's criteria. 60 | if exists(passfile): 61 | return lookup_password_file(passfile, ( 62 | str(d['user']), str(d['host']), str(d['port']), 63 | str(d.get('database', d['user'])) 64 | )) 65 | -------------------------------------------------------------------------------- /roles/patroni/templates/walg.json.j2: -------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True, trim_blocks: True 2 | 3 | {% if patroni_boostrap_method_walg_storage == 's3' %} 4 | { 5 | {% if postgresql_vendor == 'tantordb' %} 6 | "PGDATA": "{{ patroni_pg_data_dir }}", 7 | {% elif postgresql_vendor == 'classic' and ( ansible_facts['os_family'] == 'Debian' or (ansible_facts['os_family'] == "Astra Linux") or (ansible_facts['os_family'] == "Astra Linux (Orel)")) %} 8 | "PGDATA": "/var/lib/postgresql/{{ major_version }}/{{ inventory_hostname }}/main/", 9 | {% elif postgresql_vendor == 'classic' and (ansible_facts['os_family'] == "Altlinux") %} 10 | "PGDATA": "/var/lib/pgsql/data/{{ inventory_hostname }}", 11 | {% elif postgresql_vendor == 'classic' %} 12 | "PGDATA": "/var/lib/pgsql/{{ major_version }}/data/{{ inventory_hostname }}", 13 | {% endif %} 14 | "AWS_ACCESS_KEY_ID":"{{ patroni_boostrap_method_walg_s3_username }}", 15 | "AWS_SECRET_ACCESS_KEY": "{{ patroni_boostrap_method_walg_s3_password }}", 16 | "WALE_S3_PREFIX":"s3://{{ patroni_boostrap_method_walg_s3_bucket }}", 17 | "AWS_ENDPOINT":"https://storage.yandexcloud.net", 18 | "AWS_S3_FORCE_PATH_STYLE":"True", 19 | "AWS_REGION":"{{ patroni_boostrap_method_walg_s3_region }}" 20 | } 21 | {% elif patroni_boostrap_method_walg_storage == 'nfs' %} 22 | { 23 | "WALG_FILE_PREFIX": "{{ patroni_boostrap_method_nfs_storage }}", 24 | "WALG_COMPRESSION_METHOD": "{{ patroni_boostrap_method_nfs_compression }}", 25 | "WALG_DELTA_MAX_STEPS": "{{ patroni_boostrap_method_nfs_delta_max_steps }}", 26 | "PGHOST": "/var/run/postgresql/.s.PGSQL.{{ patroni_pg_port }}", 27 | {% if postgresql_vendor == 'tantordb' %} 28 | "PGDATA": "{{ patroni_pg_data_dir }}" 29 | {% elif postgresql_vendor == 'classic' and ( ansible_facts['os_family'] == 'Debian' or (ansible_facts['os_family'] == "Astra Linux") or (ansible_facts['os_family'] == "Astra Linux (Orel)")) %} 30 | "PGDATA": "/var/lib/postgresql/{{ major_version }}/{{ inventory_hostname }}/main/" 31 | {% elif postgresql_vendor == 'classic' and (ansible_facts['os_family'] == "Altlinux") %} 32 | "PGDATA": "/var/lib/pgsql/data/{{ inventory_hostname }}" 33 | {% elif postgresql_vendor 
== 'classic' %} 34 | "PGDATA": "/var/lib/pgsql/{{ major_version }}/data/{{ inventory_hostname }}" 35 | {% endif %} 36 | } 37 | {% endif %} -------------------------------------------------------------------------------- /roles/etcd/templates/etcd.conf.j2: -------------------------------------------------------------------------------- 1 | # [member] 2 | ETCD_NAME="{{unified_hostname}}" 3 | ETCD_DATA_DIR="{{etcd_cluster_data_dir}}" 4 | #ETCD_SNAPSHOT_COUNTER="10000" 5 | #ETCD_HEARTBEAT_INTERVAL="100" 6 | #ETCD_ELECTION_TIMEOUT="1000" 7 | ##{% if etcd_iface_public == 'all' %} 8 | ##{% set client_endpoints = [ etcd_listen_public ] %} 9 | ##{% else %} 10 | ##{% set client_endpoints = [ etcd_listen_public, 'localhost' ] %} 11 | ##{% endif %} 12 | ##ETCD_LISTEN_CLIENT_URLS="{{ client_endpoints | map('regex_replace', '^(.+)$', etcd_scheme ~ '\\1' ~ ':' ~ etcd_port_client) | join(',') }}" 13 | ETCD_LISTEN_CLIENT_URLS="{{etcd_scheme}}{{etcd_listen_cluster}}:{{etcd_port_client}},{{etcd_scheme}}localhost:{{etcd_port_client}}" 14 | #ETCD_MAX_SNAPSHOTS="5" 15 | #ETCD_MAX_WALS="5" 16 | #ETCD_CORS="" 17 | # 18 | # [cluster] 19 | {% if inventory_hostname in groups[etcd_master_group_name] %} 20 | ETCD_LISTEN_PEER_URLS="{{etcd_scheme}}{{etcd_listen_cluster}}:{{etcd_port_peer}}" 21 | ETCD_ADVERTISE_CLIENT_URLS="{{etcd_scheme}}{{etcd_address_public}}:{{etcd_port_client}}" 22 | ETCD_INITIAL_ADVERTISE_PEER_URLS="{{etcd_scheme}}{{etcd_address_cluster}}:{{etcd_port_peer}}" 23 | {% endif %} 24 | ETCD_INITIAL_CLUSTER="{{etcd_cluster}}" 25 | ETCD_INITIAL_CLUSTER_STATE="{{ etcd_initial_cluster_state }}" 26 | {% if etcd_use_initial_token %} 27 | ETCD_INITIAL_CLUSTER_TOKEN="{{etcd_initial_cluster_token}}" 28 | {% endif %} 29 | # if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..." 
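# For illustration only (hypothetical hosts and the stock 2379/2380 ports),
# a member of a three-node secure cluster would render roughly as:
#   ETCD_NAME="pg-node-1"
#   ETCD_LISTEN_CLIENT_URLS="https://10.0.0.1:2379,https://localhost:2379"
#   ETCD_INITIAL_CLUSTER="pg-node-1=https://10.0.0.1:2380,pg-node-2=https://10.0.0.2:2380,pg-node-3=https://10.0.0.3:2380"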
30 | #ETCD_DISCOVERY="" 31 | #ETCD_DISCOVERY_SRV="" 32 | #ETCD_DISCOVERY_FALLBACK="proxy" 33 | #ETCD_DISCOVERY_PROXY="" 34 | # 35 | #[proxy] 36 | {% if inventory_hostname not in groups[etcd_master_group_name] %} 37 | ETCD_PROXY="on" 38 | {% endif %} 39 | # 40 | #[security] 41 | {% if etcd_secure %} 42 | ETCD_CERT_FILE="{{etcd_pki_cert_dest}}" 43 | ETCD_KEY_FILE="{{etcd_pki_key_dest}}" 44 | ETCD_CLIENT_CERT_AUTH="true" 45 | ETCD_TRUSTED_CA_FILE="{{etcd_pki_ca_cert_dest}}" 46 | ETCD_PEER_CERT_FILE="{{etcd_pki_cert_dest}}" 47 | ETCD_PEER_KEY_FILE="{{etcd_pki_key_dest}}" 48 | ETCD_PEER_CLIENT_CERT_AUTH="true" 49 | ETCD_PEER_TRUSTED_CA_FILE="{{etcd_pki_ca_cert_dest}}" 50 | {% endif %} 51 | # 52 | #[logging] 53 | #ETCD_DEBUG="true" 54 | # examples for -log-package-levels etcdserver=WARNING,security=DEBUG 55 | #ETCD_LOG_PACKAGE_LEVELS="etcdserver=DEBUG" 56 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/test/perf_copy_io.py: -------------------------------------------------------------------------------- 1 | ## 2 | # test.perf_copy_io - Copy I/O: To and From performance 3 | ## 4 | import os, sys, random, time 5 | 6 | if __name__ == '__main__': 7 | with open('/usr/share/dict/words', mode='brU') as wordfile: 8 | Words = wordfile.readlines() 9 | else: 10 | Words = [b'/usr/share/dict/words', b'is', b'read', b'in', b'__main__'] 11 | wordcount = len(Words) 12 | random.seed() 13 | 14 | def getWord(): 15 | "extract a random word from ``Words``" 16 | return Words[random.randrange(0, wordcount)].strip() 17 | 18 | def testSpeed(tuples = 50000 * 3): 19 | sqlexec("CREATE TEMP TABLE _copy " 20 | "(i int, t text, mt text, ts text, ty text, tx text);") 21 | try: 22 | Q = prepare("COPY _copy FROM STDIN") 23 | size = 0 24 | def incsize(data): 25 | 'count of bytes' 26 | nonlocal size 27 | size += len(data) 28 | return data 29 | sys.stderr.write("preparing data(%d tuples)...\n" %(tuples,)) 30 | 31 | # Use an LC to avoid the Python overhead involved with a GE 32 | data = [incsize(b'\t'.join(( 33 | str(x).encode('ascii'), getWord(), getWord(), 34 | getWord(), getWord(), getWord() 35 | )))+b'\n' for x in range(tuples)] 36 | 37 | sys.stderr.write("starting copy...\n") 38 | start = time.time() 39 | copied_in = Q.load_rows(data) 40 | duration = time.time() - start 41 | sys.stderr.write( 42 | "COPY FROM STDIN Summary,\n " \ 43 | "copied tuples: %d\n " \ 44 | "copied bytes: %d\n " \ 45 | "duration: %f\n " \ 46 | "average tuple size(bytes): %f\n " \ 47 | "average KB per second: %f\n " \ 48 | "average tuples per second: %f\n" %( 49 | tuples, size, duration, 50 | size / tuples, 51 | size / 1024 / duration, 52 | tuples / duration, 53 | ) 54 | ) 55 | Q = prepare("COPY _copy TO STDOUT") 56 | start = time.time() 57 | c = 0 58 | for rows in Q.chunks(): 59 | c += len(rows) 60 | duration = time.time() - start 61 | sys.stderr.write( 62 | "COPY TO STDOUT Summary,\n " \ 63 | "copied tuples: %d\n " \ 64 | "duration: %f\n " \ 65 | "average KB per second: %f\n " \ 66 | "average tuples per second: %f\n " %( 67 | c, duration, 68 | size / 1024 / duration, 69 | tuples / duration, 70 | ) 71 | ) 72 | finally: 73 | sqlexec("DROP TABLE _copy") 74 | 75 | if __name__ == '__main__': 76 | testSpeed() 77 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/test/test_pgpassfile.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .test.test_pgpassfile 3 | ## 4 | 
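# pgpass fields are host:port:database:user:password; a backslash escapes
# ':' and '\' inside a field, which the "difficult" samples below exercise.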
import unittest 5 | from .. import pgpassfile as client_pgpass 6 | from io import StringIO 7 | 8 | passfile_sample = """ 9 | # host:1111:dbname:user:password1 10 | host:1111:dbname:user:password1 11 | *:1111:dbname:user:password2 12 | *:*:dbname:user:password3 13 | 14 | # Comment 15 | 16 | *:*:*:user:password4 17 | *:*:*:usern:password4.5 18 | *:*:*:*:password5 19 | """ 20 | 21 | passfile_sample_map = { 22 | ('user', 'host', '1111', 'dbname') : 'password1', 23 | ('user', 'host', '1111', 'dbname') : 'password1', 24 | ('user', 'foo', '1111', 'dbname') : 'password2', 25 | ('user', 'foo', '4321', 'dbname') : 'password3', 26 | ('user', 'foo', '4321', 'db,name') : 'password4', 27 | 28 | ('uuser', 'foo', '4321', 'db,name') : 'password5', 29 | ('usern', 'foo', '4321', 'db,name') : 'password4.5', 30 | ('foo', 'bar', '19231', 'somedbn') : 'password5', 31 | } 32 | 33 | difficult_passfile_sample = r""" 34 | host\\:1111:db\:name:u\\ser:word1 35 | *:1111:\:dbname\::\\user\\:pass\:word2 36 | foohost:1111:\:dbname\::\\user\\:pass\:word3 37 | """ 38 | 39 | difficult_passfile_sample_map = { 40 | ('u\\ser','host\\','1111','db:name') : 'word1', 41 | ('\\user\\','somehost','1111',':dbname:') : 'pass:word2', 42 | ('\\user\\','someotherhost','1111',':dbname:') : 'pass:word2', 43 | # More specific, but comes after '*' 44 | ('\\user\\','foohost','1111',':dbname:') : 'pass:word2', 45 | ('','','','') : None, 46 | } 47 | 48 | class test_pgpass(unittest.TestCase): 49 | def runTest(self): 50 | sample1 = client_pgpass.parse(StringIO(passfile_sample)) 51 | sample2 = client_pgpass.parse(StringIO(difficult_passfile_sample)) 52 | 53 | for k, pw in passfile_sample_map.items(): 54 | lpw = client_pgpass.lookup_password(sample1, k) 55 | self.assertEqual(lpw, pw, 56 | "password lookup incongruity, expecting %r got %r with %r" 57 | " in \n%s" %( 58 | pw, lpw, k, passfile_sample 59 | ) 60 | ) 61 | 62 | for k, pw in difficult_passfile_sample_map.items(): 63 | lpw = client_pgpass.lookup_password(sample2, k) 64 | self.assertEqual(lpw, pw, 65 | "password lookup incongruity, expecting %r got %r with %r" 66 | " in \n%s" %( 67 | pw, lpw, k, difficult_passfile_sample 68 | ) 69 | ) 70 | 71 | if __name__ == '__main__': 72 | unittest.main() 73 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/sys.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .sys 3 | ## 4 | """ 5 | py-postgresql system functions and data. 6 | 7 | Data 8 | ---- 9 | 10 | ``libpath`` 11 | The local file system paths that contain query libraries. 12 | 13 | Overridable Functions 14 | --------------------- 15 | 16 | excformat 17 | Information that makes up an exception's displayed "body". 18 | Effectively, the implementation of `postgresql.exception.Error.__str__` 19 | 20 | msghook 21 | Display a message. 22 | """ 23 | import sys 24 | import os 25 | import traceback 26 | from .python.element import format_element 27 | from .python.string import indent 28 | 29 | libpath = [] 30 | 31 | def default_errformat(val): 32 | """ 33 | Built-in error formatter. DON'T TOUCH! 34 | """ 35 | it = val._e_metas() 36 | if val.creator is not None: 37 | # Protect against element traceback failures. 
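# format_element() may itself raise; the fallback below reports the
# element traceback failure instead of losing the original error.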
38 | try: 39 | after = os.linesep + format_element(val.creator) 40 | except Exception: 41 | after = 'Element Traceback of %r caused exception:%s' %( 42 | type(val.creator).__name__, 43 | os.linesep 44 | ) 45 | after += indent(traceback.format_exc()) 46 | after = os.linesep + indent(after).rstrip() 47 | else: 48 | after = '' 49 | return next(it)[1] \ 50 | + os.linesep + ' ' \ 51 | + (os.linesep + ' ').join( 52 | k + ': ' + v for k, v in it 53 | ) + after 54 | 55 | def default_msghook(msg, format_message = format_element): 56 | """ 57 | Built-in message hook. DON'T TOUCH! 58 | """ 59 | if sys.stderr and not sys.stderr.closed: 60 | try: 61 | sys.stderr.write(format_message(msg) + os.linesep) 62 | except Exception: 63 | try: 64 | sys.excepthook(*sys.exc_info()) 65 | except Exception: 66 | # gasp. 67 | pass 68 | 69 | def errformat(*args, **kw): 70 | """ 71 | Raised Database Error formatter pointing to default_excformat. 72 | 73 | Override if you like. All postgresql.exceptions.Error's are formatted using 74 | this function. 75 | """ 76 | return default_errformat(*args, **kw) 77 | 78 | def msghook(*args, **kw): 79 | """ 80 | Message hook pointing to default_msghook. 81 | 82 | Override if you like. All untrapped messages raised by 83 | driver connections come here to be printed to stderr. 84 | """ 85 | return default_msghook(*args, **kw) 86 | 87 | def reset_errformat(with_func = errformat): 88 | 'restore the original excformat function' 89 | global errformat 90 | errformat = with_func 91 | 92 | def reset_msghook(with_func = msghook): 93 | 'restore the original msghook function' 94 | global msghook 95 | msghook = with_func 96 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/port/signal1_msw.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .port.signal1_msw 3 | ## 4 | """ 5 | Support for PG signals on Windows platforms. 6 | 7 | This implementation supports all known versions of PostgreSQL. (2010) 8 | 9 | CallNamedPipe: 10 | http://msdn.microsoft.com/en-us/library/aa365144%28VS.85%29.aspx 11 | """ 12 | import errno 13 | from ctypes import windll, wintypes, pointer 14 | 15 | # CallNamedPipe from kernel32. 16 | CallNamedPipeA = windll.kernel32.CallNamedPipeA 17 | CallNamedPipeA.restype = wintypes.BOOL 18 | CallNamedPipeA.argtypes = ( 19 | wintypes.LPCSTR, # in namedpipename 20 | wintypes.LPVOID, # in inbuffer (for signal number) 21 | wintypes.DWORD, # in inbuffersize (always 1) 22 | wintypes.LPVOID, # OutBuffer (signal return validation) 23 | wintypes.DWORD, # in OutBufferSize (always 1) 24 | wintypes.LPVOID, # out bytes read, really LPDWORD. 25 | wintypes.DWORD, # in timeout 26 | ) 27 | 28 | from signal import SIGTERM, SIGINT, SIG_DFL 29 | # SYNC: Values taken from the port/win32.h file. 30 | SIG_DFL=0 31 | SIGHUP=1 32 | SIGQUIT=3 33 | SIGTRAP=5 34 | SIGABRT=22 # /* Set to match W32 value -- not UNIX value */ 35 | SIGKILL=9 36 | SIGPIPE=13 37 | SIGALRM=14 38 | SIGSTOP=17 39 | SIGTSTP=18 40 | SIGCONT=19 41 | SIGCHLD=20 42 | SIGTTIN=21 43 | SIGTTOU=22 # /* Same as SIGABRT -- no problem, I hope */ 44 | SIGWINCH=28 45 | SIGUSR1=30 46 | SIGUSR2=31 47 | 48 | # SYNC: port.h 49 | PG_SIGNAL_COUNT = 32 50 | 51 | # In the situation of another variant, another module should be constructed. 52 | def kill(pid : int, signal : int, timeout = 1000, dword1 = wintypes.DWORD(1)): 53 | """ 54 | Re-implementation of pg_kill for win32 using ctypes. 
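
	For example (hypothetical pid; the target backend must be serving its
	pgsignal named pipe):

		>>> kill(4711, SIGINT)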
55 | """ 56 | if pid <= 0: 57 | raise OSError(errno.EINVAL, "process group not supported") 58 | if signal < 0 or signal >= PG_SIGNAL_COUNT: 59 | raise OSError(errno.EINVAL, "unsupported signal number") 60 | inbuffer = pointer(wintypes.BYTE(signal)) 61 | outbuffer = pointer(wintypes.BYTE(0)) 62 | outbytes = pointer(wintypes.DWORD(0)) 63 | pidpipe = br'\\.\pipe\pgsignal_' + str(pid).encode('ascii') 64 | timeout = wintypes.DWORD(timeout) 65 | r = CallNamedPipeA( 66 | pidpipe, inbuffer, dword1, outbuffer, dword1, outbytes, timeout 67 | ) 68 | if r: 69 | if outbuffer.contents.value == signal: 70 | if outbytes.contents.value == 1: 71 | # success 72 | return 73 | # Don't bother emulating the other failure cases/abstractions. 74 | # CallNamedPipeA should raise a WindowsError on those failures. 75 | raise OSError(errno.ESRCH, "unexpected output from CallNamedPipeA") 76 | __docformat__ = 'reStructuredText' 77 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/types/io/stdlib_xml_etree.py: -------------------------------------------------------------------------------- 1 | ## 2 | # types.io.stdlib_xml_etree 3 | ## 4 | try: 5 | import xml.etree.cElementTree as etree 6 | except ImportError: 7 | import xml.etree.ElementTree as etree 8 | from .. import XMLOID 9 | from ...python.functools import Composition as compose 10 | 11 | oid_to_type = { 12 | XMLOID: etree.ElementTree, 13 | } 14 | 15 | def xml_unpack(xmldata, XML = etree.XML): 16 | try: 17 | return XML(xmldata) 18 | except Exception: 19 | # try it again, but return the sequence of children. 20 | return tuple(XML('' + xmldata + '')) 21 | 22 | if not hasattr(etree, 'tostringlist'): 23 | # Python 3.1 support. 24 | def xml_pack(xml, tostr = etree.tostring, et = etree.ElementTree, 25 | str = str, isinstance = isinstance, tuple = tuple 26 | ): 27 | if isinstance(xml, str): 28 | # If it's a string, encode and return. 29 | return xml 30 | elif isinstance(xml, tuple): 31 | # If it's a tuple, encode and return the joined items. 32 | # We do not accept lists here--emphasizing lists being used for ARRAY 33 | # bounds. 34 | return ''.join((x if isinstance(x, str) else tostr(x) for x in xml)) 35 | return tostr(xml) 36 | 37 | def xml_io_factory(typoid, typio, c = compose): 38 | return ( 39 | c((xml_pack, typio.encode)), 40 | c((typio.decode, xml_unpack)), 41 | etree.ElementTree, 42 | ) 43 | else: 44 | # New etree tostring API. 45 | def xml_pack(xml, encoding, encoder, 46 | tostr = etree.tostring, et = etree.ElementTree, 47 | str = str, isinstance = isinstance, tuple = tuple, 48 | ): 49 | if isinstance(xml, bytes): 50 | return xml 51 | if isinstance(xml, str): 52 | # If it's a string, encode and return. 53 | return encoder(xml) 54 | elif isinstance(xml, tuple): 55 | # If it's a tuple, encode and return the joined items. 56 | # We do not accept lists here--emphasizing lists being used for ARRAY 57 | # bounds. 58 | ## 59 | # 3.2 60 | # XXX: tostring doesn't include declaration with utf-8? 61 | x = b''.join( 62 | x.encode('utf-8') if isinstance(x, str) else 63 | tostr(x, encoding = "utf-8") 64 | for x in xml 65 | ) 66 | else: 67 | ## 68 | # 3.2 69 | # XXX: tostring doesn't include declaration with utf-8? 
70 | x = tostr(xml, encoding = "utf-8") 71 | if encoding in ('utf8','utf-8'): 72 | return x 73 | else: 74 | return encoder(x.decode('utf-8')) 75 | 76 | def xml_io_factory(typoid, typio, c = compose): 77 | def local_xml_pack(x, encoder = typio.encode, typio = typio, xml_pack = xml_pack): 78 | return xml_pack(x, typio.encoding, encoder) 79 | return (local_xml_pack, c((typio.decode, xml_unpack)), etree.ElementTree,) 80 | 81 | oid_to_io = { 82 | XMLOID : xml_io_factory 83 | } 84 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/test/test_installation.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .test.test_installation 3 | ## 4 | import sys 5 | import os 6 | import unittest 7 | from .. import installation as ins 8 | 9 | class test_installation(unittest.TestCase): 10 | """ 11 | Most of this is exercised by TestCaseWithCluster, but do some 12 | explicit checks up front to help find any specific issues that 13 | do not naturally occur. 14 | """ 15 | def test_parse_configure_options(self): 16 | # Check expectations. 17 | self.assertEqual( 18 | list(ins.parse_configure_options("")), [], 19 | ) 20 | self.assertEqual( 21 | list(ins.parse_configure_options(" ")), [], 22 | ) 23 | self.assertEqual( 24 | list(ins.parse_configure_options("--foo --bar")), 25 | [('foo',True),('bar',True)] 26 | ) 27 | self.assertEqual( 28 | list(ins.parse_configure_options("'--foo' '--bar'")), 29 | [('foo',True),('bar',True)] 30 | ) 31 | self.assertEqual( 32 | list(ins.parse_configure_options("'--foo=A properly isolated string' '--bar'")), 33 | [('foo','A properly isolated string'),('bar',True)] 34 | ) 35 | # hope they don't ever use backslash escapes. 36 | # This is pretty dirty, but it doesn't seem well defined anyways. 37 | self.assertEqual( 38 | list(ins.parse_configure_options("'--foo=A ''properly'' isolated string' '--bar'")), 39 | [('foo',"A 'properly' isolated string"),('bar',True)] 40 | ) 41 | # handle some simple variations, but it's 42 | self.assertEqual( 43 | list(ins.parse_configure_options("'--foo' \"--bar\"")), 44 | [('foo',True),('bar',True)] 45 | ) 46 | # Show the failure. 
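# parse_configure_options() only unescapes single-quoted values, so the
# double-quoted option below is expected to mis-parse and the assertEqual
# to raise.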
47 | try: 48 | self.assertEqual( 49 | list(ins.parse_configure_options("'--foo' \"--bar=/A dir/file\"")), 50 | [('foo',True),('bar','/A dir/file')] 51 | ) 52 | except AssertionError: 53 | pass 54 | else: 55 | self.fail("did not detect induced failure") 56 | 57 | def test_minimum(self): 58 | 'version info' 59 | # Installation only "needs" the version information 60 | i = ins.Installation({'version' : 'PostgreSQL 2.2.3'}) 61 | self.assertEqual( 62 | i.version, 'PostgreSQL 2.2.3' 63 | ) 64 | self.assertEqual( 65 | i.version_info, (2,2,3,'final',0) 66 | ) 67 | self.assertEqual(i.postgres, None) 68 | self.assertEqual(i.postmaster, None) 69 | 70 | def test_exec(self): 71 | # check the executable 72 | i = ins.pg_config_dictionary( 73 | sys.executable, '-m', __package__ + '.support', 'pg_config') 74 | # automatically lowers the key 75 | self.assertEqual(i['foo'], 'BaR') 76 | self.assertEqual(i['feh'], 'YEAH') 77 | self.assertEqual(i['version'], 'NAY') 78 | 79 | if __name__ == '__main__': 80 | from types import ModuleType 81 | this = ModuleType("this") 82 | this.__dict__.update(globals()) 83 | unittest.main(this) 84 | -------------------------------------------------------------------------------- /pg-cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Tasks for group inv_cluster 3 | hosts: inv_cluster 4 | tasks: [] 5 | 6 | - name: Install basic software 7 | hosts: inv_cluster 8 | become: true 9 | tags: prepare_nodes 10 | vars_files: 11 | - 'inventory/group_vars/prepare_nodes.yml' 12 | roles: 13 | - prepare_nodes 14 | any_errors_fatal: true 15 | 16 | - name: Generate certs 17 | hosts: localhost 18 | connection: local 19 | become: true 20 | tags: certificates 21 | roles: 22 | - role: certificates 23 | any_errors_fatal: true 24 | 25 | - name: Install etcd 26 | hosts: inv_etcd 27 | become: true 28 | tags: etcd 29 | vars_files: 30 | - 'inventory/group_vars/etcd.yml' 31 | roles: 32 | - role: etcd 33 | any_errors_fatal: true 34 | 35 | - name: Install PostgreSQL Tantordb 36 | hosts: inv_pg 37 | become: true 38 | tags: postgres_tantordb 39 | vars_files: 40 | - 'inventory/group_vars/etcd.yml' 41 | - 'inventory/group_vars/tantordb.yml' 42 | roles: 43 | - postgres_tantordb 44 | any_errors_fatal: true 45 | 46 | - name: Install PostgreSQL Classic 47 | hosts: inv_pg 48 | become: true 49 | tags: postgres_classic 50 | vars_files: 51 | - 'inventory/group_vars/etcd.yml' 52 | - 'inventory/group_vars/postgres_classic.yml' 53 | roles: 54 | - postgres_classic 55 | any_errors_fatal: true 56 | 57 | - name: Install Patroni 58 | hosts: inv_pg 59 | become: true 60 | tags: patroni 61 | vars_files: 62 | - 'inventory/group_vars/etcd.yml' 63 | - 'inventory/group_vars/patroni.yml' 64 | - 'inventory/group_vars/tantordb.yml' 65 | - 'inventory/group_vars/postgres_classic.yml' 66 | roles: 67 | - patroni 68 | any_errors_fatal: true 69 | 70 | - name: Install PGBouncer 71 | hosts: inv_pg 72 | become: true 73 | tags: pgbouncer 74 | vars_files: 75 | - 'inventory/group_vars/pgbouncer.yml' 76 | - 'inventory/group_vars/etcd.yml' 77 | - 'inventory/group_vars/patroni.yml' 78 | roles: 79 | - pgbouncer 80 | any_errors_fatal: true 81 | 82 | - name: Install Haproxy 83 | hosts: inv_pg 84 | become: true 85 | tags: haproxy 86 | vars_files: 87 | - 'inventory/group_vars/etcd.yml' 88 | - 'inventory/group_vars/haproxy.yml' 89 | - 'inventory/group_vars/patroni.yml' 90 | - 'inventory/group_vars/pgbouncer.yml' 91 | roles: 92 | - haproxy 93 | any_errors_fatal: true 94 | 95 | - name: Install
Keepalived 96 | hosts: inv_keepalived 97 | become: true 98 | tags: keepalived 99 | vars_files: 100 | - 'inventory/group_vars/etcd.yml' 101 | - 'inventory/group_vars/patroni.yml' 102 | - 'inventory/group_vars/keepalived.yml' 103 | roles: 104 | - keepalived 105 | any_errors_fatal: true 106 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/test/test_cluster.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .test.test_cluster 3 | ## 4 | import sys 5 | import os 6 | import time 7 | import unittest 8 | import tempfile 9 | from .. import installation 10 | from ..cluster import Cluster, ClusterStartupError 11 | 12 | default_install = installation.default() 13 | if default_install is None: 14 | sys.stderr.write("ERROR: cannot find 'default' pg_config\n") 15 | sys.stderr.write("HINT: set the PGINSTALLATION environment variable to the `pg_config` path\n") 16 | sys.exit(1) 17 | 18 | class test_cluster(unittest.TestCase): 19 | def setUp(self): 20 | self.cluster = Cluster(default_install, 'test_cluster',) 21 | 22 | def tearDown(self): 23 | self.cluster.drop() 24 | self.cluster = None 25 | 26 | def start_cluster(self, logfile = None): 27 | self.cluster.start(logfile = logfile) 28 | self.cluster.wait_until_started(timeout = 10) 29 | 30 | def init(self, *args, **kw): 31 | self.cluster.init(*args, **kw) 32 | 33 | vi = self.cluster.installation.version_info[:2] 34 | if vi >= (9, 3): 35 | usd = 'unix_socket_directories' 36 | else: 37 | usd = 'unix_socket_directory' 38 | 39 | if vi > (9, 6): 40 | self.cluster.settings['max_wal_senders'] = '0' 41 | 42 | self.cluster.settings.update({ 43 | 'max_connections' : '8', 44 | 'listen_addresses' : 'localhost', 45 | 'port' : '6543', 46 | usd : self.cluster.data_directory, 47 | }) 48 | 49 | def testSilentMode(self): 50 | self.init() 51 | self.cluster.settings['silent_mode'] = 'on' 52 | # if it fails to start(ClusterError), silent_mode is not working properly. 53 | try: 54 | self.start_cluster(logfile = sys.stdout) 55 | except ClusterStartupError: 56 | # silent_mode is not supported on windows by PG. 
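# (silent_mode was removed entirely in PostgreSQL 9.2, so a startup
# failure is also the expected outcome on >= 9.2.)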
57 | if sys.platform in ('win32','win64'): 58 | pass 59 | elif self.cluster.installation.version_info[:2] >= (9, 2): 60 | pass 61 | else: 62 | raise 63 | else: 64 | if sys.platform in ('win32','win64'): 65 | self.fail("silent_mode unexpectedly supported on windows") 66 | elif self.cluster.installation.version_info[:2] >= (9, 2): 67 | self.fail("silent_mode unexpectedly supported on PostgreSQL >=9.2") 68 | 69 | def testSuperPassword(self): 70 | self.init( 71 | user = 'test', 72 | password = 'secret', 73 | logfile = sys.stdout, 74 | ) 75 | self.start_cluster() 76 | c = self.cluster.connection( 77 | user='test', 78 | password='secret', 79 | database='template1', 80 | ) 81 | with c: 82 | self.assertEqual(c.prepare('select 1').first(), 1) 83 | 84 | def testNoParameters(self): 85 | 'simple init and drop' 86 | self.init() 87 | self.start_cluster() 88 | 89 | if __name__ == '__main__': 90 | from types import ModuleType 91 | this = ModuleType("this") 92 | this.__dict__.update(globals()) 93 | unittest.main(this) 94 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/types/bitwise.py: -------------------------------------------------------------------------------- 1 | class Varbit(object): 2 | __slots__ = ('data', 'bits') 3 | 4 | def from_bits(subtype, bits, data): 5 | if bits == 1: 6 | return (data[0] & (1 << 7)) and OneBit or ZeroBit 7 | else: 8 | rob = object.__new__(subtype) 9 | rob.bits = bits 10 | rob.data = data 11 | return rob 12 | from_bits = classmethod(from_bits) 13 | 14 | def __new__(typ, data): 15 | if isinstance(data, Varbit): 16 | return data 17 | if isinstance(data, bytes): 18 | return typ.from_bits(len(data) * 8, data) 19 | # str(), eg '00101100' 20 | bits = len(data) 21 | nbytes, remain = divmod(bits, 8) 22 | bdata = [bytes((int(data[x:x+8], 2),)) for x in range(0, bits - remain, 8)] 23 | if remain != 0: 24 | bdata.append(bytes((int(data[nbytes*8:].ljust(8,'0'), 2),))) 25 | return typ.from_bits(bits, b''.join(bdata)) 26 | 27 | def __str__(self): 28 | if self.bits: 29 | # cut off the remainder from the bits 30 | blocks = [bin(x)[2:].rjust(8, '0') for x in self.data] 31 | blocks[-1] = blocks[-1][0:(self.bits % 8) or 8] 32 | return ''.join(blocks) 33 | else: 34 | return '' 35 | 36 | def __repr__(self): 37 | return '%s.%s(%r)' %( 38 | type(self).__module__, 39 | type(self).__name__, 40 | str(self) 41 | ) 42 | 43 | def __eq__(self, ob): 44 | if not isinstance(ob, type(self)): 45 | ob = type(self)(ob) 46 | return ob.bits == self.bits and ob.data == self.data 47 | 48 | def __len__(self): 49 | return self.bits 50 | 51 | def __add__(self, ob): 52 | return Varbit(str(self) + str(ob)) 53 | 54 | def __mul__(self, ob): 55 | return Varbit(str(self) * ob) 56 | 57 | def getbit(self, bitoffset): 58 | if bitoffset < 0: 59 | idx = self.bits + bitoffset 60 | else: 61 | idx = bitoffset 62 | if not 0 <= idx < self.bits: 63 | raise IndexError("bit index %d out of range" %(bitoffset,)) 64 | 65 | byte, bitofbyte = divmod(idx, 8) 66 | if ord(self.data[byte]) & (1 << (7 - bitofbyte)): 67 | return OneBit 68 | else: 69 | return ZeroBit 70 | 71 | def __getitem__(self, item): 72 | if isinstance(item, slice): 73 | return type(self)(str(self)[item]) 74 | else: 75 | return self.getbit(item) 76 | 77 | def __nonzero__(self): 78 | for x in self.data: 79 | if x != 0: 80 | return True 81 | return False 82 | 83 | class Bit(Varbit): 84 | def __new__(subtype, ob): 85 | if ob is ZeroBit or ob is False or ob == '0': 86 | return ZeroBit 87 | elif ob is 
OneBit or ob is True or ob == '1': 88 | return OneBit 89 | 90 | raise ValueError('unknown bit value %r, 0 or 1' %(ob,)) 91 | 92 | def __nonzero__(self): 93 | return self is OneBit 94 | 95 | def __str__(self): 96 | return self is OneBit and '1' or '0' 97 | 98 | ZeroBit = object.__new__(Bit) 99 | ZeroBit.data = b'\x00' 100 | ZeroBit.bits = 1 101 | OneBit = object.__new__(Bit) 102 | OneBit.data = b'\x80' 103 | OneBit.bits = 1 104 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/types/io/__init__.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .types.io - I/O routines for packing and unpacking data 3 | ## 4 | """ 5 | PostgreSQL type I/O routines--packing and unpacking functions. 6 | 7 | This package manages the modules providing I/O routines. 8 | 9 | The name of the function describes what type the function is intended to be used 10 | on. Normally, the functions return a structured form of the serialized data to 11 | be used as a parameter to the creation of a higher level instance. In 12 | particular, most of the functions that deal with time return a pair for 13 | representing the relative offset: (seconds, microseconds). For times, this 14 | provides an abstraction for quad-word based times used by some configurations of 15 | PostgreSQL. 16 | """ 17 | import sys 18 | from itertools import cycle, chain 19 | from ... import types as pg_types 20 | 21 | io_modules = { 22 | 'builtins' : ( 23 | pg_types.BOOLOID, 24 | pg_types.CHAROID, 25 | pg_types.BYTEAOID, 26 | 27 | pg_types.INT2OID, 28 | pg_types.INT4OID, 29 | pg_types.INT8OID, 30 | 31 | pg_types.FLOAT4OID, 32 | pg_types.FLOAT8OID, 33 | pg_types.ABSTIMEOID, 34 | ), 35 | 36 | 'pg_bitwise': ( 37 | pg_types.BITOID, 38 | pg_types.VARBITOID, 39 | ), 40 | 41 | 'pg_network': ( 42 | pg_types.MACADDROID, 43 | pg_types.INETOID, 44 | pg_types.CIDROID, 45 | ), 46 | 47 | 'pg_system': ( 48 | pg_types.OIDOID, 49 | pg_types.XIDOID, 50 | pg_types.CIDOID, 51 | pg_types.TIDOID, 52 | ), 53 | 54 | 'pg_geometry': ( 55 | pg_types.POINTOID, 56 | pg_types.LSEGOID, 57 | pg_types.BOXOID, 58 | pg_types.CIRCLEOID, 59 | ), 60 | 61 | 'stdlib_datetime' : ( 62 | pg_types.DATEOID, 63 | pg_types.INTERVALOID, 64 | pg_types.TIMEOID, 65 | pg_types.TIMETZOID, 66 | pg_types.TIMESTAMPOID, 67 | pg_types.TIMESTAMPTZOID 68 | ), 69 | 70 | 'stdlib_decimal' : ( 71 | pg_types.NUMERICOID, 72 | ), 73 | 74 | 'stdlib_uuid' : ( 75 | pg_types.UUIDOID, 76 | ), 77 | 78 | 'stdlib_xml_etree' : ( 79 | pg_types.XMLOID, 80 | ), 81 | 82 | 'stdlib_jsonb' : ( 83 | pg_types.JSONBOID, 84 | ), 85 | 86 | # Must be db.typio.identify(contrib_hstore = 'hstore')'d 87 | 'contrib_hstore' : ( 88 | 'contrib_hstore', 89 | ), 90 | } 91 | 92 | # OID -> module name 93 | module_io = dict( 94 | chain.from_iterable(( 95 | zip(x[1], cycle((x[0],))) for x in io_modules.items() 96 | )) 97 | ) 98 | 99 | if sys.version_info[:2] < (3,3): 100 | def load(relmod): 101 | return __import__(__name__ + '.'
+ relmod, fromlist = True, level = 1) 102 | else: 103 | def load(relmod): 104 | return __import__(relmod, globals = globals(), locals = locals(), fromlist = [''], level = 1) 105 | 106 | def resolve(oid): 107 | io = module_io.get(oid) 108 | if io is None: 109 | return None 110 | if io.__class__ is str: 111 | module_io.update(load(io).oid_to_io) 112 | io = module_io[oid] 113 | return io 114 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/__init__.py: -------------------------------------------------------------------------------- 1 | ## 2 | # py-postgresql root package 3 | # http://github.com/python-postgres/fe 4 | ## 5 | """ 6 | py-postgresql is a Python package for using PostgreSQL. This includes low-level 7 | protocol tools, a driver(PG-API and DB-API 2.0), and cluster management tools. 8 | 9 | See <http://postgresql.org> for more information about PostgreSQL and 10 | <http://python.org> for information about Python. 11 | """ 12 | __all__ = [ 13 | '__author__', 14 | '__date__', 15 | '__version__', 16 | '__docformat__', 17 | 'version', 18 | 'version_info', 19 | 'open', 20 | ] 21 | 22 | #: The version string of py-postgresql. 23 | version = '' # overridden by subsequent import from .project. 24 | 25 | #: The version triple of py-postgresql: (major, minor, patch). 26 | version_info = () # overridden by subsequent import from .project. 27 | 28 | # Optional. 29 | try: 30 | from .project import version_info, version, \ 31 | author as __author__, date as __date__ 32 | __version__ = version 33 | except ImportError: 34 | pass 35 | 36 | # Avoid importing these until requested. 37 | _pg_iri = _pg_driver = _pg_param = None 38 | def open(iri = None, prompt_title = None, **kw): 39 | """ 40 | Create a `postgresql.api.Connection` to the server referenced by the given 41 | `iri`:: 42 | 43 | >>> import postgresql 44 | # General Format: 45 | >>> db = postgresql.open('pq://user:password@host:port/database') 46 | 47 | # Connect to 'postgres' at localhost. 48 | >>> db = postgresql.open('localhost/postgres') 49 | 50 | Connection keywords can also be used with `open`. See the narratives for 51 | more information. 52 | 53 | The `prompt_title` keyword is ignored. `open` will never prompt for 54 | the password unless it is explicitly instructed to do so. 55 | 56 | (Note: "pq" is the name of the protocol used to communicate with PostgreSQL) 57 | """ 58 | global _pg_iri, _pg_driver, _pg_param 59 | if _pg_iri is None: 60 | from . import iri as _pg_iri 61 | from . import driver as _pg_driver 62 | from . import clientparameters as _pg_param 63 | 64 | return_connector = False 65 | if iri is not None: 66 | if iri.startswith('&'): 67 | return_connector = True 68 | iri = iri[1:] 69 | iri_params = _pg_iri.parse(iri) 70 | iri_params.pop('path', None) 71 | else: 72 | iri_params = {} 73 | 74 | std_params = _pg_param.collect(prompt_title = None) 75 | # If unix is specified, it's going to conflict with any standard 76 | # settings, so remove them right here.
77 | if 'unix' in kw or 'unix' in iri_params: 78 | std_params.pop('host', None) 79 | std_params.pop('port', None) 80 | params = _pg_param.normalize( 81 | list(_pg_param.denormalize_parameters(std_params)) + \ 82 | list(_pg_param.denormalize_parameters(iri_params)) + \ 83 | list(_pg_param.denormalize_parameters(kw)) 84 | ) 85 | _pg_param.resolve_password(params) 86 | 87 | C = _pg_driver.default.fit(**params) 88 | if return_connector is True: 89 | return C 90 | else: 91 | c = C() 92 | c.connect() 93 | return c 94 | 95 | __docformat__ = 'reStructuredText' 96 | -------------------------------------------------------------------------------- /roles/patroni/templates/patroni_custom_bootstrap_script.sh.j2: -------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True, trim_blocks: True 2 | #!/bin/bash 3 | 4 | # Define WAL-G configuration path 5 | config_path="/var/lib/postgresql/.walg.json" 6 | {% if postgresql_vendor == 'tantordb' %} 7 | pgdata_dir="{{ patroni_pg_data_dir }}" 8 | {% elif postgresql_vendor == 'classic' and ( ansible_facts['os_family'] == 'Debian' or (ansible_facts['os_family'] == "Astra Linux") or (ansible_facts['os_family'] == "Astra Linux (Orel)")) %} 9 | pgdata_dir="/var/lib/postgresql/{{ major_version }}/{{ inventory_hostname }}/main/" 10 | {% elif postgresql_vendor == 'classic' and (ansible_facts['os_family'] == "Altlinux") %} 11 | pgdata_dir="/var/lib/pgsql/data/{{ inventory_hostname }}" 12 | {% elif postgresql_vendor == 'classic' %} 13 | pgdata_dir="/var/lib/pgsql/{{ major_version }}/data/{{ inventory_hostname }}" 14 | {% endif %} 15 | 16 | # Check if the base directory exists, create if it doesn't 17 | if [ ! -d "$pgdata_dir" ]; then 18 | echo "Creating base directory: $pgdata_dir" 19 | mkdir -p "$pgdata_dir" 20 | fi 21 | 22 | # Check for existing backups and catchups 23 | backup_list=$(/opt/tantor/usr/bin/wal-g --config "$config_path" backup-list 2>&1) 24 | catchup_list=$(/opt/tantor/usr/bin/wal-g --config "$config_path" catchup-list 2>&1) 25 | 26 | function get_latest_date() { 27 | echo "$1" | tail -n +2 | awk '{print $2 " " $0}' | sort | tail -n 1 | cut -d' ' -f2- 28 | } 29 | 30 | # Check for "No backups found" 31 | if [[ "$backup_list" == *"No backups found"* && "$catchup_list" == *"No backups found"* ]]; then 32 | # Standard DB init if no backups are found 33 | echo "No backups found, initializing standard database..." 
>> /opt/tantor/var/log/patroni/patroni.log 34 | {% if postgresql_vendor == 'tantordb' %} 35 | {{ patroni_pg_bin_dir }}/initdb -D $pgdata_dir 36 | {% elif postgresql_vendor == 'classic' and ( ansible_facts['os_family'] == 'Debian' or (ansible_facts['os_family'] == "Astra Linux") or (ansible_facts['os_family'] == "Astra Linux (Orel)")) %} 37 | /usr/lib/postgresql/{{ major_version }}/bin/initdb -D $pgdata_dir 38 | {% elif postgresql_vendor == 'classic' and (ansible_facts['os_family'] == "Altlinux") %} 39 | /usr/bin/initdb -D $pgdata_dir 40 | {% elif postgresql_vendor == 'classic' %} 41 | /usr/pgsql-{{ major_version }}/bin/initdb -D $pgdata_dir 42 | {% endif %} 43 | exit 0 44 | else 45 | # Determine latest backup and perform appropriate fetch 46 | latest_backup_date=$(get_latest_date "$backup_list") 47 | latest_catchup_date=$(get_latest_date "$catchup_list") 48 | #latest_catchup_date=0 49 | 50 | if [[ "$latest_backup_date" > "$latest_catchup_date" ]]; then 51 | echo "Latest backup date: $latest_backup_date" >> /opt/tantor/var/log/patroni/patroni.log 52 | /opt/tantor/usr/bin/wal-g --config "$config_path" backup-fetch "$pgdata_dir" LATEST 53 | else 54 | echo "Latest catchup date: $latest_catchup_date" >> /opt/tantor/var/log/patroni/patroni.log 55 | /opt/tantor/usr/bin/wal-g --config "$config_path" catchup-fetch "$pgdata_dir" LATEST 56 | fi 57 | fi 58 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/pgstatlogger/pg_stat_logger.py: -------------------------------------------------------------------------------- 1 | from threading import Thread 2 | import threading 3 | import logging 4 | import logging.handlers 5 | import os 6 | import time 7 | from psc import PSC_DEBUG 8 | 9 | 10 | class PSCLogger(Thread): 11 | logger = None 12 | delay = None 13 | log_queue = [] 14 | log_level = None 15 | lock_logger = threading.Lock() 16 | do_stop = False 17 | __instance = None 18 | 19 | @staticmethod 20 | def instance(): 21 | if PSCLogger.__instance is None: 22 | PSCLogger("PSCLogger") 23 | return PSCLogger.__instance 24 | 25 | def __init__(self, app_name, log_level=logging.DEBUG, max_bytes=1024*1000*10, backup_count=50, delay=3): 26 | self.logger = logging.getLogger(app_name) 27 | parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) 28 | handler = logging.handlers.RotatingFileHandler( 29 | os.path.join(parent_dir, 'log', app_name + '.log'), 30 | maxBytes=max_bytes, 31 | backupCount=backup_count 32 | ) 33 | formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') 34 | handler.setFormatter(formatter) 35 | self.logger.addHandler(handler) 36 | self.logger.setLevel(log_level) 37 | self.delay = delay 38 | self.log_level = log_level 39 | PSCLogger.__instance = self 40 | Thread.__init__(self) 41 | 42 | def run(self): 43 | def flush_data(): 44 | self.lock_logger.acquire() 45 | for v in self.log_queue: 46 | if v[0] == 'Error': 47 | self.logger.error(str(v[1])) 48 | if v[0] == 'Warning': 49 | self.logger.warning(str(v[1])) 50 | if v[0] == 'Info': 51 | self.logger.info(str(v[1])) 52 | if v[0] == 'Debug': 53 | self.logger.debug(str(v[1])) 54 | del self.log_queue[:] 55 | self.lock_logger.release() 56 | 57 | live_iteration = 0 58 | while not self.do_stop: 59 | time.sleep(self.delay/50) 60 | if live_iteration % 50 == 0 or self.do_stop: 61 | flush_data() 62 | live_iteration += 1 63 | 64 | flush_data() 65 | self.logger.handlers[0].flush() 66 | if PSC_DEBUG: 67 | print("PSCLogger stopped!") 68 | 69 | def stop(self): 
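# Ask run() to exit; its loop performs a final flush_data() before the
# thread terminates.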
70 | self.do_stop = True 71 | 72 | def log(self, msg, code, do_print=False): 73 | if do_print: 74 | print(code + ": " + msg) 75 | self.lock_logger.acquire() 76 | if code == 'Debug' and self.log_level == logging.DEBUG: 77 | self.log_queue.append(['Debug', msg]) 78 | if code == 'Info' and self.log_level <= logging.INFO: 79 | self.log_queue.append(['Info', msg]) 80 | if code == 'Warning' and self.log_level <= logging.WARNING: 81 | self.log_queue.append(['Warning', msg]) 82 | if code == 'Error' and self.log_level <= logging.ERROR: 83 | self.log_queue.append(['Error', msg]) 84 | self.lock_logger.release() 85 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/test/cursor_integrity.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .test.cursor_integrity 3 | ## 4 | import os 5 | import unittest 6 | import random 7 | import itertools 8 | 9 | iot = '_dst' 10 | 11 | getq = "SELECT i FROM generate_series(0, %d) AS g(i)" 12 | copy = "COPY (%s) TO STDOUT" 13 | 14 | def random_read(curs, remaining_rows): 15 | """ 16 | Read from one of the three methods using a random amount if sized. 17 | - 50% chance of curs.read(random()) 18 | - 40% chance of next() 19 | - 10% chance of read() # no count 20 | """ 21 | if random.random() > 0.5: 22 | rrows = random.randrange(0, remaining_rows) 23 | return curs.read(rrows), rrows 24 | elif random.random() < 0.1: 25 | return curs.read(), -1 26 | else: 27 | try: 28 | return [next(curs)], 1 29 | except StopIteration: 30 | return [], 1 31 | 32 | def random_select_get(limit): 33 | return prepare(getq %(limit - 1,)) 34 | 35 | def random_copy_get(limit): 36 | return prepare(copy %(getq %(limit - 1,),)) 37 | 38 | class test_integrity(unittest.TestCase): 39 | """ 40 | test the integrity of the get and put interfaces on queries 41 | and result handles. 
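
	Reads use randomly sized chunks; each chunk's first row must directly
	follow the previous chunk's last row, with no gaps or duplicates.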
42 | """ 43 | def test_select(self): 44 | total = 0 45 | while total < 10000: 46 | limit = random.randrange(500000) 47 | read = 0 48 | total += limit 49 | p = random_select_get(limit)() 50 | last = ([(-1,)], 1) 51 | completed = [last[0]] 52 | while True: 53 | next = random_read(p, (limit - read) or 10) 54 | thisread = len(next[0]) 55 | read += thisread 56 | completed.append(next[0]) 57 | if thisread: 58 | self.failUnlessEqual( 59 | last[0][-1][0], next[0][0][0] - 1, 60 | "first row(-1) of next failed to match the last row of the previous" 61 | ) 62 | last = next 63 | elif next[1] != 0: 64 | # done 65 | break 66 | self.failUnlessEqual(read, limit) 67 | self.failUnlessEqual(list(range(-1, limit)), [ 68 | x[0] for x in itertools.chain(*completed) 69 | ]) 70 | 71 | def test_insert(self): 72 | pass 73 | 74 | if 'db' in dir(__builtins__) and pg.version_info >= (8,2,0): 75 | def test_copy_out(self): 76 | total = 0 77 | while total < 10000000: 78 | limit = random.randrange(500000) 79 | read = 0 80 | total += limit 81 | p = random_copy_get(limit)() 82 | last = ([-1], 1) 83 | completed = [last[0]] 84 | while True: 85 | next = random_read(p, (limit - read) or 10) 86 | next = ([int(x) for x in next[0]], next[1]) 87 | thisread = len(next[0]) 88 | read += thisread 89 | completed.append(next[0]) 90 | if thisread: 91 | self.failUnlessEqual( 92 | last[0][-1], next[0][0] - 1, 93 | "first row(-1) of next failed to match the last row of the previous" 94 | ) 95 | last = next 96 | elif next[1] != 0: 97 | # done 98 | break 99 | self.failUnlessEqual(read, limit) 100 | self.failUnlessEqual( 101 | list(range(-1, limit)), 102 | list(itertools.chain(*completed)) 103 | ) 104 | 105 | def test_copy_in(self): 106 | pass 107 | 108 | def main(): 109 | global copyin, loadin 110 | execute("CREATE TEMP TABLE _dst (i bigint)") 111 | copyin = prepare("COPY _dst FROM STDIN") 112 | loadin = prepare("INSERT INTO _dst VALUES ($1)") 113 | unittest.main() 114 | 115 | if __name__ == '__main__': 116 | main() 117 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/python/structlib.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .python.structlib - module for extracting serialized data 3 | ## 4 | import struct 5 | from .functools import Composition as compose 6 | 7 | null_sequence = b'\xff\xff\xff\xff' 8 | 9 | # Always to and from network order. 10 | # Create a pair, (pack, unpack) for the given `struct` format.' 11 | def mk_pack(x): 12 | s = struct.Struct('!' 
+ x) 13 | if len(x) > 1: 14 | def pack(y, p = s.pack): 15 | return p(*y) 16 | return (pack, s.unpack_from) 17 | else: 18 | def unpack(y, p = s.unpack_from): 19 | return p(y)[0] 20 | return (s.pack, unpack) 21 | 22 | byte_pack, byte_unpack = lambda x: bytes((x,)), lambda x: x[0] 23 | double_pack, double_unpack = mk_pack("d") 24 | float_pack, float_unpack = mk_pack("f") 25 | dd_pack, dd_unpack = mk_pack("dd") 26 | ddd_pack, ddd_unpack = mk_pack("ddd") 27 | dddd_pack, dddd_unpack = mk_pack("dddd") 28 | LH_pack, LH_unpack = mk_pack("LH") 29 | lH_pack, lH_unpack = mk_pack("lH") 30 | llL_pack, llL_unpack = mk_pack("llL") 31 | qll_pack, qll_unpack = mk_pack("qll") 32 | dll_pack, dll_unpack = mk_pack("dll") 33 | 34 | dl_pack, dl_unpack = mk_pack("dl") 35 | ql_pack, ql_unpack = mk_pack("ql") 36 | 37 | hhhh_pack, hhhh_unpack = mk_pack("hhhh") 38 | 39 | longlong_pack, longlong_unpack = mk_pack("q") 40 | ulonglong_pack, ulonglong_unpack = mk_pack("Q") 41 | 42 | # Optimizations for int2, int4, and int8. 43 | try: 44 | from ..port import optimized as opt 45 | from sys import byteorder as bo 46 | if bo == 'little': 47 | short_unpack = opt.swap_int2_unpack 48 | short_pack = opt.swap_int2_pack 49 | ushort_unpack = opt.swap_uint2_unpack 50 | ushort_pack = opt.swap_uint2_pack 51 | long_unpack = opt.swap_int4_unpack 52 | long_pack = opt.swap_int4_pack 53 | ulong_unpack = opt.swap_uint4_unpack 54 | ulong_pack = opt.swap_uint4_pack 55 | 56 | if hasattr(opt, 'uint8_pack'): 57 | longlong_unpack = opt.swap_int8_unpack 58 | longlong_pack = opt.swap_int8_pack 59 | ulonglong_unpack = opt.swap_uint8_unpack 60 | ulonglong_pack = opt.swap_uint8_pack 61 | elif bo == 'big': 62 | short_unpack = opt.int2_unpack 63 | short_pack = opt.int2_pack 64 | ushort_unpack = opt.uint2_unpack 65 | ushort_pack = opt.uint2_pack 66 | long_unpack = opt.int4_unpack 67 | long_pack = opt.int4_pack 68 | ulong_unpack = opt.uint4_unpack 69 | ulong_pack = opt.uint4_pack 70 | 71 | if hasattr(opt, 'uint8_pack'): 72 | longlong_unpack = opt.int8_unpack 73 | longlong_pack = opt.int8_pack 74 | ulonglong_unpack = opt.uint8_unpack 75 | ulonglong_pack = opt.uint8_pack 76 | del bo, opt 77 | except ImportError: 78 | short_pack, short_unpack = mk_pack("h") 79 | ushort_pack, ushort_unpack = mk_pack("H") 80 | long_pack, long_unpack = mk_pack("l") 81 | ulong_pack, ulong_unpack = mk_pack("L") 82 | 83 | def split_sized_data( 84 | data, 85 | ulong_unpack = ulong_unpack, 86 | null_field = 0xFFFFFFFF, 87 | len = len, 88 | errmsg = "insufficient data in field {0}, required {1} bytes, {2} remaining".format 89 | ): 90 | """ 91 | Given serialized record data, return a tuple of tuples of type Oids and 92 | attributes. 
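
	Each serialized field is a 4-byte big-endian length followed by that
	many bytes; 0xFFFFFFFF marks a NULL field. One bytes object (or None)
	is yielded per field, e.g.:

		>>> list(split_sized_data(b'\x00\x00\x00\x02hi\xff\xff\xff\xff'))
		[b'hi', None]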
93 | """ 94 | v = memoryview(data) 95 | f = 1 96 | while v: 97 | l = ulong_unpack(v) 98 | if l == null_field: 99 | v = v[4:] 100 | yield None 101 | continue 102 | l += 4 103 | d = v[4:l].tobytes() 104 | if len(d) < l-4: 105 | raise ValueError(errmsg(f, l - 4, len(d))) 106 | v = v[l:] 107 | f += 1 108 | yield d 109 | -------------------------------------------------------------------------------- /roles/etcd/tasks/cluster_manage.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Identify nodes to add and remove 3 | ansible.builtin.set_fact: 4 | nodes_to_add: >- 5 | {{ 6 | groups['inv_etcd'] | 7 | map('extract', hostvars, 'unified_hostname') | 8 | difference(etcd_member_names | default([])) 9 | }} 10 | nodes_to_remove: >- 11 | {{ 12 | etcd_member_names | default([]) | 13 | difference(groups['inv_etcd'] | map('extract', hostvars, 'unified_hostname')) 14 | }} 15 | run_once: true 16 | 17 | - name: Show planned changes 18 | ansible.builtin.debug: 19 | msg: 20 | - "Nodes to add: {{ nodes_to_add | default([]) }}" 21 | - "Nodes to remove: {{ nodes_to_remove | default([]) }}" 22 | run_once: true 23 | 24 | - name: Add new etcd members to cluster 25 | ansible.builtin.shell: | 26 | ETCDCTL_API=3 {{ etcd_bin_path }} \ 27 | --endpoints=https://{{ hostvars[etcd_leader]['ansible_default_ipv4']['address'] }}:{{ etcd_port_client }} \ 28 | --cacert={{ etcd_conf_dir }}/ca.pem \ 29 | --cert={{ etcd_conf_dir }}/{{ hostvars[etcd_leader]['unified_hostname'] }}.pem \ 30 | --key={{ etcd_conf_dir }}/{{ hostvars[etcd_leader]['unified_hostname'] }}-key.pem \ 31 | member add {{ item }} \ 32 | --peer-urls=https://{{ target_node_ip }}:{{ etcd_port_peer }} 33 | vars: 34 | target_node_ip: >- 35 | {{ 36 | groups['inv_etcd'] | 37 | map('extract', hostvars) | 38 | selectattr('unified_hostname', 'equalto', item) | 39 | map(attribute='ansible_default_ipv4.address') | 40 | first 41 | }} 42 | delegate_to: "{{ etcd_leader }}" 43 | loop: "{{ nodes_to_add }}" 44 | register: add_results 45 | failed_when: 46 | - add_results.rc != 0 47 | - "'already exists' not in add_results.stderr" 48 | changed_when: add_results.rc == 0 49 | when: nodes_to_add | length > 0 50 | run_once: true 51 | 52 | 53 | - name: Get member HEX IDs for removal 54 | when: 55 | - etcd_cluster_exists 56 | - nodes_to_remove | length > 0 57 | ansible.builtin.shell: | 58 | set -o pipefail 59 | ETCDCTL_API=3 {{ etcd_bin_path }} \ 60 | --endpoints=https://{{ hostvars[etcd_leader]['ansible_default_ipv4']['address'] }}:{{ etcd_port_client }} \ 61 | --cacert={{ etcd_conf_dir }}/ca.pem \ 62 | --cert={{ etcd_conf_dir }}/{{ hostvars[etcd_leader]['unified_hostname'] }}.pem \ 63 | --key={{ etcd_conf_dir }}/{{ hostvars[etcd_leader]['unified_hostname'] }}-key.pem \ 64 | member list | grep "{{ item }}" | cut -d',' -f1 65 | register: etcd_member_hex_ids_to_remove 66 | delegate_to: "{{ etcd_leader }}" 67 | loop: "{{ nodes_to_remove }}" 68 | changed_when: false 69 | failed_when: etcd_member_hex_ids_to_remove.rc != 0 70 | 71 | - name: Remove etcd members from cluster 72 | when: 73 | - etcd_cluster_exists 74 | - nodes_to_remove | length > 0 75 | - item.stdout != "" 76 | - item.rc == 0 77 | ansible.builtin.shell: | 78 | set -o pipefail 79 | ETCDCTL_API=3 {{ etcd_bin_path }} \ 80 | --endpoints=https://{{ hostvars[etcd_leader]['ansible_default_ipv4']['address'] }}:{{ etcd_port_client }} \ 81 | --cacert={{ etcd_conf_dir }}/ca.pem \ 82 | --cert={{ etcd_conf_dir }}/{{ hostvars[etcd_leader]['unified_hostname'] }}.pem \ 83 | --key={{ 
etcd_conf_dir }}/{{ hostvars[etcd_leader]['unified_hostname'] }}-key.pem \ 84 | member remove {{ item.stdout }} 85 | delegate_to: "{{ etcd_leader }}" 86 | loop: "{{ etcd_member_hex_ids_to_remove.results }}" 87 | run_once: true 88 | changed_when: true 89 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/python/socket.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .python.socket - additional tools for working with sockets 3 | ## 4 | import sys 5 | import os 6 | import random 7 | import socket 8 | import math 9 | import errno 10 | import ssl 11 | 12 | __all__ = ['find_available_port', 'SocketFactory'] 13 | 14 | class SocketFactory(object): 15 | """ 16 | Object used to create a socket and connect it. 17 | 18 | This is, more or less, a specialized partial() for socket creation. 19 | 20 | Additionally, it provides methods and attributes for abstracting 21 | exception management on socket operation. 22 | """ 23 | 24 | timeout_exception = socket.timeout 25 | fatal_exception = socket.error 26 | try_again_exception = socket.error 27 | 28 | def timed_out(self, err) -> bool: 29 | return err.__class__ is self.timeout_exception 30 | 31 | @staticmethod 32 | def try_again(err, codes = (errno.EAGAIN, errno.EINTR, errno.EWOULDBLOCK, errno.ETIMEDOUT)) -> bool: 33 | """ 34 | Does the error indicate that the operation should be tried again? 35 | 36 | More importantly, the connection is *not* dead. 37 | """ 38 | errno = getattr(err, 'errno', None) 39 | if errno is None: 40 | return False 41 | return errno in codes 42 | 43 | @classmethod 44 | def fatal_exception_message(typ, err) -> (str, None): 45 | """ 46 | If the exception was fatal to the connection, 47 | what message should be given to the user? 48 | """ 49 | if typ.try_again(err): 50 | return None 51 | return getattr(err, 'strerror', '') 52 | 53 | def secure(self, socket : socket.socket) -> ssl.SSLSocket: 54 | "secure a socket with SSL" 55 | if self.socket_secure is not None: 56 | return ssl.wrap_socket(socket, **self.socket_secure) 57 | else: 58 | return ssl.wrap_socket(socket) 59 | 60 | def __call__(self, timeout = None): 61 | s = socket.socket(*self.socket_create) 62 | try: 63 | s.settimeout(float(timeout) if timeout is not None else None) 64 | s.connect(self.socket_connect) 65 | s.settimeout(None) 66 | except Exception: 67 | s.close() 68 | raise 69 | return s 70 | 71 | def __init__(self, 72 | socket_create : "positional parameters given to socket.socket()", 73 | socket_connect : "parameter given to socket.connect()", 74 | socket_secure : "keywords given to ssl.wrap_socket" = None, 75 | ): 76 | self.socket_create = socket_create 77 | self.socket_connect = socket_connect 78 | self.socket_secure = socket_secure 79 | 80 | def __str__(self): 81 | return 'socket' + repr(self.socket_connect) 82 | 83 | def find_available_port( 84 | interface : "attempt to bind to interface" = 'localhost', 85 | address_family : "address family to use (default: AF_INET)" = socket.AF_INET, 86 | limit : "Number tries to make before giving up" = 1024, 87 | port_range = (6600, 56600) 88 | ) -> (int, None): 89 | """ 90 | Find an available port on the given interface for the given address family. 91 | 92 | Returns a port number that was successfully bound to or `None` if the 93 | attempt limit was reached. 
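
	For example:

		>>> p = find_available_port()
		>>> p is None or 6600 <= p < 56600
		True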
94 | """ 95 | i = 0 96 | while i < limit: 97 | i += 1 98 | port = ( 99 | math.floor( 100 | random.random() * (port_range[1] - port_range[0]) 101 | ) + port_range[0] 102 | ) 103 | s = socket.socket(address_family, socket.SOCK_STREAM,) 104 | try: 105 | s.bind(('localhost', port)) 106 | s.close() 107 | except socket.error as e: 108 | s.close() 109 | if e.errno in (errno.EACCES, errno.EADDRINUSE, errno.EINTR): 110 | # try again 111 | continue 112 | break 113 | else: 114 | port = None 115 | 116 | return port 117 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/test/test_iri.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .test.test_iri 3 | ## 4 | import unittest 5 | import postgresql.iri as pg_iri 6 | 7 | value_errors = ( 8 | # Invalid scheme. 9 | 'http://user@host/index.html', 10 | ) 11 | 12 | iri_samples = ( 13 | 'host/dbname/path?param=val#frag', 14 | '#frag', 15 | '?param=val', 16 | '?param=val#frag', 17 | 'user@', 18 | ':pass@', 19 | 'u:p@h', 20 | 'u:p@h:1', 21 | 'pq://user:password@host:port/database?setting=value#public,private', 22 | 'pq://fæm.com:123/õéf/á?param=val', 23 | 'pq://l»»@fæm.com:123/õéf/á?param=val', 24 | 'pq://fæᎱᏋm.com/õéf/á?param=val', 25 | 'pq://fæᎱᏋm.com/õéf/á?param=val&[setting]=value', 26 | ) 27 | 28 | sample_structured_parameters = [ 29 | { 30 | 'host' : 'hostname', 31 | 'port' : '1234', 32 | 'database' : 'foo_db', 33 | }, 34 | { 35 | 'user' : 'username', 36 | 'database' : 'database_name', 37 | 'settings' : {'foo':'bar','feh':'bl%,23'}, 38 | }, 39 | { 40 | 'user' : 'username', 41 | 'database' : 'database_name', 42 | }, 43 | { 44 | 'database' : 'database_name', 45 | }, 46 | { 47 | 'user' : 'user_name', 48 | }, 49 | { 50 | 'host' : 'hostname', 51 | }, 52 | { 53 | 'user' : 'username', 54 | 'password' : 'pass', 55 | 'host' : '', 56 | 'port' : '4321', 57 | 'database' : 'database_name', 58 | 'path' : ['path'], 59 | }, 60 | { 61 | 'user' : 'user', 62 | 'password' : 'secret', 63 | 'host' : '', 64 | 'port' : 'ssh', 65 | 'database' : 'database_name', 66 | 'settings' : { 67 | 'set1' : 'val1', 68 | 'set2' : 'val2', 69 | }, 70 | }, 71 | { 72 | 'user' : 'user', 73 | 'password' : 'secret', 74 | 'host' : '', 75 | 'port' : 'ssh', 76 | 'database' : 'database_name', 77 | 'settings' : { 78 | 'set1' : 'val1', 79 | 'set2' : 'val2', 80 | }, 81 | 'connect_timeout' : '10', 82 | 'sslmode' : 'prefer', 83 | }, 84 | ] 85 | 86 | class test_iri(unittest.TestCase): 87 | def testPresentPasswordObscure(self): 88 | "password is present in IRI, and obscure it" 89 | s = 'pq://user:pass@host:port/dbname' 90 | o = 'pq://user:***@host:port/dbname' 91 | p = pg_iri.parse(s) 92 | ps = pg_iri.serialize(p, obscure_password = True) 93 | self.assertEqual(ps, o) 94 | 95 | def testPresentPasswordObscure(self): 96 | "password is *not* present in IRI, and do nothing" 97 | s = 'pq://user@host:port/dbname' 98 | o = 'pq://user@host:port/dbname' 99 | p = pg_iri.parse(s) 100 | ps = pg_iri.serialize(p, obscure_password = True) 101 | self.assertEqual(ps, o) 102 | 103 | def testValueErrors(self): 104 | for x in value_errors: 105 | self.assertRaises(ValueError, 106 | pg_iri.parse, x 107 | ) 108 | 109 | def testParseSerialize(self): 110 | scheme = 'pq://' 111 | for x in iri_samples: 112 | px = pg_iri.parse(x) 113 | spx = pg_iri.serialize(px) 114 | pspx = pg_iri.parse(spx) 115 | self.assertTrue( 116 | pspx == px, 117 | "parse-serialize incongruity, %r -> %r -> %r : %r != %r" %( 118 | x, px, spx, pspx, px 
119 | ) 120 | ) 121 | spspx = pg_iri.serialize(pspx) 122 | self.assertTrue( 123 | spx == spspx, 124 | "parse-serialize incongruity, %r -> %r -> %r -> %r : %r != %r" %( 125 | x, px, spx, pspx, spspx, spx 126 | ) 127 | ) 128 | 129 | def testSerializeParse(self): 130 | for x in sample_structured_parameters: 131 | xs = pg_iri.serialize(x) 132 | uxs = pg_iri.parse(xs) 133 | self.assertTrue( 134 | x == uxs, 135 | "serialize-parse incongruity, %r -> %r -> %r" %( 136 | x, xs, uxs, 137 | ) 138 | ) 139 | 140 | if __name__ == '__main__': 141 | unittest.main() 142 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/versionstring.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .versionstring 3 | ## 4 | """ 5 | PostgreSQL version parsing. 6 | 7 | >>> postgresql.version.split('8.0.1') 8 | (8, 0, 1, None, None) 9 | """ 10 | 11 | def split(vstr : str) -> ( 12 | 'major','minor','patch',...,'state_class','state_level' 13 | ): 14 | """ 15 | Split a PostgreSQL version string into a tuple 16 | (major,minor,patch,...,state_class,state_level) 17 | """ 18 | v = vstr.strip().split('.') 19 | 20 | # Get rid of the numbers around the state_class (beta,a,dev,alpha, etc) 21 | state_class = v[-1].strip('0123456789') 22 | if state_class: 23 | last_version, state_level = v[-1].split(state_class) 24 | if not state_level: 25 | state_level = None 26 | else: 27 | state_level = int(state_level) 28 | vlist = [int(x or '0') for x in v[:-1] if x.isdigit()] 29 | if last_version: 30 | vlist.append(int(last_version)) 31 | vlist += [None] * (3 - len(vlist)) 32 | vlist += [state_class, state_level] 33 | else: 34 | state_level = None 35 | state_class = None 36 | vlist = [int(x or '0') for x in v] 37 | # pad the difference with `None` objects, and +2 for the state_*. 
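# e.g. split('9.6') -> (9, 6, None, None, None)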
38 | vlist += [None] * ((3 - len(vlist)) + 2) 39 | return tuple(vlist) 40 | 41 | def unsplit(vtup : tuple) -> str: 42 | 'join a version tuple back into the original version string' 43 | svtup = [str(x) for x in vtup[:-2] if x is not None] 44 | state_class, state_level = vtup[-2:] 45 | return '.'.join(svtup) + ( 46 | '' if state_class is None else state_class + str(state_level) 47 | ) 48 | 49 | def normalize(split_version : "a tuple returned by `split`") -> tuple: 50 | """ 51 | Given a tuple produced by `split`, normalize the `None` objects into int(0) 52 | or 'final' if it's the ``state_class`` 53 | """ 54 | (*head, state_class, state_level) = split_version 55 | mmp = [x if x is not None else 0 for x in head] 56 | return tuple( 57 | mmp + [state_class or 'final', state_level or 0] 58 | ) 59 | 60 | default_state_class_priority = [ 61 | 'dev', 62 | 'a', 63 | 'alpha', 64 | 'b', 65 | 'beta', 66 | 'rc', 67 | 'final', 68 | None, 69 | ] 70 | 71 | python = repr 72 | 73 | def xml(self): 74 | return '\n' + \ 75 | ' ' + str(self[0]) + '\n' + \ 76 | ' ' + str(self[1]) + '\n' + \ 77 | ' ' + str(self[2]) + '\n' + \ 78 | ' ' + str(self[-2]) + '\n' + \ 79 | ' ' + str(self[-1]) + '\n' + \ 80 | '' 81 | 82 | def sh(self): 83 | return """PG_VERSION_MAJOR=%s 84 | PG_VERSION_MINOR=%s 85 | PG_VERSION_PATCH=%s 86 | PG_VERSION_STATE=%s 87 | PG_VERSION_LEVEL=%s""" %( 88 | str(self[0]), 89 | str(self[1]), 90 | str(self[2]), 91 | str(self[-2]), 92 | str(self[-1]), 93 | ) 94 | 95 | if __name__ == '__main__': 96 | import sys 97 | import os 98 | from optparse import OptionParser 99 | op = OptionParser() 100 | op.add_option('-f', '--format', 101 | type='choice', 102 | dest='format', 103 | help='format of output information', 104 | choices=('sh', 'xml', 'python'), 105 | default='sh', 106 | ) 107 | op.add_option('-n', '--normalize', 108 | action='store_true', 109 | dest='normalize', 110 | help='replace missing values with defaults', 111 | default=False, 112 | ) 113 | op.set_usage(op.get_usage().strip() + ' "version to parse"') 114 | co, ca = op.parse_args() 115 | if len(ca) != 1: 116 | op.error('requires exactly one argument, the version') 117 | else: 118 | v = split(ca[0]) 119 | if co.normalize: 120 | v = normalize(v) 121 | sys.stdout.write(getattr(sys.modules[__name__], co.format)(v)) 122 | sys.stdout.write(os.linesep) 123 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/changes-v1.0.rst: -------------------------------------------------------------------------------- 1 | Changes in v1.0 2 | =============== 3 | 4 | 1.0.4 in development 5 | -------------------- 6 | 7 | * Alter how changes are represented in documentation to simplify merging. 8 | 9 | 1.0.3 released on 2011-09-24 10 | ---------------------------- 11 | 12 | * Use raise x from y to generalize exceptions. (Elvis Pranskevichus) 13 | * Alter postgresql.string.quote_ident to always quote. (Elvis Pranskevichus) 14 | * Add postgresql.string.quote_ident_if_necessary (Modification of Elvis Pranskevichus' patch) 15 | * Many postgresql.string bug fixes (Elvis Pranskevichus) 16 | * Correct ResourceWarnings improving Python 3.2 support. (jwp) 17 | * Add test command to setup.py (Elvis Pranskevichus) 18 | 19 | 1.0.2 released on 2010-09-18 20 | ---------------------------- 21 | 22 | * Add support for DOMAINs in registered composites. (Elvis Pranskevichus) 23 | * Properly raise StopIteration in Cursor.__next__. (Elvis Pranskevichus) 24 | * Add Cluster Management documentation. 
25 | * Release savepoints after rolling them back. 26 | * Fix Startup() usage for Python 3.2. 27 | * Emit deprecation warning when 'gid' is given to xact(). 28 | * Compensate for Python3.2's ElementTree API changes. 29 | 30 | 1.0.1 released on 2010-04-24 31 | ---------------------------- 32 | 33 | * Fix unpacking of array NULLs. (Elvis Pranskevichus) 34 | * Fix .first()'s handling of counts and commands. 35 | Bad logic caused zero-counts to return the command tag. 36 | * Don't interrupt and close a temporal connection if it's not open. 37 | * Use the Driver's typio attribute for TypeIO overrides. (Elvis Pranskevichus) 38 | 39 | 1.0 released on 2010-03-27 40 | -------------------------- 41 | 42 | * **DEPRECATION**: Removed 2PC support documentation. 43 | * **DEPRECATION**: Removed pg_python and pg_dotconf 'scripts'. 44 | They are still accessible by python3 -m postgresql.bin.pg_* 45 | * Add support for binary hstore. 46 | * Add support for user service files. 47 | * Implement a Copy manager for direct connection-to-connection COPY operations. 48 | * Added db.do() method for DO-statement support(convenience method). 49 | * Set the default client_min_messages level to WARNING. 50 | NOTICEs are often not desired by programmers, and py-postgresql's 51 | high verbosity further irritates that case. 52 | * Added postgresql.project module to provide project information. 53 | Project name, author, version, etc. 54 | * Increased default recvsize and chunksize for improved performance. 55 | * 'D' messages are special cased as builtins.tuples instead of 56 | protocol.element3.Tuple 57 | * Alter Statement.chunks() to return chunks of builtins.tuple. Being 58 | an interface intended for speed, types.Row() impedes its performance. 59 | * Fix handling of infinity values with timestamptz, timestamp, and date. 60 | [Bug reported by Axel Rau.] 61 | * Correct representation of PostgreSQL ARRAYs by properly recording 62 | lowerbounds and upperbounds. Internally, sub-ARRAYs have their own 63 | element lists. 64 | * Implement a NotificationManager for managing the NOTIFYs received 65 | by a connection. The class can manage NOTIFYs from multiple 66 | connections, whereas the db.wait() method is tailored for single targets. 67 | * Implement an ALock class for managing advisory locks using the 68 | threading.Lock APIs. [Feedback from Valentine Gogichashvili] 69 | * Implement reference symbols. Allow libraries to define symbols that 70 | are used to create queries that inherit the original symbol's type and 71 | execution method. ``db.prepare(db.prepare(...).first())`` 72 | * Fix handling of unix domain sockets by pg.open and driver.connect. 73 | [Reported by twitter.com/rintavarustus] 74 | * Fix typo/dropped parts of a raise LoadError in .lib. 75 | [Reported by Vlad Pranskevichus] 76 | * Fix db.tracer and pg_python's --pq-trace= 77 | * Fix count return from .first() method. Failed to provide an empty 78 | tuple for the rformats of the bind statement. 79 | [Reported by dou dou] 80 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/sphinx/changes-v1.0.rst: -------------------------------------------------------------------------------- 1 | Changes in v1.0 2 | =============== 3 | 4 | 1.0.4 in development 5 | -------------------- 6 | 7 | * Alter how changes are represented in documentation to simplify merging. 
8 | 9 | 1.0.3 released on 2011-09-24 10 | ---------------------------- 11 | 12 | * Use raise x from y to generalize exceptions. (Elvis Pranskevichus) 13 | * Alter postgresql.string.quote_ident to always quote. (Elvis Pranskevichus) 14 | * Add postgresql.string.quote_ident_if_necessary (Modification of Elvis Pranskevichus' patch) 15 | * Many postgresql.string bug fixes (Elvis Pranskevichus) 16 | * Correct ResourceWarnings improving Python 3.2 support. (jwp) 17 | * Add test command to setup.py (Elvis Pranskevichus) 18 | 19 | 1.0.2 released on 2010-09-18 20 | ---------------------------- 21 | 22 | * Add support for DOMAINs in registered composites. (Elvis Pranskevichus) 23 | * Properly raise StopIteration in Cursor.__next__. (Elvis Pranskevichus) 24 | * Add Cluster Management documentation. 25 | * Release savepoints after rolling them back. 26 | * Fix Startup() usage for Python 3.2. 27 | * Emit deprecation warning when 'gid' is given to xact(). 28 | * Compensate for Python3.2's ElementTree API changes. 29 | 30 | 1.0.1 released on 2010-04-24 31 | ---------------------------- 32 | 33 | * Fix unpacking of array NULLs. (Elvis Pranskevichus) 34 | * Fix .first()'s handling of counts and commands. 35 | Bad logic caused zero-counts to return the command tag. 36 | * Don't interrupt and close a temporal connection if it's not open. 37 | * Use the Driver's typio attribute for TypeIO overrides. (Elvis Pranskevichus) 38 | 39 | 1.0 released on 2010-03-27 40 | -------------------------- 41 | 42 | * **DEPRECATION**: Removed 2PC support documentation. 43 | * **DEPRECATION**: Removed pg_python and pg_dotconf 'scripts'. 44 | They are still accessible by python3 -m postgresql.bin.pg_* 45 | * Add support for binary hstore. 46 | * Add support for user service files. 47 | * Implement a Copy manager for direct connection-to-connection COPY operations. 48 | * Added db.do() method for DO-statement support(convenience method). 49 | * Set the default client_min_messages level to WARNING. 50 | NOTICEs are often not desired by programmers, and py-postgresql's 51 | high verbosity further irritates that case. 52 | * Added postgresql.project module to provide project information. 53 | Project name, author, version, etc. 54 | * Increased default recvsize and chunksize for improved performance. 55 | * 'D' messages are special cased as builtins.tuples instead of 56 | protocol.element3.Tuple 57 | * Alter Statement.chunks() to return chunks of builtins.tuple. Being 58 | an interface intended for speed, types.Row() impedes its performance. 59 | * Fix handling of infinity values with timestamptz, timestamp, and date. 60 | [Bug reported by Axel Rau.] 61 | * Correct representation of PostgreSQL ARRAYs by properly recording 62 | lowerbounds and upperbounds. Internally, sub-ARRAYs have their own 63 | element lists. 64 | * Implement a NotificationManager for managing the NOTIFYs received 65 | by a connection. The class can manage NOTIFYs from multiple 66 | connections, whereas the db.wait() method is tailored for single targets. 67 | * Implement an ALock class for managing advisory locks using the 68 | threading.Lock APIs. [Feedback from Valentine Gogichashvili] 69 | * Implement reference symbols. Allow libraries to define symbols that 70 | are used to create queries that inherit the original symbol's type and 71 | execution method. ``db.prepare(db.prepare(...).first())`` 72 | * Fix handling of unix domain sockets by pg.open and driver.connect. 
73 | [Reported by twitter.com/rintavarustus] 74 | * Fix typo/dropped parts of a raise LoadError in .lib. 75 | [Reported by Vlad Pranskevichus] 76 | * Fix db.tracer and pg_python's --pq-trace= 77 | * Fix count return from .first() method. Failed to provide an empty 78 | tuple for the rformats of the bind statement. 79 | [Reported by dou dou] 80 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/bin/pg_python.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .bin.pg_python - Python console with a connection. 3 | ## 4 | """ 5 | Python command with a PG-API connection(``db``). 6 | """ 7 | import os 8 | import sys 9 | import re 10 | import code 11 | import optparse 12 | import contextlib 13 | from .. import clientparameters 14 | from ..python import command as pycmd 15 | from .. import project 16 | 17 | from ..driver import default as pg_driver 18 | from .. import exceptions as pg_exc 19 | from .. import sys as pg_sys 20 | from .. import lib as pg_lib 21 | 22 | pq_trace = optparse.make_option( 23 | '--pq-trace', 24 | dest = 'pq_trace', 25 | help = 'trace PQ protocol transmissions', 26 | default = None, 27 | ) 28 | default_options = [ 29 | pq_trace, 30 | clientparameters.option_lib, 31 | clientparameters.option_libpath, 32 | ] + pycmd.default_optparse_options 33 | 34 | def command(argv = sys.argv): 35 | p = clientparameters.DefaultParser( 36 | "%prog [connection options] [script] ...", 37 | version = project.version, 38 | option_list = default_options 39 | ) 40 | p.disable_interspersed_args() 41 | co, ca = p.parse_args(argv[1:]) 42 | rv = 1 43 | 44 | # Resolve the category. 45 | pg_sys.libpath.insert(0, os.path.curdir) 46 | pg_sys.libpath.extend(co.libpath or []) 47 | if co.lib: 48 | cat = pg_lib.Category(*map(pg_lib.load, co.lib)) 49 | else: 50 | cat = None 51 | 52 | trace_file = None 53 | if co.pq_trace is not None: 54 | trace_file = open(co.pq_trace, 'a') 55 | 56 | try: 57 | need_prompt = False 58 | cond = None 59 | connector = None 60 | connection = None 61 | while connection is None: 62 | try: 63 | cond = clientparameters.collect(parsed_options = co, prompt_title = None) 64 | if need_prompt: 65 | # authspec error thrown last time, so force prompt. 66 | cond['prompt_password'] = True 67 | try: 68 | clientparameters.resolve_password(cond, prompt_title = 'pg_python') 69 | except EOFError: 70 | raise SystemExit(1) 71 | connector = pg_driver.fit(category = cat, **cond) 72 | connection = connector() 73 | if trace_file is not None: 74 | connection.tracer = trace_file.write 75 | connection.connect() 76 | except pg_exc.ClientCannotConnectError as err: 77 | for att in connection.failures: 78 | exc = att.error 79 | if isinstance(exc, pg_exc.AuthenticationSpecificationError): 80 | sys.stderr.write(os.linesep + exc.message + (os.linesep*2)) 81 | # keep prompting the user 82 | need_prompt = True 83 | connection = None 84 | break 85 | else: 86 | # no invalid password failures.. 
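# some other connection failure; propagate it to the caller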
87 | raise
88 |
89 | pythonexec = pycmd.Execution(ca,
90 | context = getattr(co, 'python_context', None),
91 | loader = getattr(co, 'python_main', None),
92 | )
93 |
94 | builtin_overload = {
95 | # New built-ins
96 | 'connector' : connector,
97 | 'db' : connection,
98 | 'do' : connection.do,
99 | 'prepare' : connection.prepare,
100 |
101 | 'sqlexec' : connection.execute,
102 | 'settings' : connection.settings,
103 | 'proc' : connection.proc,
104 | 'xact' : connection.xact,
105 | }
106 | if not isinstance(__builtins__, dict):
107 | builtins_d = __builtins__.__dict__
108 | else:
109 | builtins_d = __builtins__
110 | restore = {k : builtins_d.get(k) for k in builtin_overload}
111 |
112 | builtins_d.update(builtin_overload)
113 | try:
114 | with connection:
115 | rv = pythonexec(
116 | context = pycmd.postmortem(os.environ.get('PYTHON_POSTMORTEM'))
117 | )
118 | exc = getattr(sys, 'last_type', None)
119 | if rv and exc and not issubclass(exc, Exception):
120 | # Don't try to close it if wasn't an Exception.
121 | del connection.pq.socket
122 | finally:
123 | # restore __builtins__
124 | builtins_d.update(restore)
125 | for k, v in restore.items():
126 | if v is None:
127 | del builtins_d[k] # the name had no prior binding, so remove it
128 | if trace_file is not None:
129 | trace_file.close()
130 | except:
131 | pg_sys.libpath.remove(os.path.curdir)
132 | raise
133 | return rv
134 |
135 | if __name__ == '__main__':
136 | sys.exit(command(sys.argv))
137 | ##
138 | # vim: ts=3:sw=3:noet:
139 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/port/_optimized/module.c: --------------------------------------------------------------------------------
1 | /*
2 | * module.c - optimizations for various parts of py-postgresql
3 | *
4 | * This module.c file ties together other classified C source.
5 | * Each filename describes the part of the protocol package that it
6 | * covers. It merely uses CPP includes to bring them into this
7 | * file and then uses some CPP macros to expand the definitions
8 | * in each file.
9 | */
10 | #include <Python.h>
11 | #include <structmember.h>
12 | /*
13 | * If Python didn't find it, it won't include it.
14 | * However, it's quite necessary.
15 | */
16 | #ifndef HAVE_STDINT_H
17 | #include <stdint.h>
18 | #endif
19 |
20 | #define USHORT_MAX ((1<<16)-1)
21 | #define SHORT_MAX ((1<<15)-1)
22 | #define SHORT_MIN (-(1<<15))
23 |
24 | #define PyObject_TypeName(ob) \
25 | (((PyTypeObject *) (ob->ob_type))->tp_name)
26 |
27 | /*
28 | * buffer.c needs the message_types object from .protocol.message_types.
29 | * Initialized in PyInit_optimized.
30 | */ 31 | static PyObject *message_types = NULL; 32 | static PyObject *serialize_strob = NULL; 33 | static PyObject *msgtype_strob = NULL; 34 | 35 | static int32_t (*local_ntohl)(int32_t) = NULL; 36 | static short (*local_ntohs)(short) = NULL; 37 | 38 | /* 39 | * optimized module contents 40 | */ 41 | #include "structlib.c" 42 | #include "functools.c" 43 | #include "buffer.c" 44 | #include "wirestate.c" 45 | #include "element3.c" 46 | 47 | 48 | /* cpp abuse, read up on X-Macros if you don't understand */ 49 | #define mFUNC(name, typ, doc) \ 50 | {#name, (PyCFunction) name, typ, PyDoc_STR(doc)}, 51 | static PyMethodDef optimized_methods[] = { 52 | include_element3_functions 53 | include_structlib_functions 54 | include_functools_functions 55 | {NULL} 56 | }; 57 | #undef mFUNC 58 | 59 | static struct PyModuleDef optimized_module = { 60 | PyModuleDef_HEAD_INIT, 61 | "optimized", /* name of module */ 62 | NULL, /* module documentation, may be NULL */ 63 | -1, /* size of per-interpreter state of the module, 64 | or -1 if the module keeps state in global variables. */ 65 | optimized_methods, 66 | }; 67 | 68 | PyMODINIT_FUNC 69 | PyInit_optimized(void) 70 | { 71 | PyObject *mod; 72 | PyObject *msgtypes; 73 | PyObject *fromlist, *fromstr; 74 | long l; 75 | 76 | /* make some constants */ 77 | if (serialize_strob == NULL) 78 | { 79 | serialize_strob = PyUnicode_FromString("serialize"); 80 | if (serialize_strob == NULL) 81 | return(NULL); 82 | } 83 | if (msgtype_strob == NULL) 84 | { 85 | msgtype_strob = PyUnicode_FromString("type"); 86 | if (msgtype_strob == NULL) 87 | return(NULL); 88 | } 89 | 90 | mod = PyModule_Create(&optimized_module); 91 | if (mod == NULL) 92 | return(NULL); 93 | 94 | /* cpp abuse; ready types */ 95 | #define mTYPE(name) \ 96 | if (PyType_Ready(&name##_Type) < 0) \ 97 | goto cleanup; \ 98 | if (PyModule_AddObject(mod, #name, \ 99 | (PyObject *) &name##_Type) < 0) \ 100 | goto cleanup; 101 | 102 | /* buffer.c */ 103 | include_buffer_types 104 | /* wirestate.c */ 105 | include_wirestate_types 106 | #undef mTYPE 107 | 108 | l = 1; 109 | if (((char *) &l)[0] == 1) 110 | { 111 | /* little */ 112 | local_ntohl = swap_int4; 113 | local_ntohs = swap_short; 114 | } 115 | else 116 | { 117 | /* big */ 118 | local_ntohl = return_int4; 119 | local_ntohs = return_short; 120 | } 121 | 122 | /* 123 | * Get the message_types tuple to type "instantiation". 
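* (Roughly equivalent to the Python statement
*     from ..protocol.message_types import message_types
* executed relative to this module's package.)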
124 | */ 125 | fromlist = PyList_New(1); 126 | fromstr = PyUnicode_FromString("message_types"); 127 | PyList_SetItem(fromlist, 0, fromstr); 128 | msgtypes = PyImport_ImportModuleLevel( 129 | "protocol.message_types", 130 | PyModule_GetDict(mod), 131 | PyModule_GetDict(mod), 132 | fromlist, 2 133 | ); 134 | Py_DECREF(fromlist); 135 | if (msgtypes == NULL) 136 | goto cleanup; 137 | message_types = PyObject_GetAttrString(msgtypes, "message_types"); 138 | Py_DECREF(msgtypes); 139 | 140 | if (!PyObject_IsInstance(message_types, (PyObject *) (&PyTuple_Type))) 141 | { 142 | PyErr_SetString(PyExc_RuntimeError, 143 | "local protocol.message_types.message_types is not a tuple object"); 144 | goto cleanup; 145 | } 146 | 147 | return(mod); 148 | cleanup: 149 | Py_DECREF(mod); 150 | return(NULL); 151 | } 152 | /* 153 | * vim: ts=3:sw=3:noet: 154 | */ 155 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/test/test_notifyman.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .test.test_notifyman - test .notifyman 3 | ## 4 | import unittest 5 | import threading 6 | import time 7 | from ..temporal import pg_tmp 8 | from ..notifyman import NotificationManager 9 | 10 | class test_notifyman(unittest.TestCase): 11 | @pg_tmp 12 | def testNotificationManager(self): 13 | # signals each other 14 | alt = new() 15 | with alt: 16 | nm = NotificationManager(db, alt) 17 | db.listen('foo') 18 | alt.listen('bar') 19 | # notify the other. 20 | alt.notify('foo') 21 | db.notify('bar') 22 | # we can separate these here because there's no timeout 23 | for ndb, notifies in nm: 24 | for n in notifies: 25 | if ndb is db: 26 | self.assertEqual(n[0], 'foo') 27 | self.assertEqual(n[1], '') 28 | self.assertEqual(n[2], alt.backend_id) 29 | nm.connections.discard(db) 30 | elif ndb is alt: 31 | self.assertEqual(n[0], 'bar') 32 | self.assertEqual(n[1], '') 33 | self.assertEqual(n[2], db.backend_id) 34 | nm.connections.discard(alt) 35 | else: 36 | self.fail("unknown connection received notify..") 37 | 38 | @pg_tmp 39 | def testNotificationManagerTimeout(self): 40 | nm = NotificationManager(db, timeout = 0.1) 41 | db.listen('foo') 42 | count = 0 43 | for event in nm: 44 | if event is None: 45 | # do this a few times, then break out of the loop 46 | db.notify('foo') 47 | continue 48 | ndb, notifies = event 49 | self.assertEqual(ndb, db) 50 | for n in notifies: 51 | self.assertEqual(n[0], 'foo') 52 | self.assertEqual(n[1], '') 53 | self.assertEqual(n[2], db.backend_id) 54 | count = count + 1 55 | if count > 3: 56 | break 57 | 58 | @pg_tmp 59 | def testNotificationManagerZeroTimeout(self): 60 | # Zero-timeout means raise StopIteration when 61 | # there are no notifications to emit. 62 | # It checks the wire, but does *not* wait for data. 
63 | nm = NotificationManager(db, timeout = 0)
64 | db.listen('foo')
65 | self.assertEqual(list(nm), [])
66 | db.notify('foo')
67 | time.sleep(0.01)
68 | self.assertEqual(list(nm), [('foo','',db.backend_id)]) # bit of a race
69 |
70 | @pg_tmp
71 | def test_iternotifies(self):
72 | # db.iternotifies() simplification of NotificationManager
73 | alt = new()
74 | alt.listen('foo')
75 | alt.listen('close')
76 | def get_notices(db, l):
77 | with db:
78 | for x in db.iternotifies():
79 | if x[0] == 'close':
80 | break
81 | l.append(x)
82 | rl = []
83 | t = threading.Thread(target = get_notices, args = (alt, rl,))
84 | t.start()
85 | db.notify('foo')
86 | while not rl:
87 | time.sleep(0.05)
88 | channel, payload, pid = rl.pop(0)
89 | self.assertEqual(channel, 'foo')
90 | self.assertEqual(payload, '')
91 | self.assertEqual(pid, db.backend_id)
92 | db.notify('close')
93 |
94 | @pg_tmp
95 | def testIterNotifiesZeroTimeout(self):
96 | # Zero-timeout means raise StopIteration when
97 | # there are no notifications to emit.
98 | # It checks the wire, but does *not* wait for data.
99 | db.listen('foo')
100 | self.assertEqual(list(db.iternotifies(0)), [])
101 | db.notify('foo')
102 | time.sleep(0.01)
103 | self.assertEqual(list(db.iternotifies(0)), [('foo','', db.backend_id)]) # bit of a race
104 |
105 | @pg_tmp
106 | def testNotificationManagerOnClosed(self):
107 | # When the connection goes away, the NM iterator
108 | # should raise a Stop.
109 | db = new()
110 | db.listen('foo')
111 | db.notify('foo')
112 | for n in db.iternotifies():
113 | db.close()
114 | self.assertEqual(db.closed, True)
115 | del db
116 | # closer, after an idle
117 | db = new()
118 | db.listen('foo')
119 | for n in db.iternotifies(0.2):
120 | if n is None:
121 | # In the loop, notify, and expect to
122 | # get the notification even though the
123 | # connection was closed.
124 | db.notify('foo')
125 | db.execute('')
126 | db.close()
127 | hit = False
128 | else:
129 | hit = True
130 | # hit should get set two times.
131 | # once on the first idle, and once on the event
132 | # received after the close.
133 | self.assertEqual(db.closed, True)
134 | self.assertEqual(hit, True)
135 |
136 | if __name__ == '__main__':
137 | unittest.main()
138 | -------------------------------------------------------------------------------- /roles/etcd/tasks/main.yml: --------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure unified hostname is available (fallback if not set by prepare_nodes)
3 | block:
4 | - name: Check if unified_hostname is already set
5 | ansible.builtin.debug:
6 | var: unified_hostname
7 | failed_when: false
8 |
9 | - name: Set unified hostname fallback based on inventory format
10 | ansible.builtin.set_fact:
11 | has_domain: "{{ '.'
in inventory_hostname }}" 12 | when: unified_hostname is not defined or unified_hostname != inventory_hostname.split('.')[0] 13 | 14 | - name: Set unified hostname for FQDN inventory (fallback) 15 | ansible.builtin.set_fact: 16 | unified_hostname: "{{ ansible_fqdn }}" 17 | cacheable: true 18 | when: 19 | - (unified_hostname is not defined or unified_hostname != inventory_hostname.split('.')[0]) 20 | - has_domain | default(false) 21 | 22 | - name: Set unified hostname for non-FQDN inventory (fallback) 23 | ansible.builtin.set_fact: 24 | unified_hostname: "{{ inventory_hostname }}" 25 | cacheable: true 26 | when: 27 | - (unified_hostname is not defined or unified_hostname != inventory_hostname) 28 | - not (has_domain | default(false)) 29 | 30 | - name: Debug final unified hostname 31 | ansible.builtin.debug: 32 | msg: "Final unified_hostname: {{ unified_hostname }}" 33 | 34 | - name: Collect facts for etcd 35 | ansible.builtin.set_fact: 36 | cacheable: true 37 | etcd_listen_public: >- 38 | {{ "0.0.0.0" if etcd_iface_public == "all" else 39 | (ansible_default_ipv4.address if etcd_iface_public == "default" 40 | else hostvars[inventory_hostname]["ansible_" ~ etcd_iface_public]["ipv4"]["address"]) }} 41 | 42 | etcd_listen_cluster: >- 43 | {{ "0.0.0.0" if etcd_iface_cluster == "all" else 44 | (ansible_default_ipv4.address if etcd_iface_cluster == "default" 45 | else hostvars[inventory_hostname]["ansible_" ~ etcd_iface_cluster]["ipv4"]["address"]) }} 46 | 47 | etcd_address_public: >- 48 | {{ unified_hostname if not etcd_use_ips | bool else 49 | (ansible_default_ipv4.address if etcd_iface_public is in [ "all", "default" ] 50 | else hostvars[inventory_hostname]["ansible_" ~ etcd_iface_public]["ipv4"]["address"]) }} 51 | 52 | etcd_address_cluster: >- 53 | {{ unified_hostname if not etcd_use_ips | bool else 54 | (ansible_default_ipv4.address if etcd_iface_cluster is in [ "all", "default" ] 55 | else hostvars[inventory_hostname]["ansible_" ~ etcd_iface_cluster]["ipv4"]["address"]) }} 56 | 57 | - name: Install etcd package 58 | ansible.builtin.package: 59 | name: >- 60 | {{ 61 | 'etcd-tantor-all' + ('-' + etcd_package_version if etcd_package_version | length > 0 else '') 62 | }} 63 | state: present 64 | 65 | - name: Create etcd directories 66 | ansible.builtin.file: 67 | state: directory 68 | path: '{{ etcd_cluster_pki_dir }}' 69 | owner: '{{ etcd_user }}' 70 | group: '{{ etcd_group }}' 71 | mode: "0755" 72 | 73 | - name: Discover etcd cluster state 74 | ansible.builtin.include_tasks: cluster_discovery.yml 75 | 76 | - name: Manage cluster membership 77 | ansible.builtin.include_tasks: cluster_manage.yml 78 | when: etcd_cluster_exists | default(false) 79 | 80 | - name: Set up PKI for etcd 81 | ansible.builtin.include_tasks: pki.yml 82 | when: etcd_secure | bool 83 | 84 | - name: Set etcd configuration parameters 85 | ansible.builtin.set_fact: 86 | etcd_initial_cluster_state: >- 87 | {%- if (etcd_cluster_exists | default(false)) and unified_hostname not in (etcd_member_names | default([])) -%} 88 | existing{%- else -%}new{%- endif -%} 89 | etcd_use_initial_token: >- 90 | {%- if (etcd_cluster_exists | default(false)) and unified_hostname not in (etcd_member_names | default([])) -%} 91 | false{%- else -%}true{%- endif -%} 92 | 93 | - name: Copy etcd configuration 94 | ansible.builtin.template: 95 | src: etcd.conf.j2 96 | dest: /opt/tantor/etc/etcd/etcd.conf 97 | mode: "0644" 98 | notify: Restart etcd-tantor.service 99 | 100 | - name: Configure etcd service 101 | ansible.builtin.template: 102 | src: 
etcd-tantor.service.j2 103 | dest: /etc/systemd/system/etcd-tantor.service 104 | mode: "0644" 105 | notify: Restart etcd-tantor.service 106 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/test/test_alock.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .test.test_alock - test .alock 3 | ## 4 | import unittest 5 | import threading 6 | import time 7 | from ..temporal import pg_tmp 8 | from .. import alock 9 | 10 | n_alocks = "select count(*) FROM pg_locks WHERE locktype = 'advisory'" 11 | 12 | class test_alock(unittest.TestCase): 13 | @pg_tmp 14 | def testALockWait(self): 15 | # sadly, this is primarily used to exercise the code paths.. 16 | ad = prepare(n_alocks).first 17 | self.assertEqual(ad(), 0) 18 | state = [False, False, False] 19 | alt = new() 20 | first = alock.ExclusiveLock(db, (0,0)) 21 | second = alock.ExclusiveLock(db, 1) 22 | def concurrent_lock(): 23 | try: 24 | with alock.ExclusiveLock(alt, 1): 25 | with alock.ExclusiveLock(alt, (0,0)): 26 | # start it 27 | state[0] = True 28 | while not state[1]: 29 | pass 30 | time.sleep(0.01) 31 | while not state[2]: 32 | time.sleep(0.01) 33 | except Exception: 34 | # Avoid dead lock in cases where advisory is not available. 35 | state[0] = state[1] = state[2] = True 36 | t = threading.Thread(target = concurrent_lock) 37 | t.start() 38 | while not state[0]: 39 | time.sleep(0.01) 40 | self.assertEqual(ad(), 2) 41 | state[1] = True 42 | with first: 43 | self.assertEqual(ad(), 2) 44 | state[2] = True 45 | with second: 46 | self.assertEqual(ad(), 2) 47 | t.join(timeout = 1) 48 | 49 | @pg_tmp 50 | def testALockNoWait(self): 51 | alt = new() 52 | ad = prepare(n_alocks).first 53 | self.assertEqual(ad(), 0) 54 | with alock.ExclusiveLock(db, (0,0)): 55 | l=alock.ExclusiveLock(alt, (0,0)) 56 | # should fail to acquire 57 | self.assertEqual(l.acquire(blocking=False), False) 58 | # no alocks should exist now 59 | self.assertEqual(ad(), 0) 60 | 61 | @pg_tmp 62 | def testALock(self): 63 | ad = prepare(n_alocks).first 64 | self.assertEqual(ad(), 0) 65 | # test a variety.. 66 | lockids = [ 67 | (1,4), 68 | -32532, 0, 2, 69 | (7, -1232), 70 | 4, 5, 232142423, 71 | (18,7), 72 | 2, (1,4) 73 | ] 74 | alt = new() 75 | xal1 = alock.ExclusiveLock(db, *lockids) 76 | xal2 = alock.ExclusiveLock(db, *lockids) 77 | sal1 = alock.ShareLock(db, *lockids) 78 | with sal1: 79 | with xal1, xal2: 80 | self.assertTrue(ad() > 0) 81 | for x in lockids: 82 | xl = alock.ExclusiveLock(alt, x) 83 | self.assertEqual(xl.acquire(blocking=False), False) 84 | # main has exclusives on these, so this should fail. 85 | xl = alock.ShareLock(alt, *lockids) 86 | self.assertEqual(xl.acquire(blocking=False), False) 87 | for x in lockids: 88 | # sal1 still holds 89 | xl = alock.ExclusiveLock(alt, x) 90 | self.assertEqual(xl.acquire(blocking=False), False) 91 | # sal1 still holds, but we want a share lock too. 
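# share locks don't conflict with other share locks, so this
# non-blocking acquire is expected to succeed.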
92 | xl = alock.ShareLock(alt, x)
93 | self.assertEqual(xl.acquire(blocking=False), True)
94 | xl.release()
95 | # no alocks should exist now
96 | self.assertEqual(ad(), 0)
97 |
98 | @pg_tmp
99 | def testPartialALock(self):
100 | # Validates that release is properly cleaning up
101 | ad = prepare(n_alocks).first
102 | self.assertEqual(ad(), 0)
103 | held = (0,-1234)
104 | wanted = [0, 324, -1232948, 7, held, 1, (2,4), (834,1)]
105 | alt = new()
106 | with alock.ExclusiveLock(db, held):
107 | l=alock.ExclusiveLock(alt, *wanted)
108 | # should fail to acquire, db has held
109 | self.assertEqual(l.acquire(blocking=False), False)
110 | # No alocks should exist now.
111 | # This *MUST* occur prior to alt being closed.
112 | # Otherwise, we won't be testing for the recovery
113 | # of a failed non-blocking acquire().
114 | self.assertEqual(ad(), 0)
115 |
116 | @pg_tmp
117 | def testALockParameterErrors(self):
118 | self.assertRaises(TypeError, alock.ALock)
119 | l = alock.ExclusiveLock(db)
120 | self.assertRaises(RuntimeError, l.release)
121 |
122 | @pg_tmp
123 | def testALockOnClosed(self):
124 | ad = prepare(n_alocks).first
125 | self.assertEqual(ad(), 0)
126 | held = (0,-1234)
127 | alt = new()
128 | # __exit__ should only touch the count.
129 | with alock.ExclusiveLock(alt, held) as l:
130 | self.assertEqual(ad(), 1)
131 | self.assertEqual(l.locked(), True)
132 | alt.close()
133 | time.sleep(0.005)
134 | self.assertEqual(ad(), 0)
135 | self.assertEqual(l.locked(), False)
136 |
137 | if __name__ == '__main__':
138 | unittest.main()
139 | -------------------------------------------------------------------------------- /roles/prepare_nodes/tasks/debian.yml: --------------------------------------------------------------------------------
1 | ---
2 | - name: Create folders if they do not exist
3 | ansible.builtin.file:
4 | state: directory
5 | path: "{{ item }}"
6 | owner: 'root'
7 | group: 'root'
8 | mode: "0755"
9 | with_items:
10 | - /etc/apt/sources.list.d
11 | - /etc/apt/trusted.gpg.d
12 | - /etc/apt/auth.conf.d
13 |
14 | - name: Ensure APT keyrings directory exists
15 | ansible.builtin.file:
16 | path: /etc/apt/keyrings
17 | state: directory
18 | mode: '0755'
19 | when:
20 | - add_nexus_repo | bool
21 | - ansible_os_family != 'Altlinux'
22 |
23 | - name: Download Tantor GPG key (ASCII)
24 | ansible.builtin.get_url:
25 | url: "{{ nexus_key_url }}"
26 | dest: /etc/apt/keyrings/tantor-nexus.asc
27 | mode: '0644'
28 | when:
29 | - add_nexus_repo | bool
30 | - ansible_os_family != 'Altlinux'
31 |
32 | - name: Convert Tantor GPG key to binary .gpg format
33 | ansible.builtin.command: >
34 | gpg --dearmor -o /etc/apt/keyrings/tantor-nexus.gpg /etc/apt/keyrings/tantor-nexus.asc
35 | args:
36 | creates: /etc/apt/keyrings/tantor-nexus.gpg
37 | when:
38 | - add_nexus_repo | bool
39 | - ansible_os_family != 'Altlinux'
40 | - name: Block for Altlinux
41 | when:
42 | - add_nexus_repo | bool
43 | - ansible_os_family == 'Altlinux'
44 | block:
45 | - name: Add Tantor repository key for ALT Linux
46 | ansible.builtin.get_url:
47 | url: "{{ nexus_key_url }}"
48 | dest: "/etc/apt/trusted.gpg.d/RPM-GPG-KEY-tantorlabs"
49 | mode: "0644"
50 |
51 | - name: Add Tantor repository for Alt Linux c10f2
52 | when:
53 | - ansible_distribution_version == '10.2'
54 | ansible.builtin.copy:
55 | dest: "/etc/apt/sources.list.d/tantorlabs.list"
56 | content: |
57 | rpm {{ nexus_yum_altlinux_c10f2 }} x86_64 tantor
58 | mode: "0644"
59 |
60 | - name: Install prerequisite packages (apt_rpm)
61 | when:
62 | - ansible_pkg_mgr ==
'apt_rpm'
63 | ansible.builtin.package:
64 | name:
65 | - glibc-locales
66 | - openssl
67 | - ca-certificates
68 | - gnupg
69 | - libpq5
70 | - libpq5-devel
71 | state: present
72 |
73 | - name: Block for Debian-like systems
74 | when:
75 | - ansible_os_family != 'Altlinux'
76 | block:
77 | - name: Install prerequisite packages
78 | when:
79 | - ansible_pkg_mgr == 'apt'
80 | ansible.builtin.package:
81 | name:
82 | - openssl
83 | - gnupg
84 | - gpg
85 | - locales
86 | - libpq-dev
87 | state: present
88 | update_cache: true
89 |
90 | - name: Ensure the locale exists
91 | community.general.locale_gen:
92 | name: "{{ item }}"
93 | state: present
94 | with_items:
95 | - ru_RU.UTF-8
96 | - en_US.UTF-8
97 |
98 | - name: Block for Astra
99 | when:
100 | - ansible_os_family in ["Astra Linux", "Astra Linux (Orel)"]
101 | - add_nexus_repo | bool
102 | block:
103 | - name: Configure nexus repository (Astra 1.7)
104 | ansible.builtin.apt_repository:
105 | repo: "{{ nexus_apt_astra_1_7 }}"
106 | when:
107 | - ansible_distribution == "Astra Linux"
108 | - ansible_distribution_version == "1.7_x86-64"
109 |
110 | - name: Configure nexus repository (Astra 1.8)
111 | ansible.builtin.apt_repository:
112 | repo: "{{ nexus_apt_astra_1_8 }}"
113 | when:
114 | - ansible_distribution == "Astra Linux"
115 | - ansible_distribution_version == "1.8_x86-64"
116 |
117 | - name: Block for Ubuntu
118 | when: (ansible_distribution == "Ubuntu") and (add_nexus_repo | bool)
119 | block:
120 | - name: Configure nexus repository (Ubuntu 22.04)
121 | ansible.builtin.apt_repository:
122 | repo: "{{ nexus_apt_ubuntu_22_04 }}"
123 | state: present
124 | filename: tantorlabs
125 | when:
126 | - ansible_distribution == "Ubuntu"
127 | - ansible_distribution_major_version == "22"
128 |
129 | - name: Configure nexus repository (Ubuntu 20.04)
130 | ansible.builtin.apt_repository:
131 | repo: "{{ nexus_apt_ubuntu_20_04 }}"
132 | state: present
133 | filename: tantorlabs
134 | when:
135 | - ansible_distribution == "Ubuntu"
136 | - ansible_distribution_major_version == "20"
137 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/alock.rst: --------------------------------------------------------------------------------
1 | .. _alock:
2 |
3 | **************
4 | Advisory Locks
5 | **************
6 |
7 | .. warning:: `postgresql.alock` is a new feature in v1.0.
8 |
9 | `Explicit Locking in PostgreSQL `_.
10 |
11 | PostgreSQL's advisory locks offer a cooperative synchronization primitive.
12 | These are used in cases where an application needs access to a resource, but
13 | using table locks may cause interference with other operations that can be
14 | safely performed alongside the application-level, exclusive operation.
15 |
16 | Advisory locks can be used by directly executing the stored procedures in the
17 | database or by using the :class:`postgresql.alock.ALock` subclasses, which
18 | provide a context manager that uses those stored procedures.
19 |
20 | Currently, only two subclasses exist. Each represents one of the lock modes
21 | supported by PostgreSQL's advisory locks:
22 |
23 | * :class:`postgresql.alock.ShareLock`
24 | * :class:`postgresql.alock.ExclusiveLock`
25 |
26 |
27 | Acquiring ALocks
28 | ================
29 |
30 | An ALock instance represents a sequence of advisory locks.
A single ALock can
31 | acquire and release multiple advisory locks by creating the instance with
32 | multiple lock identifiers::
33 |
34 | >>> from postgresql import alock
35 | >>> table1_oid = 192842
36 | >>> table2_oid = 192849
37 | >>> l = alock.ExclusiveLock(db, (table1_oid, 0), (table2_oid, 0))
38 | >>> l.acquire()
39 | >>> ...
40 | >>> l.release()
41 |
42 | :class:`postgresql.alock.ALock` is similar to :class:`threading.RLock`; in
43 | order for an ALock to be released, it must be released the number of times it
44 | has been acquired. ALocks are associated with and survived by their session.
45 | Much like how RLocks are associated with the thread they are acquired in:
46 | acquiring an ALock again will merely increment its count.
47 |
48 | PostgreSQL allows advisory locks to be identified using a pair of `int4` or a
49 | single `int8`. ALock instances represent a *sequence* of those identifiers::
50 |
51 | >>> from postgresql import alock
52 | >>> ids = [(0,0), 0, 1]
53 | >>> with alock.ShareLock(db, *ids):
54 | ... ...
55 |
56 | Both types of identifiers may be used within the same ALock, and, regardless of
57 | their type, will be acquired in the order that they were given to the class'
58 | constructor. In the above example, ``(0,0)`` is acquired first, then ``0``, and
59 | lastly ``1``.
60 |
61 |
62 | ALocks
63 | ======
64 |
65 | `postgresql.alock.ALock` is abstract; it defines the interface and some common
66 | functionality. The lock mode is selected by choosing the appropriate subclass.
67 |
68 | There are two:
69 |
70 | ``postgresql.alock.ExclusiveLock(database, *identifiers)``
71 | Instantiate an ALock object representing the `identifiers` for use with the
72 | `database`. Exclusive locks will conflict with other exclusive locks and share
73 | locks.
74 |
75 | ``postgresql.alock.ShareLock(database, *identifiers)``
76 | Instantiate an ALock object representing the `identifiers` for use with the
77 | `database`. Share locks can be acquired when a share lock with the same
78 | identifier has been acquired by another backend. However, an exclusive lock
79 | with the same identifier will conflict.
80 |
81 |
82 | ALock Interface Points
83 | ----------------------
84 |
85 | Methods and properties available on :class:`postgresql.alock.ALock` instances:
86 |
87 | ``alock.acquire(blocking = True)``
88 | Acquire the advisory locks represented by the ``alock`` object. If blocking is
89 | `True`, the default, the method will block until locks on *all* the
90 | identifiers have been acquired.
91 |
92 | If blocking is `False`, acquisition may not block, and success will be
93 | indicated by the returned object: `True` if *all* lock identifiers were
94 | acquired and `False` if any of the lock identifiers could not be acquired.
95 |
96 | ``alock.release()``
97 | Release the advisory locks represented by the ``alock`` object. If the lock
98 | has not been acquired, a `RuntimeError` will be raised.
99 |
100 | ``alock.locked()``
101 | Returns a boolean describing whether the locks are held or not. This will
102 | return `False` if the lock connection has been closed.
103 |
104 | ``alock.__enter__()``
105 | Alias to ``acquire``; context manager protocol. Always blocking.
106 |
107 | ``alock.__exit__(typ, val, tb)``
108 | Alias to ``release``; context manager protocol.
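Putting the interface points together (an illustrative sketch; ``db`` is an
established connection, as in the examples above)::

   >>> from postgresql import alock
   >>> l = alock.ExclusiveLock(db, (0, 0))
   >>> if l.acquire(blocking = False):
   ...     try:
   ...         ...  # exclusive access to the shared resource
   ...     finally:
   ...         l.release()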
109 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/documentation/sphinx/alock.rst: --------------------------------------------------------------------------------
1 | .. _alock:
2 |
3 | **************
4 | Advisory Locks
5 | **************
6 |
7 | .. warning:: `postgresql.alock` is a new feature in v1.0.
8 |
9 | `Explicit Locking in PostgreSQL `_.
10 |
11 | PostgreSQL's advisory locks offer a cooperative synchronization primitive.
12 | These are used in cases where an application needs access to a resource, but
13 | using table locks may cause interference with other operations that can be
14 | safely performed alongside the application-level, exclusive operation.
15 |
16 | Advisory locks can be used by directly executing the stored procedures in the
17 | database or by using the :class:`postgresql.alock.ALock` subclasses, which
18 | provide a context manager that uses those stored procedures.
19 |
20 | Currently, only two subclasses exist. Each represents one of the lock modes
21 | supported by PostgreSQL's advisory locks:
22 |
23 | * :class:`postgresql.alock.ShareLock`
24 | * :class:`postgresql.alock.ExclusiveLock`
25 |
26 |
27 | Acquiring ALocks
28 | ================
29 |
30 | An ALock instance represents a sequence of advisory locks. A single ALock can
31 | acquire and release multiple advisory locks by creating the instance with
32 | multiple lock identifiers::
33 |
34 | >>> from postgresql import alock
35 | >>> table1_oid = 192842
36 | >>> table2_oid = 192849
37 | >>> l = alock.ExclusiveLock(db, (table1_oid, 0), (table2_oid, 0))
38 | >>> l.acquire()
39 | >>> ...
40 | >>> l.release()
41 |
42 | :class:`postgresql.alock.ALock` is similar to :class:`threading.RLock`; in
43 | order for an ALock to be released, it must be released the number of times it
44 | has been acquired. ALocks are associated with and survived by their session.
45 | Much like how RLocks are associated with the thread they are acquired in:
46 | acquiring an ALock again will merely increment its count.
47 |
48 | PostgreSQL allows advisory locks to be identified using a pair of `int4` or a
49 | single `int8`. ALock instances represent a *sequence* of those identifiers::
50 |
51 | >>> from postgresql import alock
52 | >>> ids = [(0,0), 0, 1]
53 | >>> with alock.ShareLock(db, *ids):
54 | ... ...
55 |
56 | Both types of identifiers may be used within the same ALock, and, regardless of
57 | their type, will be acquired in the order that they were given to the class'
58 | constructor. In the above example, ``(0,0)`` is acquired first, then ``0``, and
59 | lastly ``1``.
60 |
61 |
62 | ALocks
63 | ======
64 |
65 | `postgresql.alock.ALock` is abstract; it defines the interface and some common
66 | functionality. The lock mode is selected by choosing the appropriate subclass.
67 |
68 | There are two:
69 |
70 | ``postgresql.alock.ExclusiveLock(database, *identifiers)``
71 | Instantiate an ALock object representing the `identifiers` for use with the
72 | `database`. Exclusive locks will conflict with other exclusive locks and share
73 | locks.
74 |
75 | ``postgresql.alock.ShareLock(database, *identifiers)``
76 | Instantiate an ALock object representing the `identifiers` for use with the
77 | `database`. Share locks can be acquired when a share lock with the same
78 | identifier has been acquired by another backend. However, an exclusive lock
79 | with the same identifier will conflict.
80 |
81 |
82 | ALock Interface Points
83 | ----------------------
84 |
85 | Methods and properties available on :class:`postgresql.alock.ALock` instances:
86 |
87 | ``alock.acquire(blocking = True)``
88 | Acquire the advisory locks represented by the ``alock`` object. If blocking is
89 | `True`, the default, the method will block until locks on *all* the
90 | identifiers have been acquired.
91 |
92 | If blocking is `False`, acquisition may not block, and success will be
93 | indicated by the returned object: `True` if *all* lock identifiers were
94 | acquired and `False` if any of the lock identifiers could not be acquired.
95 |
96 | ``alock.release()``
97 | Release the advisory locks represented by the ``alock`` object. If the lock
98 | has not been acquired, a `RuntimeError` will be raised.
99 |
100 | ``alock.locked()``
101 | Returns a boolean describing whether the locks are held or not. This will
102 | return `False` if the lock connection has been closed.
103 |
104 | ``alock.__enter__()``
105 | Alias to ``acquire``; context manager protocol. Always blocking.
106 |
107 | ``alock.__exit__(typ, val, tb)``
108 | Alias to ``release``; context manager protocol.
109 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/message.py: --------------------------------------------------------------------------------
1 | ##
2 | # .message - PostgreSQL message representation
3 | ##
4 | from operator import itemgetter
5 | from .python.element import prime_factor
6 | # Final msghook called exists at .sys.msghook
7 | from . import sys as pg_sys
8 |
9 | from .api import Message
10 | class Message(Message):
11 | """
12 | A message emitted by PostgreSQL. This element is universal, so
13 | `postgresql.api.Message` is a complete implementation for representing a
14 | message. Any interface should produce these objects.
15 | """
16 | _e_label = property(lambda x: getattr(x, 'details').get('severity', 'MESSAGE'))
17 | _e_factors = ('creator',)
18 |
19 | def _e_metas(self, get0 = itemgetter(0)):
20 | yield (None, self.message)
21 | if self.code and self.code != "00000":
22 | yield ('CODE', self.code)
23 | locstr = self.location_string
24 | if locstr:
25 | yield ('LOCATION', locstr + ' from ' + self.source)
26 | else:
27 | yield ('LOCATION', self.source)
28 | for k, v in sorted(self.details.items(), key = get0):
29 | if k not in self.standard_detail_coverage:
30 | yield (k.upper(), str(v))
31 |
32 | source = 'SERVER'
33 | code = '00000'
34 | message = None
35 | details = None
36 |
37 | severities = (
38 | 'DEBUG',
39 | 'INFO',
40 | 'NOTICE',
41 | 'WARNING',
42 | 'ERROR',
43 | 'FATAL',
44 | 'PANIC',
45 | )
46 | sources = (
47 | 'SERVER',
48 | 'CLIENT',
49 | )
50 |
51 | def isconsistent(self, other):
52 | """
53 | Return `True` if all the fields of the message in `self` are
54 | equivalent to the fields in `other`.
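For instance (an illustrative doctest; both messages are built with the
same field values, so every compared field matches):

>>> Message('hello').isconsistent(Message('hello'))
True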
55 | """ 56 | if not isinstance(other, self.__class__): 57 | return False 58 | # creator is contextual information 59 | return ( 60 | self.code == other.code and \ 61 | self.message == other.message and \ 62 | self.details == other.details and \ 63 | self.source == other.source 64 | ) 65 | 66 | def __init__(self, 67 | message : "The primary information of the message", 68 | code : "Message code to attach (SQL state)" = None, 69 | details : "additional information associated with the message" = {}, 70 | source : "Which side generated the message(SERVER, CLIENT)" = None, 71 | creator : "The interface element that called for instantiation" = None, 72 | ): 73 | self.message = message 74 | self.details = details 75 | self.creator = creator 76 | if code is not None and self.code != code: 77 | self.code = code 78 | if source is not None and self.source != source: 79 | self.source = source 80 | 81 | def __repr__(self): 82 | return "{mod}.{typname}({message!r}{code}{details}{source}{creator})".format( 83 | mod = self.__module__, 84 | typname = self.__class__.__name__, 85 | message = self.message, 86 | code = ( 87 | "" if self.code == type(self).code 88 | else ", code = " + repr(self.code) 89 | ), 90 | details = ( 91 | "" if not self.details 92 | else ", details = " + repr(self.details) 93 | ), 94 | source = ( 95 | "" if self.source is None 96 | else ", source = " + repr(self.source) 97 | ), 98 | creator = ( 99 | "" if self.creator is None 100 | else ", creator = " + repr(self.creator) 101 | ) 102 | ) 103 | 104 | @property 105 | def location_string(self): 106 | """ 107 | A single line representation of the 'file', 'line', and 'function' keys 108 | in the `details` dictionary. 109 | """ 110 | details = self.details 111 | loc = [ 112 | details.get(k, '?') for k in ('file', 'line', 'function') 113 | ] 114 | return ( 115 | "" if loc == ['?', '?', '?'] 116 | else "File {0!r}, "\ 117 | "line {1!s}, in {2!s}".format(*loc) 118 | ) 119 | 120 | # keys to filter in .details 121 | standard_detail_coverage = frozenset(['message', 'severity', 'file', 'function', 'line',]) 122 | 123 | def emit(self, starting_point = None): 124 | """ 125 | Take the given message object and hand it to all the primary 126 | factors(creator) with a msghook callable. 127 | """ 128 | if starting_point is not None: 129 | f = starting_point 130 | else: 131 | f = self.creator 132 | 133 | while f is not None: 134 | if getattr(f, 'msghook', None) is not None: 135 | if f.msghook(self): 136 | # the trap returned a nonzero value, 137 | # so don't continue raising. (like with's __exit__) 138 | return f 139 | f = prime_factor(f) 140 | if f: 141 | f = f[1] 142 | # if the next primary factor is without a raise or does not exist, 143 | # send the message to postgresql.sys.msghook 144 | pg_sys.msghook(self) 145 | -------------------------------------------------------------------------------- /tools/pg_cluster_backend/psc/postgresql/protocol/pbuffer.py: -------------------------------------------------------------------------------- 1 | ## 2 | # .protocol.pbuffer 3 | ## 4 | """ 5 | Pure Python message buffer implementation. 6 | 7 | Given data read from the wire, buffer the data until a complete message has been 8 | received. 
9 | """ 10 | __all__ = ['pq_message_stream'] 11 | 12 | from io import BytesIO 13 | import struct 14 | from .message_types import message_types 15 | 16 | xl_unpack = struct.Struct('!xL').unpack_from 17 | 18 | class pq_message_stream(object): 19 | 'provide a message stream from a data stream' 20 | _block = 512 21 | _limit = _block * 4 22 | def __init__(self): 23 | self._strio = BytesIO() 24 | self._start = 0 25 | 26 | def truncate(self): 27 | "remove all data in the buffer" 28 | self._strio.truncate(0) 29 | self._start = 0 30 | 31 | def _rtruncate(self, amt = None): 32 | "[internal] remove the given amount of data" 33 | strio = self._strio 34 | if amt is None: 35 | amt = self._strio.tell() 36 | strio.seek(0, 2) 37 | size = strio.tell() 38 | # if the total size is equal to the amt, 39 | # then the whole thing is going to be truncated. 40 | if size == amt: 41 | strio.truncate(0) 42 | return 43 | 44 | copyto_pos = 0 45 | copyfrom_pos = amt 46 | while True: 47 | strio.seek(copyfrom_pos) 48 | data = strio.read(self._block) 49 | # Next copyfrom 50 | copyfrom_pos = strio.tell() 51 | strio.seek(copyto_pos) 52 | strio.write(data) 53 | if len(data) != self._block: 54 | break 55 | # Next copyto 56 | copyto_pos = strio.tell() 57 | 58 | strio.truncate(size - amt) 59 | 60 | def has_message(self, xl_unpack = xl_unpack, len = len): 61 | "if the buffer has a message available" 62 | strio = self._strio 63 | strio.seek(self._start) 64 | header = strio.read(5) 65 | if len(header) < 5: 66 | return False 67 | length, = xl_unpack(header) 68 | if length < 4: 69 | raise ValueError("invalid message size '%d'" %(length,)) 70 | strio.seek(0, 2) 71 | return (strio.tell() - self._start) >= length + 1 72 | 73 | def __len__(self, xl_unpack = xl_unpack, len = len): 74 | "number of messages in buffer" 75 | count = 0 76 | rpos = self._start 77 | strio = self._strio 78 | strio.seek(self._start) 79 | while True: 80 | # get the message metadata 81 | header = strio.read(5) 82 | rpos += 5 83 | if len(header) < 5: 84 | # not enough data for another message 85 | break 86 | # unpack the length from the header 87 | length, = xl_unpack(header) 88 | rpos += length - 4 89 | 90 | if length < 4: 91 | raise ValueError("invalid message size '%d'" %(length,)) 92 | strio.seek(length - 4 - 1, 1) 93 | 94 | if len(strio.read(1)) != 1: 95 | break 96 | count += 1 97 | return count 98 | 99 | def _get_message(self, 100 | mtypes = message_types, 101 | len = len, 102 | xl_unpack = xl_unpack, 103 | ): 104 | strio = self._strio 105 | header = strio.read(5) 106 | if len(header) < 5: 107 | return 108 | length, = xl_unpack(header) 109 | typ = mtypes[header[0]] 110 | 111 | if length < 4: 112 | raise ValueError("invalid message size '%d'" %(length,)) 113 | length -= 4 114 | body = strio.read(length) 115 | if len(body) < length: 116 | # Not enough data for message. 
117 | return 118 | return (typ, body) 119 | 120 | def next_message(self): 121 | if self._start > self._limit: 122 | self._rtruncate(self._start) 123 | self._start = 0 124 | 125 | self._strio.seek(self._start) 126 | msg = self._get_message() 127 | if msg is not None: 128 | self._start = self._strio.tell() 129 | return msg 130 | 131 | def __next__(self): 132 | if self._start > self._limit: 133 | self._rtruncate(self._start) 134 | self._start = 0 135 | 136 | self._strio.seek(self._start) 137 | msg = self._get_message() 138 | if msg is None: 139 | raise StopIteration 140 | self._start = self._strio.tell() 141 | return msg 142 | 143 | def read(self, num = 0xFFFFFFFF, len = len): 144 | if self._start > self._limit: 145 | self._rtruncate(self._start) 146 | self._start = 0 147 | 148 | new_start = self._start 149 | self._strio.seek(new_start) 150 | l = [] 151 | while len(l) < num: 152 | msg = self._get_message() 153 | if msg is None: 154 | break 155 | l.append(msg) 156 | new_start += (5 + len(msg[1])) 157 | self._start = new_start 158 | return l 159 | 160 | def write(self, data): 161 | # Always append data; it's a stream, damnit.. 162 | self._strio.seek(0, 2) 163 | self._strio.write(data) 164 | 165 | def getvalue(self): 166 | self._strio.seek(self._start) 167 | return self._strio.read() 168 | -------------------------------------------------------------------------------- /roles/prepare_nodes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Block for checking if hostnames are valid 3 | block: 4 | - name: Gather inventory hostnames from all hosts 5 | ansible.builtin.set_fact: 6 | all_inventory_names: "{{ groups['all'] }}" 7 | run_once: true 8 | 9 | - name: Extract domain part and count domain levels 10 | ansible.builtin.set_fact: 11 | domain_part: "{{ inventory_hostname | regex_replace('^[^.]+\\.?', '') }}" 12 | domain_levels: "{{ (inventory_hostname | regex_replace('^[^.]+\\.?', '')).split('.') | length if '.' in inventory_hostname else 0 }}" 13 | has_domain: "{{ '.' in inventory_hostname }}" 14 | 15 | - name: Collect domain information from all hosts 16 | ansible.builtin.set_fact: 17 | all_domain_parts: "{{ ansible_play_hosts | map('extract', hostvars, 'domain_part') | list }}" 18 | all_domain_levels: "{{ ansible_play_hosts | map('extract', hostvars, 'domain_levels') | list }}" 19 | all_has_domain: "{{ ansible_play_hosts | map('extract', hostvars, 'has_domain') | list }}" 20 | run_once: true 21 | 22 | - name: Check consistency of naming convention 23 | ansible.builtin.fail: 24 | msg: | 25 | Inconsistent inventory naming detected! 26 | All hosts must have the same domain structure (same number of domain levels). 27 | Current hosts analysis: 28 | {% for host in ansible_play_hosts %} 29 | - {{ host }}: {{ 'no domain' if not hostvars[host]['has_domain'] 30 | else hostvars[host]['domain_levels']|string + ' domain levels (' + 31 | hostvars[host]['domain_part'] + ')' }} 32 | {% endfor %} 33 | 34 | Issues found: 35 | {% if all_has_domain | unique | length > 1 %} 36 | - Mixed hostname formats: some have domains, others don't 37 | {% endif %} 38 | {% if all_domain_levels | unique | length > 1 %} 39 | - Different domain levels: {{ all_domain_levels | unique | sort }} 40 | {% endif %} 41 | 42 | Please ensure all hosts follow the same naming convention. 
43 | when: 44 | - (all_has_domain | unique | length > 1) or (all_domain_levels | unique | length > 1) 45 | run_once: true 46 | 47 | - name: Set unified hostname variable for FQDN inventory 48 | ansible.builtin.set_fact: 49 | unified_hostname: "{{ ansible_fqdn }}" 50 | cacheable: true 51 | when: has_domain 52 | 53 | - name: Set unified hostname variable for hostname-only inventory 54 | ansible.builtin.set_fact: 55 | unified_hostname: "{{ inventory_hostname }}" 56 | cacheable: true 57 | when: not has_domain 58 | 59 | - name: "Configure hostname" 60 | ansible.builtin.hostname: 61 | name: "{{ inventory_hostname }}" 62 | become: true 63 | 64 | - name: Block for installation of basic utils 65 | module_defaults: 66 | ansible.builtin.apt: 67 | update_cache: true 68 | ansible.builtin.yum: 69 | update_cache: true 70 | ansible.builtin.dnf: 71 | update_cache: true 72 | community.general.apt_rpm: 73 | update_cache: true 74 | block: 75 | - name: Install basic utils 76 | ansible.builtin.package: 77 | name: 78 | - jq 79 | - chrony 80 | - curl 81 | state: present 82 | notify: Restart chronyd 83 | 84 | - name: Include task for Debian-like systems 85 | ansible.builtin.include_tasks: debian.yml 86 | when: ansible_os_family in ["Astra Linux", "Debian", "Altlinux"] 87 | 88 | - name: Include task for RHEL-like systems 89 | ansible.builtin.include_tasks: rhel.yml 90 | when: ansible_os_family in ["RED", "Centos", "Rocky", "RedHat"] 91 | 92 | - name: Get content of /etc/security/limits.conf file 93 | ansible.builtin.lineinfile: 94 | path: /etc/security/limits.conf 95 | regexp: '(postgres\shard)|(postgres\ssoft)' 96 | state: absent 97 | check_mode: true 98 | register: prepare_nodes_sec_limit 99 | changed_when: false 100 | 101 | - name: Add secure limits for postgres if not exists 102 | ansible.builtin.lineinfile: 103 | path: /etc/security/limits.conf 104 | line: "{{ item }}" 105 | state: present 106 | when: not prepare_nodes_sec_limit.found 107 | with_items: 108 | - "postgres soft nofile 65536" 109 | - "postgres hard nofile 100000" 110 | 111 | - name: Add another bin dir to system-wide $PATH. 112 | when: ansible_env.PATH is not search("/opt/tantor/usr/bin:/opt/tantor/usr/sbin") 113 | ansible.builtin.copy: 114 | dest: /etc/profile.d/tantor.sh 115 | content: 'PATH=$PATH:/opt/tantor/usr/bin:/opt/tantor/usr/sbin' 116 | mode: "0644" 117 | --------------------------------------------------------------------------------