├── .github └── workflows │ ├── kubernetes-test.yaml │ ├── publish-ghcr-container.yaml │ └── tests.yaml ├── .gitmodules ├── .zappr.yaml ├── BUGS.md ├── CONTRIBUTING.rst ├── ENVIRONMENT.rst ├── LICENSE ├── README.rst ├── contrib ├── cloudbuild-branch.yaml ├── cloudbuild-latest.yaml └── cloudbuild-tag.yaml ├── delivery.yaml ├── docs ├── DESIGN.md ├── Spilo_Architecture_High_Level.png ├── Spilo_Architecture_Instance.png ├── admin-guide │ ├── deploy_spilo.md │ ├── failover.md │ ├── scaling.md │ ├── sizing.md │ └── update.md ├── components │ ├── etcd-proxy.md │ └── governor.md ├── configuration.md ├── index.md ├── prerequisites.md └── user-guide │ └── connect_spilo.md ├── etcd-cluster-appliance └── README.md ├── kubernetes └── spilo_kubernetes.yaml ├── mkdocs.yml ├── postgres-appliance ├── .gitignore ├── CODEOWNERS ├── Dockerfile ├── bootstrap │ ├── clone_with_basebackup.py │ ├── clone_with_wale.py │ └── maybe_pg_upgrade.py ├── build_scripts │ ├── base.sh │ ├── compress_build.sh │ ├── dependencies.sh │ ├── locales.sh │ ├── patroni_wale.sh │ ├── post_build.sh │ └── prepare.sh ├── cron_unprivileged.c ├── dependencies │ ├── Dockerfile │ ├── README │ ├── build.sh │ ├── debs │ │ ├── libgdal30_3.4.1+dfsg-1build4_amd64.deb │ │ └── libgdal30_3.4.1+dfsg-1build4_arm64.deb │ └── patches │ │ └── gdal.patch ├── launch.sh ├── major_upgrade │ ├── inplace_upgrade.py │ └── pg_upgrade.py ├── motd ├── pgq_ticker.ini ├── runit │ ├── cron │ │ └── run │ ├── etcd │ │ └── run │ ├── patroni │ │ ├── finish │ │ └── run │ ├── pgbouncer │ │ └── run │ └── pgqd │ │ └── run ├── scripts │ ├── _zmon_schema.dump │ ├── basebackup.sh │ ├── callback_aws.py │ ├── callback_role.py │ ├── configure_spilo.py │ ├── create_user_functions.sql │ ├── metric_helpers.sql │ ├── on_role_change.sh │ ├── patroni_wait.sh │ ├── pg_partman │ │ └── after-create.sql │ ├── pgq │ │ └── after-create.sql │ ├── post_init.sh │ ├── postgres_backup.sh │ ├── postgres_fdw │ │ └── after-create.sql │ ├── renice.sh │ ├── restore_command.sh │ ├── spilo_commons.py │ ├── test_reload_ssl.sh │ ├── upload_pg_log_to_s3.py │ ├── wal-e-wal-fetch.sh │ └── wale_restore.sh ├── spilok8s.yaml └── tests │ ├── README.md │ ├── docker-compose.yml │ ├── locales_test │ ├── generate_data.sh │ ├── helper_script.sh │ └── test_locales.sh │ ├── schema.sql │ ├── test_spilo.sh │ ├── test_utils.sh │ └── timescaledb.sql ├── spilo_cmd ├── .gitignore ├── README.md ├── requirements.txt ├── setup.py ├── spilo │ ├── __init__.py │ └── spilo.py └── tests │ ├── pg_service.conf │ └── test_cli.py └── tox.ini /.github/workflows/kubernetes-test.yaml: -------------------------------------------------------------------------------- 1 | name: Kubernetes Installation Test 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - 'kubernetes/**' 7 | push: 8 | branches: 9 | - master 10 | paths: 11 | - 'kubernetes/**' 12 | 13 | jobs: 14 | kubernetes-installation-test: 15 | runs-on: ubuntu-20.04 16 | 17 | steps: 18 | - uses: actions/checkout@v3 19 | - name: Create Kind Cluster 20 | uses: helm/kind-action@v1.1.0 21 | - name: Install Spilo 22 | run: kubectl apply -f kubernetes/spilo_kubernetes.yaml 23 | -------------------------------------------------------------------------------- /.github/workflows/publish-ghcr-container.yaml: -------------------------------------------------------------------------------- 1 | name: Publish multiarch images on ghcr.io 2 | on: 3 | push: 4 | tags: 5 | - '*' 6 | 7 | env: 8 | REGISTRY: ghcr.io 9 | IMAGE_NAME: ${{ github.repository }} 10 | 11 | jobs: 12 | publish: 13 | name: Build and push 
Spilo multiarch images 14 | runs-on: ubuntu-22.04 15 | permissions: 16 | contents: 'read' 17 | packages: 'write' 18 | defaults: 19 | run: 20 | shell: bash 21 | steps: 22 | - name: Checkout 23 | uses: actions/checkout@v4 24 | 25 | - name: Set up packages 26 | run: sudo apt-get install -y docker-compose 27 | 28 | - name: Set up Python 29 | uses: actions/setup-python@v5 30 | with: 31 | python-version: '3.10' 32 | 33 | - name: Derive spilo image name 34 | id: image 35 | working-directory: postgres-appliance 36 | run: | 37 | PGVERSION=$(sed -n 's/^ARG PGVERSION=\([1-9][0-9]*\).*$/\1/p' Dockerfile) 38 | IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-$PGVERSION:${GITHUB_REF/refs\/tags\//}" 39 | echo "NAME=$IMAGE" >> $GITHUB_OUTPUT 40 | 41 | - name: Set up QEMU 42 | uses: docker/setup-qemu-action@v3 43 | 44 | - name: Set up Docker Buildx 45 | uses: docker/setup-buildx-action@v3 46 | 47 | - name: Login to GHCR 48 | uses: docker/login-action@v3 49 | with: 50 | registry: ${{ env.REGISTRY }} 51 | username: ${{ github.actor }} 52 | password: ${{ secrets.GITHUB_TOKEN }} 53 | 54 | - name: Build and export to local docker for testing 55 | uses: docker/build-push-action@v6 56 | with: 57 | context: "postgres-appliance/" 58 | load: true 59 | tags: ${{ steps.image.outputs.NAME }} 60 | 61 | - name: Test spilo docker image 62 | env: 63 | SPILO_TEST_IMAGE: "${{ steps.image.outputs.NAME }}" 64 | run: | 65 | bash postgres-appliance/tests/test_spilo.sh 66 | 67 | - name: Build arm64 additionaly and push multiarch image to ghcr 68 | uses: docker/build-push-action@v6 69 | with: 70 | context: "postgres-appliance/" 71 | push: true 72 | tags: "${{ steps.image.outputs.NAME }}" 73 | platforms: linux/amd64,linux/arm64 74 | -------------------------------------------------------------------------------- /.github/workflows/tests.yaml: -------------------------------------------------------------------------------- 1 | name: ubuntu 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - master 8 | - trigger 9 | 10 | jobs: 11 | tests: 12 | runs-on: ubuntu-22.04 13 | 14 | steps: 15 | - uses: actions/checkout@v4 16 | - name: Set up packages 17 | run: sudo apt-get install -y shellcheck docker-compose 18 | - name: Set up Python 19 | uses: actions/setup-python@v5 20 | with: 21 | python-version: '3.10' 22 | - name: Install flake8 23 | run: python -m pip install flake8 24 | - name: Run shellcheck 25 | run: find postgres-appliance -name '*.sh' -print0 | xargs -0 shellcheck 26 | - name: Run flake8 27 | run: find postgres-appliance -name '*.py' -print0 | xargs -0 python -m flake8 28 | - name: Build spilo docker image 29 | run: cd postgres-appliance && docker build -t spilo . 
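      # The test step below runs postgres-appliance/tests/test_spilo.sh against the
      # freshly built image; it relies on the docker-compose package installed in the
      # "Set up packages" step above.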
30 | - name: Test spilo docker image 31 | run: bash postgres-appliance/tests/test_spilo.sh 32 | - name: Test USE_OLD_LOCALES 33 | run: bash -x postgres-appliance/tests/locales_test/test_locales.sh 34 | if: github.event_name == 'push' 35 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zalando/spilo/bb39d8109bc759e28d9b3fb1dec549ce82ba6a38/.gitmodules -------------------------------------------------------------------------------- /.zappr.yaml: -------------------------------------------------------------------------------- 1 | # for github.com 2 | approvals: 3 | groups: 4 | zalando: 5 | minimum: 2 6 | from: 7 | orgs: 8 | - "zalando" 9 | # team should be valid team id in team service https://teams.auth.zalando.com/api/teams/:id 10 | X-Zalando-Team: "acid" 11 | # type should be one of [code, doc, config, tools, secrets] 12 | # code will be the default value, if X-Zalando-Type is not found in .zappr.yml 13 | X-Zalando-Type: code 14 | -------------------------------------------------------------------------------- /BUGS.md: -------------------------------------------------------------------------------- 1 | 1. Connections from odd are not allowed in pg\_hba.conf: 2 | FATAL: no pg_hba.conf entry for host "[ip of odd]", user "standby", database "postgres", SSL off 3 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | Contributing Guidelines 2 | ======================= 3 | 4 | Wanna contribute to Spilo? Yay - here is how! 5 | 6 | Reporting issues 7 | ---------------- 8 | 9 | If you have a question about Spilo, or have a problem using it, please read the `README `__ before filing an issue. 10 | Also double-check with the current issues on our `Issues Tracker `__. 11 | 12 | Contributing a pull request 13 | --------------------------- 14 | 15 | 1) Submit a comment to the relevant issue or create a new issue describing your proposed change. 16 | 2) Do a fork, develop and test your code changes. 17 | 3) Include documentation 18 | 4) Submit a pull request. 19 | 20 | You'll get feedback about your pull request as soon as possible. 21 | 22 | Happy Spilo hacking ;-) 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
203 | 
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | =========================================
2 | Spilo: HA PostgreSQL Clusters with Docker
3 | =========================================
4 | 
5 | Spilo is a Docker image that provides PostgreSQL and `Patroni `__ bundled together. Patroni is a template for PostgreSQL HA. Multiple Spilos can form a resilient, highly available PostgreSQL cluster. For this, you'll need to start all participating Spilos with identical `etcd `__ addresses and cluster names.
6 | 
7 | Spilo's name derives from სპილო [spiːlɒ], the Georgian word for "elephant."
8 | 
9 | Real-World Usage and Plans
10 | --------------------------
11 | 
12 | Spilo is currently evolving: its creators are working on a Postgres operator that would make it simpler to deploy scalable Postgres clusters in a Kubernetes environment, and also perform maintenance tasks. Spilo would serve as an essential building block for this. There is already a `Helm chart `__ that relies on Spilo and Patroni to provision a five-node PostgreSQL HA cluster in a Kubernetes+Google Compute Engine environment. (The Helm chart deploys Spilo Docker images, not just "bare" Patroni.)
13 | 
14 | How to Use This Docker Image
15 | ============================
16 | 
17 | .. important::
18 |     We encourage users to build the Docker images themselves from source code using the latest tags to benefit from ongoing improvements and fixes. The team continues to maintain the project and address issues, but does not make regular releases nor publish the latest Docker images.
19 | 
20 | Spilo's setup assumes that you've correctly configured a load balancer (HAProxy, ELB, Google load balancer) that directs client connections to the master. There are two ways to achieve this: A) if the load balancer relies on the status code to distinguish between healthy and failed nodes (like ELB), then one needs to configure it to poll the API URL; otherwise, B) you can use callback scripts to change the load balancer configuration dynamically.
21 | 
22 | **Available container registry and image architectures**
23 | 
24 | Spilo images are made available in the GitHub container registry (ghcr.io). Images are built and published for linux/amd64 and linux/arm64 on every tag. For PostgreSQL version 14, currently available images can be found here: https://github.com/zalando/spilo/pkgs/container/spilo-14
25 | 
26 | 
27 | How to Build This Docker Image
28 | ==============================
29 | 
30 | $ cd postgres-appliance
31 | 
32 | $ docker build --tag $YOUR_TAG .
33 | 
34 | 
35 | There are a few build arguments defined in the Dockerfile and it is possible to change them by specifying ``--build-arg`` arguments:
36 | 
37 | - WITH_PERL=false # set to true if you want to install perl and plperl packages into the image
38 | - PGVERSION="12"
39 | - PGOLDVERSIONS="9.5 9.6 10 11"
40 | - DEMO=false # set to true to build the smallest possible image which will work only on Kubernetes
41 | - TIMESCALEDB_APACHE_ONLY=true # set to false to build the timescaledb community version (Timescale License)
42 | - TIMESCALEDB_TOOLKIT=true # set to false to skip installing toolkit with timescaledb community edition.
Only relevant when TIMESCALEDB_APACHE_ONLY=false 43 | - ADDITIONAL_LOCALES= # additional UTF-8 locales to build into image (example: "de_DE pl_PL fr_FR") 44 | 45 | Run the image locally after build: 46 | 47 | $ docker run -it your-spilo-image:$YOUR_TAG 48 | 49 | Have a look inside the container: 50 | 51 | $ docker exec -it $CONTAINER_NAME bash 52 | 53 | Connecting to PostgreSQL 54 | ------------------------ 55 | **Administrative Connections** 56 | 57 | PostgreSQL is configured by default to listen to port 5432. Spilo master initializes PostgreSQL and creates the superuser and replication user (``postgres`` and ``standby`` by default). 58 | 59 | You'll need to setup Spilo to create a database and roles for your application(s). For example: 60 | 61 | .. code-block:: bash 62 | 63 | psql -h myfirstspilo.example.com -p 5432 -U admin -d postgres 64 | 65 | **Application Connections** 66 | 67 | Once you have created a database and roles for your application, you can connect to Spilo just like you want to connect to any other PostgreSQL cluster: 68 | 69 | .. code-block:: bash 70 | 71 | psql -h myfirstspilo.example.com -p 5432 -U wow_app -d wow 72 | psql -d "postgresql://myfirstspilo.example.com:5432/wow?user=wow_app" 73 | 74 | Configuration 75 | ------------- 76 | 77 | Spilo is configured via environment variables, the values of which are either supplied manually via the environment (whenever Spilo is launched as a set of Docker containers) or added in the configuration file or manifest (whenever Spilo is used in the Docker orchestration environment, such as Kubernetes or Docker Compose). 78 | 79 | Please go `here `__ to see our list of environment variables. 80 | 81 | To supply env variables manually via the environment for local testing: 82 | 83 | docker run -it -e YOUR_ENV_VAR=test your-spilo-image:latest 84 | 85 | Issues and Contributing 86 | ----------------------- 87 | 88 | Spilo welcomes questions via our `issues tracker `__. We also greatly appreciate fixes, feature requests, and updates; before submitting a pull request, please visit our `contributor guidelines `__. 89 | 90 | License 91 | ------- 92 | 93 | This project uses the `Apache 2.0 license `__. 94 | -------------------------------------------------------------------------------- /contrib/cloudbuild-branch.yaml: -------------------------------------------------------------------------------- 1 | steps: 2 | - name: 'gcr.io/cloud-builders/docker' 3 | dir: 'postgres-appliance' 4 | entrypoint: 'bash' 5 | args: 6 | - '-c' 7 | - | 8 | ./build.sh \ 9 | -t gcr.io/$PROJECT_ID/$REPO_NAME:$REVISION_ID \ 10 | -t gcr.io/$PROJECT_ID/$REPO_NAME:$BRANCH_NAME \ 11 | . 12 | images: 13 | - 'gcr.io/$PROJECT_ID/$REPO_NAME:$REVISION_ID' 14 | - 'gcr.io/$PROJECT_ID/$REPO_NAME:$BRANCH_NAME' 15 | -------------------------------------------------------------------------------- /contrib/cloudbuild-latest.yaml: -------------------------------------------------------------------------------- 1 | steps: 2 | - name: 'gcr.io/cloud-builders/docker' 3 | dir: 'postgres-appliance' 4 | entrypoint: 'bash' 5 | args: 6 | - '-c' 7 | - | 8 | ./build.sh \ 9 | -t gcr.io/$PROJECT_ID/$REPO_NAME:$REVISION_ID \ 10 | -t gcr.io/$PROJECT_ID/$REPO_NAME:latest \ 11 | . 
12 | images: 13 | - 'gcr.io/$PROJECT_ID/$REPO_NAME:$REVISION_ID' 14 | - 'gcr.io/$PROJECT_ID/$REPO_NAME:latest' 15 | -------------------------------------------------------------------------------- /contrib/cloudbuild-tag.yaml: -------------------------------------------------------------------------------- 1 | steps: 2 | - name: 'gcr.io/cloud-builders/docker' 3 | dir: 'postgres-appliance' 4 | entrypoint: 'bash' 5 | args: 6 | - '-c' 7 | - | 8 | ./build.sh \ 9 | -t gcr.io/$PROJECT_ID/$REPO_NAME:$REVISION_ID \ 10 | -t gcr.io/$PROJECT_ID/$REPO_NAME:$TAG_NAME \ 11 | . 12 | images: 13 | - 'gcr.io/$PROJECT_ID/$REPO_NAME:$REVISION_ID' 14 | - 'gcr.io/$PROJECT_ID/$REPO_NAME:$TAG_NAME' 15 | -------------------------------------------------------------------------------- /delivery.yaml: -------------------------------------------------------------------------------- 1 | version: "2017-09-20" 2 | allow_concurrent_steps: true 3 | 4 | build_env: &BUILD_ENV 5 | BASE_IMAGE: container-registry.zalando.net/library/ubuntu-22.04 6 | PGVERSION: 17 7 | MULTI_ARCH_REGISTRY: container-registry-test.zalando.net/acid 8 | 9 | pipeline: 10 | - id: push-spilo-cdp-pr 11 | env: 12 | <<: *BUILD_ENV 13 | type: script 14 | requires_human_approval: true 15 | when: 16 | - event: pull_request 17 | timeout: 10h 18 | vm_config: 19 | type: linux 20 | size: large 21 | commands: 22 | - desc: Tag and push spilo-cdp image without promotion 23 | cmd: | 24 | cd postgres-appliance 25 | 26 | PATRONIVERSION=$(sed -n 's/^ENV PATRONIVERSION=\([1-9][0-9]*\.[0-9]*\).*$/\1/p' Dockerfile) 27 | ECR_TEST_IMAGE="$MULTI_ARCH_REGISTRY/spilo-cdp-pr$CDP_PULL_REQUEST_NUMBER-$PGVERSION:$PATRONIVERSION-p$CDP_PULL_REQUEST_COUNTER" 28 | 29 | # create a Buildkit builder with CDP specific configuration 30 | docker buildx create --config /etc/cdp-buildkitd.toml --driver-opt network=host --bootstrap --use 31 | 32 | # single platform build for PR images! 33 | docker buildx build --platform "linux/amd64" \ 34 | --build-arg PGVERSION="$PGVERSION" \ 35 | --build-arg BASE_IMAGE="$BASE_IMAGE" \ 36 | --build-arg PGOLDVERSIONS="14 15 16" \ 37 | -t "$ECR_TEST_IMAGE" \ 38 | --push . 39 | 40 | - id: push-spilo-cdp 41 | env: 42 | <<: *BUILD_ENV 43 | type: script 44 | when: 45 | - event: push 46 | branch: trigger 47 | timeout: 10h 48 | vm_config: 49 | type: linux 50 | size: extra_large 51 | commands: 52 | - desc: Tag and push spilo-cdp image 53 | cmd: | 54 | cd postgres-appliance 55 | 56 | PATRONIVERSION=$(sed -n 's/^ENV PATRONIVERSION=\([1-9][0-9]*\.[0-9]*\).*$/\1/p' Dockerfile) 57 | ECR_TEST_IMAGE="$MULTI_ARCH_REGISTRY/spilo-cdp-$PGVERSION:trigger-$PATRONIVERSION-p$CDP_TARGET_BRANCH_COUNTER" 58 | 59 | # create a Buildkit builder with CDP specific configuration 60 | docker buildx create --config /etc/cdp-buildkitd.toml --driver-opt network=host --bootstrap --use 61 | 62 | docker buildx build --platform "linux/amd64,linux/arm64" \ 63 | --build-arg PGVERSION="$PGVERSION" \ 64 | --build-arg BASE_IMAGE="$BASE_IMAGE" \ 65 | --build-arg PGOLDVERSIONS="14 15 16" \ 66 | -t "$ECR_TEST_IMAGE" \ 67 | --push . 
68 | cdp-promote-image "$ECR_TEST_IMAGE" 69 | 70 | - id: push-spilo-cdp-master 71 | env: 72 | <<: *BUILD_ENV 73 | type: script 74 | requires_human_approval: true 75 | when: 76 | - event: push 77 | branch: master 78 | timeout: 10h 79 | vm_config: 80 | type: linux 81 | size: extra_large 82 | commands: 83 | - desc: Tag and push spilo-cdp image to ecr 84 | cmd: | 85 | cd postgres-appliance 86 | 87 | PATRONIVERSION=$(sed -n 's/^ENV PATRONIVERSION=\([1-9][0-9]*\.[0-9]*\).*$/\1/p' Dockerfile) 88 | ECR_TEST_IMAGE="$MULTI_ARCH_REGISTRY/spilo-cdp-$PGVERSION:$PATRONIVERSION-p$CDP_TARGET_BRANCH_COUNTER" 89 | 90 | # create a Buildkit builder with CDP specific configuration 91 | docker buildx create --config /etc/cdp-buildkitd.toml --driver-opt network=host --bootstrap --use 92 | 93 | docker buildx build --platform "linux/amd64,linux/arm64" \ 94 | --build-arg PGVERSION="$PGVERSION" \ 95 | --build-arg BASE_IMAGE="$BASE_IMAGE" \ 96 | --build-arg PGOLDVERSIONS="14 15 16" \ 97 | -t "$ECR_TEST_IMAGE" \ 98 | --push . 99 | cdp-promote-image "$ECR_TEST_IMAGE" 100 | 101 | - id: tag-spilo 102 | type: script 103 | requires_human_approval: true 104 | when: 105 | event: push 106 | branch: master 107 | commands: 108 | - desc: Tag release spilo image 109 | cmd: | 110 | PATRONIVERSION=$(sed -n 's/^ENV PATRONIVERSION=\([1-9][0-9]*\.[0-9]*\).*$/\1/p' postgres-appliance/Dockerfile) 111 | COUNTER=$(git tag | sed -n "s/^$PATRONIVERSION-p//p" | sort -un | tail -n1) 112 | TAG="$PATRONIVERSION-p$((COUNTER+1))" 113 | 114 | git gh-tag "$TAG" 115 | -------------------------------------------------------------------------------- /docs/DESIGN.md: -------------------------------------------------------------------------------- 1 | Spilo combines Patroni with the Stups infrastructure. Its major components are: 2 | 3 | * [Patroni](https://github.com/zalando/patroni) 4 | * [Stups](https://stups.io) 5 | 6 | ## Patroni 7 | Patroni manages the [PostgreSQL](https://www.postgresql.org) databases which are running in a Spilo. It ensures 8 | there is at most 1 master within Spilo. 9 | 10 | To allow distributed nodes to agree on anything, we need a Distributed Configuration Store (DCS). 11 | Patroni can utilize [etcd](https://coreos.com/etcd/) and [ZooKeeper](https://zookeeper.apache.org/). 12 | 13 | **Etcd** 14 | 15 | Most deployments of Patroni use etcd. Etcd implements the Raft protocol. Explaining the details of 16 | the consensus algorithm of raft is outside the scope of this document. 17 | For more information, check the following interactive 18 | websites: 19 | 20 | * Introduction into raft [http://thesecretlivesofdata.com/raft/](http://thesecretlivesofdata.com/raft/) 21 | * Interactive raft visualization [https://raft.github.io](https://raft.github.io) 22 | 23 | 24 | ## Stups 25 | Stups allows us to run PostgreSQL on top of Amazon Web Services (AWS) in an audit-compliant manner. 26 | It requires us to use the [Taupage AMI](https://github.com/zalando-stups/taupage) and [Docker](https://docs.docker.com). 27 | 28 | **2 node Spilo** 29 | ![High Level Architecture](Spilo_Architecture_Instance.png) 30 | 31 | ## Amazon Web Services 32 | Running Spilo on [AWS](https://aws.amazon.com/) allows us to address issues like node replacement, connectivity, external backups. 
We use the following components: 33 | 34 | * CF (Cloud Formation) 35 | * ASG (Auto Scaling Groups) 36 | * ELB (Elastic Load Balancing) 37 | * Route 53 38 | * S3 (Simple Storage Service) 39 | * SG (Security Group) 40 | * KMS (Key Management Service) 41 | 42 | **3 node Spilo** 43 | ![High Level Architecture](Spilo_Architecture_High_Level.png) 44 | 45 | **Auto Scaling** 46 | 47 | The Auto Scaling ensures a new EC2 instance will run if a node fails. The Launch Configuration of the ASG has enough configuration for Patroni to discover the correct Patroni siblings. 48 | The ASG will also ensure nodes are running in different Availability Zones. 49 | 50 | **Elastic Load Balancing and Route 53** 51 | 52 | To allow applications to connect to the database using a dns name, we use a Route 53 (dns) CNAME 53 | which points to an ELB. This ELB will only route connections to the master and not to the replicas. 54 | 55 | A replica ELB is optionally available, it only routes connections to the replicas, never to the master. 56 | 57 | **S3** 58 | 59 | Binary backups are created periodically and are shipped to S3. WAL files generated by the master are also archived to S3. 60 | This allows Point-in-time recovery abilities. 61 | 62 | Having backups in S3 also allows new nodes to be restored from S3 (instead from the master). This may speed up spawning a new node and 63 | reduce pressure on the master. 64 | 65 | **Security Groups** 66 | 67 | Multiple Security Groups are created, one for the EC2 instances, which is by default restrictive: 68 | 69 | * Allows Spilo peers to connect to eachother 70 | * Allows ELB's to connect to the EC2 instances 71 | * Allows administrative and monitoring connections to the instances 72 | 73 | And a Security Group per ELB, to allow access to specific networks or applications, which by default allows traffic from this VPC's region. 74 | 75 | **Cloud Formation** 76 | 77 | All components are described in a Cloud Formation Template. The stups tool [senza](https://github.com/zalando-stups/senza) 78 | greatly simplifies generating this template. 79 | -------------------------------------------------------------------------------- /docs/Spilo_Architecture_High_Level.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zalando/spilo/bb39d8109bc759e28d9b3fb1dec549ce82ba6a38/docs/Spilo_Architecture_High_Level.png -------------------------------------------------------------------------------- /docs/Spilo_Architecture_Instance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zalando/spilo/bb39d8109bc759e28d9b3fb1dec549ce82ba6a38/docs/Spilo_Architecture_Instance.png -------------------------------------------------------------------------------- /docs/admin-guide/deploy_spilo.md: -------------------------------------------------------------------------------- 1 | ## Initialize a Spilo template 2 | The configuration for Spilo has a lot of items. To help you build a template for your needs, the `senza` application 3 | has a postgresapp template which will guide you through most of these items. 4 | 5 | For more details, check the configuration: [Configuration](/configuration/) 6 | 7 | ```bash 8 | senza init spilo-tutorial.yaml 9 | ``` 10 | 11 | You should choose the `postgresapp` option in senza. 
12 | 13 | ## Create a Cloud Formation Stack 14 | After this you create a Cloud Formation Stack from the generated template using `senza create` 15 | ```bash 16 | senza create spilo-tutorial.yaml [PARAMETERS] 17 | ``` 18 | 19 | The `` is the same as a senza `` and therefore should adhere to those limitations. 20 | We advise you to use a descriptive name instead of a number, as a data store is supposed to be long lived and the 21 | stack will be upgraded in place. A descriptive name could be `mediawiki` if you are going to use it to store 22 | your own wiki. 23 | 24 | Parameters may not be required if you have specified all configuration options in the template. 25 | 26 | ## Demo Spilo deployment 27 | [![Demo on asciicast](https://asciinema.org/a/32288.png)](https://asciinema.org/a/32288) 28 | -------------------------------------------------------------------------------- /docs/admin-guide/failover.md: -------------------------------------------------------------------------------- 1 | In the lifetime of Spilo you may need to manually fail over the master to a replica, some of 2 | these reasons are: 3 | 4 | * Mandatory Taupage AMI update 5 | * Scheduled AWS maintenance in your region 6 | * Scheduled EC2 maintenance by AWS 7 | 8 | You can trigger a fail over using the api. The command line tool `patronictl` can also issue a fail over (since Patroni 0.75). 9 | Using the api to fail over is the preferred method, as it will execute preliminary checks and provide feedback. 10 | 11 | An alternative method which works with any version is to trigger an automatic fail over. 12 | 13 | * Identify the node currently running as a master (LB Status = `IN_SERVICE`) 14 | * Request access using `piu` to the EC2 instance currently running as a master 15 | * Stop the currently running Docker container 16 | * Wait 10-20 seconds 17 | * Start the previously stopped Docker container 18 | * Verify cluster health 19 | 20 | **Example failover (from master i-967d301b to master i-24051ead)** 21 | 22 | ```bash 23 | user@localhost ~ $ senza instances spilo tutorial 24 | Stack Name│Ver. │Resource ID│Instance ID│Public IP│Private IP │State │LB Status │Launched 25 | spilo tutorial AppServer i-24051ead 172.31.156.138 RUNNING OUT_OF_SERVICE 8d ago 26 | spilo tutorial AppServer i-482e02c3 172.31.170.155 RUNNING OUT_OF_SERVICE 8d ago 27 | spilo tutorial AppServer i-967d301b 172.31.142.172 RUNNING IN_SERVICE 8d ago 28 | 29 | user@localhost ~ $ piu 172.31.142.172 --user=aws_user "Failing over Patroni" 30 | 31 | user@localhost ~ $ ssh -tA aws_user@odd-eu-west-1.team.example.com ssh -o StrictHostKeyChecking=no aws_user@172.31.142.172 32 | 33 | aws_user@ip-172-31-142-172:~$ docker stop $(docker ps -q) 34 | 865347e6d41d 35 | 36 | aws_user@ip-172-31-142-172:~$ sleep 20 37 | 38 | aws_user@ip-172-31-142-172:~$ docker start $(docker ps -qa) 39 | 865347e6d41d 40 | 41 | aws_user@ip-172-31-142-172:~$ logout 42 | 43 | user@localhost ~ $ senza instances spilo tutorial 44 | Stack Name│Ver. 
│Resource ID│Instance ID│Public IP│Private IP │State │LB Status │Launched 45 | spilo tutorial AppServer i-24051ead 172.31.156.138 RUNNING IN_SERVICE 8d ago 46 | spilo tutorial AppServer i-482e02c3 172.31.170.155 RUNNING OUT_OF_SERVICE 8d ago 47 | spilo tutorial AppServer i-967d301b 172.31.142.172 RUNNING OUT_OF_SERVICE 8d ago 48 | ``` 49 | -------------------------------------------------------------------------------- /docs/admin-guide/scaling.md: -------------------------------------------------------------------------------- 1 | At some point during the lifetime of Spilo you may decide that you may need to scale up, scale down or change other 2 | parts of the infrastructure. 3 | 4 | The following plan will accomplish this: 5 | 6 | 1. Change the CloudFormation template to reflect the changes in infrastructure 7 | 2. Increase the number of EC2 instances in your AutoScalingGroup (or: Terminate one of the streaming replica's) 8 | 3. Kill one of the replica's running on the old configuration 9 | 4. Repeat step 2 and 3 until all replica's are running the new configuration 10 | 5. Fail over the Spilo cluster 11 | 6. Terminate the replica (the previous master) 12 | 13 | Only during the fail over will there be some downtime for the master instance. 14 | 15 | If your databases' size is significant, these steps may take some time to complete. Optimizations of this process 16 | (think of using EBS snapshots) are on the roadmap but not yet available. 17 | 18 | -------------------------------------------------------------------------------- /docs/admin-guide/sizing.md: -------------------------------------------------------------------------------- 1 | As every use case of a database is unique we cannot simply advise you to use a certain instance type for Spilo. 2 | To get some notion of what is available, look at the Amazon resources: 3 | 4 | * [Amazon EC2 Instances](https://aws.amazon.com/ec2/instance-types/) 5 | 6 | # Guidelines 7 | We can however provide some guidelines in choosing your instance types. 8 | Choosing an instances type has some basic parameters: 9 | 10 | - database size 11 | - database usage pattern 12 | - performance requirements 13 | - usage characteristics 14 | - costs 15 | 16 | ## Costs 17 | To have some notion of the costs of Spilo, we have calculated the costs for some possible configurations. 18 | 19 | **Note**: These are costs for an single Spilo deployment, as a case study. All prerequisites (Senza, Etcd) are considered 20 | to be available and are not part of the costs of Spilo. 21 | These figures are meant to show the approximate costs, they *will* vary depending on your use case and resource usage. 22 | 23 | 24 | The following table shows ballpark figures of the raw costs of a Spilo deployment in 3 Availability Zones. 25 | The pricing information is taken from Amazon with their calculator (January 2016). 
26 | 
27 | * [Amazon EC2 Pricing](https://aws.amazon.com/ec2/pricing/)
28 | * [Amazon EBS Pricing](https://aws.amazon.com/ebs/pricing/)
29 | * [Amazon ELB Pricing](https://aws.amazon.com/elasticloadbalancing/pricing/)
30 | 
31 | **Monthly costs for a Spilo with 3 nodes**
32 | 
33 | | EC2 | Storage | On-Demand ($) | 1 Yr All Upfront ($) | 3 Yr All Upfront ($) |
34 | |----------|---------------------------:|---------:|-------:|----:|
35 | | t2.micro | 10 GB EBS | 74 | 64 | 57 |
36 | | m4.large | 100 GB EBS | 371 | 370 | 308 |
37 | | r3.2xlarge | (provisioned) 500 GB EBS | 2164 | 1500 | 978 |
38 | | d2.8xlarge | (RAID 10) 24 TB HDD | 14244 | 7253 | 4970 |
39 | 
40 | **Note**: When choosing All Upfront you can reduce your monthly costs considerably, yet an AWS Support plan is necessary.
41 | Support costs are included in these numbers; they may be much lower if you already buy support from AWS.
42 | 
43 | Do your own calculations on the [Amazon Simple Monthly Calculator](http://calculator.s3.amazonaws.com/index.html)
44 | 
45 | ## Database usage
46 | For some types of databases the smallest possible instance available can be a good candidate. Some examples that
47 | can probably run fine on t2.micro instances are:
48 | 
49 | * Very small (< 500 MB) databases
50 | * Databases which are not queried often or heavily (some configuration store, an application registry)
51 | * Proof of Concepts / Prototyping
52 | 
53 | 
54 | 
--------------------------------------------------------------------------------
/docs/admin-guide/update.md:
--------------------------------------------------------------------------------
1 | Spilo is a stateful appliance, therefore you may need to update certain components from time to time.
2 | Examples of these updates are:
3 | 
4 | * Update Taupage AMI
5 | * PostgreSQL minor upgrade (e.g. 9.4.4 to 9.4.5)
6 | * Spilo Patch (e.g. spilo94:0.76-p1 to spilo94:0.76-p3)
7 | * Bigger EBS volumes (e.g. 50 GB to 100 GB)
8 | 
9 | ## Update the configuration
10 | * [Senza: Updating Taupage AMI](http://stups.readthedocs.org/en/latest/user-guide/maintenance.html?highlight=patch#updating-taupage-ami)
11 | To change the configuration of Spilo you need to change the Cloud Formation Stack or the Launch Configuration of Spilo.
12 | 
13 | The following options are available to you:
14 | 
15 | **senza patch**
16 | 
17 | Use `senza patch` to update the Launch Configuration of Spilo (the Taupage AMI, for example).
18 | 
19 | [Senza: Updating Taupage AMI](http://stups.readthedocs.org/en/latest/user-guide/maintenance.html?highlight=patch#updating-taupage-ami)
20 | 
21 | **senza update**
22 | 
23 | If you need to change something other than the Taupage AMI, for example the Docker image, you will need
24 | to update the Cloud Formation Template. You should use the same template you used to create this Spilo,
25 | make the necessary changes and execute `senza update`.
26 | 
27 | **Note**: Updating the Cloud Formation Stack carries risk. If you change any infrastructure parameters (e.g.
28 | reducing the Auto Scaling Group members), the update will force this change upon the infrastructure.
29 | 
30 | ```bash
31 | senza update spilo-tutorial.yaml [PARAMETERS]
32 | ```
33 | ## New replica initialization
34 | While you can always terminate a replica and make the autoscaling group replace it with a new one (which will include the changes made to the Cloud Formation stack or Auto Scaling Group), the process involves a base backup from the master and might take a lot of time for large databases. Therefore, it might be more efficient to use [EBS Snapshots](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html), an AWS feature. It makes it possible to bootstrap an arbitrary number of replicas without imposing any extra load on the master. Here's how to utilize them:
35 | 
36 | * Pick an existing replica and [make a snapshot](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-snapshot.html) of its non-root EBS volume (it's usually named /dev/xvdk).
37 | * Change the senza definition YAML:
38 |     * Add the following line:
39 |       ```SnapshotId: ID of the snapshot created in the previous step``` under the `Ebs` block (a subblock of `BlockDeviceMappings`).
40 |     * Set the parameter `erase_on_boot` under `mounts` (a subblock of `TaupageConfig`) to `false`.
41 | * Do ```senza update``` with the new definition.
42 | 
43 | This ensures that a new replica will use a snapshot taken from an existing one and will not try to automatically erase the data populated by the snapshot during startup.
44 | 
45 | Note: usage of snapshots adds one extra step to the procedure below:
46 | * Revert the snapshot-related changes in the YAML template and do `senza update` with the changed YAML one last time.
47 | 
48 | The demo video below demonstrates the process of updating your cluster to the new Spilo version with senza update and EBS snapshots:
49 | [![Demo on asciicast](https://asciinema.org/a/34689.png)](https://asciinema.org/a/34689)
50 | 
51 | ## Apply the configuration
52 | This is a three-step process:
53 | 
54 | * Rotate the Spilo replicas
55 |     * Terminate a Spilo replica
56 |     * Wait for the new replica to be available and replicating
57 |     * Repeat for the other replica(s)
58 | * [Failover Spilo](/admin-guide/failover)
59 | * After a successful failover: terminate the previous master
60 | 
61 | All newly launched EC2 instances will now be using the updated Taupage AMI.
62 | 
--------------------------------------------------------------------------------
/docs/components/etcd-proxy.md:
--------------------------------------------------------------------------------
1 | Introduction
2 | ============
3 | The etcd-proxy is the bridge between an HA-cluster member and the etcd cluster. By using an etcd-proxy we do not unnecessarily increase the quorum size or degrade the write performance of the etcd cluster, but we do have an etcd interface which knows about the etcd cluster.
4 | 
--------------------------------------------------------------------------------
/docs/components/governor.md:
--------------------------------------------------------------------------------
1 | Introduction
2 | ============
3 | Governor is the process that "governs" PostgreSQL. It uses information from PostgreSQL to determine its health. It uses etcd to determine what role this PostgreSQL instance has within the HA-cluster.
4 | It can start, restart, promote, or rebuild a PostgreSQL instance.
5 | 
--------------------------------------------------------------------------------
/docs/configuration.md:
--------------------------------------------------------------------------------
1 | Configuration items specific to Spilo are listed here.
2 | 
3 | ### Docker image
4 | The Docker image containing Patroni and AWS-specific code which constitutes Spilo.
5 | 
6 | Example: `registry.opensource.zalan.do/acid/spilo-9.4:0.76-p1`
7 | 
8 | ### Postgres WAL S3 bucket
9 | The location to store the `pg_basebackup` and the archived WAL files of the PostgreSQL cluster.
10 | 11 | Example: `example-team-eu-west-1-spilo-app` 12 | 13 | ### EC2 instance type 14 | Which EC2 instance type you want to use. Amazon provides many types of instances for different use cases. 15 | 16 | Example: `m3.large` 17 | 18 | **Amazon Documentation** [Amazon EC2 Instance Types](https://aws.amazon.com/ec2/instance-types/) 19 | 20 | 21 | ### ETCD Discovery Domain 22 | What domain to use for etcd discovery. 23 | 24 | Example: `team.example.com` 25 | 26 | The supplied domain will be prefixed with `_etcd-server-ssl._tcp.` or `_etcd-server._tcp.` to 27 | resolve a dns SRV record. This record contains the IP-addresses and ports of the etcd cluster. Example output: 28 | 29 | ```bash 30 | $ dig +noall +answer SRV _etcd-server._tcp.tutorial.team.example.com 31 | _etcd-server._tcp.tutorial.team.example.com. 60 IN SRV 1 1 2380 ip-172-31-152-102.eu-west-1.compute.internal. 32 | _etcd-server._tcp.tutorial.team.example.com. 60 IN SRV 1 1 2380 ip-172-31-152-103.eu-west-1.compute.internal. 33 | _etcd-server._tcp.tutorial.team.example.com. 60 IN SRV 1 1 2380 ip-172-31-161-166.eu-west-1.compute.internal. 34 | _etcd-server._tcp.tutorial.team.example.com. 60 IN SRV 1 1 2380 ip-172-31-131-14.eu-west-1.compute.internal. 35 | _etcd-server._tcp.tutorial.team.example.com. 60 IN SRV 1 1 2380 ip-172-31-131-15.eu-west-1.compute.internal. 36 | ``` 37 | 38 | **etcd documentation** [DNS Discovery](https://github.com/coreos/etcd/blob/master/Documentation/clustering.md#dns-discovery) 39 | 40 | ### Database Volume Size 41 | The size for the volume containing the Database. 42 | 43 | ### Database Volume Type 44 | What EBS Volume type to use. Currently only three options are available: 45 | 46 | * gp2 47 | * io1 48 | * standard 49 | 50 | **Amazon Documentation** [Amazon EBS Volume Types](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) 51 | 52 | ### EBS Snapshot ID 53 | When reusing an old EBS snapshot, specify its ID here. 54 | 55 | Example: `snap-e8bdf4c1` 56 | 57 | ### File system for the data partition 58 | What file system to use for your PostgreSQL cluster. Choosing `ext4` is a safe bet. 59 | 60 | Example: `ext4` 61 | 62 | **Blog with a lot of pointers on what Filesystem to choose** 63 | [PostgreSQL performance on EXT4 and XFS](http://blog.pgaddict.com/posts/postgresql-performance-on-ext4-and-xfs) 64 | 65 | ### File system mount options 66 | Which mount options for the file system containing the data. 67 | 68 | Example: `noatime,nodiratime,nobarrier` 69 | 70 | ### Scalyr Account key 71 | The key with which Spilo is allowed to write Scalyr logs. 72 | 73 | Example: `W21hc3RlciA1NjExNDc2XSBBZGQgZ2VuZXJhdGVkIHBI-` 74 | 75 | **Stups documentation** 76 | [Configuring Application Logging](http://stups.readthedocs.org/en/latest/user-guide/standalone-deployment.html#optional-configuring-application-logging) 77 | 78 | ### PostgreSQL passwords 79 | The passwords to use for the initial PostgreSQL users. These can be randomly generated and optionally can be encrypted 80 | using KMS. 81 | 82 | ### KMS Encryption Key 83 | Which key to use to encrypt the passwords in the template. By using KMS encryption we avoid storing the passwords 84 | in plain text anywhere. They will only be available to the EC2 instances running Spilo. 
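A rough illustration of producing such an encrypted value with the AWS CLI (the key alias below is a placeholder, and AWS CLI v2 may additionally require `--cli-binary-format raw-in-base64-out` for `--plaintext`):

```bash
# Hypothetical example: encrypt a freshly generated password with the stack's KMS key.
# Replace alias/spilo-tutorial with the key configured for your deployment.
aws kms encrypt \
    --key-id alias/spilo-tutorial \
    --plaintext "$(openssl rand -base64 32)" \
    --query CiphertextBlob \
    --output text
```

The resulting base64 ciphertext is what ends up in the template; only instances whose IAM role is permitted to use the key can decrypt it at runtime.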
85 | 
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | Introduction
2 | ============
3 | Spilo (სპილო, elephant [spiːlɒ]) is a highly available PostgreSQL cluster (HA-cluster). It runs a number of PostgreSQL instances, with one instance acting as the master and the others as replicas. Its purpose is to provide a very resilient, highly available PostgreSQL cluster which can be configured and started within minutes.
4 | 
--------------------------------------------------------------------------------
/docs/prerequisites.md:
--------------------------------------------------------------------------------
1 | Spilo combines [Patroni](https://github.com/zalando/Patroni) with [stups](https://stups.io). Running Spilo therefore
2 | requires that you have an Amazon VPC with Stups installed. For more details,
3 | see the [Stups Installation Guide](http://docs.stups.io/en/latest/installation/).
4 | 
5 | ## Stups
6 | 
7 | To be able to deploy and troubleshoot Spilo, you need the stups tooling installed locally:
8 | ```bash
9 | $ sudo pip3 install --upgrade stups
10 | ```
11 | 
12 | If you need different installation methods, run into issues, or want more details on how to install the stups tooling
13 | locally, we refer you to the [Stups User's Guide](http://docs.stups.io/en/latest/user-guide/index.html).
14 | 
15 | You can test your setup by logging in using mai and then listing the already running instances using senza,
16 | example session:
17 | 
18 | ```bash
19 | $ mai login
20 | Please enter password for encrypted keyring:
21 | Authenticating against https://aws.example.com.. OK
22 | Assuming role AWS Account 1234567890 (example-team): SomeRoleName.. OK
23 | Writing temporary AWS credentials.. OK
24 | ```
25 | ```bash
26 | $ senza list
27 | Stack Name|Ver. |Status |Created|Description
28 | spilo tutorial CREATE_COMPLETE 6d ago Spilo ()
29 | ```
30 | 
31 | ## etcd
32 | Patroni requires a DCS. One of these could be etcd.
33 | You may already have etcd installed within your account.
34 | 
35 | To check if etcd is already deployed using the advised method:
36 | 
37 | ```bash
38 | $ senza list etcd-cluster
39 | Stack Name │Ver. │Status │Created │Description
40 | etcd-cluster somename CREATE_COMPLETE 242d ago Etcd Cluster (
) 41 | ``` 42 | 43 | If etcd is not installed yet, you can use the 44 | [Installation Section](https://github.com/zalando/stups-etcd-cluster/blob/master/README.md#usage) of the 45 | stups-etcd-cluster appliance. 46 | 47 | **Demo etcd-cluster deployment** 48 | [![Demo on asciicast](https://asciinema.org/a/32703.png)](https://asciinema.org/a/32703) 49 | -------------------------------------------------------------------------------- /docs/user-guide/connect_spilo.md: -------------------------------------------------------------------------------- 1 | # Connect to Spilo 2 | Connecting to a Spilo appliance should be the same as connecting to a PostgreSQL database or an RDS instance. 3 | Use the dns-name you specified during creation as the hostname, and use your credentials to authenticate. 4 | 5 | ## Administrative connections 6 | You will need to setup Spilo to create a database and roles for your application(s). 7 | 8 | For example: 9 | 10 | psql -h myfirstspilo.example.com -p 5432 -U admin -d postgres 11 | 12 | ## Application connections 13 | Once you have created a database and roles for your application you can connect to Spilo just like you want to connect 14 | to any other PostgreSQL cluster 15 | 16 | * `psql -h myfirstspilo.example.com -p 5432 -U wow_app -d wow` 17 | * `"postgresql://myfirstspilo.example.com:5432/wow?user=wow_app"` 18 | -------------------------------------------------------------------------------- /etcd-cluster-appliance/README.md: -------------------------------------------------------------------------------- 1 | This appliance has been given its own repository: 2 | 3 | [https://github.com/zalando/stups-etcd-cluster](https://github.com/zalando/stups-etcd-cluster) 4 | -------------------------------------------------------------------------------- /kubernetes/spilo_kubernetes.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: &cluster_name zalandodemo01 5 | labels: 6 | application: spilo 7 | spilo-cluster: *cluster_name 8 | spec: 9 | selector: 10 | matchLabels: 11 | application: spilo 12 | spilo-cluster: *cluster_name 13 | replicas: 3 14 | serviceName: *cluster_name 15 | template: 16 | metadata: 17 | labels: 18 | application: spilo 19 | spilo-cluster: *cluster_name 20 | annotations: 21 | # kube2iam should be running in a cluster and 22 | # app-spilo role needs to be created in the AWS account 23 | # the cluster is running in. It will be used to ship WALs, 24 | # and requires access to S3 bucket. See https://github.com/jtblin/kube2iam 25 | # for the sts::AssumeRole snippets to build trust relationship 26 | # between the kubernetes woker role and the one below. 27 | # if you don't use AWS, feel free to remove this annotation. 28 | iam.amazonaws.com/role: app-spilo 29 | # forces the scheduler not to put pods on the same node. 
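        # Note: scheduler.alpha.kubernetes.io/affinity is the legacy alpha annotation;
        # on current Kubernetes versions the equivalent rule is normally declared under
        # spec.affinity.podAntiAffinity in the pod template instead of an annotation.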
30 | scheduler.alpha.kubernetes.io/affinity: > 31 | { 32 | "podAntiAffinity": { 33 | "requiredDuringSchedulingIgnoredDuringExecution": [ 34 | { 35 | "labelSelector": { 36 | "matchExpressions": [ 37 | { 38 | "key": "spilo-cluster", 39 | "operator": "In", 40 | "values": ["zalandodemo01"] 41 | } 42 | ] 43 | }, 44 | "topologyKey": "kubernetes.io/hostname" 45 | } 46 | ] 47 | } 48 | } 49 | spec: 50 | # service account that allows changing endpoints and assigning pod labels 51 | # in the given namespace: https://kubernetes.io/docs/user-guide/service-accounts/ 52 | # not required unless you've changed the default service account in the namespace 53 | # used to deploy Spilo 54 | serviceAccountName: operator 55 | containers: 56 | - name: *cluster_name 57 | image: registry.opensource.zalan.do/acid/spilo-11:1.5-p5 # put the spilo image here 58 | imagePullPolicy: IfNotPresent 59 | ports: 60 | - containerPort: 8008 61 | protocol: TCP 62 | - containerPort: 5432 63 | protocol: TCP 64 | volumeMounts: 65 | - mountPath: /home/postgres/pgdata 66 | name: pgdata 67 | env: 68 | - name: DCS_ENABLE_KUBERNETES_API 69 | value: 'true' 70 | # - name: ETCD_HOST 71 | # value: 'test-etcd.default.svc.cluster.local:2379' # where is your etcd? 72 | # - name: WAL_S3_BUCKET 73 | # value: example-spilo-dbaas 74 | # - name: LOG_S3_BUCKET # may be the same as WAL_S3_BUCKET 75 | # value: example-spilo-dbaas 76 | # - name: BACKUP_SCHEDULE 77 | # value: "00 01 * * *" 78 | - name: KUBERNETES_SCOPE_LABEL 79 | value: spilo-cluster 80 | - name: KUBERNETES_ROLE_LABEL 81 | value: role 82 | - name: KUBERNETES_LEADER_LABEL_VALUE 83 | value: master 84 | - name: KUBERNETES_STANDBY_LEADER_LABEL_VALUE 85 | value: master 86 | - name: SPILO_CONFIGURATION 87 | value: | ## https://github.com/zalando/patroni#yaml-configuration 88 | bootstrap: 89 | initdb: 90 | - auth-host: md5 91 | - auth-local: trust 92 | - name: POD_IP 93 | valueFrom: 94 | fieldRef: 95 | apiVersion: v1 96 | fieldPath: status.podIP 97 | - name: POD_NAMESPACE 98 | valueFrom: 99 | fieldRef: 100 | apiVersion: v1 101 | fieldPath: metadata.namespace 102 | - name: PGPASSWORD_SUPERUSER 103 | valueFrom: 104 | secretKeyRef: 105 | name: *cluster_name 106 | key: superuser-password 107 | - name: PGUSER_ADMIN 108 | value: superadmin 109 | - name: PGPASSWORD_ADMIN 110 | valueFrom: 111 | secretKeyRef: 112 | name: *cluster_name 113 | key: admin-password 114 | - name: PGPASSWORD_STANDBY 115 | valueFrom: 116 | secretKeyRef: 117 | name: *cluster_name 118 | key: replication-password 119 | - name: SCOPE 120 | value: *cluster_name 121 | - name: PGROOT 122 | value: /home/postgres/pgdata/pgroot 123 | terminationGracePeriodSeconds: 0 124 | volumeClaimTemplates: 125 | - metadata: 126 | labels: 127 | application: spilo 128 | spilo-cluster: *cluster_name 129 | annotations: 130 | volume.beta.kubernetes.io/storage-class: standard 131 | name: pgdata 132 | spec: 133 | accessModes: 134 | - ReadWriteOnce 135 | resources: 136 | requests: 137 | storage: 10Gi 138 | 139 | --- 140 | apiVersion: v1 141 | kind: Endpoints 142 | metadata: 143 | name: &cluster_name zalandodemo01 144 | labels: 145 | application: spilo 146 | spilo-cluster: *cluster_name 147 | subsets: [] 148 | 149 | --- 150 | apiVersion: v1 151 | kind: Service 152 | metadata: 153 | name: &cluster_name zalandodemo01 154 | labels: 155 | application: spilo 156 | spilo-cluster: *cluster_name 157 | spec: 158 | type: ClusterIP 159 | ports: 160 | - name: postgresql 161 | port: 5432 162 | targetPort: 5432 163 | 164 | --- 165 | # headless service to avoid deletion of 
patronidemo-config endpoint 166 | apiVersion: v1 167 | kind: Service 168 | metadata: 169 | name: zalandodemo01-config 170 | labels: 171 | application: spilo 172 | spilo-cluster: zalandodemo01 173 | spec: 174 | clusterIP: None 175 | 176 | --- 177 | apiVersion: v1 178 | kind: Secret 179 | metadata: 180 | name: &cluster_name zalandodemo01 181 | labels: 182 | application: spilo 183 | spilo-cluster: *cluster_name 184 | type: Opaque 185 | data: 186 | superuser-password: emFsYW5kbw== 187 | replication-password: cmVwLXBhc3M= 188 | admin-password: YWRtaW4= 189 | 190 | --- 191 | apiVersion: v1 192 | kind: ServiceAccount 193 | metadata: 194 | name: operator 195 | 196 | --- 197 | apiVersion: rbac.authorization.k8s.io/v1 198 | kind: Role 199 | metadata: 200 | name: operator 201 | rules: 202 | - apiGroups: 203 | - "" 204 | resources: 205 | - configmaps 206 | verbs: 207 | - create 208 | - get 209 | - list 210 | - patch 211 | - update 212 | - watch 213 | # delete is required only for 'patronictl remove' 214 | - delete 215 | - apiGroups: 216 | - "" 217 | resources: 218 | - endpoints 219 | verbs: 220 | - get 221 | - patch 222 | - update 223 | # the following three privileges are necessary only when using endpoints 224 | - create 225 | - list 226 | - watch 227 | # delete is required only for for 'patronictl remove' 228 | - delete 229 | - apiGroups: 230 | - "" 231 | resources: 232 | - pods 233 | verbs: 234 | - get 235 | - list 236 | - patch 237 | - update 238 | - watch 239 | # The following privilege is only necessary for creation of headless service 240 | # for patronidemo-config endpoint, in order to prevent cleaning it up by the 241 | # k8s master. You can avoid giving this privilege by explicitly creating the 242 | # service like it is done in this manifest (lines 160..169) 243 | - apiGroups: 244 | - "" 245 | resources: 246 | - services 247 | verbs: 248 | - create 249 | 250 | --- 251 | apiVersion: rbac.authorization.k8s.io/v1 252 | kind: RoleBinding 253 | metadata: 254 | name: operator 255 | roleRef: 256 | apiGroup: rbac.authorization.k8s.io 257 | kind: Role 258 | name: operator 259 | subjects: 260 | - kind: ServiceAccount 261 | name: operator 262 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: Spilo PostgreSQL appliance 2 | repo_url: https://github.com/zalando/spilo/ 3 | theme: readthedocs 4 | 5 | pages: 6 | - Introduction: index.md 7 | - Design: DESIGN.md 8 | - Prerequisites: prerequisites.md 9 | - Administrator Guide: 10 | - Deploy spilo: admin-guide/deploy_spilo.md 11 | - Connect to spilo: user-guide/connect_spilo.md 12 | - Sizing spilo: admin-guide/sizing.md 13 | - Update : admin-guide/update.md 14 | - Manual failover: admin-guide/failover.md 15 | - Configuration: configuration.md 16 | -------------------------------------------------------------------------------- /postgres-appliance/.gitignore: -------------------------------------------------------------------------------- 1 | scm-source.json 2 | *.sw? 
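The Secret in kubernetes/spilo_kubernetes.yaml above ships base64-encoded demo credentials (for instance, `emFsYW5kbw==` decodes to `zalando`). A minimal sketch of replacing them with your own values before applying the manifest, assuming the standard `base64`, `openssl` and `kubectl` tools are available (the generated passwords are placeholders, not anything produced by Spilo itself):

```bash
# encode a value by hand for pasting into the manifest's data: section
echo -n "$(openssl rand -base64 18)" | base64          # e.g. superuser-password

# or skip manual encoding and create the secret directly
kubectl create secret generic zalandodemo01 \
  --from-literal=superuser-password="$(openssl rand -base64 18)" \
  --from-literal=replication-password="$(openssl rand -base64 18)" \
  --from-literal=admin-password="$(openssl rand -base64 18)"

# decode an existing value to verify it
echo emFsYW5kbw== | base64 -d                          # prints: zalando
```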
3 | -------------------------------------------------------------------------------- /postgres-appliance/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # global owners 2 | * @hughcapet @Jan-M @FxKu @sdudoladov @jopadi @idanovinda 3 | -------------------------------------------------------------------------------- /postgres-appliance/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BASE_IMAGE=ubuntu:22.04 2 | ARG PGVERSION=17 3 | ARG DEMO=false 4 | ARG COMPRESS=false 5 | ARG ADDITIONAL_LOCALES= 6 | 7 | 8 | FROM ubuntu:18.04 as ubuntu-18 9 | 10 | ARG ADDITIONAL_LOCALES 11 | 12 | COPY build_scripts/locales.sh /builddeps/ 13 | 14 | RUN bash /builddeps/locales.sh 15 | 16 | 17 | FROM $BASE_IMAGE as dependencies-builder 18 | 19 | ARG DEMO 20 | 21 | ENV WALG_VERSION=v3.0.3 22 | 23 | COPY build_scripts/dependencies.sh /builddeps/ 24 | 25 | COPY dependencies/debs /builddeps/ 26 | 27 | RUN bash /builddeps/dependencies.sh 28 | 29 | 30 | FROM $BASE_IMAGE as builder-false 31 | 32 | ARG DEMO 33 | ARG ADDITIONAL_LOCALES 34 | 35 | COPY build_scripts/prepare.sh build_scripts/locales.sh /builddeps/ 36 | 37 | RUN bash /builddeps/prepare.sh 38 | 39 | COPY --from=ubuntu-18 /usr/lib/locale/locale-archive /usr/lib/locale/locale-archive.18 40 | 41 | COPY cron_unprivileged.c /builddeps/ 42 | COPY build_scripts/base.sh /builddeps/ 43 | COPY --from=dependencies-builder /builddeps/*.deb /builddeps/ 44 | 45 | ARG PGVERSION 46 | ARG TIMESCALEDB_APACHE_ONLY=true 47 | ARG TIMESCALEDB_TOOLKIT=true 48 | ARG COMPRESS 49 | ARG PGOLDVERSIONS="13 14 15 16" 50 | ARG WITH_PERL=false 51 | 52 | ARG DEB_PG_SUPPORTED_VERSIONS="$PGOLDVERSIONS $PGVERSION" 53 | 54 | # Install PostgreSQL, extensions and contribs 55 | ENV POSTGIS_VERSION=3.5 \ 56 | BG_MON_COMMIT=7f5887218790b263fe3f42f85f4ddc9c8400b154 \ 57 | PG_AUTH_MON_COMMIT=fe099eef7662cbc85b0b79191f47f52f1e96b779 \ 58 | PG_MON_COMMIT=ead1de70794ed62ca1e34d4022f6165ff36e9a91 \ 59 | SET_USER=REL4_1_0 \ 60 | PLPROFILER=REL4_2_5 \ 61 | PG_PROFILE=4.7 \ 62 | PAM_OAUTH2=v1.0.1 \ 63 | PG_PERMISSIONS_COMMIT=f4b7c18676fa64236a1c8e28d34a35764e4a70e2 64 | 65 | WORKDIR /builddeps 66 | RUN bash base.sh 67 | 68 | # Install wal-g 69 | COPY --from=dependencies-builder /builddeps/wal-g /usr/local/bin/ 70 | 71 | COPY build_scripts/patroni_wale.sh build_scripts/compress_build.sh /builddeps/ 72 | 73 | # Install patroni and wal-e 74 | ENV PATRONIVERSION=4.0.4 75 | ENV WALE_VERSION=1.1.1 76 | 77 | WORKDIR / 78 | 79 | RUN bash /builddeps/patroni_wale.sh 80 | 81 | RUN if [ "$COMPRESS" = "true" ]; then bash /builddeps/compress_build.sh; fi 82 | 83 | 84 | FROM scratch as builder-true 85 | COPY --from=builder-false / / 86 | 87 | 88 | FROM builder-${COMPRESS} 89 | 90 | LABEL maintainer="Team ACID @ Zalando " 91 | 92 | ARG PGVERSION 93 | ARG DEMO 94 | ARG COMPRESS 95 | 96 | EXPOSE 5432 8008 8080 97 | 98 | ENV LC_ALL=en_US.utf-8 \ 99 | PATH=$PATH:/usr/lib/postgresql/$PGVERSION/bin \ 100 | PGHOME=/home/postgres \ 101 | RW_DIR=/run \ 102 | DEMO=$DEMO 103 | 104 | ENV WALE_ENV_DIR=$RW_DIR/etc/wal-e.d/env \ 105 | LOG_ENV_DIR=$RW_DIR/etc/log.d/env \ 106 | PGROOT=$PGHOME/pgdata/pgroot 107 | 108 | ENV PGDATA=$PGROOT/data \ 109 | PGLOG=$PGROOT/pg_log 110 | 111 | ENV USE_OLD_LOCALES=false 112 | 113 | WORKDIR $PGHOME 114 | 115 | COPY motd /etc/ 116 | COPY runit /etc/service/ 117 | COPY pgq_ticker.ini $PGHOME/ 118 | COPY build_scripts/post_build.sh /builddeps/ 119 | 120 | RUN sh /builddeps/post_build.sh && rm -rf /builddeps/ 
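# Usage sketch (illustrative, not part of the build): the image is normally built with the
# postgres-appliance directory as context, overriding the ARGs declared at the top of this
# file as needed, e.g.
#   docker build --build-arg PGVERSION=17 --build-arg DEMO=true -t spilo:local postgres-appliance/
# DEMO=true yields the slimmer demo image, and COMPRESS=true additionally packs the filesystem
# into /a.tar.xz, which launch.sh unpacks on container start.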
121 | 122 | COPY scripts bootstrap major_upgrade /scripts/ 123 | COPY launch.sh / 124 | 125 | CMD ["/bin/sh", "/launch.sh", "init"] 126 | -------------------------------------------------------------------------------- /postgres-appliance/bootstrap/clone_with_basebackup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | import logging 5 | import subprocess 6 | import sys 7 | 8 | from maybe_pg_upgrade import call_maybe_pg_upgrade 9 | 10 | logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO) 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | def read_configuration(): 15 | parser = argparse.ArgumentParser(description="Script to clone from another cluster using pg_basebackup") 16 | parser.add_argument('--scope', required=True, help='target cluster name', dest='name') 17 | parser.add_argument('--datadir', required=True, help='target cluster postgres data directory') 18 | parser.add_argument('--pgpass', required=True, 19 | help='path to the pgpass file containing credentials for the instance to be cloned') 20 | parser.add_argument('--host', required=True, help='hostname or IP address of the master to connect to') 21 | parser.add_argument('--port', required=False, help='PostgreSQL port master listens to', default=5432) 22 | parser.add_argument('--dbname', required=False, help='PostgreSQL database to connect to', default='postgres') 23 | parser.add_argument('--user', required=True, help='PostgreSQL user to connect with') 24 | return parser.parse_args() 25 | 26 | 27 | def escape_value(val): 28 | quote = False 29 | temp = [] 30 | for c in val: 31 | if c.isspace(): 32 | quote = True 33 | elif c in ('\'', '\\'): 34 | temp.append('\\') 35 | temp.append(c) 36 | result = ''.join(temp) 37 | return result if not quote else '\'{0}\''.format(result) 38 | 39 | 40 | def prepare_connection(options): 41 | connection = [] 42 | for attname in ('host', 'port', 'user', 'dbname'): 43 | attvalue = getattr(options, attname) 44 | connection.append('{0}={1}'.format(attname, escape_value(attvalue))) 45 | 46 | return ' '.join(connection), {'PGPASSFILE': options.pgpass} 47 | 48 | 49 | def run_basebackup(options): 50 | connstr, env = prepare_connection(options) 51 | logger.info('cloning cluster %s from "%s"', options.name, connstr) 52 | ret = subprocess.call(['pg_basebackup', '-D', options.datadir, '-X', 'stream', '-d', connstr, '-w'], env=env) 53 | if ret != 0: 54 | raise Exception("pg_basebackup exited with code={0}".format(ret)) 55 | return 0 56 | 57 | 58 | def main(): 59 | options = read_configuration() 60 | try: 61 | run_basebackup(options) 62 | except Exception: 63 | logger.exception("Clone failed") 64 | return 1 65 | return call_maybe_pg_upgrade() 66 | 67 | 68 | if __name__ == '__main__': 69 | sys.exit(main()) 70 | -------------------------------------------------------------------------------- /postgres-appliance/bootstrap/clone_with_wale.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | import csv 5 | import logging 6 | import os 7 | import re 8 | import shlex 9 | import subprocess 10 | import sys 11 | 12 | from maybe_pg_upgrade import call_maybe_pg_upgrade 13 | 14 | from collections import namedtuple 15 | from dateutil.parser import parse 16 | 17 | logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO) 18 | logger = logging.getLogger(__name__) 19 | 20 | 21 | def 
read_configuration(): 22 | parser = argparse.ArgumentParser(description="Script to clone from S3 with support for point-in-time-recovery") 23 | parser.add_argument('--scope', required=True, help='target cluster name') 24 | parser.add_argument('--datadir', required=True, help='target cluster postgres data directory') 25 | parser.add_argument('--recovery-target-time', 26 | help='the timestamp up to which recovery will proceed (including time zone)', 27 | dest='recovery_target_time_string') 28 | parser.add_argument('--dry-run', action='store_true', help='find a matching backup and build the wal-e ' 29 | 'command to fetch that backup without running it') 30 | args = parser.parse_args() 31 | 32 | options = namedtuple('Options', 'name datadir recovery_target_time dry_run') 33 | if args.recovery_target_time_string: 34 | recovery_target_time = parse(args.recovery_target_time_string) 35 | if recovery_target_time.tzinfo is None: 36 | raise Exception("recovery target time must contain a timezone") 37 | else: 38 | recovery_target_time = None 39 | 40 | return options(args.scope, args.datadir, recovery_target_time, args.dry_run) 41 | 42 | 43 | def build_wale_command(command, datadir=None, backup=None): 44 | cmd = ['wal-g' if os.getenv('USE_WALG_RESTORE') == 'true' else 'wal-e'] + [command] 45 | if command == 'backup-fetch': 46 | if datadir is None or backup is None: 47 | raise Exception("backup-fetch requires datadir and backup arguments") 48 | cmd.extend([datadir, backup]) 49 | elif command != 'backup-list': 50 | raise Exception("invalid {0} command {1}".format(cmd[0], command)) 51 | return cmd 52 | 53 | 54 | def fix_output(output): 55 | """WAL-G is using spaces instead of tabs and writes some garbage before the actual header""" 56 | 57 | started = None 58 | for line in output.decode('utf-8').splitlines(): 59 | if not started: 60 | started = re.match(r'^(backup_)?name\s+(last_)?modified\s+', line) 61 | if started: 62 | line = line.replace(' modified ', ' last_modified ') 63 | if started: 64 | yield '\t'.join(line.split()) 65 | 66 | 67 | def choose_backup(backup_list, recovery_target_time): 68 | """ pick up the latest backup file starting before time recovery_target_time""" 69 | 70 | match_timestamp = match = None 71 | for backup in backup_list: 72 | last_modified = parse(backup['last_modified']) 73 | if last_modified < recovery_target_time: 74 | if match is None or last_modified > match_timestamp: 75 | match = backup 76 | match_timestamp = last_modified 77 | if match is not None: 78 | return match.get('name', match['backup_name']) 79 | 80 | 81 | def list_backups(env): 82 | backup_list_cmd = build_wale_command('backup-list') 83 | output = subprocess.check_output(backup_list_cmd, env=env) 84 | reader = csv.DictReader(fix_output(output), dialect='excel-tab') 85 | return list(reader) 86 | 87 | 88 | def get_clone_envdir(): 89 | from spilo_commons import get_patroni_config 90 | 91 | config = get_patroni_config() 92 | restore_command = shlex.split(config['bootstrap']['clone_with_wale']['recovery_conf']['restore_command']) 93 | if len(restore_command) > 4 and restore_command[0] == 'envdir': 94 | return restore_command[1] 95 | raise Exception('Failed to find clone envdir') 96 | 97 | 98 | def get_possible_versions(): 99 | from spilo_commons import LIB_DIR, get_binary_version, get_bin_dir, get_patroni_config 100 | 101 | config = get_patroni_config() 102 | 103 | max_version = float(get_binary_version(config.get('postgresql', {}).get('bin_dir'))) 104 | 105 | versions = {} 106 | 107 | for d in os.listdir(LIB_DIR): 
108 | try: 109 | ver = get_binary_version(get_bin_dir(d)) 110 | fver = float(ver) 111 | if fver <= max_version: 112 | versions[fver] = ver 113 | except Exception: 114 | pass 115 | 116 | # return possible versions in reversed order, i.e. 12, 11, 10, 9.6, and so on 117 | return [ver for _, ver in sorted(versions.items(), reverse=True)] 118 | 119 | 120 | def get_wale_environments(env): 121 | use_walg = env.get('USE_WALG_RESTORE') == 'true' 122 | prefix = 'WALG_' if use_walg else 'WALE_' 123 | # len('WALE__PREFIX') = 12 124 | names = [name for name in env.keys() if name.endswith('_PREFIX') and name.startswith(prefix) and len(name) > 12] 125 | if len(names) != 1: 126 | raise Exception('Found find {0} {1}*_PREFIX environment variables, expected 1' 127 | .format(len(names), prefix)) 128 | 129 | name = names[0] 130 | orig_value = env[name] 131 | value = orig_value.rstrip('/') 132 | 133 | if '/spilo/' in value and value.endswith('/wal'): # path crafted in the configure_spilo.py? 134 | # Try all versions descending if we don't know the version of the source cluster 135 | for version in get_possible_versions(): 136 | yield name, '{0}/{1}/'.format(value, version) 137 | 138 | # Last, try the original value 139 | yield name, orig_value 140 | 141 | 142 | def find_backup(recovery_target_time, env): 143 | old_value = None 144 | for name, value in get_wale_environments(env): 145 | logger.info('Trying %s for clone', value) 146 | if not old_value: 147 | old_value = env[name] 148 | env[name] = value 149 | backup_list = list_backups(env) 150 | if backup_list: 151 | if recovery_target_time: 152 | backup = choose_backup(backup_list, recovery_target_time) 153 | if backup: 154 | return backup, (name if value != old_value else None) 155 | else: # We assume that the LATEST backup will be for the biggest postgres version! 156 | return 'LATEST', (name if value != old_value else None) 157 | if recovery_target_time: 158 | raise Exception('Could not find any backups prior to the point in time {0}'.format(recovery_target_time)) 159 | raise Exception('Could not find any backups') 160 | 161 | 162 | def run_clone_from_s3(options): 163 | env = os.environ.copy() 164 | 165 | backup_name, update_envdir = find_backup(options.recovery_target_time, env) 166 | 167 | backup_fetch_cmd = build_wale_command('backup-fetch', options.datadir, backup_name) 168 | logger.info("cloning cluster %s using %s", options.name, ' '.join(backup_fetch_cmd)) 169 | if not options.dry_run: 170 | ret = subprocess.call(backup_fetch_cmd, env=env) 171 | if ret != 0: 172 | raise Exception("wal-e backup-fetch exited with exit code {0}".format(ret)) 173 | 174 | if update_envdir: # We need to update file in the clone envdir or restore_command will fail! 
175 | envdir = get_clone_envdir() 176 | with open(os.path.join(envdir, update_envdir), 'w') as f: 177 | f.write(env[update_envdir]) 178 | return 0 179 | 180 | 181 | def main(): 182 | options = read_configuration() 183 | try: 184 | run_clone_from_s3(options) 185 | except Exception: 186 | logger.exception("Clone failed") 187 | return 1 188 | return call_maybe_pg_upgrade() 189 | 190 | 191 | if __name__ == '__main__': 192 | sys.exit(main()) 193 | -------------------------------------------------------------------------------- /postgres-appliance/bootstrap/maybe_pg_upgrade.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import glob 3 | import logging 4 | import os 5 | import subprocess 6 | import sys 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | def tail_postgres_logs(): 12 | logdir = os.environ.get('PGLOG', '/home/postgres/pgdata/pgroot/pg_log') 13 | csv_files = glob.glob(os.path.join(logdir, '*.csv')) 14 | # Find the last modified CSV file 15 | logfile = max(csv_files, key=os.path.getmtime) 16 | return subprocess.check_output(['tail', '-n5', logfile]).decode('utf-8') 17 | 18 | 19 | def wait_end_of_recovery(postgresql): 20 | from patroni.utils import polling_loop 21 | 22 | for _ in polling_loop(postgresql.config.get('pg_ctl_timeout'), 10): 23 | postgresql.reset_cluster_info_state(None) 24 | if postgresql.is_primary(): 25 | break 26 | logger.info('waiting for end of recovery of the old cluster') 27 | 28 | 29 | def perform_pitr(postgresql, cluster_version, bin_version, config): 30 | logger.info('Trying to perform point-in-time recovery') 31 | 32 | config[config['method']]['command'] = 'true' 33 | try: 34 | if bin_version == cluster_version: 35 | if not postgresql.bootstrap.bootstrap(config): 36 | raise Exception('Point-in-time recovery failed') 37 | elif not postgresql.start_old_cluster(config, cluster_version): 38 | raise Exception('Failed to start the cluster with old postgres') 39 | return wait_end_of_recovery(postgresql) 40 | except Exception: 41 | logs = tail_postgres_logs() 42 | # Spilo has no other locales except en_EN.UTF-8, therefore we are safe here. 43 | if int(cluster_version) >= 13 and 'recovery ended before configured recovery target was reached' in logs: 44 | # Starting from version 13 Postgres stopped promoting when recovery target wasn't reached. 45 | # In order to improve the user experience we reset all possible recovery targets and retry. 
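            # Illustrative only (assumed shape, not a real configuration): a bootstrap method section like
            #   {'recovery_conf': {'restore_command': 'envdir "$WALE_ENV_DIR" wal-e wal-fetch "%f" "%p"',
            #                      'recovery_target_time': '2024-01-01 00:00:00+00'}}
            # ends up with recovery_target_name/time/xid/lsn all reset to '', so the retried bootstrap
            # simply recovers to the end of the available WAL and promotes.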
46 | recovery_conf = config[config['method']].get('recovery_conf', {}) 47 | if recovery_conf: 48 | for target in ('name', 'time', 'xid', 'lsn'): 49 | recovery_conf['recovery_target_' + target] = '' 50 | logger.info('Retrying point-in-time recovery without target') 51 | if not postgresql.bootstrap.bootstrap(config): 52 | raise Exception('Point-in-time recovery failed.\nLOGS:\n--\n' + tail_postgres_logs()) 53 | return wait_end_of_recovery(postgresql) 54 | else: 55 | raise Exception('Point-in-time recovery failed.\nLOGS:\n--\n' + logs) 56 | 57 | 58 | def main(): 59 | from pg_upgrade import PostgresqlUpgrade 60 | from patroni.config import Config 61 | from spilo_commons import get_binary_version 62 | 63 | config = Config(sys.argv[1]) 64 | upgrade = PostgresqlUpgrade(config) 65 | 66 | bin_version = get_binary_version(upgrade.pgcommand('')) 67 | cluster_version = upgrade.get_cluster_version() 68 | 69 | logger.info('Cluster version: %s, bin version: %s', cluster_version, bin_version) 70 | assert float(cluster_version) <= float(bin_version) 71 | 72 | perform_pitr(upgrade, cluster_version, bin_version, config['bootstrap']) 73 | 74 | if cluster_version == bin_version: 75 | return 0 76 | 77 | if not upgrade.bootstrap.call_post_bootstrap(config['bootstrap']): 78 | upgrade.stop(block_callbacks=True, checkpoint=False) 79 | raise Exception('Failed to run bootstrap.post_init') 80 | 81 | if not upgrade.prepare_new_pgdata(bin_version): 82 | raise Exception('initdb failed') 83 | 84 | try: 85 | upgrade.drop_possibly_incompatible_objects() 86 | except Exception: 87 | upgrade.stop(block_callbacks=True, checkpoint=False) 88 | raise 89 | 90 | logger.info('Doing a clean shutdown of the cluster before pg_upgrade') 91 | if not upgrade.stop(block_callbacks=True, checkpoint=False): 92 | raise Exception('Failed to stop the cluster with old postgres') 93 | 94 | if not upgrade.do_upgrade(): 95 | raise Exception('Failed to upgrade cluster from {0} to {1}'.format(cluster_version, bin_version)) 96 | 97 | logger.info('Starting the cluster with new postgres after upgrade') 98 | if not upgrade.start(): 99 | raise Exception('Failed to start the cluster with new postgres') 100 | 101 | try: 102 | upgrade.update_extensions() 103 | except Exception as e: 104 | logger.error('Failed to update extensions: %r', e) 105 | 106 | upgrade.analyze() 107 | 108 | 109 | def call_maybe_pg_upgrade(): 110 | import inspect 111 | import os 112 | import subprocess 113 | 114 | from spilo_commons import PATRONI_CONFIG_FILE 115 | 116 | my_name = os.path.abspath(inspect.getfile(inspect.currentframe())) 117 | ret = subprocess.call([sys.executable, my_name, PATRONI_CONFIG_FILE]) 118 | if ret != 0: 119 | logger.error('%s script failed', my_name) 120 | return ret 121 | 122 | 123 | if __name__ == '__main__': 124 | logging.basicConfig(format='%(asctime)s maybe_pg_upgrade %(levelname)s: %(message)s', level='INFO') 125 | main() 126 | -------------------------------------------------------------------------------- /postgres-appliance/build_scripts/base.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## ------------------------------------------- 4 | ## Install PostgreSQL, extensions and contribs 5 | ## ------------------------------------------- 6 | 7 | export DEBIAN_FRONTEND=noninteractive 8 | MAKEFLAGS="-j $(grep -c ^processor /proc/cpuinfo)" 9 | export MAKEFLAGS 10 | 11 | set -ex 12 | sed -i 's/^#\s*\(deb.*universe\)$/\1/g' /etc/apt/sources.list 13 | 14 | apt-get update 15 | 16 | 
BUILD_PACKAGES=(devscripts equivs build-essential fakeroot debhelper git gcc libc6-dev make cmake libevent-dev libbrotli-dev libssl-dev libkrb5-dev) 17 | if [ "$DEMO" = "true" ]; then 18 | export DEB_PG_SUPPORTED_VERSIONS="$PGVERSION" 19 | WITH_PERL=false 20 | rm -f ./*.deb 21 | apt-get install -y "${BUILD_PACKAGES[@]}" 22 | else 23 | BUILD_PACKAGES+=(zlib1g-dev 24 | libprotobuf-c-dev 25 | libpam0g-dev 26 | libcurl4-openssl-dev 27 | libicu-dev 28 | libc-ares-dev 29 | pandoc 30 | pkg-config) 31 | apt-get install -y "${BUILD_PACKAGES[@]}" libcurl4 32 | 33 | # install pam_oauth2.so 34 | git clone -b "$PAM_OAUTH2" --recurse-submodules https://github.com/zalando-pg/pam-oauth2.git 35 | make -C pam-oauth2 install 36 | 37 | # prepare 3rd sources 38 | git clone -b "$PLPROFILER" https://github.com/bigsql/plprofiler.git 39 | curl -sL "https://github.com/zalando-pg/pg_mon/archive/$PG_MON_COMMIT.tar.gz" | tar xz 40 | 41 | for p in python3-keyring python3-docutils ieee-data; do 42 | version=$(apt-cache show $p | sed -n 's/^Version: //p' | sort -rV | head -n 1) 43 | printf "Section: misc\nPriority: optional\nStandards-Version: 3.9.8\nPackage: %s\nVersion: %s\nDescription: %s" "$p" "$version" "$p" > "$p" 44 | equivs-build "$p" 45 | done 46 | fi 47 | 48 | if [ "$WITH_PERL" != "true" ]; then 49 | version=$(apt-cache show perl | sed -n 's/^Version: //p' | sort -rV | head -n 1) 50 | printf "Priority: standard\nStandards-Version: 3.9.8\nPackage: perl\nMulti-Arch: allowed\nReplaces: perl-base, perl-modules\nVersion: %s\nDescription: perl" "$version" > perl 51 | equivs-build perl 52 | fi 53 | 54 | curl -sL "https://github.com/zalando-pg/bg_mon/archive/$BG_MON_COMMIT.tar.gz" | tar xz 55 | curl -sL "https://github.com/zalando-pg/pg_auth_mon/archive/$PG_AUTH_MON_COMMIT.tar.gz" | tar xz 56 | curl -sL "https://github.com/cybertec-postgresql/pg_permissions/archive/$PG_PERMISSIONS_COMMIT.tar.gz" | tar xz 57 | curl -sL "https://github.com/zubkov-andrei/pg_profile/archive/$PG_PROFILE.tar.gz" | tar xz 58 | git clone -b "$SET_USER" https://github.com/pgaudit/set_user.git 59 | 60 | apt-get install -y \ 61 | postgresql-common \ 62 | libevent-2.1 \ 63 | libevent-pthreads-2.1 \ 64 | brotli \ 65 | libbrotli1 \ 66 | python3.10 \ 67 | python3-psycopg2 68 | 69 | # forbid creation of a main cluster when package is installed 70 | sed -ri 's/#(create_main_cluster) .*$/\1 = false/' /etc/postgresql-common/createcluster.conf 71 | 72 | for version in $DEB_PG_SUPPORTED_VERSIONS; do 73 | sed -i "s/ main.*$/ main $version/g" /etc/apt/sources.list.d/pgdg.list 74 | apt-get update 75 | 76 | if [ "$DEMO" != "true" ]; then 77 | EXTRAS=("postgresql-pltcl-${version}" 78 | "postgresql-${version}-dirtyread" 79 | "postgresql-${version}-extra-window-functions" 80 | "postgresql-${version}-first-last-agg" 81 | "postgresql-${version}-hll" 82 | "postgresql-${version}-hypopg" 83 | "postgresql-${version}-partman" 84 | "postgresql-${version}-plproxy" 85 | "postgresql-${version}-pgaudit" 86 | "postgresql-${version}-pldebugger" 87 | "postgresql-${version}-pglogical" 88 | "postgresql-${version}-pglogical-ticker" 89 | "postgresql-${version}-plpgsql-check" 90 | "postgresql-${version}-pg-checksums" 91 | "postgresql-${version}-pgl-ddl-deploy" 92 | "postgresql-${version}-pgq-node" 93 | "postgresql-${version}-postgis-${POSTGIS_VERSION%.*}" 94 | "postgresql-${version}-postgis-${POSTGIS_VERSION%.*}-scripts" 95 | "postgresql-${version}-repack" 96 | "postgresql-${version}-wal2json" 97 | "postgresql-${version}-decoderbufs" 98 | "postgresql-${version}-pllua" 99 | 
"postgresql-${version}-pgvector") 100 | 101 | if [ "$WITH_PERL" = "true" ]; then 102 | EXTRAS+=("postgresql-plperl-${version}") 103 | fi 104 | 105 | fi 106 | 107 | if [ "${TIMESCALEDB_APACHE_ONLY}" = "true" ]; then 108 | EXTRAS+=("timescaledb-2-oss-postgresql-${version}") 109 | else 110 | EXTRAS+=("timescaledb-2-postgresql-${version}") 111 | fi 112 | 113 | # Install PostgreSQL binaries, contrib, plproxy and multiple pl's 114 | apt-get install --allow-downgrades -y \ 115 | "postgresql-${version}-cron" \ 116 | "postgresql-contrib-${version}" \ 117 | "postgresql-${version}-pgextwlist" \ 118 | "postgresql-plpython3-${version}" \ 119 | "postgresql-server-dev-${version}" \ 120 | "postgresql-${version}-pgq3" \ 121 | "postgresql-${version}-pg-stat-kcache" \ 122 | "${EXTRAS[@]}" 123 | 124 | # Clean up timescaledb versions except the last 5 minor versions 125 | exclude_patterns=() 126 | versions=$(find "/usr/lib/postgresql/$version/lib/" -name 'timescaledb-2.*.so' | sed -rn 's/.*timescaledb-([1-9]+\.[0-9]+\.[0-9]+)\.so$/\1/p' | sort -rV) 127 | latest_minor_versions=$(echo "$versions" | awk -F. '{print $1"."$2}' | uniq | head -n 5) 128 | for minor in $latest_minor_versions; do 129 | for full_version in $(echo "$versions" | grep "^$minor"); do 130 | exclude_patterns+=(! -name timescaledb-"${full_version}".so) 131 | exclude_patterns+=(! -name timescaledb-tsl-"${full_version}".so) 132 | done 133 | done 134 | find "/usr/lib/postgresql/$version/lib/" \( -name 'timescaledb-2.*.so' -o -name 'timescaledb-tsl-2.*.so' \) "${exclude_patterns[@]}" -delete 135 | 136 | # Install 3rd party stuff 137 | 138 | if [ "${TIMESCALEDB_APACHE_ONLY}" != "true" ] && [ "${TIMESCALEDB_TOOLKIT}" = "true" ]; then 139 | apt-get update 140 | if [ "$(apt-cache search --names-only "^timescaledb-toolkit-postgresql-${version}$" | wc -l)" -eq 1 ]; then 141 | apt-get install "timescaledb-toolkit-postgresql-$version" 142 | else 143 | echo "Skipping timescaledb-toolkit-postgresql-$version as it's not found in the repository" 144 | fi 145 | fi 146 | 147 | EXTRA_EXTENSIONS=() 148 | if [ "$DEMO" != "true" ]; then 149 | EXTRA_EXTENSIONS+=("plprofiler" "pg_mon-${PG_MON_COMMIT}") 150 | fi 151 | 152 | for n in bg_mon-${BG_MON_COMMIT} \ 153 | pg_auth_mon-${PG_AUTH_MON_COMMIT} \ 154 | set_user \ 155 | pg_permissions-${PG_PERMISSIONS_COMMIT} \ 156 | pg_profile-${PG_PROFILE} \ 157 | "${EXTRA_EXTENSIONS[@]}"; do 158 | make -C "$n" USE_PGXS=1 clean install-strip 159 | done 160 | done 161 | 162 | apt-get install -y skytools3-ticker pgbouncer 163 | 164 | sed -i "s/ main.*$/ main/g" /etc/apt/sources.list.d/pgdg.list 165 | apt-get update 166 | apt-get install -y postgresql postgresql-server-dev-all postgresql-all libpq-dev 167 | for version in $DEB_PG_SUPPORTED_VERSIONS; do 168 | apt-get install -y "postgresql-server-dev-${version}" 169 | done 170 | 171 | if [ "$DEMO" != "true" ]; then 172 | for version in $DEB_PG_SUPPORTED_VERSIONS; do 173 | # create postgis symlinks to make it possible to perform update 174 | ln -s "postgis-${POSTGIS_VERSION%.*}.so" "/usr/lib/postgresql/${version}/lib/postgis-2.5.so" 175 | done 176 | fi 177 | 178 | # make it possible for cron to work without root 179 | gcc -s -shared -fPIC -o /usr/local/lib/cron_unprivileged.so cron_unprivileged.c 180 | 181 | apt-get purge -y "${BUILD_PACKAGES[@]}" 182 | apt-get autoremove -y 183 | 184 | if [ "$WITH_PERL" != "true" ] || [ "$DEMO" != "true" ]; then 185 | dpkg -i ./*.deb || apt-get -y -f install 186 | fi 187 | 188 | # Remove unnecessary packages 189 | apt-get purge -y \ 190 | libdpkg-perl 
\ 191 | libperl5.* \ 192 | perl-modules-5.* \ 193 | postgresql \ 194 | postgresql-all \ 195 | postgresql-server-dev-* \ 196 | libpq-dev=* \ 197 | libmagic1 \ 198 | bsdmainutils 199 | apt-get autoremove -y 200 | apt-get clean 201 | dpkg -l | grep '^rc' | awk '{print $2}' | xargs apt-get purge -y 202 | 203 | # Try to minimize size by creating symlinks instead of duplicate files 204 | if [ "$DEMO" != "true" ]; then 205 | cd "/usr/lib/postgresql/$PGVERSION/bin" 206 | for u in clusterdb \ 207 | pg_archivecleanup \ 208 | pg_basebackup \ 209 | pg_isready \ 210 | pg_recvlogical \ 211 | pg_test_fsync \ 212 | pg_test_timing \ 213 | pgbench \ 214 | reindexdb \ 215 | vacuumlo *.py; do 216 | for v in /usr/lib/postgresql/*; do 217 | if [ "$v" != "/usr/lib/postgresql/$PGVERSION" ] && [ -f "$v/bin/$u" ]; then 218 | rm "$v/bin/$u" 219 | ln -s "../../$PGVERSION/bin/$u" "$v/bin/$u" 220 | fi 221 | done 222 | done 223 | 224 | set +x 225 | 226 | for v1 in $(find /usr/share/postgresql -type d -mindepth 1 -maxdepth 1 | sort -Vr); do 227 | # relink files with the same content 228 | cd "$v1/extension" 229 | while IFS= read -r -d '' orig 230 | do 231 | for f in "${orig%.sql}"--*.sql; do 232 | if [ ! -L "$f" ] && diff "$orig" "$f" > /dev/null; then 233 | echo "creating symlink $f -> $orig" 234 | rm "$f" && ln -s "$orig" "$f" 235 | fi 236 | done 237 | done < <(find . -type f -maxdepth 1 -name '*.sql' -not -name '*--*') 238 | 239 | for e in pgq pgq_node plproxy address_standardizer address_standardizer_data_us; do 240 | orig=$(basename "$(find . -maxdepth 1 -type f -name "$e--*--*.sql" | head -n1)") 241 | if [ "x$orig" != "x" ]; then 242 | for f in "$e"--*--*.sql; do 243 | if [ "$f" != "$orig" ] && [ ! -L "$f" ] && diff "$f" "$orig" > /dev/null; then 244 | echo "creating symlink $f -> $orig" 245 | rm "$f" && ln -s "$orig" "$f" 246 | fi 247 | done 248 | fi 249 | done 250 | 251 | # relink files with the same name and content across different major versions 252 | started=0 253 | for v2 in $(find /usr/share/postgresql -type d -mindepth 1 -maxdepth 1 | sort -Vr); do 254 | if [ "$v1" = "$v2" ]; then 255 | started=1 256 | elif [ $started = 1 ]; then 257 | for d1 in extension contrib contrib/postgis-$POSTGIS_VERSION; do 258 | cd "$v1/$d1" 259 | d2="$d1" 260 | d1="../../${v1##*/}/$d1" 261 | if [ "${d2%-*}" = "contrib/postgis" ]; then 262 | d1="../$d1" 263 | fi 264 | d2="$v2/$d2" 265 | for f in *.html *.sql *.control *.pl; do 266 | if [ -f "$d2/$f" ] && [ ! -L "$d2/$f" ] && diff "$d2/$f" "$f" > /dev/null; then 267 | echo "creating symlink $d2/$f -> $d1/$f" 268 | rm "$d2/$f" && ln -s "$d1/$f" "$d2/$f" 269 | fi 270 | done 271 | done 272 | fi 273 | done 274 | done 275 | set -x 276 | fi 277 | 278 | # Clean up 279 | rm -rf /var/lib/apt/lists/* \ 280 | /var/cache/debconf/* \ 281 | /builddeps \ 282 | /usr/share/doc \ 283 | /usr/share/man \ 284 | /usr/share/info \ 285 | /usr/share/locale/?? \ 286 | /usr/share/locale/??_?? 
\ 287 | /usr/share/postgresql/*/man \ 288 | /etc/pgbouncer/* \ 289 | /usr/lib/postgresql/*/bin/createdb \ 290 | /usr/lib/postgresql/*/bin/createlang \ 291 | /usr/lib/postgresql/*/bin/createuser \ 292 | /usr/lib/postgresql/*/bin/dropdb \ 293 | /usr/lib/postgresql/*/bin/droplang \ 294 | /usr/lib/postgresql/*/bin/dropuser \ 295 | /usr/lib/postgresql/*/bin/pg_standby \ 296 | /usr/lib/postgresql/*/bin/pltcl_* 297 | find /var/log -type f -exec truncate --size 0 {} \; 298 | -------------------------------------------------------------------------------- /postgres-appliance/build_scripts/compress_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | apt-get update 6 | apt-get install -y busybox xz-utils 7 | apt-get clean 8 | 9 | rm -rf /var/lib/apt/lists/* /var/cache/debconf/* /usr/share/doc /usr/share/man /etc/rc?.d /etc/systemd 10 | ln -snf busybox /bin/sh 11 | 12 | files="/bin/sh" 13 | arch=$(uname -m) 14 | darch=$(uname -m | sed 's/_/-/') 15 | 16 | IFS=" " read -r -a libs <<< "$(ldd $files | awk '{print $3;}' | grep '^/' | sort -u)" 17 | libs+=(/lib/ld-linux-"$darch".so.* \ 18 | /lib/"$arch"-linux-gnu/ld-linux-"$darch".so.* \ 19 | /lib/"$arch"-linux-gnu/libnsl.so.* \ 20 | /lib/"$arch"-linux-gnu/libnss_compat.so.*) 21 | 22 | (echo /var/run /var/spool "$files" "${libs[@]}" | tr ' ' '\n' && realpath "$files" "${libs[@]}") | sort -u | sed 's/^\///' > /exclude 23 | 24 | find /etc/alternatives -xtype l -delete 25 | save_dirs=(usr lib var bin sbin etc/ssl etc/init.d etc/alternatives etc/apt) 26 | XZ_OPT=-e9v tar -X /exclude -cpJf a.tar.xz "${save_dirs[@]}" 27 | 28 | rm -fr /usr/local/lib/python* 29 | 30 | /bin/busybox sh -c "(find ${save_dirs[*]} -not -type d && cat /exclude /exclude && echo exclude) | sort | uniq -u | xargs /bin/busybox rm" 31 | /bin/busybox --install -s 32 | /bin/busybox sh -c "find ${save_dirs[*]} -type d -depth -exec rmdir -p {}; 2> /dev/null" 33 | -------------------------------------------------------------------------------- /postgres-appliance/build_scripts/dependencies.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## ------------------ 4 | ## Dependencies magic 5 | ## ------------------ 6 | 7 | set -ex 8 | 9 | # should exist when $DEMO=TRUE to avoid 'COPY --from=dependencies-builder /builddeps/wal-g ...' failure 10 | 11 | if [ "$DEMO" = "true" ]; then 12 | mkdir /builddeps/wal-g 13 | exit 0 14 | fi 15 | 16 | export DEBIAN_FRONTEND=noninteractive 17 | MAKEFLAGS="-j $(grep -c ^processor /proc/cpuinfo)" 18 | export MAKEFLAGS 19 | ARCH="$(dpkg --print-architecture)" 20 | 21 | # We want to remove all libgdal30 debs except one that is for current architecture. 
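# The pattern is piped into a separate bash invocation so that extglob is already enabled when the
# !(...) pattern is parsed. Roughly equivalent to the following (illustrative, assuming ARCH=amd64):
#   shopt -s extglob
#   rm /builddeps/!(*_amd64.deb)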
22 | printf "shopt -s extglob\nrm /builddeps/!(*_%s.deb)" "$ARCH" | bash -s 23 | 24 | echo -e 'APT::Install-Recommends "0";\nAPT::Install-Suggests "0";' > /etc/apt/apt.conf.d/01norecommend 25 | 26 | apt-get update 27 | apt-get install -y curl ca-certificates 28 | 29 | mkdir /builddeps/wal-g 30 | 31 | if [ "$ARCH" = "amd64" ]; then 32 | PKG_NAME='wal-g-pg-ubuntu-20.04-amd64' 33 | else 34 | PKG_NAME='wal-g-pg-ubuntu20.04-aarch64' 35 | fi 36 | 37 | curl -sL "https://github.com/wal-g/wal-g/releases/download/$WALG_VERSION/$PKG_NAME.tar.gz" \ 38 | | tar -C /builddeps/wal-g -xz 39 | mv "/builddeps/wal-g/$PKG_NAME" /builddeps/wal-g/wal-g 40 | -------------------------------------------------------------------------------- /postgres-appliance/build_scripts/locales.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## ---------------- 4 | ## Locales routines 5 | ## ---------------- 6 | 7 | set -ex 8 | 9 | apt-get update 10 | apt-get -y upgrade 11 | apt-get install -y locales 12 | 13 | # Cleanup all locales but en_US.UTF-8 and optionally specified in ADDITIONAL_LOCALES arg 14 | find /usr/share/i18n/charmaps/ -type f ! -name UTF-8.gz -delete 15 | 16 | # Prepare find expression for locales 17 | LOCALE_FIND_EXPR=(-type f) 18 | for loc in en_US en_GB $ADDITIONAL_LOCALES "i18n*" iso14651_t1 iso14651_t1_common "translit_*"; do 19 | LOCALE_FIND_EXPR+=(! -name "$loc") 20 | done 21 | find /usr/share/i18n/locales/ "${LOCALE_FIND_EXPR[@]}" -delete 22 | 23 | # Make sure we have the en_US.UTF-8 and all additional locales available 24 | truncate --size 0 /usr/share/i18n/SUPPORTED 25 | for loc in en_US $ADDITIONAL_LOCALES; do 26 | echo "$loc.UTF-8 UTF-8" >> /usr/share/i18n/SUPPORTED 27 | localedef -i "$loc" -c -f UTF-8 -A /usr/share/locale/locale.alias "$loc.UTF-8" 28 | done 29 | -------------------------------------------------------------------------------- /postgres-appliance/build_scripts/patroni_wale.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## ------------------------- 4 | ## Install patroni and wal-e 5 | ## ------------------------- 6 | 7 | export DEBIAN_FRONTEND=noninteractive 8 | 9 | set -ex 10 | 11 | BUILD_PACKAGES=(python3-pip python3-wheel python3-dev git patchutils binutils gcc) 12 | 13 | apt-get update 14 | 15 | # install most of the patroni dependencies from ubuntu packages 16 | apt-cache depends patroni \ 17 | | sed -n -e 's/.* Depends: \(python3-.\+\)$/\1/p' \ 18 | | grep -Ev '^python3-(sphinx|etcd|consul|kazoo|kubernetes)' \ 19 | | xargs apt-get install -y "${BUILD_PACKAGES[@]}" python3-pystache python3-requests 20 | 21 | pip3 install setuptools 22 | 23 | if [ "$DEMO" != "true" ]; then 24 | EXTRAS=",etcd,consul,zookeeper,aws" 25 | apt-get install -y \ 26 | python3-etcd \ 27 | python3-consul \ 28 | python3-kazoo \ 29 | python3-boto \ 30 | python3-boto3 \ 31 | python3-botocore \ 32 | python3-cachetools \ 33 | python3-cffi \ 34 | python3-gevent \ 35 | python3-pyasn1-modules \ 36 | python3-rsa \ 37 | python3-s3transfer \ 38 | python3-swiftclient 39 | 40 | find /usr/share/python-babel-localedata/locale-data -type f ! 
-name 'en_US*.dat' -delete 41 | 42 | pip3 install filechunkio protobuf \ 43 | 'git+https://github.com/zalando-pg/wal-e.git#egg=wal-e[aws,google,swift]' \ 44 | 'git+https://github.com/zalando/pg_view.git@master#egg=pg-view' 45 | 46 | # https://github.com/wal-e/wal-e/issues/318 47 | sed -i 's/^\( for i in range(0,\) num_retries):.*/\1 100):/g' /usr/lib/python3/dist-packages/boto/utils.py 48 | else 49 | EXTRAS="" 50 | fi 51 | 52 | pip3 install "patroni[kubernetes$EXTRAS]==$PATRONIVERSION" 53 | 54 | for d in /usr/local/lib/python3.10 /usr/lib/python3; do 55 | cd $d/dist-packages 56 | find . -type d -name tests -print0 | xargs -0 rm -fr 57 | find . -type f -name 'test_*.py*' -delete 58 | done 59 | find . -type f -name 'unittest_*.py*' -delete 60 | find . -type f -name '*_test.py' -delete 61 | find . -type f -name '*_test.cpython*.pyc' -delete 62 | 63 | # Clean up 64 | apt-get purge -y "${BUILD_PACKAGES[@]}" 65 | apt-get autoremove -y 66 | apt-get clean 67 | rm -rf /var/lib/apt/lists/* \ 68 | /var/cache/debconf/* \ 69 | /root/.cache \ 70 | /usr/share/doc \ 71 | /usr/share/man \ 72 | /usr/share/locale/?? \ 73 | /usr/share/locale/??_?? \ 74 | /usr/share/info 75 | find /var/log -type f -exec truncate --size 0 {} \; 76 | -------------------------------------------------------------------------------- /postgres-appliance/build_scripts/post_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -ex 4 | 5 | sed -i "s|/var/lib/postgresql.*|$PGHOME:/bin/bash|" /etc/passwd 6 | 7 | chown -R postgres:postgres "$PGHOME" "$RW_DIR" 8 | 9 | rm -fr /var/spool/cron /var/tmp 10 | mkdir -p /var/spool 11 | ln -s "$RW_DIR/cron" /var/spool/cron 12 | ln -s "$RW_DIR/tmp" /var/tmp 13 | 14 | for d in /etc/service/*; do 15 | chmod 755 "$d"/* 16 | ln -s /run/supervise/"$(basename "$d")" "$d/supervise" 17 | done 18 | 19 | ln -snf "$RW_DIR/service" /etc/service 20 | ln -s "$RW_DIR/pam.d-postgresql" /etc/pam.d/postgresql 21 | ln -s "$RW_DIR/postgres.yml" "$PGHOME/postgres.yml" 22 | ln -s "$RW_DIR/.bash_history" /root/.bash_history 23 | ln -s "$RW_DIR/postgresql/.bash_history" "$PGHOME/.bash_history" 24 | ln -s "$RW_DIR/postgresql/.psql_history" "$PGHOME/.psql_history" 25 | ln -s "$RW_DIR/etc" "$PGHOME/etc" 26 | 27 | for d in "$PGHOME" /root; do 28 | d="$d/.config/patroni" 29 | mkdir -p "$d" 30 | ln -s "$PGHOME/postgres.yml" "$d/patronictl.yaml" 31 | done 32 | 33 | sed -i 's/set compatible/set nocompatible/' /etc/vim/vimrc.tiny 34 | 35 | echo "PATH=\"$PATH\"" > /etc/environment 36 | 37 | for e in TERM=linux LC_ALL=C.UTF-8 LANG=C.UTF-8 EDITOR=editor; 38 | do echo "export $e" >> /etc/bash.bashrc 39 | done 40 | ln -s /etc/skel/.bashrc "$PGHOME/.bashrc" 41 | echo "source /etc/motd" >> /root/.bashrc 42 | 43 | # Allow users in the root group to access the following files and dirs 44 | if [ "$COMPRESS" != "true" ]; then 45 | chmod 664 /etc/passwd 46 | chmod o+r /etc/shadow 47 | chgrp -R 0 "$PGHOME" "$RW_DIR" 48 | chmod -R g=u "$PGHOME" "$RW_DIR" 49 | usermod -a -G root postgres 50 | fi 51 | -------------------------------------------------------------------------------- /postgres-appliance/build_scripts/prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export DEBIAN_FRONTEND=noninteractive 4 | 5 | echo -e 'APT::Install-Recommends "0";\nAPT::Install-Suggests "0";' > /etc/apt/apt.conf.d/01norecommend 6 | 7 | apt-get update 8 | apt-get -y upgrade 9 | apt-get install -y curl ca-certificates less locales jq 
vim-tiny gnupg1 cron runit dumb-init libcap2-bin rsync sysstat gpg 10 | 11 | ln -s chpst /usr/bin/envdir 12 | 13 | # Make it possible to use the following utilities without root (if container runs without "no-new-privileges:true") 14 | setcap 'cap_sys_nice+ep' /usr/bin/chrt 15 | setcap 'cap_sys_nice+ep' /usr/bin/renice 16 | 17 | # Disable unwanted cron jobs 18 | rm -fr /etc/cron.??* 19 | truncate --size 0 /etc/crontab 20 | 21 | if [ "$DEMO" != "true" ]; then 22 | # Required for wal-e 23 | apt-get install -y pv lzop 24 | # install etcdctl 25 | ETCDVERSION=3.3.27 26 | curl -L https://github.com/coreos/etcd/releases/download/v${ETCDVERSION}/etcd-v${ETCDVERSION}-linux-"$(dpkg --print-architecture)".tar.gz \ 27 | | tar xz -C /bin --strip=1 --wildcards --no-anchored --no-same-owner etcdctl etcd 28 | fi 29 | 30 | # Dirty hack for smooth migration of existing dbs 31 | bash /builddeps/locales.sh 32 | mv /usr/lib/locale/locale-archive /usr/lib/locale/locale-archive.22 33 | ln -s /run/locale-archive /usr/lib/locale/locale-archive 34 | ln -s /usr/lib/locale/locale-archive.22 /run/locale-archive 35 | 36 | # Add PGDG repositories 37 | DISTRIB_CODENAME=$(sed -n 's/DISTRIB_CODENAME=//p' /etc/lsb-release) 38 | for t in deb deb-src; do 39 | echo "$t http://apt.postgresql.org/pub/repos/apt/ ${DISTRIB_CODENAME}-pgdg main" >> /etc/apt/sources.list.d/pgdg.list 40 | done 41 | curl -s -o - https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor > /etc/apt/trusted.gpg.d/apt.postgresql.org.gpg 42 | 43 | # add TimescaleDB repository 44 | echo "deb [signed-by=/etc/apt/keyrings/timescale_timescaledb-archive-keyring.gpg] https://packagecloud.io/timescale/timescaledb/ubuntu/ ${DISTRIB_CODENAME} main" | tee /etc/apt/sources.list.d/timescaledb.list 45 | curl -fsSL https://packagecloud.io/timescale/timescaledb/gpgkey | gpg --dearmor | tee /etc/apt/keyrings/timescale_timescaledb-archive-keyring.gpg > /dev/null 46 | 47 | # Clean up 48 | apt-get purge -y libcap2-bin 49 | apt-get autoremove -y 50 | apt-get clean 51 | rm -rf /var/lib/apt/lists/* \ 52 | /var/cache/debconf/* \ 53 | /usr/share/doc \ 54 | /usr/share/man \ 55 | /usr/share/locale/?? \ 56 | /usr/share/locale/??_?? 
57 | find /var/log -type f -exec truncate --size 0 {} \; 58 | -------------------------------------------------------------------------------- /postgres-appliance/cron_unprivileged.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | int seteuid(uid_t euid) 4 | { 5 | return 0; 6 | } 7 | 8 | int initgroups(const char *user, gid_t group) 9 | { 10 | return 0; 11 | } 12 | 13 | -------------------------------------------------------------------------------- /postgres-appliance/dependencies/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | 3 | LABEL maintainer="Alexander Kukushkin " 4 | 5 | ENV SOURCES="gdal" 6 | ENV PACKAGES="libgdal30" 7 | 8 | RUN export DEBIAN_FRONTEND=noninteractive \ 9 | && echo 'APT::Install-Recommends "0";' > /etc/apt/apt.conf.d/01norecommend \ 10 | && echo 'APT::Install-Suggests "0";' >> /etc/apt/apt.conf.d/01norecommend \ 11 | && sed -i 's/^# deb-src/deb-src/' /etc/apt/sources.list \ 12 | && apt-get update \ 13 | && apt-get install -y devscripts equivs \ 14 | && mk-build-deps $SOURCES \ 15 | && dpkg -i *-build-deps*.deb || apt-get -y -f install 16 | 17 | ADD patches /builddir/patches 18 | ADD debs /debs 19 | 20 | RUN export DEBIAN_FRONTEND=noninteractive \ 21 | && set -ex \ 22 | && apt-get update \ 23 | && apt-get upgrade -y \ 24 | && need_rebuild=false \ 25 | && for pkg in $PACKAGES; do \ 26 | new_package=$(apt-cache show $pkg | awk -F/ '/Filename: / {print $NF}'| sort -rV | head -n 1) \ 27 | && if [ ! -f /debs/$new_package ]; then \ 28 | need_rebuild=true \ 29 | && break; \ 30 | fi; \ 31 | done \ 32 | && if [ "$need_rebuild" = "true" ]; then \ 33 | cd /builddir \ 34 | && apt-get source $SOURCES \ 35 | && export MAKEFLAGS="-j $(grep -c ^processor /proc/cpuinfo)" \ 36 | && for pkg in $SOURCES; do \ 37 | cd $(ls -d /builddir/$pkg-*) \ 38 | && patch -p0 < /builddir/patches/$pkg.patch \ 39 | && debuild -b -uc -us; \ 40 | done \ 41 | 42 | && rm -f /debs/* \ 43 | && for pkg in $PACKAGES; do \ 44 | cp /builddir/${pkg}_*_$(dpkg --print-architecture).deb /debs; \ 45 | done; \ 46 | fi 47 | -------------------------------------------------------------------------------- /postgres-appliance/dependencies/README: -------------------------------------------------------------------------------- 1 | Rebuild some packages without graphic libraries. 2 | -------------------------------------------------------------------------------- /postgres-appliance/dependencies/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if ! docker info &> /dev/null; then 4 | if podman info &> /dev/null; then 5 | alias docker=podman 6 | shopt -s expand_aliases 7 | else 8 | echo "docker/podman: command not found" 9 | exit 1 10 | fi 11 | fi 12 | 13 | set -ex 14 | 15 | cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" 16 | 17 | readonly IMGTAG=spilo:dependencies 18 | 19 | docker build -t $IMGTAG . 
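# The two commands below refresh the checked-in debs for the current build architecture: the existing
# ./debs files for this architecture are removed, and whatever the freshly built image produced under
# /debs is streamed out via tar and unpacked into ./debs, ready to be committed next to the debs for
# the other architecture.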
20 | 21 | rm -f debs/*_"$(docker run --rm $IMGTAG dpkg --print-architecture)".deb 22 | 23 | docker run --rm $IMGTAG tar -C /debs -c .| tar -C debs -x 24 | -------------------------------------------------------------------------------- /postgres-appliance/dependencies/debs/libgdal30_3.4.1+dfsg-1build4_amd64.deb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zalando/spilo/bb39d8109bc759e28d9b3fb1dec549ce82ba6a38/postgres-appliance/dependencies/debs/libgdal30_3.4.1+dfsg-1build4_amd64.deb -------------------------------------------------------------------------------- /postgres-appliance/dependencies/debs/libgdal30_3.4.1+dfsg-1build4_arm64.deb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zalando/spilo/bb39d8109bc759e28d9b3fb1dec549ce82ba6a38/postgres-appliance/dependencies/debs/libgdal30_3.4.1+dfsg-1build4_arm64.deb -------------------------------------------------------------------------------- /postgres-appliance/dependencies/patches/gdal.patch: -------------------------------------------------------------------------------- 1 | --- debian/rules.orig 2022-08-31 07:28:29.166137956 +0000 2 | +++ debian/rules 2022-08-31 07:28:31.622350139 +0000 3 | @@ -130,42 +130,47 @@ 4 | --with-ssse3=$(WITH_SSSE3) \ 5 | --with-avx=$(WITH_AVX) \ 6 | --with-bash-completion=yes \ 7 | - --with-armadillo=yes \ 8 | - --with-blosc=yes \ 9 | - --with-cfitsio=yes \ 10 | - --with-charls \ 11 | - --with-curl \ 12 | + --with-armadillo=no \ 13 | + --with-blosc=no \ 14 | + --with-cfitsio=no \ 15 | + --with-curl=no \ 16 | --with-libdeflate \ 17 | --with-ecw=no \ 18 | - --with-freexl=yes \ 19 | + --with-freexl=no \ 20 | --with-geos \ 21 | - --with-geotiff=yes \ 22 | + --with-geotiff=internal \ 23 | --with-grass=no \ 24 | - $(WITH_HDF5) \ 25 | - --with-heif \ 26 | + --without-heif \ 27 | --with-libjson-c=/usr \ 28 | - --with-libkml=yes \ 29 | + --with-libkml=no \ 30 | --with-liblzma=yes \ 31 | - --with-libtiff=yes \ 32 | + --with-libtiff=internal \ 33 | --with-lz4=yes \ 34 | --with-mrsid=no \ 35 | - --with-mysql \ 36 | - --with-netcdf \ 37 | - --with-odbc \ 38 | - --with-ogdi \ 39 | - --with-openjpeg=yes \ 40 | - --with-pcre2 \ 41 | - --with-pg \ 42 | - --with-poppler=yes \ 43 | - --with-proj=yes \ 44 | - --with-qhull=yes \ 45 | - --with-sosi=yes \ 46 | - --with-spatialite=/usr \ 47 | - --with-sqlite3 \ 48 | - --with-webp \ 49 | - --with-xerces \ 50 | + --without-mysql \ 51 | + --without-netcdf \ 52 | + --with-odbc=no \ 53 | + --without-ogdi \ 54 | + --without-charls \ 55 | + --without-jpeg12 \ 56 | + --with-jpeg=internal \ 57 | + --with-png=internal \ 58 | + --with-openjpeg=no \ 59 | + --with-gif=no \ 60 | + --with-hdf4=no \ 61 | + --with-hdf5=no \ 62 | + --with-pcre2=no \ 63 | + --without-pg \ 64 | + --with-poppler=no \ 65 | + --with-proj \ 66 | + --with-qhull=no \ 67 | + --with-sosi=no \ 68 | + --with-spatialite=no \ 69 | + --with-sqlite3=no \ 70 | + --with-webp=no \ 71 | + --with-xerces=no \ 72 | --with-zstd \ 73 | - --with-perl \ 74 | + --without-perl \ 75 | --with-python; \ 76 | mv GDALmake.opt GDALmake.opt-$$V; \ 77 | done 78 | -------------------------------------------------------------------------------- /postgres-appliance/launch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ -f /a.tar.xz ]; then 4 | echo "decompressing spilo image..." 
5 | if tar xpJf /a.tar.xz -C / > /dev/null 2>&1; then 6 | rm /a.tar.xz 7 | ln -snf dash /bin/sh 8 | else 9 | echo "failed to decompress spilo image" 10 | exit 1 11 | fi 12 | fi 13 | 14 | if [ "$1" = "init" ]; then 15 | exec /usr/bin/dumb-init -c --rewrite 1:0 -- /bin/sh /launch.sh 16 | fi 17 | 18 | sysctl -w vm.dirty_background_bytes=67108864 > /dev/null 2>&1 19 | sysctl -w vm.dirty_bytes=134217728 > /dev/null 2>&1 20 | 21 | if [ "$USE_OLD_LOCALES" = "true" ]; then 22 | ln -snf /usr/lib/locale/locale-archive.18 /run/locale-archive 23 | else 24 | ln -snf /usr/lib/locale/locale-archive.22 /run/locale-archive 25 | fi 26 | 27 | mkdir -p "$PGLOG" "$PGDATA" "$RW_DIR/postgresql" "$RW_DIR/tmp" "$RW_DIR/certs" 28 | if [ "$(id -u)" -ne 0 ]; then 29 | sed -e "s/^postgres:x:[^:]*:[^:]*:/postgres:x:$(id -u):$(id -g):/" /etc/passwd > "$RW_DIR/tmp/passwd" 30 | cat "$RW_DIR/tmp/passwd" > /etc/passwd 31 | rm "$RW_DIR/tmp/passwd" 32 | fi 33 | 34 | ## Ensure all logfiles exist, most appliances will have 35 | ## a foreign data wrapper pointing to these files 36 | for i in $(seq 0 7); do 37 | if [ "$LOG_SHIP_HOURLY" != "true" ]; then 38 | if [ ! -f "${PGLOG}/postgresql-${i}.csv" ]; then 39 | touch "${PGLOG}/postgresql-${i}.csv" 40 | fi 41 | else 42 | for h in $(seq -w 0 23); do 43 | if [ ! -f "${PGLOG}/postgresql-${i}-${h}.csv" ]; then 44 | touch "${PGLOG}/postgresql-${i}-${h}.csv" 45 | fi 46 | done 47 | fi 48 | done 49 | chown -R postgres: "$PGROOT" "$RW_DIR/certs" 50 | chmod -R go-w "$PGROOT" 51 | chmod 01777 "$RW_DIR/tmp" 52 | chmod 0700 "$PGDATA" 53 | 54 | if [ "$DEMO" = "true" ]; then 55 | python3 /scripts/configure_spilo.py patroni pgqd certificate pam-oauth2 56 | elif python3 /scripts/configure_spilo.py all; then 57 | CMD="/scripts/patroni_wait.sh -t 3600 -- envdir $WALE_ENV_DIR /scripts/postgres_backup.sh $PGDATA" 58 | if [ "$(id -u)" = "0" ]; then 59 | su postgres -c "PATH=$PATH $CMD" & 60 | else 61 | $CMD & 62 | fi 63 | fi 64 | 65 | sv_stop() { 66 | sv -w 86400 stop patroni 67 | sv -w 86400 stop /etc/service/* 68 | } 69 | 70 | [ ! 
-d /etc/service ] && exit 1 # /etc/service has not been created due to an error, the container is no-op 71 | 72 | trap sv_stop TERM QUIT INT 73 | 74 | /usr/bin/runsvdir -P /etc/service & 75 | 76 | wait 77 | -------------------------------------------------------------------------------- /postgres-appliance/major_upgrade/pg_upgrade.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import shutil 4 | import subprocess 5 | import psutil 6 | 7 | from patroni.postgresql import Postgresql 8 | from patroni.postgresql.mpp import get_mpp 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | class _PostgresqlUpgrade(Postgresql): 14 | 15 | _INCOMPATIBLE_EXTENSIONS = ('pg_repack',) 16 | 17 | def adjust_shared_preload_libraries(self, version): 18 | from spilo_commons import adjust_extensions 19 | 20 | shared_preload_libraries = self.config.get('parameters').get('shared_preload_libraries') 21 | self._old_config_values['shared_preload_libraries'] = shared_preload_libraries 22 | 23 | if shared_preload_libraries: 24 | self.config.get('parameters')['shared_preload_libraries'] =\ 25 | adjust_extensions(shared_preload_libraries, version) 26 | 27 | def no_bg_mon(self): 28 | shared_preload_libraries = self.config.get('parameters').get('shared_preload_libraries') 29 | if shared_preload_libraries: 30 | tmp = filter(lambda a: a != "bg_mon", map(lambda a: a.strip(), shared_preload_libraries.split(","))) 31 | self.config.get('parameters')['shared_preload_libraries'] = ",".join(tmp) 32 | 33 | def restore_shared_preload_libraries(self): 34 | if getattr(self, '_old_shared_preload_libraries'): 35 | self.config.get('parameters')['shared_preload_libraries'] = self._old_shared_preload_libraries 36 | return True 37 | 38 | def start_old_cluster(self, config, version): 39 | self._new_bin_dir = self._bin_dir 40 | self.set_bin_dir_for_version(version) 41 | self._old_bin_dir = self._bin_dir 42 | 43 | # make sure we don't archive wals from the old version 44 | self._old_config_values = {'archive_mode': self.config.get('parameters').get('archive_mode')} 45 | self.config.get('parameters')['archive_mode'] = 'off' 46 | 47 | # and don't load shared_preload_libraries which don't exist in the old version 48 | self.adjust_shared_preload_libraries(float(version)) 49 | 50 | return self.bootstrap.bootstrap(config) 51 | 52 | def get_cluster_version(self): 53 | with open(self._version_file) as f: 54 | return f.read().strip() 55 | 56 | def set_bin_dir_for_version(self, version): 57 | from spilo_commons import get_bin_dir 58 | self.set_bin_dir(get_bin_dir(version)) 59 | 60 | def set_bin_dir(self, bin_dir): 61 | self._bin_dir = bin_dir 62 | self._available_gucs = None 63 | 64 | @property 65 | def local_conn_kwargs(self): 66 | conn_kwargs = self.connection_pool.conn_kwargs 67 | conn_kwargs['options'] = '-c synchronous_commit=local -c statement_timeout=0 -c search_path=' 68 | conn_kwargs.pop('connect_timeout', None) 69 | return conn_kwargs 70 | 71 | def _get_all_databases(self): 72 | return [d[0] for d in self.query('SELECT datname FROM pg_catalog.pg_database WHERE datallowconn')] 73 | 74 | def drop_possibly_incompatible_extensions(self): 75 | from patroni.postgresql.connection import get_connection_cursor 76 | 77 | logger.info('Dropping extensions from the cluster which could be incompatible') 78 | conn_kwargs = self.local_conn_kwargs 79 | 80 | for d in self._get_all_databases(): 81 | conn_kwargs['dbname'] = d 82 | with get_connection_cursor(**conn_kwargs) as cur: 83 | 
for ext in self._INCOMPATIBLE_EXTENSIONS: 84 | logger.info('Executing "DROP EXTENSION IF EXISTS %s" in the database="%s"', ext, d) 85 | cur.execute("DROP EXTENSION IF EXISTS {0}".format(ext)) 86 | 87 | def drop_possibly_incompatible_objects(self): 88 | from patroni.postgresql.connection import get_connection_cursor 89 | 90 | logger.info('Dropping objects from the cluster which could be incompatible') 91 | conn_kwargs = self.local_conn_kwargs 92 | 93 | for d in self._get_all_databases(): 94 | conn_kwargs['dbname'] = d 95 | with get_connection_cursor(**conn_kwargs) as cur: 96 | 97 | cmd = "REVOKE EXECUTE ON FUNCTION pg_catalog.pg_switch_{0}() FROM admin".format(self.wal_name) 98 | logger.info('Executing "%s" in the database="%s"', cmd, d) 99 | cur.execute(cmd) 100 | 101 | logger.info('Executing "DROP FUNCTION metric_helpers.pg_stat_statements" in the database="%s"', d) 102 | cur.execute("DROP FUNCTION IF EXISTS metric_helpers.pg_stat_statements(boolean) CASCADE") 103 | 104 | for ext in ('pg_stat_kcache', 'pg_stat_statements') + self._INCOMPATIBLE_EXTENSIONS: 105 | logger.info('Executing "DROP EXTENSION IF EXISTS %s" in the database="%s"', ext, d) 106 | cur.execute("DROP EXTENSION IF EXISTS {0}".format(ext)) 107 | 108 | cur.execute("SELECT oid::regclass FROM pg_catalog.pg_class" 109 | " WHERE relpersistence = 'u' AND relkind = 'r'") 110 | for unlogged in cur.fetchall(): 111 | logger.info('Truncating unlogged table %s', unlogged[0]) 112 | try: 113 | cur.execute('TRUNCATE {0}'.format(unlogged[0])) 114 | except Exception as e: 115 | logger.error('Failed: %r', e) 116 | 117 | def update_extensions(self): 118 | from patroni.postgresql.connection import get_connection_cursor 119 | 120 | conn_kwargs = self.local_conn_kwargs 121 | 122 | for d in self._get_all_databases(): 123 | conn_kwargs['dbname'] = d 124 | with get_connection_cursor(**conn_kwargs) as cur: 125 | cur.execute('SELECT quote_ident(extname), extversion FROM pg_catalog.pg_extension') 126 | for extname, version in cur.fetchall(): 127 | # require manual update to 5.X+ 128 | if extname == 'pg_partman' and int(version[0]) < 5: 129 | logger.warning("Skipping update of '%s' in database=%s. " 130 | "Extension version: %s. 
Consider manual update", 131 | extname, d, version) 132 | continue 133 | query = 'ALTER EXTENSION {0} UPDATE'.format(extname) 134 | logger.info("Executing '%s' in the database=%s", query, d) 135 | try: 136 | cur.execute(query) 137 | except Exception as e: 138 | logger.error('Failed: %r', e) 139 | 140 | @staticmethod 141 | def remove_new_data(d): 142 | if d.endswith('_new') and os.path.isdir(d): 143 | shutil.rmtree(d) 144 | 145 | def cleanup_new_pgdata(self): 146 | if getattr(self, '_new_data_dir', None): 147 | self.remove_new_data(self._new_data_dir) 148 | 149 | def cleanup_old_pgdata(self): 150 | if os.path.exists(self._old_data_dir): 151 | logger.info('Removing %s', self._old_data_dir) 152 | shutil.rmtree(self._old_data_dir) 153 | return True 154 | 155 | def switch_pgdata(self): 156 | self._old_data_dir = self._data_dir + '_old' 157 | self.cleanup_old_pgdata() 158 | os.rename(self._data_dir, self._old_data_dir) 159 | if getattr(self, '_new_data_dir', None): 160 | os.rename(self._new_data_dir, self._data_dir) 161 | self.configure_server_parameters() 162 | return True 163 | 164 | def switch_back_pgdata(self): 165 | if os.path.exists(self._data_dir): 166 | self._new_data_dir = self._data_dir + '_new' 167 | self.cleanup_new_pgdata() 168 | os.rename(self._data_dir, self._new_data_dir) 169 | os.rename(self._old_data_dir, self._data_dir) 170 | 171 | def pg_upgrade(self, check=False): 172 | upgrade_dir = self._data_dir + '_upgrade' 173 | if os.path.exists(upgrade_dir) and os.path.isdir(upgrade_dir): 174 | shutil.rmtree(upgrade_dir) 175 | 176 | os.makedirs(upgrade_dir) 177 | 178 | old_cwd = os.getcwd() 179 | os.chdir(upgrade_dir) 180 | 181 | pg_upgrade_args = ['-k', '-j', str(psutil.cpu_count()), 182 | '-b', self._old_bin_dir, '-B', self._new_bin_dir, 183 | '-d', self._data_dir, '-D', self._new_data_dir, 184 | '-O', "-c timescaledb.restoring='on'", 185 | '-O', "-c archive_mode='off'"] 186 | if 'username' in self.config.superuser: 187 | pg_upgrade_args += ['-U', self.config.superuser['username']] 188 | 189 | if check: 190 | pg_upgrade_args += ['--check'] 191 | else: 192 | self.config.write_postgresql_conf() 193 | 194 | self.set_bin_dir(self._new_bin_dir) 195 | 196 | logger.info('Executing pg_upgrade%s', (' --check' if check else '')) 197 | if subprocess.call([self.pgcommand('pg_upgrade')] + pg_upgrade_args) == 0: 198 | if check: 199 | self.set_bin_dir(self._old_bin_dir) 200 | os.chdir(old_cwd) 201 | shutil.rmtree(upgrade_dir) 202 | return True 203 | 204 | def prepare_new_pgdata(self, version): 205 | from spilo_commons import append_extensions 206 | 207 | locale = self.query("SELECT datcollate FROM pg_database WHERE datname='template1';")[0][0] 208 | encoding = self.query('SHOW server_encoding')[0][0] 209 | initdb_config = [{'locale': locale}, {'encoding': encoding}] 210 | if self.query("SELECT current_setting('data_checksums')::bool")[0][0]: 211 | initdb_config.append('data-checksums') 212 | 213 | logger.info('initdb config: %s', initdb_config) 214 | 215 | self._new_data_dir = os.path.abspath(self._data_dir) 216 | self._old_data_dir = self._new_data_dir + '_old' 217 | self._data_dir = self._new_data_dir + '_new' 218 | self.remove_new_data(self._data_dir) 219 | old_postgresql_conf = self.config._postgresql_conf 220 | self.config._postgresql_conf = os.path.join(self._data_dir, 'postgresql.conf') 221 | old_version_file = self._version_file 222 | self._version_file = os.path.join(self._data_dir, 'PG_VERSION') 223 | 224 | self._old_bin_dir = self._bin_dir 225 | self.set_bin_dir_for_version(version) 226 
| self._new_bin_dir = self._bin_dir 227 | 228 | # shared_preload_libraries for the old cluster, cleaned from incompatible/missing libs 229 | old_shared_preload_libraries = self.config.get('parameters').get('shared_preload_libraries') 230 | 231 | # restore original values of archive_mode and shared_preload_libraries 232 | if getattr(self, '_old_config_values', None): 233 | for name, value in self._old_config_values.items(): 234 | if value is None: 235 | self.config.get('parameters').pop(name) 236 | else: 237 | self.config.get('parameters')[name] = value 238 | 239 | # for the new version we may need to add some libs to shared_preload_libraries 240 | shared_preload_libraries = self.config.get('parameters').get('shared_preload_libraries') 241 | if shared_preload_libraries: 242 | self._old_shared_preload_libraries = self.config.get('parameters')['shared_preload_libraries'] =\ 243 | append_extensions(shared_preload_libraries, float(version)) 244 | self.no_bg_mon() 245 | 246 | if not self.bootstrap._initdb(initdb_config): 247 | return False 248 | self.bootstrap._running_custom_bootstrap = False 249 | 250 | # Copy old configs. XXX: some parameters might be incompatible! 251 | for f in os.listdir(self._new_data_dir): 252 | if f.startswith('postgresql.') or f.startswith('pg_hba.conf') or f == 'patroni.dynamic.json': 253 | shutil.copy(os.path.join(self._new_data_dir, f), os.path.join(self._data_dir, f)) 254 | 255 | self.config.write_postgresql_conf() 256 | self._new_data_dir, self._data_dir = self._data_dir, self._new_data_dir 257 | self.config._postgresql_conf = old_postgresql_conf 258 | self._version_file = old_version_file 259 | self.set_bin_dir(self._old_bin_dir) 260 | 261 | if old_shared_preload_libraries: 262 | self.config.get('parameters')['shared_preload_libraries'] = old_shared_preload_libraries 263 | self.no_bg_mon() 264 | self.configure_server_parameters() 265 | return True 266 | 267 | def do_upgrade(self): 268 | return self.pg_upgrade() and self.restore_shared_preload_libraries()\ 269 | and self.switch_pgdata() and self.cleanup_old_pgdata() 270 | 271 | def analyze(self, in_stages=False): 272 | vacuumdb_args = ['--analyze-in-stages'] if in_stages else [] 273 | logger.info('Rebuilding statistics (vacuumdb%s)', (' ' + vacuumdb_args[0] if in_stages else '')) 274 | if 'username' in self.config.superuser: 275 | vacuumdb_args += ['-U', self.config.superuser['username']] 276 | vacuumdb_args += ['-Z', '-j'] 277 | 278 | # vacuumdb processes databases sequentially, so we run one vacuumdb per database in parallel, 279 | # which helps when there are multiple databases in the same cluster.
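# e.g., with 8 CPUs and two user databases each vacuumdb would get -j 4, while the always-present
# 'postgres' and 'template1' databases are vacuumed with a single worker each (numbers are illustrative)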
280 | single_worker_dbs = ('postgres', 'template1') 281 | databases = self._get_all_databases() 282 | db_count = len([d for d in databases if d not in single_worker_dbs]) 283 | # calculate concurrency per database, except always existing "single_worker_dbs" (they'll get always 1 worker) 284 | concurrency = str(max(1, int(psutil.cpu_count()/max(1, db_count)))) 285 | procs = [] 286 | for d in databases: 287 | j = '1' if d in single_worker_dbs else concurrency 288 | try: 289 | procs.append(subprocess.Popen([self.pgcommand('vacuumdb')] + vacuumdb_args + [j, '-d', d])) 290 | except Exception: 291 | pass 292 | for proc in procs: 293 | try: 294 | proc.wait() 295 | except Exception: 296 | pass 297 | 298 | 299 | def PostgresqlUpgrade(config): 300 | config['postgresql'].update({'callbacks': {}, 'pg_ctl_timeout': 3600*24*7}) 301 | 302 | # avoid unnecessary interactions with PGDATA and postgres 303 | is_running = _PostgresqlUpgrade.is_running 304 | _PostgresqlUpgrade.is_running = lambda s: False 305 | try: 306 | return _PostgresqlUpgrade(config['postgresql'], get_mpp(config)) 307 | finally: 308 | _PostgresqlUpgrade.is_running = is_running 309 | -------------------------------------------------------------------------------- /postgres-appliance/motd: -------------------------------------------------------------------------------- 1 | echo " 2 | ____ _ _ 3 | / ___| _ __ (_) | ___ 4 | \___ \| '_ \| | |/ _ \\ 5 | ___) | |_) | | | (_) | 6 | |____/| .__/|_|_|\___/ 7 | |_| 8 | 9 | This container is managed by runit, when stopping/starting services use sv 10 | 11 | Examples: 12 | 13 | sv stop cron 14 | sv restart patroni 15 | 16 | Current status: (sv status /etc/service/*) 17 | " 18 | 19 | [ -d /etc/service ] && sv status /etc/service/* 20 | -------------------------------------------------------------------------------- /postgres-appliance/pgq_ticker.ini: -------------------------------------------------------------------------------- 1 | [pgqd] 2 | initial_database = postgres 3 | 4 | # how often to do maintentance, in seconds. 5 | maint_period = 120 6 | # how often to run ticker, in seconds. 7 | ticker_period = 1 8 | -------------------------------------------------------------------------------- /postgres-appliance/runit/cron/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | if [ "$(id -u)" -ne 0 ]; then 4 | LD_PRELOAD=/usr/local/lib/cron_unprivileged.so 5 | fi 6 | 7 | exec 2>&1 8 | exec env -i LD_PRELOAD=$LD_PRELOAD /usr/sbin/cron -f 9 | -------------------------------------------------------------------------------- /postgres-appliance/runit/etcd/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | exec 2>&1 4 | exec env -i ETCD_UNSUPPORTED_ARCH=$(dpkg --print-architecture) /bin/etcd --data-dir /run/etcd.data 5 | -------------------------------------------------------------------------------- /postgres-appliance/runit/patroni/finish: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | STATUS_FILE=supervise/restarts 4 | MAX_RESTARTS=5 5 | BACKOFF=30 6 | 7 | echo "$PWD: finished with code=$1 signal=$2" 8 | 9 | # unexpected exit code 10 | if [ "$1" != "0" ] && [ "$1" != "2" ]; then 11 | # don't count restarts if the service was killed by OOM 12 | if [ "$2" = "9" ]; then 13 | exit 0 14 | fi 15 | 16 | if [ -f $STATUS_FILE ]; then 17 | RESTARTS=$(cat $STATUS_FILE) 18 | fi 19 | 20 | # no status file or garbage in it 21 | if ! 
[ "$RESTARTS" -eq "$RESTARTS" ] 2> /dev/null 22 | then 23 | RESTARTS=1 24 | fi 25 | 26 | echo $((RESTARTS + 1)) > $STATUS_FILE 27 | 28 | if [ "$RESTARTS" -lt "$MAX_RESTARTS" ]; then 29 | SLEEPTIME=$((RESTARTS * BACKOFF)) 30 | echo "$PWD: sleeping $SLEEPTIME seconds" 31 | exec sleep $SLEEPTIME 32 | fi 33 | echo "$PWD: exceeded maximum number of restarts $MAX_RESTARTS" 34 | fi 35 | 36 | echo "stopping $PWD" 37 | sv stop . 38 | rm -f $STATUS_FILE 39 | -------------------------------------------------------------------------------- /postgres-appliance/runit/patroni/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | # Make patroni process real-time if we have enough permissions 4 | CHRT="chrt -Rr 99" 5 | if ! $CHRT true 2> /dev/null; then 6 | CHRT="" 7 | fi 8 | 9 | CHPST="chpst -u postgres" 10 | if ! $CHPST true 2> /dev/null; then 11 | CHPST="" 12 | fi 13 | 14 | # Exclude shared memory from coredump 15 | if ! echo 0x31 > /proc/self/coredump_filter 16 | then 17 | echo "Failed to enable coredump shared memory filter" 18 | fi 19 | 20 | # Enable core dumps 21 | if ! ulimit -c unlimited 22 | then 23 | echo "Failed to set unlimited size for coredump" 24 | fi 25 | 26 | # Only small subset of environment variables is allowed. We don't want accidentally disclose sensitive information 27 | for E in $(printenv -0 | tr '\n' ' ' | sed 's/\x00/\n/g' | grep -vE '^(KUBERNETES_(SERVICE|PORT|ROLE)[_=]|((POD_(IP|NAMESPACE))|HOSTNAME|PATH|PGHOME|LC_ALL|ENABLE_PG_MON)=)' | sed 's/=.*//g'); do 28 | unset $E 29 | done 30 | 31 | exec 2>&1 32 | exec $CHRT $CHPST env HOME=/home/postgres patroni /home/postgres/postgres.yml 33 | -------------------------------------------------------------------------------- /postgres-appliance/runit/pgbouncer/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | CHPST="chpst -u postgres" 4 | if ! $CHPST true 2> /dev/null; then 5 | CHPST="" 6 | fi 7 | 8 | exec 2>&1 9 | exec $CHPST env -i /usr/sbin/pgbouncer /run/pgbouncer/pgbouncer.ini 10 | -------------------------------------------------------------------------------- /postgres-appliance/runit/pgqd/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | CHPST="chpst -u postgres" 4 | if ! $CHPST true 2> /dev/null; then 5 | CHPST="" 6 | fi 7 | 8 | exec 2>&1 9 | exec $CHPST env -i PGAPPNAME="pgq ticker" /scripts/patroni_wait.sh --role primary -- /usr/bin/pgqd /home/postgres/pgq_ticker.ini 10 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/basebackup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | RETRIES=2 4 | 5 | while getopts ":-:" optchar; do 6 | [[ "${optchar}" == "-" ]] || continue 7 | case "${OPTARG}" in 8 | datadir=* ) 9 | DATA_DIR=${OPTARG#*=} 10 | ;; 11 | connstring=* ) 12 | CONNSTR="${OPTARG#*=}" 13 | ;; 14 | retries=* ) 15 | RETRIES=${OPTARG#*=} 16 | ;; 17 | esac 18 | done 19 | 20 | [[ -z $DATA_DIR || -z "$CONNSTR" || ! 
$RETRIES =~ ^[1-9]$ ]] && exit 1 21 | 22 | if which pg_receivewal &> /dev/null; then 23 | PG_RECEIVEWAL=pg_receivewal 24 | PG_BASEBACKUP_OPTS=(-X none) 25 | else 26 | PG_RECEIVEWAL=pg_receivexlog 27 | PG_BASEBACKUP_OPTS=() 28 | fi 29 | 30 | WAL_FAST=$(dirname "$DATA_DIR")/wal_fast 31 | readonly WAL_FAST 32 | mkdir -p "$WAL_FAST" 33 | 34 | rm -fr "$DATA_DIR" "${WAL_FAST:?}"/* 35 | 36 | function sigterm_handler() { 37 | kill -SIGTERM "$receivewal_pid" "$basebackup_pid" 38 | exit 143 39 | } 40 | 41 | trap sigterm_handler QUIT TERM INT 42 | 43 | 44 | function start_receivewal() { 45 | local receivewal_pid=$BASHPID 46 | 47 | # wait for backup_label 48 | while [[ ! -f ${DATA_DIR}/backup_label ]]; do 49 | sleep 1 50 | done 51 | 52 | # get the first wal segment necessary for recovery from backup label 53 | SEGMENT=$(sed -n 's/^START WAL LOCATION: .*file \([0-9A-F]\{24\}\).*$/\1/p' "$DATA_DIR/backup_label") 54 | 55 | [ -z "$SEGMENT" ] && exit 1 56 | 57 | # run pg_receivewal until postgres will not start streaming 58 | ( 59 | while ! pgrep -cf 'wal {0,1}receiver( process){0,1}\s+streaming' > /dev/null; do 60 | # exit if pg_receivewal is not running 61 | kill -0 $receivewal_pid && sleep 1 || exit 62 | done 63 | 64 | kill $receivewal_pid && sleep 1 65 | rm -f "${WAL_FAST:?}"/* 66 | )& 67 | 68 | # calculate the name of previous segment 69 | timeline=${SEGMENT:0:8} 70 | log=$((16#${SEGMENT:8:8})) 71 | seg=$((16#${SEGMENT:16:8})) 72 | if [[ $seg == 0 ]]; then 73 | seg=255 74 | log=$((log-1)) 75 | else 76 | seg=$((seg-1)) 77 | fi 78 | 79 | SEGMENT=$(printf "%s%08X%08X\n" "$timeline" "$log" "$seg") 80 | 81 | # pg_receivewal doesn't have an argument to specify position to start stream from 82 | # therefore we will "precreate" previous file and pg_receivewal will start fetching the next one 83 | dd if=/dev/zero of="$WAL_FAST/$SEGMENT" bs=16k count=1k 84 | 85 | exec $PG_RECEIVEWAL --directory="$WAL_FAST" --dbname="$CONNSTR" 86 | } 87 | 88 | # make sure that there is only one receivewal running 89 | exec 9>"$WAL_FAST/receivewal.lock" 90 | if flock -x -n 9; then 91 | start_receivewal & 92 | receivewal_pid=$! 93 | echo $receivewal_pid > "$WAL_FAST/receivewal.pid" 94 | else 95 | receivewal_pid=$(cat "$WAL_FAST/receivewal.pid") 96 | fi 97 | 98 | ATTEMPT=0 99 | while [[ $((ATTEMPT++)) -le $RETRIES ]]; do 100 | pg_basebackup --pgdata="${DATA_DIR}" "${PG_BASEBACKUP_OPTS[@]}" --dbname="${CONNSTR}" & 101 | basebackup_pid=$! 102 | wait $basebackup_pid 103 | EXITCODE=$? 
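# pg_basebackup runs in the background and is wait-ed on so the TERM/INT trap above can still fire;
# on failure the loop below retries with a growing pause (10s, 20s, ...) after wiping the partial data dir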
104 | if [[ $EXITCODE == 0 ]]; then 105 | break 106 | elif [[ $ATTEMPT -le $RETRIES ]]; then 107 | sleep $((ATTEMPT*10)) 108 | rm -fr "${DATA_DIR}" 109 | fi 110 | done 111 | 112 | [[ $EXITCODE != 0 && -n $receivewal_pid ]] && kill "$receivewal_pid" 113 | exit $EXITCODE 114 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/callback_aws.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import boto.ec2 4 | import boto.utils 5 | import logging 6 | import os 7 | import sys 8 | import time 9 | 10 | logger = logging.getLogger(__name__) 11 | LEADER_TAG_VALUE = os.environ.get('AWS_LEADER_TAG_VALUE', 'master') 12 | 13 | 14 | def retry(func): 15 | def wrapped(*args, **kwargs): 16 | count = 0 17 | while True: 18 | try: 19 | return func(*args, **kwargs) 20 | except boto.exception.BotoServerError as e: 21 | if count >= 10 or str(e.error_code) not in ('Throttling', 'RequestLimitExceeded'): 22 | raise 23 | logger.info('Throttling AWS API requests...') 24 | time.sleep(2 ** count * 0.5) 25 | count += 1 26 | 27 | return wrapped 28 | 29 | 30 | def get_instance_metadata(): 31 | return boto.utils.get_instance_identity()['document'] 32 | 33 | 34 | @retry 35 | def associate_address(ec2, allocation_id, instance_id): 36 | return ec2.associate_address(instance_id=instance_id, allocation_id=allocation_id, allow_reassociation=True) 37 | 38 | 39 | @retry 40 | def tag_resource(ec2, resource_id, tags): 41 | return ec2.create_tags([resource_id], tags) 42 | 43 | 44 | @retry 45 | def list_volumes(ec2, instance_id): 46 | return ec2.get_all_volumes(filters={'attachment.instance-id': instance_id}) 47 | 48 | 49 | @retry 50 | def get_instance(ec2, instance_id): 51 | return ec2.get_only_instances([instance_id])[0] 52 | 53 | 54 | def main(): 55 | logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO) 56 | 57 | # EIP_ALLOCATION is optional argument 58 | argc = len(sys.argv) 59 | if argc not in (4, 5) or sys.argv[argc - 3] not in ('on_start', 'on_stop', 'on_role_change'): 60 | sys.exit("Usage: {0} [eip_allocation_id] action role name".format(sys.argv[0])) 61 | 62 | action, role, cluster = sys.argv[argc - 3:argc] 63 | 64 | metadata = get_instance_metadata() 65 | 66 | instance_id = metadata['instanceId'] 67 | 68 | ec2 = boto.ec2.connect_to_region(metadata['region']) 69 | 70 | if argc == 5 and role in ('primary', 'standby_leader') and action in ('on_start', 'on_role_change'): 71 | associate_address(ec2, sys.argv[1], instance_id) 72 | 73 | instance = get_instance(ec2, instance_id) 74 | 75 | tags = {'Role': LEADER_TAG_VALUE if role == 'primary' else role} 76 | tag_resource(ec2, instance_id, tags) 77 | 78 | tags.update({'Instance': instance_id}) 79 | 80 | volumes = list_volumes(ec2, instance_id) 81 | for v in volumes: 82 | if 'Name' in v.tags: 83 | tags_to_update = tags 84 | else: 85 | if v.attach_data.device == instance.root_device_name: 86 | volume_device = 'root' 87 | else: 88 | volume_device = 'data' 89 | tags_to_update = dict(tags, Name='spilo_{}_{}'.format(cluster, volume_device)) 90 | 91 | tag_resource(ec2, v.id, tags_to_update) 92 | 93 | 94 | if __name__ == '__main__': 95 | main() 96 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/callback_role.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import json 4 | import logging 5 | import 
requests 6 | import requests.exceptions 7 | import os 8 | import socket 9 | import sys 10 | import time 11 | 12 | KUBE_SERVICE_DIR = '/var/run/secrets/kubernetes.io/serviceaccount/' 13 | KUBE_NAMESPACE_FILENAME = KUBE_SERVICE_DIR + 'namespace' 14 | KUBE_TOKEN_FILENAME = KUBE_SERVICE_DIR + 'token' 15 | KUBE_CA_CERT = KUBE_SERVICE_DIR + 'ca.crt' 16 | 17 | KUBE_API_URL = 'https://kubernetes.default.svc.cluster.local/api/v1/namespaces' 18 | 19 | logger = logging.getLogger(__name__) 20 | 21 | LABEL = os.environ.get("KUBERNETES_ROLE_LABEL", 'spilo-role') 22 | LEADER_LABEL_VALUE = os.environ.get("KUBERNETES_LEADER_LABEL_VALUE", 'master') 23 | 24 | 25 | def read_first_line(filename): 26 | try: 27 | with open(filename) as f: 28 | return f.readline().rstrip() 29 | except IOError: 30 | return None 31 | 32 | 33 | def read_token(): 34 | return read_first_line(KUBE_TOKEN_FILENAME) 35 | 36 | 37 | def api_patch(namespace, kind, name, entity_name, body): 38 | api_url = '/'.join([KUBE_API_URL, namespace, kind, name]) 39 | count = 0 40 | while True: 41 | try: 42 | token = read_token() 43 | if token: 44 | r = requests.patch(api_url, data=body, verify=KUBE_CA_CERT, 45 | headers={'Content-Type': 'application/strategic-merge-patch+json', 46 | 'Authorization': 'Bearer {0}'.format(token)}) 47 | if r.status_code in range(200, 206): 48 | break 49 | logger.warning('Unable to change %s: %s', entity_name, r.text) 50 | if not (r.status_code in (500, 503, 504) or 'retry-after' in r.headers): 51 | break 52 | else: 53 | logger.warning('Unable to read Kubernetes authorization token') 54 | except requests.exceptions.RequestException as e: 55 | logger.warning('Exception when executing PATCH on %s: %s', api_url, e) 56 | if count >= 10: 57 | raise Exception('PATCH {0} failed'.format(api_url)) 58 | time.sleep(2 ** count * 0.5) 59 | count += 1 60 | 61 | 62 | def change_pod_role_label(namespace, new_role): 63 | body = json.dumps({'metadata': {'labels': {LABEL: new_role}}}) 64 | api_patch(namespace, 'pods', os.environ['HOSTNAME'], '{} label'.format(LABEL), body) 65 | 66 | 67 | def change_endpoints(namespace, cluster): 68 | ip = socket.getaddrinfo(socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0)[0][4][0] 69 | ip = os.environ.get('POD_IP', ip) 70 | body = json.dumps({'subsets': [{'addresses': [{'ip': ip}], 71 | 'ports': [{'name': 'postgresql', 'port': 5432, 'protocol': 'TCP'}]}]}) 72 | try: 73 | api_patch(namespace, 'endpoints', cluster, 'service endpoints', body) 74 | except Exception: 75 | pass 76 | 77 | 78 | def record_role_change(action, new_role, cluster): 79 | new_role = None if action == 'on_stop' else new_role 80 | logger.debug("Changing the pod's role to %s", new_role) 81 | pod_namespace = os.environ.get('POD_NAMESPACE', read_first_line(KUBE_NAMESPACE_FILENAME)) or 'default' 82 | if new_role == LEADER_LABEL_VALUE: 83 | change_endpoints(pod_namespace, cluster) 84 | change_pod_role_label(pod_namespace, new_role) 85 | 86 | 87 | def main(): 88 | logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO) 89 | if len(sys.argv) == 4 and sys.argv[1] in ('on_start', 'on_stop', 'on_role_change', 'on_restart'): 90 | record_role_change(action=sys.argv[1], new_role=sys.argv[2], cluster=sys.argv[3]) 91 | else: 92 | sys.exit('Usage: %s ', sys.argv[0]) 93 | return 0 94 | 95 | 96 | if __name__ == '__main__': 97 | main() 98 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/create_user_functions.sql: 
-------------------------------------------------------------------------------- 1 | CREATE SCHEMA IF NOT EXISTS user_management; 2 | 3 | GRANT USAGE ON SCHEMA user_management TO admin; 4 | 5 | SET search_path TO user_management; 6 | 7 | CREATE OR REPLACE FUNCTION random_password (length integer) RETURNS text LANGUAGE sql AS 8 | $$ 9 | WITH chars (c) AS ( 10 | SELECT chr(33) 11 | UNION ALL 12 | SELECT chr(i) FROM generate_series (35, 38) AS t (i) 13 | UNION ALL 14 | SELECT chr(i) FROM generate_series (42, 90) AS t (i) 15 | UNION ALL 16 | SELECT chr(i) FROM generate_series (97, 122) AS t (i) 17 | ), 18 | bricks (b) AS ( 19 | -- build a pool of chars (the size will be the number of chars above times length) 20 | -- and shuffle it 21 | SELECT c FROM chars, generate_series(1, length) ORDER BY random() 22 | ) 23 | SELECT substr(string_agg(b, ''), 1, length) FROM bricks; 24 | $$ 25 | SET search_path to 'pg_catalog'; 26 | 27 | CREATE OR REPLACE FUNCTION create_application_user(username text) 28 | RETURNS text 29 | LANGUAGE plpgsql 30 | AS $function$ 31 | DECLARE 32 | pw text; 33 | BEGIN 34 | SELECT user_management.random_password(20) INTO pw; 35 | EXECUTE format($$ CREATE USER %I WITH PASSWORD %L $$, username, pw); 36 | RETURN pw; 37 | END 38 | $function$ 39 | SECURITY DEFINER SET search_path to 'pg_catalog'; 40 | 41 | REVOKE ALL ON FUNCTION create_application_user(text) FROM public; 42 | GRANT EXECUTE ON FUNCTION create_application_user(text) TO admin; 43 | 44 | COMMENT ON FUNCTION create_application_user(text) IS 'Creates a user that can login, sets the password to a strong random one, 45 | which is then returned'; 46 | 47 | 48 | 49 | CREATE OR REPLACE FUNCTION create_user(username text) 50 | RETURNS void 51 | LANGUAGE plpgsql 52 | AS $function$ 53 | BEGIN 54 | EXECUTE format($$ CREATE USER %I IN ROLE :HUMAN_ROLE, admin $$, username); 55 | EXECUTE format($$ ALTER ROLE %I SET log_statement TO 'all' $$, username); 56 | END; 57 | $function$ 58 | SECURITY DEFINER SET search_path to 'pg_catalog'; 59 | 60 | REVOKE ALL ON FUNCTION create_user(text) FROM public; 61 | GRANT EXECUTE ON FUNCTION create_user(text) TO admin; 62 | 63 | COMMENT ON FUNCTION create_user(text) IS 'Creates a user that is supposed to be a human, to be authenticated without a password'; 64 | 65 | 66 | CREATE OR REPLACE FUNCTION create_role(rolename text) 67 | RETURNS void 68 | LANGUAGE plpgsql 69 | AS $function$ 70 | BEGIN 71 | -- set ADMIN to the admin user, so every member of admin can GRANT these roles to each other 72 | EXECUTE format($$ CREATE ROLE %I WITH ADMIN admin $$, rolename); 73 | END; 74 | $function$ 75 | SECURITY DEFINER SET search_path to 'pg_catalog'; 76 | 77 | REVOKE ALL ON FUNCTION create_role(text) FROM public; 78 | GRANT EXECUTE ON FUNCTION create_role(text) TO admin; 79 | 80 | COMMENT ON FUNCTION create_role(text) IS 'Creates a role that cannot log in, but can be used to set up fine-grained privileges'; 81 | 82 | 83 | CREATE OR REPLACE FUNCTION create_application_user_or_change_password(username text, password text) 84 | RETURNS void 85 | LANGUAGE plpgsql 86 | AS $function$ 87 | BEGIN 88 | PERFORM 1 FROM pg_roles WHERE rolname = username; 89 | 90 | IF FOUND 91 | THEN 92 | EXECUTE format($$ ALTER ROLE %I WITH PASSWORD %L $$, username, password); 93 | ELSE 94 | EXECUTE format($$ CREATE USER %I WITH PASSWORD %L $$, username, password); 95 | END IF; 96 | END 97 | $function$ 98 | SECURITY DEFINER SET search_path to 'pg_catalog'; 99 | 100 | REVOKE ALL ON FUNCTION create_application_user_or_change_password(text, text) 
FROM public; 101 | GRANT EXECUTE ON FUNCTION create_application_user_or_change_password(text, text) TO admin; 102 | 103 | COMMENT ON FUNCTION create_application_user_or_change_password(text, text) IS 'USE THIS ONLY IN EMERGENCY! The password will appear in the DB logs. 104 | Creates a user that can login, sets the password to the one provided. 105 | If the user already exists, sets its password.'; 106 | 107 | 108 | CREATE OR REPLACE FUNCTION revoke_admin(username text) 109 | RETURNS void 110 | LANGUAGE plpgsql 111 | AS $function$ 112 | BEGIN 113 | EXECUTE format($$ REVOKE admin FROM %I $$, username); 114 | END 115 | $function$ 116 | SECURITY DEFINER SET search_path to 'pg_catalog'; 117 | 118 | REVOKE ALL ON FUNCTION revoke_admin(text) FROM public; 119 | GRANT EXECUTE ON FUNCTION revoke_admin(text) TO admin; 120 | 121 | COMMENT ON FUNCTION revoke_admin(text) IS 'Use this function to make a human user less privileged, 122 | ie. when you want to grant someone read privileges only'; 123 | 124 | 125 | CREATE OR REPLACE FUNCTION drop_user(username text) 126 | RETURNS void 127 | LANGUAGE plpgsql 128 | AS $function$ 129 | BEGIN 130 | EXECUTE format($$ DROP ROLE %I $$, username); 131 | END 132 | $function$ 133 | SECURITY DEFINER SET search_path to 'pg_catalog'; 134 | 135 | REVOKE ALL ON FUNCTION drop_user(text) FROM public; 136 | GRANT EXECUTE ON FUNCTION drop_user(text) TO admin; 137 | 138 | COMMENT ON FUNCTION drop_user(text) IS 'Drop a human or application user. Intended for cleanup (either after team changes or mistakes in role setup). 139 | Roles (= users) that own database objects cannot be dropped.'; 140 | 141 | 142 | CREATE OR REPLACE FUNCTION drop_role(username text) 143 | RETURNS void 144 | LANGUAGE sql 145 | AS $function$ 146 | SELECT user_management.drop_user(username); 147 | $function$ 148 | SECURITY DEFINER SET search_path to 'pg_catalog'; 149 | 150 | REVOKE ALL ON FUNCTION drop_role(text) FROM public; 151 | GRANT EXECUTE ON FUNCTION drop_role(text) TO admin; 152 | 153 | COMMENT ON FUNCTION drop_role(text) IS 'Drop a human or application user. Intended for cleanup (either after team changes or mistakes in role setup). 154 | Roles (= users) that own database objects cannot be dropped.'; 155 | 156 | 157 | CREATE OR REPLACE FUNCTION terminate_backend(pid integer) 158 | RETURNS boolean 159 | LANGUAGE sql 160 | AS $function$ 161 | SELECT pg_terminate_backend(pid); 162 | $function$ 163 | SECURITY DEFINER SET search_path to 'pg_catalog'; 164 | 165 | REVOKE ALL ON FUNCTION terminate_backend(integer) FROM public; 166 | GRANT EXECUTE ON FUNCTION terminate_backend(integer) TO admin; 167 | 168 | COMMENT ON FUNCTION terminate_backend(integer) IS 'When there is a process causing harm, you can kill it using this function. 
Get the pid from pg_stat_activity 169 | (be careful to match the user name (usename) and the query, in order not to kill innocent kittens) and pass it to terminate_backend()'; 170 | 171 | -- to allow checking what to kill: 172 | GRANT SELECT ON pg_stat_activity TO admin; 173 | 174 | 175 | RESET search_path; 176 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/metric_helpers.sql: -------------------------------------------------------------------------------- 1 | CREATE SCHEMA IF NOT EXISTS metric_helpers AUTHORIZATION postgres; 2 | 3 | GRANT USAGE ON SCHEMA metric_helpers TO admin, robot_zmon; 4 | 5 | SET search_path TO metric_helpers; 6 | 7 | -- table and btree bloat estimation queries are borrowed from https://github.com/ioguix/pgsql-bloat-estimation 8 | CREATE OR REPLACE FUNCTION get_table_bloat_approx ( 9 | OUT t_database name, 10 | OUT t_schema_name name, 11 | OUT t_table_name name, 12 | OUT t_real_size numeric, 13 | OUT t_extra_size double precision, 14 | OUT t_extra_ratio double precision, 15 | OUT t_fill_factor integer, 16 | OUT t_bloat_size double precision, 17 | OUT t_bloat_ratio double precision, 18 | OUT t_is_na boolean 19 | ) RETURNS SETOF record AS 20 | $_$ 21 | SELECT 22 | current_database(), 23 | schemaname, 24 | tblname, 25 | (bs*tblpages) AS real_size, 26 | ((tblpages-est_tblpages)*bs) AS extra_size, 27 | CASE WHEN tblpages - est_tblpages > 0 28 | THEN 100 * (tblpages - est_tblpages)/tblpages::float 29 | ELSE 0 30 | END AS extra_ratio, 31 | fillfactor, 32 | CASE WHEN tblpages - est_tblpages_ff > 0 33 | THEN (tblpages-est_tblpages_ff)*bs 34 | ELSE 0 35 | END AS bloat_size, 36 | CASE WHEN tblpages - est_tblpages_ff > 0 37 | THEN 100 * (tblpages - est_tblpages_ff)/tblpages::float 38 | ELSE 0 39 | END AS bloat_ratio, 40 | is_na 41 | FROM ( 42 | SELECT ceil( reltuples / ( (bs-page_hdr)/tpl_size ) ) + ceil( toasttuples / 4 ) AS est_tblpages, 43 | ceil( reltuples / ( (bs-page_hdr)*fillfactor/(tpl_size*100) ) ) + ceil( toasttuples / 4 ) AS est_tblpages_ff, 44 | tblpages, fillfactor, bs, tblid, schemaname, tblname, heappages, toastpages, is_na 45 | -- , tpl_hdr_size, tpl_data_size, pgstattuple(tblid) AS pst -- (DEBUG INFO) 46 | FROM ( 47 | SELECT 48 | ( 4 + tpl_hdr_size + tpl_data_size + (2*ma) 49 | - CASE WHEN tpl_hdr_size%ma = 0 THEN ma ELSE tpl_hdr_size%ma END 50 | - CASE WHEN ceil(tpl_data_size)::int%ma = 0 THEN ma ELSE ceil(tpl_data_size)::int%ma END 51 | ) AS tpl_size, bs - page_hdr AS size_per_block, (heappages + toastpages) AS tblpages, heappages, 52 | toastpages, reltuples, toasttuples, bs, page_hdr, tblid, schemaname, tblname, fillfactor, is_na 53 | -- , tpl_hdr_size, tpl_data_size 54 | FROM ( 55 | SELECT 56 | tbl.oid AS tblid, ns.nspname AS schemaname, tbl.relname AS tblname, tbl.reltuples, 57 | tbl.relpages AS heappages, coalesce(toast.relpages, 0) AS toastpages, 58 | coalesce(toast.reltuples, 0) AS toasttuples, 59 | coalesce(substring( 60 | array_to_string(tbl.reloptions, ' ') 61 | FROM 'fillfactor=([0-9]+)')::smallint, 100) AS fillfactor, 62 | current_setting('block_size')::numeric AS bs, 63 | CASE WHEN version()~'mingw32' OR version()~'64-bit|x86_64|ppc64|ia64|amd64' THEN 8 ELSE 4 END AS ma, 64 | 24 AS page_hdr, 65 | 23 + CASE WHEN MAX(coalesce(s.null_frac,0)) > 0 THEN ( 7 + count(s.attname) ) / 8 ELSE 0::int END 66 | + CASE WHEN bool_or(att.attname = 'oid' and att.attnum < 0) THEN 4 ELSE 0 END AS tpl_hdr_size, 67 | sum( (1-coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 0) ) AS tpl_data_size, 68 | 
bool_or(att.atttypid = 'pg_catalog.name'::regtype) 69 | OR sum(CASE WHEN att.attnum > 0 THEN 1 ELSE 0 END) <> count(s.attname) AS is_na 70 | FROM pg_attribute AS att 71 | JOIN pg_class AS tbl ON att.attrelid = tbl.oid 72 | JOIN pg_namespace AS ns ON ns.oid = tbl.relnamespace 73 | LEFT JOIN pg_stats AS s ON s.schemaname=ns.nspname 74 | AND s.tablename = tbl.relname AND s.inherited=false AND s.attname=att.attname 75 | LEFT JOIN pg_class AS toast ON tbl.reltoastrelid = toast.oid 76 | WHERE NOT att.attisdropped 77 | AND tbl.relkind = 'r' 78 | GROUP BY 1,2,3,4,5,6,7,8,9,10 79 | ORDER BY 2,3 80 | ) AS s 81 | ) AS s2 82 | ) AS s3 WHERE schemaname NOT LIKE 'information_schema'; 83 | $_$ LANGUAGE sql SECURITY DEFINER IMMUTABLE STRICT SET search_path to 'pg_catalog'; 84 | 85 | CREATE OR REPLACE VIEW table_bloat AS SELECT * FROM get_table_bloat_approx(); 86 | 87 | CREATE OR REPLACE FUNCTION get_btree_bloat_approx ( 88 | OUT i_database name, 89 | OUT i_schema_name name, 90 | OUT i_table_name name, 91 | OUT i_index_name name, 92 | OUT i_real_size numeric, 93 | OUT i_extra_size numeric, 94 | OUT i_extra_ratio double precision, 95 | OUT i_fill_factor integer, 96 | OUT i_bloat_size double precision, 97 | OUT i_bloat_ratio double precision, 98 | OUT i_is_na boolean 99 | ) RETURNS SETOF record AS 100 | $_$ 101 | SELECT current_database(), nspname AS schemaname, tblname, idxname, bs*(relpages)::bigint AS real_size, 102 | bs*(relpages-est_pages)::bigint AS extra_size, 103 | 100 * (relpages-est_pages)::float / relpages AS extra_ratio, 104 | fillfactor, 105 | CASE WHEN relpages > est_pages_ff 106 | THEN bs*(relpages-est_pages_ff) 107 | ELSE 0 108 | END AS bloat_size, 109 | 100 * (relpages-est_pages_ff)::float / relpages AS bloat_ratio, 110 | is_na 111 | -- , 100-(pst).avg_leaf_density AS pst_avg_bloat, est_pages, index_tuple_hdr_bm, maxalign, pagehdr, nulldatawidth, nulldatahdrwidth, reltuples, relpages -- (DEBUG INFO) 112 | FROM ( 113 | SELECT coalesce(1 + 114 | ceil(reltuples/floor((bs-pageopqdata-pagehdr)/(4+nulldatahdrwidth)::float)), 0 -- ItemIdData size + computed avg size of a tuple (nulldatahdrwidth) 115 | ) AS est_pages, 116 | coalesce(1 + 117 | ceil(reltuples/floor((bs-pageopqdata-pagehdr)*fillfactor/(100*(4+nulldatahdrwidth)::float))), 0 118 | ) AS est_pages_ff, 119 | bs, nspname, tblname, idxname, relpages, fillfactor, is_na 120 | -- , pgstatindex(idxoid) AS pst, index_tuple_hdr_bm, maxalign, pagehdr, nulldatawidth, nulldatahdrwidth, reltuples -- (DEBUG INFO) 121 | FROM ( 122 | SELECT maxalign, bs, nspname, tblname, idxname, reltuples, relpages, idxoid, fillfactor, 123 | ( index_tuple_hdr_bm + 124 | maxalign - CASE -- Add padding to the index tuple header to align on MAXALIGN 125 | WHEN index_tuple_hdr_bm%maxalign = 0 THEN maxalign 126 | ELSE index_tuple_hdr_bm%maxalign 127 | END 128 | + nulldatawidth + maxalign - CASE -- Add padding to the data to align on MAXALIGN 129 | WHEN nulldatawidth = 0 THEN 0 130 | WHEN nulldatawidth::integer%maxalign = 0 THEN maxalign 131 | ELSE nulldatawidth::integer%maxalign 132 | END 133 | )::numeric AS nulldatahdrwidth, pagehdr, pageopqdata, is_na 134 | -- , index_tuple_hdr_bm, nulldatawidth -- (DEBUG INFO) 135 | FROM ( 136 | SELECT n.nspname, ct.relname AS tblname, i.idxname, i.reltuples, i.relpages, 137 | i.idxoid, i.fillfactor, current_setting('block_size')::numeric AS bs, 138 | CASE -- MAXALIGN: 4 on 32bits, 8 on 64bits (and mingw32 ?) 
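-- e.g. on a typical 64-bit Linux build version() contains 'x86_64', so maxalign resolves to 8 below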
139 | WHEN version() ~ 'mingw32' OR version() ~ '64-bit|x86_64|ppc64|ia64|amd64' THEN 8 140 | ELSE 4 141 | END AS maxalign, 142 | /* per page header, fixed size: 20 for 7.X, 24 for others */ 143 | 24 AS pagehdr, 144 | /* per page btree opaque data */ 145 | 16 AS pageopqdata, 146 | /* per tuple header: add IndexAttributeBitMapData if some cols are null-able */ 147 | CASE WHEN max(coalesce(s.stanullfrac,0)) = 0 148 | THEN 2 -- IndexTupleData size 149 | ELSE 2 + (( 32 + 8 - 1 ) / 8) -- IndexTupleData size + IndexAttributeBitMapData size ( max num filed per index + 8 - 1 /8) 150 | END AS index_tuple_hdr_bm, 151 | /* data len: we remove null values save space using it fractionnal part from stats */ 152 | sum( (1-coalesce(s.stanullfrac, 0)) * coalesce(s.stawidth, 1024)) AS nulldatawidth, 153 | max( CASE WHEN a.atttypid = 'pg_catalog.name'::regtype THEN 1 ELSE 0 END ) > 0 AS is_na 154 | FROM ( 155 | SELECT idxname, reltuples, relpages, tbloid, idxoid, fillfactor, 156 | CASE WHEN indkey[i]=0 THEN idxoid ELSE tbloid END AS att_rel, 157 | CASE WHEN indkey[i]=0 THEN i ELSE indkey[i] END AS att_pos 158 | FROM ( 159 | SELECT idxname, reltuples, relpages, tbloid, idxoid, fillfactor, indkey, generate_series(1,indnatts) AS i 160 | FROM ( 161 | SELECT ci.relname AS idxname, ci.reltuples, ci.relpages, i.indrelid AS tbloid, 162 | i.indexrelid AS idxoid, 163 | coalesce(substring( 164 | array_to_string(ci.reloptions, ' ') 165 | from 'fillfactor=([0-9]+)')::smallint, 90) AS fillfactor, 166 | i.indnatts, 167 | string_to_array(textin(int2vectorout(i.indkey)),' ')::int[] AS indkey 168 | FROM pg_index i 169 | JOIN pg_class ci ON ci.oid=i.indexrelid 170 | WHERE ci.relam=(SELECT oid FROM pg_am WHERE amname = 'btree') 171 | AND ci.relpages > 0 172 | ) AS idx_data 173 | ) AS idx_data_cross 174 | ) i 175 | JOIN pg_attribute a ON a.attrelid = i.att_rel 176 | AND a.attnum = i.att_pos 177 | JOIN pg_statistic s ON s.starelid = i.att_rel 178 | AND s.staattnum = i.att_pos 179 | JOIN pg_class ct ON ct.oid = i.tbloid 180 | JOIN pg_namespace n ON ct.relnamespace = n.oid 181 | GROUP BY 1,2,3,4,5,6,7,8,9,10 182 | ) AS rows_data_stats 183 | ) AS rows_hdr_pdg_stats 184 | ) AS relation_stats; 185 | $_$ LANGUAGE sql SECURITY DEFINER IMMUTABLE STRICT SET search_path to 'pg_catalog'; 186 | 187 | CREATE OR REPLACE VIEW index_bloat AS SELECT * FROM get_btree_bloat_approx(); 188 | 189 | CREATE OR REPLACE FUNCTION pg_stat_statements(showtext boolean) RETURNS SETOF public.pg_stat_statements AS 190 | $$ 191 | SELECT * FROM public.pg_stat_statements(showtext); 192 | $$ LANGUAGE sql IMMUTABLE SECURITY DEFINER STRICT; 193 | 194 | CREATE OR REPLACE VIEW pg_stat_statements AS SELECT * FROM pg_stat_statements(true); 195 | 196 | CREATE OR REPLACE FUNCTION get_nearly_exhausted_sequences( 197 | IN threshold float, 198 | OUT schemaname name, 199 | OUT sequencename name, 200 | OUT seq_percent_used numeric 201 | ) RETURNS SETOF record AS 202 | $_$ 203 | SELECT * 204 | FROM ( 205 | SELECT 206 | schemaname, 207 | sequencename, 208 | round(abs( 209 | ceil((abs(last_value::numeric - start_value) + 1) / increment_by) / 210 | floor((CASE WHEN increment_by > 0 211 | THEN (max_value::numeric - start_value) 212 | ELSE (start_value::numeric - min_value) 213 | END + 1) / increment_by 214 | ) * 100 215 | ), 216 | 2) AS seq_percent_used 217 | FROM pg_sequences 218 | WHERE NOT CYCLE AND last_value IS NOT NULL 219 | ) AS s 220 | WHERE seq_percent_used >= threshold; 221 | $_$ 222 | LANGUAGE sql SECURITY DEFINER STRICT SET search_path to 'pg_catalog'; 223 | 224 | CREATE 
OR REPLACE VIEW nearly_exhausted_sequences AS SELECT * FROM get_nearly_exhausted_sequences(0.8); 225 | 226 | REVOKE ALL ON ALL TABLES IN SCHEMA metric_helpers FROM public; 227 | GRANT SELECT ON ALL TABLES IN SCHEMA metric_helpers TO admin, robot_zmon; 228 | 229 | REVOKE ALL ON ALL FUNCTIONS IN SCHEMA metric_helpers FROM public; 230 | GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA metric_helpers TO admin, robot_zmon; 231 | 232 | RESET search_path; 233 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/on_role_change.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | readonly HUMAN_ROLE=$1 4 | shift 5 | 6 | "$@" 7 | 8 | 9 | readonly dbname=postgres 10 | if [[ "${*: -3:1}" == "on_role_change" && "${*: -2:1}" == "primary" ]]; then 11 | num=30 # wait 30 seconds for end of recovery 12 | while [[ $((num--)) -gt 0 ]]; do 13 | if [[ "$(psql -d $dbname -tAc 'SELECT pg_catalog.pg_is_in_recovery()')" == "f" ]]; then 14 | vacuumdb -aZ > /dev/null 2>&1 & 15 | exec /scripts/post_init.sh "$HUMAN_ROLE" "$dbname" 16 | else 17 | sleep 1 18 | fi 19 | done 20 | fi 21 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/patroni_wait.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ROLE=primary 4 | INTERVAL=60 5 | TIMEOUT="" 6 | 7 | if [ -z "$1" ] 8 | then 9 | cat <<__EOT__ 10 | Usage: $(basename "$0") [OPTIONS] [-- COMMAND [ARG1] [ARG2]] 11 | 12 | Options: 13 | 14 | -i, --interval Specify the polling INTERVAL (default: $INTERVAL) 15 | 16 | -r, --role Which ROLE to wait upon (default: $ROLE) 17 | 18 | -t, --timeout Fail after TIMEOUT seconds (default: no timeout) 19 | 20 | Waits for ROLE (primary or replica). It will check every INTERVAL seconds ($INTERVAL). 21 | If TIMEOUT is specified, it will stop trying after TIMEOUT seconds. 22 | 23 | Executes COMMAND after ROLE has become available. (Default: exit 0) 24 | Returns 2 if the request timed out.
25 | 26 | Examples: 27 | 28 | $(basename "$0") -r replica -- echo "Replica is available" 29 | $(basename "$0") -t 1800 -- pg_basebackup -h localhost -D /tmp/backup --xlog-method=stream 30 | __EOT__ 31 | exit 1 32 | fi 33 | 34 | 35 | while [ $# -gt 0 ] 36 | do 37 | case $1 in 38 | -r|--role) 39 | ROLE=$2 40 | shift 41 | ;; 42 | -i|--interval) 43 | INTERVAL=$2 44 | shift 45 | ;; 46 | -t|--timeout) 47 | TIMEOUT=$2 48 | shift 49 | ;; 50 | --) 51 | shift 52 | break 53 | ;; 54 | *) 55 | echo "Unknown option: $1" 56 | exit 1 57 | ;; 58 | esac 59 | shift 60 | done 61 | 62 | if [ $# -gt 0 ]; then 63 | [ -n "$TIMEOUT" ] && CUTOFF=$(($(date +%s)+TIMEOUT)) 64 | 65 | while [ "$(curl -so /dev/null -w '%{http_code}' "http://localhost:8008/$ROLE")" != "200" ]; do 66 | [ -n "$TIMEOUT" ] && [ $CUTOFF -le "$(date +%s)" ] && exit 2 67 | sleep "$INTERVAL" 68 | done 69 | 70 | exec "$@" # Execute the command that was specified 71 | fi 72 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/pg_partman/after-create.sql: -------------------------------------------------------------------------------- 1 | DO $$ 2 | BEGIN 3 | PERFORM * FROM pg_catalog.pg_authid WHERE rolname = 'part_man'; 4 | IF FOUND THEN 5 | ALTER ROLE part_man WITH NOCREATEDB NOLOGIN NOCREATEROLE NOSUPERUSER NOREPLICATION INHERIT; 6 | GRANT part_man TO admin; 7 | ELSE 8 | CREATE ROLE part_man ADMIN admin; 9 | END IF; 10 | 11 | EXECUTE 'GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA @extschema@ TO part_man'; 12 | END;$$; 13 | GRANT USAGE ON SCHEMA @extschema@ TO part_man; 14 | GRANT ALL ON ALL TABLES IN SCHEMA @extschema@ TO part_man; 15 | GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA @extschema@ TO part_man; 16 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/pgq/after-create.sql: -------------------------------------------------------------------------------- 1 | GRANT pgq_admin, pgq_writer, pgq_reader TO admin WITH ADMIN OPTION; 2 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/postgres_backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function log 4 | { 5 | echo "$(date "+%Y-%m-%d %H:%M:%S.%3N") - $0 - $*" 6 | } 7 | 8 | [[ -z $1 ]] && echo "Usage: $0 PGDATA" && exit 1 9 | 10 | log "I was called as: $0 $*" 11 | 12 | 13 | readonly PGDATA=$1 14 | DAYS_TO_RETAIN=$BACKUP_NUM_TO_RETAIN 15 | 16 | IN_RECOVERY=$(psql -tXqAc "select pg_catalog.pg_is_in_recovery()") 17 | readonly IN_RECOVERY 18 | if [[ $IN_RECOVERY == "f" ]]; then 19 | [[ "$WALG_BACKUP_FROM_REPLICA" == "true" ]] && log "Cluster is not in recovery, not running backup" && exit 0 20 | elif [[ $IN_RECOVERY == "t" ]]; then 21 | [[ "$WALG_BACKUP_FROM_REPLICA" != "true" ]] && log "Cluster is in recovery, not running backup" && exit 0 22 | else 23 | log "ERROR: Recovery state unknown: $IN_RECOVERY" && exit 1 24 | fi 25 | 26 | # leave at least 2 days base backups before creating a new one 27 | [[ "$DAYS_TO_RETAIN" -lt 2 ]] && DAYS_TO_RETAIN=2 28 | 29 | if [[ "$USE_WALG_BACKUP" == "true" ]]; then 30 | readonly WAL_E="wal-g" 31 | [[ -z $WALG_BACKUP_COMPRESSION_METHOD ]] || export WALG_COMPRESSION_METHOD=$WALG_BACKUP_COMPRESSION_METHOD 32 | export PGHOST=/var/run/postgresql 33 | else 34 | readonly WAL_E="wal-e" 35 | 36 | # Ensure we don't have more workes than CPU's 37 | POOL_SIZE=$(grep -c ^processor /proc/cpuinfo 2>/dev/null || 1) 38 | [ "$POOL_SIZE" -gt 4 ] && 
POOL_SIZE=4 39 | POOL_SIZE=(--pool-size "$POOL_SIZE") 40 | fi 41 | 42 | BEFORE="" 43 | LEFT=0 44 | 45 | NOW=$(date +%s -u) 46 | readonly NOW 47 | while read -r name last_modified rest; do 48 | last_modified=$(date +%s -ud "$last_modified") 49 | if [ $(((NOW-last_modified)/86400)) -ge $DAYS_TO_RETAIN ]; then 50 | if [ -z "$BEFORE" ] || [ "$last_modified" -gt "$BEFORE_TIME" ]; then 51 | BEFORE_TIME=$last_modified 52 | BEFORE=$name 53 | fi 54 | else 55 | # count how many backups will remain after we remove everything up to certain date 56 | ((LEFT=LEFT+1)) 57 | fi 58 | done < <($WAL_E backup-list 2> /dev/null | sed '0,/^\(backup_\)\?name\s*\(last_\)\?modified\s*/d') 59 | 60 | # we want keep at least N backups even if the number of days exceeded 61 | if [ -n "$BEFORE" ] && [ $LEFT -ge $DAYS_TO_RETAIN ]; then 62 | if [[ "$USE_WALG_BACKUP" == "true" ]]; then 63 | $WAL_E delete before FIND_FULL "$BEFORE" --confirm 64 | else 65 | $WAL_E delete --confirm before "$BEFORE" 66 | fi 67 | fi 68 | 69 | # push a new base backup 70 | log "producing a new backup" 71 | # We reduce the priority of the backup for CPU consumption 72 | exec nice -n 5 $WAL_E backup-push "$PGDATA" "${POOL_SIZE[@]}" 73 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/postgres_fdw/after-create.sql: -------------------------------------------------------------------------------- 1 | GRANT USAGE ON FOREIGN DATA WRAPPER postgres_fdw TO admin; 2 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/renice.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -Eeuo pipefail 4 | 5 | # we don't need to use procps utils because bg_mon already "knows" all postgres processes 6 | curl -s http://localhost:8080 \ 7 | | jq '.processes[] | select( 8 | .type == "checkpointer" 9 | or .type == "archiver" 10 | or .type == "startup" 11 | or .type == "walsender" 12 | or .type == "walreceiver" 13 | ) | .pid' \ 14 | | xargs renice -n -20 -p &> /tmp/renice.log 15 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/restore_command.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ "$ENABLE_WAL_PATH_COMPAT" = "true" ]]; then 4 | unset ENABLE_WAL_PATH_COMPAT 5 | bash "$(readlink -f "${BASH_SOURCE[0]}")" "$@" 6 | exitcode=$? 
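# exit on success; otherwise fall back once to the pre-versioned WAL layout by stripping the trailing
# PostgreSQL-major-version directory from every WAL-E/WAL-G prefix and retrying with the rewritten prefixes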
7 | [[ $exitcode = 0 ]] && exit 0 8 | for wale_env in $(printenv -0 | tr '\n' ' ' | sed 's/\x00/\n/g' | sed -n 's/^\(WAL[EG]_[^=][^=]*_PREFIX\)=.*$/\1/p'); do 9 | suffix=$(basename "${!wale_env}") 10 | if [[ -x "/usr/lib/postgresql/$suffix/bin/postgres" ]]; then 11 | prefix=$(dirname "${!wale_env}") 12 | if [[ $prefix =~ /spilo/ ]] && [[ $prefix =~ /wal$ ]]; then 13 | printf -v "$wale_env" "%s" "$prefix" 14 | # shellcheck disable=SC2163 15 | export "$wale_env" 16 | changed_env=true 17 | fi 18 | fi 19 | done 20 | [[ "$changed_env" == "true" ]] || exit $exitcode 21 | fi 22 | 23 | readonly wal_filename=$1 24 | readonly wal_destination=$2 25 | 26 | [[ -z $wal_filename || -z $wal_destination ]] && exit 1 27 | 28 | wal_dir=$(dirname "$wal_destination") 29 | readonly wal_dir 30 | wal_fast_source=$(dirname "$(dirname "$(realpath "$wal_dir")")")/wal_fast/$wal_filename 31 | readonly wal_fast_source 32 | 33 | [[ -f $wal_fast_source ]] && exec mv "${wal_fast_source}" "${wal_destination}" 34 | 35 | if [[ "$wal_destination" =~ /$wal_filename$ ]]; then # Patroni fetching missing files for pg_rewind 36 | export WALG_DOWNLOAD_CONCURRENCY=1 37 | POOL_SIZE=0 38 | else 39 | POOL_SIZE=$WALG_DOWNLOAD_CONCURRENCY 40 | fi 41 | 42 | [[ "$USE_WALG_RESTORE" == "true" ]] && exec wal-g wal-fetch "${wal_filename}" "${wal_destination}" 43 | 44 | [[ $POOL_SIZE -gt 8 ]] && POOL_SIZE=8 45 | 46 | if [[ -z $WALE_S3_PREFIX ]]; then # non AWS environment? 47 | readonly wale_prefetch_source=${wal_dir}/.wal-e/prefetch/${wal_filename} 48 | if [[ -f $wale_prefetch_source ]]; then 49 | exec mv "${wale_prefetch_source}" "${wal_destination}" 50 | else 51 | exec wal-e wal-fetch -p $POOL_SIZE "${wal_filename}" "${wal_destination}" 52 | fi 53 | else 54 | exec bash /scripts/wal-e-wal-fetch.sh wal-fetch -p $POOL_SIZE "${wal_filename}" "${wal_destination}" 55 | fi 56 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/spilo_commons.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import subprocess 4 | import re 5 | import yaml 6 | 7 | logger = logging.getLogger('__name__') 8 | 9 | RW_DIR = os.environ.get('RW_DIR', '/run') 10 | PATRONI_CONFIG_FILE = os.path.join(RW_DIR, 'postgres.yml') 11 | LIB_DIR = '/usr/lib/postgresql' 12 | 13 | # (min_version, max_version, shared_preload_libraries, extwlist.extensions) 14 | extensions = { 15 | 'timescaledb': (9.6, 17, True, True), 16 | 'pg_cron': (9.5, 17, True, False), 17 | 'pg_stat_kcache': (9.4, 17, True, False), 18 | 'pg_partman': (9.4, 17, False, True) 19 | } 20 | if os.environ.get('ENABLE_PG_MON') == 'true': 21 | extensions['pg_mon'] = (11, 17, True, False) 22 | 23 | 24 | def adjust_extensions(old, version, extwlist=False): 25 | ret = [] 26 | for name in old.split(','): 27 | name = name.strip() 28 | value = extensions.get(name) 29 | if name not in ret and value is None or value[0] <= version <= value[1] and (not extwlist or value[3]): 30 | ret.append(name) 31 | return ','.join(ret) 32 | 33 | 34 | def append_extensions(old, version, extwlist=False): 35 | extwlist = 3 if extwlist else 2 36 | ret = [] 37 | 38 | def maybe_append(name): 39 | value = extensions.get(name) 40 | if name not in ret and (value is None or value[0] <= version <= value[1] and value[extwlist]): 41 | ret.append(name) 42 | 43 | for name in old.split(','): 44 | maybe_append(name.strip()) 45 | 46 | for name in extensions.keys(): 47 | maybe_append(name) 48 | 49 | return ','.join(ret) 50 | 51 | 52 | def 
get_binary_version(bin_dir): 53 | postgres = os.path.join(bin_dir or '', 'postgres') 54 | version = subprocess.check_output([postgres, '--version']).decode() 55 | version = re.match(r'^[^\s]+ [^\s]+ (\d+)(\.(\d+))?', version) 56 | return '.'.join([version.group(1), version.group(3)]) if int(version.group(1)) < 10 else version.group(1) 57 | 58 | 59 | def get_bin_dir(version): 60 | return '{0}/{1}/bin'.format(LIB_DIR, version) 61 | 62 | 63 | def is_valid_pg_version(version): 64 | bin_dir = get_bin_dir(version) 65 | postgres = os.path.join(bin_dir, 'postgres') 66 | # check that there is postgres binary inside 67 | return os.path.isfile(postgres) and os.access(postgres, os.X_OK) 68 | 69 | 70 | def write_file(config, filename, overwrite): 71 | if not overwrite and os.path.exists(filename): 72 | logger.warning('File %s already exists, not overwriting. (Use option --force if necessary)', filename) 73 | else: 74 | with open(filename, 'w') as f: 75 | logger.info('Writing to file %s', filename) 76 | f.write(config) 77 | 78 | 79 | def get_patroni_config(): 80 | with open(PATRONI_CONFIG_FILE) as f: 81 | return yaml.safe_load(f) 82 | 83 | 84 | def write_patroni_config(config, force): 85 | write_file(yaml.dump(config, default_flow_style=False, width=120), PATRONI_CONFIG_FILE, force) 86 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/test_reload_ssl.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script is called to check if the spilo or postgres configuration need to 4 | # be reloaded due to changes in TLS files. 5 | # 6 | # Usage: test_reload_ssl.sh 7 | set -euo pipefail 8 | 9 | # Directory where hashes for each SSL file are stored 10 | last_hash_dir=$1 11 | 12 | # Redirect output to a log file 13 | # exec >$last_hash_dir/test_reload_ssl.log 2>&1 14 | # NOW="$(date)" 15 | # LOGNAME="$last_hash_dir/test_reload_ssl.log."${NOW} 16 | # exec > "$LOGNAME" 2>&1 17 | 18 | # The hash command to use 19 | hash_cmd="sha256sum" 20 | 21 | ## Functions ## 22 | 23 | log() { 24 | echo "$*" >&2 25 | } 26 | 27 | has_changed() { 28 | local env=$1 29 | local src_path=${!1:-} 30 | local hash_path="$last_hash_dir/${env}.hash" 31 | local live_hash 32 | local last_hash 33 | 34 | if [[ -z "$src_path" ]]; then 35 | log "env=$env: environment is not set" 36 | return 1 37 | fi 38 | if [[ ! -e "$src_path" ]]; then 39 | log "env=$env src_path=$src_path: does not exist" 40 | return 1 41 | fi 42 | if [[ ! -e "$hash_path" ]]; then 43 | log "env=$env hash_path=$hash_path: does not exist yet" 44 | return 0 45 | fi 46 | 47 | live_hash=$($hash_cmd "$src_path") 48 | last_hash=$(cat "$hash_path") 49 | 50 | if [[ $live_hash = "$last_hash" ]]; then 51 | log "env=$env path=$src_path live_hash=$live_hash: no changes detected" 52 | return 1 53 | fi 54 | log "env=$env path=$src_path live_hash=$live_hash last_hash=$last_hash: found changes" 55 | return 0 56 | } 57 | 58 | write_hash() { 59 | local env=$1 60 | local src_path=${!1:-} 61 | local hash_path="$last_hash_dir/${env}.hash" 62 | 63 | if [[ ! 
-e "$src_path" ]]; then 64 | log "env=$env src_path=$src_path: does not exist; skipped writing hash" 65 | return 0 66 | fi 67 | 68 | $hash_cmd "$src_path" > "$hash_path" 69 | } 70 | 71 | write_hashes() { 72 | write_hash SSL_CA_FILE 73 | write_hash SSL_CRL_FILE 74 | write_hash SSL_CERTIFICATE_FILE 75 | write_hash SSL_PRIVATE_KEY_FILE 76 | } 77 | 78 | ## Main ## 79 | 80 | if 81 | has_changed SSL_CA_FILE || \ 82 | has_changed SSL_CRL_FILE || \ 83 | has_changed SSL_CERTIFICATE_FILE || \ 84 | has_changed SSL_PRIVATE_KEY_FILE 85 | then 86 | log "Reloading due to detected changes" 87 | pg_ctl reload 88 | write_hashes 89 | fi 90 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/upload_pg_log_to_s3.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | import boto3 5 | import os 6 | import logging 7 | import subprocess 8 | import sys 9 | import time 10 | 11 | from datetime import datetime, timedelta 12 | 13 | from boto3.exceptions import S3UploadFailedError 14 | from boto3.s3.transfer import TransferConfig 15 | 16 | logger = logging.getLogger(__name__) 17 | 18 | 19 | def get_file_names(): 20 | prev_interval = datetime.now() - timedelta(days=1) 21 | prev_interval_number = prev_interval.strftime('%u') 22 | upload_filename = prev_interval.strftime('%F') 23 | 24 | if os.getenv('LOG_SHIP_HOURLY') == 'true': 25 | prev_interval = datetime.now() - timedelta(hours=1) 26 | prev_interval_number = prev_interval.strftime('%u-%H') 27 | upload_filename = prev_interval.strftime('%F-%H') 28 | 29 | log_file = os.path.join(os.getenv('PGLOG'), 'postgresql-' + prev_interval_number + '.csv') 30 | archived_log_file = os.path.join(os.getenv('LOG_TMPDIR'), upload_filename + '.csv.gz') 31 | 32 | return log_file, archived_log_file 33 | 34 | 35 | def compress_pg_log(): 36 | log_file, archived_log_file = get_file_names() 37 | 38 | if os.path.getsize(log_file) == 0: 39 | logger.warning("Postgres log '%s' is empty.", log_file) 40 | sys.exit(0) 41 | 42 | try: 43 | with open(archived_log_file, 'wb') as f_out: 44 | subprocess.Popen(['gzip', '-9c', log_file], stdout=f_out).wait() 45 | except Exception: 46 | logger.exception('Failed to compress log file %s', log_file) 47 | 48 | return archived_log_file 49 | 50 | 51 | def upload_to_s3(local_file_path): 52 | # boto picks up AWS credentials automatically when run within a EC2 instance 53 | s3 = boto3.resource( 54 | service_name="s3", 55 | endpoint_url=os.getenv('LOG_S3_ENDPOINT'), 56 | region_name=os.getenv('LOG_AWS_REGION') 57 | ) 58 | 59 | bucket_name = os.getenv('LOG_S3_BUCKET') 60 | bucket = s3.Bucket(bucket_name) 61 | 62 | key_name = os.path.join(os.getenv('LOG_S3_KEY'), os.path.basename(local_file_path)) 63 | if os.getenv('LOG_GROUP_BY_DATE'): 64 | key_name = key_name.format(**{'DATE': os.path.basename(local_file_path).split('.')[0]}) 65 | 66 | chunk_size = 52428800 # 50 MiB 67 | config = TransferConfig(multipart_threshold=chunk_size, multipart_chunksize=chunk_size) 68 | 69 | try: 70 | bucket.upload_file(local_file_path, key_name, Config=config, ExtraArgs={'Tagging': os.getenv('LOG_S3_TAGS')}) 71 | except S3UploadFailedError as e: 72 | logger.exception('Failed to upload the %s to the bucket %s under the key %s. 
Exception: %r', 73 | local_file_path, bucket_name, key_name, e) 74 | return False 75 | 76 | return True 77 | 78 | 79 | def main(): 80 | max_retries = 3 81 | compressed_log = compress_pg_log() 82 | 83 | for _ in range(max_retries): 84 | if upload_to_s3(compressed_log): 85 | return os.unlink(compressed_log) 86 | time.sleep(10) 87 | 88 | logger.warning('Upload of the compressed log file %s failed after %s attempts.', compressed_log, max_retries) 89 | sys.exit(1) 90 | 91 | 92 | if __name__ == '__main__': 93 | main() 94 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/wal-e-wal-fetch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | date 5 | 6 | prefetch=8 7 | 8 | function load_aws_instance_profile() { 9 | local CREDENTIALS_URL=http://169.254.169.254/latest/meta-data/iam/security-credentials/ 10 | local INSTANCE_PROFILE 11 | INSTANCE_PROFILE=$(curl -s "$CREDENTIALS_URL") 12 | # shellcheck source=/dev/null 13 | source <(curl -s "$CREDENTIALS_URL$INSTANCE_PROFILE" | jq -r '"AWS_SECURITY_TOKEN=\"" + .Token + "\"\nAWS_SECRET_ACCESS_KEY=\"" + .SecretAccessKey + "\"\nAWS_ACCESS_KEY_ID=\"" + .AccessKeyId + "\""') 14 | } 15 | 16 | function load_region_from_aws_instance_profile() { 17 | local AZ 18 | AZ=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone) 19 | AWS_REGION=${AZ:0:-1} 20 | } 21 | 22 | function usage() { 23 | echo "Usage: $0 wal-fetch [--prefetch PREFETCH] WAL_SEGMENT WAL_DESTINATION" 24 | exit 1 25 | } 26 | 27 | while [[ $# -gt 0 ]]; do 28 | case $1 in 29 | --s3-prefix ) 30 | WALE_S3_PREFIX=$2 31 | shift 32 | ;; 33 | -k|--aws-access-key-id ) 34 | AWS_ACCESS_KEY_ID=$2 35 | shift 36 | ;; 37 | --aws-instance-profile ) 38 | AWS_INSTANCE_PROFILE=true 39 | ;; 40 | wal-fetch ) 41 | ;; 42 | -p|--prefetch ) 43 | prefetch=$2 44 | shift 45 | ;; 46 | * ) 47 | PARAMS+=("$1") 48 | ;; 49 | esac 50 | shift 51 | done 52 | 53 | [[ ${#PARAMS[@]} == 2 ]] || usage 54 | 55 | [[ "$AWS_INSTANCE_PROFILE" == "true" ]] && load_aws_instance_profile 56 | 57 | if [[ -z $AWS_SECRET_ACCESS_KEY || -z $AWS_ACCESS_KEY_ID || -z $WALE_S3_PREFIX ]]; then 58 | echo bad environment 59 | exit 1 60 | fi 61 | 62 | readonly SEGMENT=${PARAMS[-2]} 63 | readonly DESTINATION=${PARAMS[-1]} 64 | 65 | if [[ $WALE_S3_PREFIX =~ ^s3://([^\/]+)(.+) ]]; then 66 | readonly BUCKET=${BASH_REMATCH[1]} 67 | BUCKET_PATH=${BASH_REMATCH[2]} 68 | readonly BUCKET_PATH=${BUCKET_PATH%/} 69 | else 70 | echo bad WALE_S3_PREFIX 71 | exit 1 72 | fi 73 | 74 | if [[ -n $WALE_S3_ENDPOINT && $WALE_S3_ENDPOINT =~ ^([a-z\+]{2,10}://)?([^:\/?]+) ]]; then 75 | S3_HOST=${BASH_REMATCH[2]} 76 | fi 77 | 78 | if [[ -z $AWS_REGION ]]; then 79 | if [[ -n $WALE_S3_ENDPOINT && $WALE_S3_ENDPOINT =~ ^([a-z\+]{2,10}://)?s3-([^\.]+) ]]; then 80 | AWS_REGION=${BASH_REMATCH[2]} 81 | elif [[ "$AWS_INSTANCE_PROFILE" == "true" ]]; then 82 | load_region_from_aws_instance_profile 83 | fi 84 | fi 85 | 86 | if [[ -z $AWS_REGION ]]; then 87 | echo AWS_REGION is unknown 88 | exit 1 89 | fi 90 | 91 | if [[ -z $S3_HOST ]]; then 92 | S3_HOST=s3.$AWS_REGION.amazonaws.com 93 | fi 94 | 95 | readonly SERVICE=s3 96 | readonly REQUEST=aws4_request 97 | readonly HOST=$BUCKET.$S3_HOST 98 | TIME=$(date +%Y%m%dT%H%M%SZ) 99 | readonly TIME 100 | readonly DATE=${TIME%T*} 101 | readonly DRSR="$DATE/$AWS_REGION/$SERVICE/$REQUEST" 102 | readonly EMPTYHASH=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 103 | 104 | function hmac_sha256() 
{ 105 | echo -en "$2" | openssl dgst -sha256 -mac HMAC -macopt "$1" | sed 's/^.* //' 106 | } 107 | 108 | # Four-step signing key calculation 109 | DATE_KEY=$(hmac_sha256 key:"AWS4$AWS_SECRET_ACCESS_KEY" "$DATE") 110 | readonly DATE_KEY 111 | DATE_REGION_KEY=$(hmac_sha256 "hexkey:$DATE_KEY" "$AWS_REGION") 112 | readonly DATE_REGION_KEY 113 | DATE_REGION_SERVICE_KEY=$(hmac_sha256 "hexkey:$DATE_REGION_KEY" "$SERVICE") 114 | readonly DATE_REGION_SERVICE_KEY 115 | SIGNING_KEY=$(hmac_sha256 "hexkey:$DATE_REGION_SERVICE_KEY" "$REQUEST") 116 | readonly SIGNING_KEY 117 | 118 | if [[ -z $AWS_INSTANCE_PROFILE ]]; then 119 | readonly SIGNED_HEADERS="host;x-amz-content-sha256;x-amz-date" 120 | readonly REQUEST_TOKEN="" 121 | readonly TOKEN_HEADER=() 122 | else 123 | readonly SIGNED_HEADERS="host;x-amz-content-sha256;x-amz-date;x-amz-security-token" 124 | readonly REQUEST_TOKEN="x-amz-security-token:$AWS_SECURITY_TOKEN\n" 125 | readonly TOKEN_HEADER=(-H "x-amz-security-token: $AWS_SECURITY_TOKEN") 126 | fi 127 | 128 | function s3_get() { 129 | local segment=$1 130 | local destination=$2 131 | local FILE=$BUCKET_PATH/wal_005/$segment.lzo 132 | local CANONICAL_REQUEST="GET\n$FILE\n\nhost:$HOST\nx-amz-content-sha256:$EMPTYHASH\nx-amz-date:$TIME\n$REQUEST_TOKEN\n$SIGNED_HEADERS\n$EMPTYHASH" 133 | local CANONICAL_REQUEST_HASH 134 | CANONICAL_REQUEST_HASH=$(echo -en "$CANONICAL_REQUEST" | openssl dgst -sha256 | sed 's/^.* //') 135 | local STRING_TO_SIGN="AWS4-HMAC-SHA256\n$TIME\n$DRSR\n$CANONICAL_REQUEST_HASH" 136 | local SIGNATURE 137 | SIGNATURE=$(hmac_sha256 "hexkey:$SIGNING_KEY" "$STRING_TO_SIGN") 138 | 139 | if curl -s "https://$HOST$FILE" "${TOKEN_HEADER[@]}" -H "x-amz-content-sha256: $EMPTYHASH" -H "x-amz-date: $TIME" \ 140 | -H "Authorization: AWS4-HMAC-SHA256 Credential=$AWS_ACCESS_KEY_ID/$DRSR, SignedHeaders=$SIGNED_HEADERS, Signature=$SIGNATURE" \ 141 | | lzop -dc > "$destination" 2> /dev/null && [[ ${PIPESTATUS[0]} == 0 ]]; then 142 | [[ -s $destination ]] && echo "$$ success $FILE" && return 0 143 | fi 144 | rm -f "$destination" 145 | echo "$$ failed $FILE" 146 | return 1 147 | } 148 | 149 | function generate_next_segments() { 150 | local num=$1 151 | 152 | local timeline=${SEGMENT:0:8} 153 | local log=$((16#${SEGMENT:8:8})) 154 | local seg=$((16#${SEGMENT:16:8})) 155 | 156 | while [[ $((num--)) -gt 0 ]]; do 157 | seg=$((seg+1)) 158 | printf "%s%08X%08X\n" "$timeline" $((log+seg/256)) $((seg%256)) 159 | done 160 | } 161 | 162 | function clear_except() { 163 | set +e 164 | for dir in "$PREFETCHDIR"/running/0*; do 165 | item=$(basename "$dir") 166 | if [[ $item =~ ^[0-9A-F]{24}$ ]]; then 167 | [[ " ${PREFETCHES[*]} " =~ \ $item\ ]] || rm -fr "$dir" 168 | fi 169 | done 170 | 171 | for file in "$PREFETCHDIR"/0*; do 172 | item=$(basename "$file") 173 | if [[ $item =~ ^[0-9A-F]{24}$ ]]; then 174 | [[ " ${PREFETCHES[*]} " =~ \ $item\ ]] || rm -f "$file" 175 | fi 176 | done 177 | set -e 178 | return 0 179 | } 180 | 181 | function try_to_promote_prefetched() { 182 | local prefetched=$PREFETCHDIR/$SEGMENT 183 | [[ -f $prefetched ]] || return 1 184 | echo "$$ promoting $prefetched" 185 | mv "$prefetched" "$DESTINATION" && clear_except && exit 0 186 | } 187 | 188 | echo "$$ $SEGMENT" 189 | 190 | PREFETCHDIR=$(dirname "$DESTINATION")/.wal-e/prefetch 191 | readonly PREFETCHDIR 192 | if [[ $prefetch -gt 0 && $SEGMENT =~ ^[0-9A-F]{24}$ ]]; then 193 | mapfile -t PREFETCHES < <(generate_next_segments "$prefetch") 194 | readonly PREFETCHES 195 | for segment in "${PREFETCHES[@]}"; do 196 | 
running="$PREFETCHDIR/running/$segment" 197 | [[ -d $running || -f $PREFETCHDIR/$segment ]] && continue 198 | 199 | mkdir -p "$running" 200 | ( 201 | trap 'rm -fr $running' QUIT TERM EXIT 202 | TMPFILE=$(mktemp -p "$running") 203 | echo "$$ prefetching $segment" 204 | s3_get "$segment" "$TMPFILE" && mv "$TMPFILE" "$PREFETCHDIR/$segment" 205 | ) & 206 | done 207 | 208 | last_size=0 209 | while ! try_to_promote_prefetched; do 210 | size=$(du -bs "$PREFETCHDIR/running/$SEGMENT" 2> /dev/null | cut -f1) 211 | if [[ -z $size ]]; then 212 | try_to_promote_prefetched || break 213 | elif [[ $size > $last_size ]]; then 214 | echo "($size > $last_size), sleeping 1" 215 | last_size=$size 216 | sleep 1 217 | else 218 | echo "size=$size, last_size=$last_size" 219 | break 220 | fi 221 | done 222 | clear_except 223 | fi 224 | 225 | s3_get "$SEGMENT" "$DESTINATION" 226 | -------------------------------------------------------------------------------- /postgres-appliance/scripts/wale_restore.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | RETRIES=2 4 | THRESHOLD_PERCENTAGE=30 5 | THRESHOLD_MEGABYTES=10240 6 | 7 | export PGOPTIONS="-c search_path=pg_catalog" 8 | 9 | while getopts ":-:" optchar; do 10 | [[ "${optchar}" == "-" ]] || continue 11 | case "${OPTARG}" in 12 | datadir=* ) 13 | DATA_DIR=${OPTARG#*=} 14 | ;; 15 | connstring=* ) 16 | CONNSTR="${OPTARG#*=}" 17 | ;; 18 | retries=* ) 19 | RETRIES=${OPTARG#*=} 20 | ;; 21 | threshold_backup_size_percentage=*|threshold-backup-size-percentage=* ) 22 | THRESHOLD_PERCENTAGE=${OPTARG#*=} 23 | ;; 24 | threshold_megabytes=*|threshold-megabytes=* ) 25 | THRESHOLD_MEGABYTES=${OPTARG#*=} 26 | ;; 27 | no_leader=*|no-master=* ) 28 | NO_MASTER=${OPTARG#*=} 29 | ;; 30 | esac 31 | done 32 | 33 | [[ -z $DATA_DIR ]] && exit 1 34 | [[ -z $NO_MASTER && -z "$CONNSTR" ]] && exit 1 35 | 36 | if [[ "$USE_WALG_RESTORE" == "true" ]]; then 37 | readonly WAL_E="wal-g" 38 | else 39 | readonly WAL_E="wal-e" 40 | fi 41 | 42 | ATTEMPT=0 43 | server_version="-1" 44 | while true; do 45 | [[ -z $wal_segment_backup_start ]] && wal_segment_backup_start=$($WAL_E backup-list 2> /dev/null \ 46 | | sed '0,/^\(backup_\)\?name\s*\(last_\)\?modified\s*/d' | sort -bk2 | tail -n1 | awk '{print $3;}' | sed 's/_.*$//') 47 | 48 | [[ -n "$CONNSTR" && $server_version == "-1" ]] && server_version=$(psql -d "$CONNSTR" -tAc 'show server_version_num' 2> /dev/null || echo "-1") 49 | 50 | [[ -n $wal_segment_backup_start && ( -z "$CONNSTR" || $server_version != "-1") ]] && break 51 | [[ $((ATTEMPT++)) -ge $RETRIES ]] && break 52 | sleep 1 53 | done 54 | 55 | [[ -z $wal_segment_backup_start ]] && echo "Can not find any backups" && exit 1 56 | 57 | [[ -z $NO_MASTER && $server_version == "-1" ]] && echo "Failed to reach master" && exit 1 58 | 59 | if [[ $server_version != "-1" ]]; then 60 | readonly lsn_segment=$((16#${wal_segment_backup_start:8:8})) 61 | readonly lsn_offset=$((16#${wal_segment_backup_start:16:8})) 62 | printf -v backup_start_lsn "%X/%X" $lsn_segment $((lsn_offset << 24)) 63 | 64 | readonly query="SELECT CASE WHEN pg_is_in_recovery() THEN GREATEST(pg_wal_lsn_diff(COALESCE(pg_last_wal_receive_lsn(), '0/0'), '$backup_start_lsn')::bigint, pg_wal_lsn_diff(pg_last_wal_replay_lsn(), '$backup_start_lsn')::bigint) ELSE pg_wal_lsn_diff(pg_current_wal_lsn(), '$backup_start_lsn')::bigint END" 65 | 66 | ATTEMPT=0 67 | while true; do 68 | [[ -z $diff_in_bytes ]] && diff_in_bytes=$(psql -d "$CONNSTR" -tAc "$query") 69 | [[ -z $cluster_size ]] && 
cluster_size=$(psql -d "$CONNSTR" -tAc "SELECT SUM(pg_catalog.pg_database_size(datname)) FROM pg_catalog.pg_database") 70 | [[ -n $diff_in_bytes && -n $cluster_size ]] && break 71 | [[ $((ATTEMPT++)) -ge $RETRIES ]] && break 72 | sleep 1 73 | done 74 | [[ -z $diff_in_bytes || -z $cluster_size ]] && echo "could not determine difference with the master location" && exit 1 75 | 76 | echo "Current cluster size: $cluster_size" 77 | echo "Wals generated since the last backup: $diff_in_bytes" 78 | 79 | [[ $diff_in_bytes -gt $((THRESHOLD_MEGABYTES*1048576)) ]] && echo "not restoring from backup because of amount of generated wals exceeds ${THRESHOLD_MEGABYTES}MB" && exit 1 80 | 81 | readonly threshold_bytes=$((cluster_size*THRESHOLD_PERCENTAGE/100)) 82 | [[ $threshold_bytes -lt $diff_in_bytes ]] && echo "not restoring from backup because of amount of generated wals exceeds $THRESHOLD_PERCENTAGE% of cluster_size" && exit 1 83 | fi 84 | 85 | ATTEMPT=0 86 | while true; do 87 | if $WAL_E backup-fetch "$DATA_DIR" LATEST; then 88 | version=$(<"$DATA_DIR/PG_VERSION") 89 | [[ "$version" =~ \. ]] && wal_name=xlog || wal_name=wal 90 | readonly wal_dir=$DATA_DIR/pg_$wal_name 91 | [[ ! -d $wal_dir ]] && rm -f "$wal_dir" && mkdir "$wal_dir" 92 | # remove broken symlinks from PGDATA 93 | find "$DATA_DIR" -xtype l -delete 94 | exit 0 95 | fi 96 | [[ $((ATTEMPT++)) -ge $RETRIES ]] && break 97 | rm -fr "$DATA_DIR" 98 | sleep 1 99 | done 100 | 101 | exit 1 102 | -------------------------------------------------------------------------------- /postgres-appliance/spilok8s.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1alpha1 2 | kind: PetSet 3 | metadata: 4 | name: &cluster_name spilodemo 5 | labels: 6 | application: spilo 7 | spilo-cluster: *cluster_name 8 | spec: 9 | replicas: 3 10 | serviceName: *cluster_name 11 | template: 12 | metadata: 13 | labels: 14 | application: spilo 15 | spilo-cluster: *cluster_name 16 | annotations: 17 | pod.alpha.kubernetes.io/initialized: "true" 18 | spec: 19 | containers: 20 | - name: *cluster_name 21 | image: registry.opensource.zalan.do/acid/spilotest-9.6:1.1-p10 # put the spilo image here 22 | imagePullPolicy: Always 23 | ports: 24 | - containerPort: 8008 25 | protocol: TCP 26 | - containerPort: 5432 27 | protocol: TCP 28 | volumeMounts: 29 | - mountPath: /home/postgres/pgdata 30 | name: pgdata 31 | env: 32 | - name: ETCD_HOST 33 | value: 'etcd.default.svc.cluster.local:2379' # where is your etcd? 
34 | - name: POD_IP 35 | valueFrom: 36 | fieldRef: 37 | apiVersion: v1 38 | fieldPath: status.podIP 39 | - name: POD_NAMESPACE 40 | valueFrom: 41 | fieldRef: 42 | apiVersion: v1 43 | fieldPath: metadata.namespace 44 | - name: PGPASSWORD_SUPERUSER 45 | valueFrom: 46 | secretKeyRef: 47 | name: *cluster_name 48 | key: superuser-password 49 | - name: PGPASSWORD_ADMIN 50 | valueFrom: 51 | secretKeyRef: 52 | name: *cluster_name 53 | key: admin-password 54 | - name: PGPASSWORD_STANDBY 55 | valueFrom: 56 | secretKeyRef: 57 | name: *cluster_name 58 | key: replication-password 59 | - name: SCOPE 60 | value: *cluster_name 61 | - name: PGROOT 62 | value: /home/postgres/pgdata/pgroot 63 | terminationGracePeriodSeconds: 0 64 | volumes: 65 | - name: pgdata 66 | emptyDir: {} 67 | # volumeClaimTemplates: 68 | # - metadata: 69 | # labels: 70 | # application: spilo 71 | # spilo-cluster: *cluster_name 72 | # annotations: 73 | # volume.alpha.kubernetes.io/storage-class: anything 74 | # name: pgdata 75 | # spec: 76 | # accessModes: 77 | # - ReadWriteOnce 78 | # resources: 79 | # requests: 80 | # storage: 5Gi 81 | 82 | --- 83 | apiVersion: v1 84 | kind: Endpoints 85 | metadata: 86 | name: &cluster_name spilodemo 87 | labels: 88 | application: spilo 89 | spilo-cluster: *cluster_name 90 | subsets: [] 91 | 92 | --- 93 | apiVersion: v1 94 | kind: Service 95 | metadata: 96 | name: &cluster_name spilodemo 97 | labels: 98 | application: spilo 99 | spilo-cluster: *cluster_name 100 | spec: 101 | type: ClusterIP 102 | ports: 103 | - port: 5432 104 | targetPort: 5432 105 | 106 | --- 107 | apiVersion: v1 108 | kind: Secret 109 | metadata: 110 | name: &cluster_name spilodemo 111 | labels: 112 | application: spilo 113 | spilo-cluster: *cluster_name 114 | type: Opaque 115 | data: 116 | superuser-password: emFsYW5kbw== 117 | replication-password: cmVwLXBhc3M= 118 | admin-password: YWRtaW4= 119 | -------------------------------------------------------------------------------- /postgres-appliance/tests/README.md: -------------------------------------------------------------------------------- 1 | # Run tests 2 | 3 | After building the image, you can test your image by: 4 | 5 | 1. Setting up the environment variable `SPILO_TEST_IMAGE` to test the specific image. If unset, the default will be `spilo`. 6 | ``` 7 | export SPILO_TEST_IMAGE= 8 | ``` 9 | 2. Run the test: 10 | ``` 11 | bash test_spilo.sh 12 | ``` 13 | To enable debugging for an entire script when it runs: 14 | ``` 15 | bash -x test_spilo.sh 16 | ``` 17 | 18 | The test will create multiple containers. They will be cleaned up by the last line before running `main` in `test_spilo.sh`. To keep and debug the containers after running the test, this part can be commented. 
19 | ``` 20 | trap cleanup QUIT TERM EXIT 21 | ``` 22 | -------------------------------------------------------------------------------- /postgres-appliance/tests/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | networks: 4 | demo: 5 | 6 | services: 7 | minio: 8 | image: minio/minio:RELEASE.2021-09-03T03-56-13Z 9 | networks: [ demo ] 10 | environment: 11 | MINIO_ACCESS_KEY: &access_key Eeghei0uVej1Wea8mato 12 | MINIO_SECRET_KEY: &secret_key lecheidohbah7aThohziezah3iev7ima4eeXu9gu 13 | hostname: minio 14 | container_name: demo-minio 15 | entrypoint: sh 16 | command: -c 'mkdir -p /export/testbucket && /usr/bin/minio server /export' 17 | 18 | etcd: 19 | image: ${SPILO_TEST_IMAGE:-spilo} 20 | networks: [ demo ] 21 | container_name: demo-etcd 22 | hostname: etcd 23 | command: "sh -c 'export ETCD_UNSUPPORTED_ARCH=$$(dpkg --print-architecture) && exec etcd -name etcd1 -listen-client-urls http://0.0.0.0:2379 -advertise-client-urls http://$$(hostname --ip-address):2379'" 24 | 25 | spilo1: &spilo 26 | depends_on: [ minio ] 27 | image: ${SPILO_TEST_IMAGE:-spilo} 28 | networks: [ demo ] 29 | environment: 30 | SPILO_PROVIDER: 'local' 31 | AWS_ACCESS_KEY_ID: *access_key 32 | AWS_SECRET_ACCESS_KEY: *secret_key 33 | AWS_ENDPOINT: &aws_endpoint 'http://minio:9000' 34 | AWS_S3_FORCE_PATH_STYLE: &aws_s3_force_path_style 'true' 35 | WAL_S3_BUCKET: &bucket testbucket 36 | # USE_WALG: 'true' # wal-e is used and tested by default, wal-g is used automatically for restore in case of S3 37 | WALE_DISABLE_S3_SSE: &wale_disable_s3_sse 'true' 38 | ETCDCTL_ENDPOINTS: http://etcd:2379 39 | ETCD3_HOST: "etcd:2379" 40 | SCOPE: demo 41 | ENABLE_PG_MON: 'true' 42 | SPILO_CONFIGURATION: | 43 | bootstrap: 44 | dcs: 45 | loop_wait: 2 46 | postgresql: 47 | parameters: 48 | wal_decode_buffer_size: '521kB' 49 | wal_keep_segments: 8 50 | jit: 'off' 51 | postgresql: 52 | parameters: 53 | shared_buffers: 32MB 54 | PGVERSION: '13' 55 | # Just to test upgrade with clone. 
Without CLONE_SCOPE they don't work 56 | CLONE_WAL_S3_BUCKET: *bucket 57 | CLONE_AWS_ACCESS_KEY_ID: *access_key 58 | CLONE_AWS_SECRET_ACCESS_KEY: *secret_key 59 | CLONE_AWS_ENDPOINT: *aws_endpoint 60 | CLONE_AWS_S3_FORCE_PATH_STYLE: *aws_s3_force_path_style 61 | CLONE_WALE_DISABLE_S3_SSE: *wale_disable_s3_sse 62 | hostname: spilo1 63 | container_name: demo-spilo1 64 | 65 | spilo2: 66 | <<: *spilo 67 | hostname: spilo2 68 | container_name: demo-spilo2 69 | 70 | spilo3: 71 | <<: *spilo 72 | hostname: spilo3 73 | container_name: demo-spilo3 74 | -------------------------------------------------------------------------------- /postgres-appliance/tests/locales_test/generate_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | cd "$(dirname "${BASH_SOURCE[0]}")" || exit 1 6 | 7 | readonly container=$1 8 | readonly output_file=$2 9 | 10 | 11 | function generate_data() { 12 | docker_exec "$container" $'cd $PGDATA; 13 | rm -rf locales_test; mkdir locales_test; cd locales_test; 14 | /bin/bash "/home/postgres/tests/helper_script.sh"; 15 | truncate -s -1 _base-characters \ 16 | && psql -c "insert into chars select regexp_split_to_table(pg_read_file(\'locales_test/_base-characters\')::text, E\'\\n\');" 17 | ' 18 | } 19 | 20 | # Create an auxiliary table 21 | docker_exec "$container" "psql -d postgres -c 'drop table if exists chars; create table chars(chr text);'" 22 | 23 | # Insert data into the auxiliary table 24 | generate_data 25 | 26 | # Write sorted data to an output file 27 | docker_exec "$container" "psql -c '\copy (select * from chars order by 1) to ${output_file}'" 28 | -------------------------------------------------------------------------------- /postgres-appliance/tests/locales_test/helper_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Script is copied from https://github.com/ardentperf/glibc-unicode-sorting 4 | 5 | UNICODE_VERS="14" 6 | curl -kO https://www.unicode.org/Public/${UNICODE_VERS}.0.0/ucd/UnicodeData.txt 7 | 8 | perl -naF';' -CO -e' 9 | use utf8; 10 | sub pr { 11 | print chr($_[0]) . "\n"; # 199 12 | print chr($_[0]) . "B\n"; # 200 13 | print chr($_[0]) . "O\n"; # 201 14 | print chr($_[0]) . "3\n"; # 202 15 | print chr($_[0]) . ".\n"; # 203 16 | print chr($_[0]) . " \n"; # 204 17 | print chr($_[0]) . "様\n"; # 205 18 | print chr($_[0]) . "ク\n"; # 206 19 | print "B" . chr($_[0]) . "\n"; # 210 20 | print "O" . chr($_[0]) . "\n"; # 211 21 | print "3" . chr($_[0]) . "\n"; # 212 22 | print "." . chr($_[0]) . "\n"; # 213 23 | print " " . chr($_[0]) . "\n"; # 214 24 | print "様" . chr($_[0]) . "\n"; # 215 25 | print "ク" . chr($_[0]) . "\n"; # 216 26 | print chr($_[0]) . chr($_[0]) . "\n"; # 299 27 | print chr($_[0]) . "BB\n"; # 300 28 | print chr($_[0]) . "OO\n"; # 301 29 | print chr($_[0]) . "33\n"; # 302 30 | print chr($_[0]) . "..\n"; # 303 31 | print chr($_[0]) . " \n"; # 304 32 | print chr($_[0]) . "様様\n"; # 305 33 | print chr($_[0]) . "クク\n"; # 306 34 | print "B" . chr($_[0]) . "B\n"; # 310 35 | print "O" . chr($_[0]) . "O\n"; # 311 36 | print "3" . chr($_[0]) . "3\n"; # 312 37 | print "." . chr($_[0]) . ".\n"; # 313 38 | print " " . chr($_[0]) . " \n"; # 314 39 | print "様" . chr($_[0]) . "様\n"; # 315 40 | print "ク" . chr($_[0]) . "ク\n"; # 316 41 | print "BB" . chr($_[0]) . "\n"; # 320 42 | print "OO" . chr($_[0]) . "\n"; # 321 43 | print "33" . chr($_[0]) . "\n"; # 322 44 | print ".." . chr($_[0]) . "\n"; # 323 45 | print " " . 
chr($_[0]) . "\n"; # 324 46 | print "様様" . chr($_[0]) . "\n"; # 325 47 | print "クク" . chr($_[0]) . "\n"; # 326 48 | print chr($_[0]) . chr($_[0]) . "B\n"; # 330 49 | print chr($_[0]) . chr($_[0]) . "O\n"; # 331 50 | print chr($_[0]) . chr($_[0]) . "3\n"; # 332 51 | print chr($_[0]) . chr($_[0]) . ".\n"; # 333 52 | print chr($_[0]) . chr($_[0]) . " \n"; # 334 53 | print chr($_[0]) . chr($_[0]) . "様\n"; # 335 54 | print chr($_[0]) . chr($_[0]) . "ク\n"; # 336 55 | print chr($_[0]) . "B" . chr($_[0]) . "\n"; # 340 56 | print chr($_[0]) . "O" . chr($_[0]) . "\n"; # 341 57 | print chr($_[0]) . "3" . chr($_[0]) . "\n"; # 342 58 | print chr($_[0]) . "." . chr($_[0]) . "\n"; # 343 59 | print chr($_[0]) . " " . chr($_[0]) . "\n"; # 344 60 | print chr($_[0]) . "様" . chr($_[0]) . "\n"; # 345 61 | print chr($_[0]) . "ク" . chr($_[0]) . "\n"; # 346 62 | print "B" . chr($_[0]) . chr($_[0]) . "\n"; # 350 63 | print "O" . chr($_[0]) . chr($_[0]) . "\n"; # 351 64 | print "3" . chr($_[0]) . chr($_[0]) . "\n"; # 352 65 | print "." . chr($_[0]) . chr($_[0]) . "\n"; # 353 66 | print " " . chr($_[0]) . chr($_[0]) . "\n"; # 354 67 | print "様" . chr($_[0]) . chr($_[0]) . "\n"; # 355 68 | print "ク" . chr($_[0]) . chr($_[0]) . "\n"; # 356 69 | print "3B" . chr($_[0]) . "\n"; # 380 70 | print chr($_[0]) . chr($_[0]) . chr($_[0]) . "\n"; # 399 71 | print chr($_[0]) . chr($_[0]) . "BB\n"; # 400 72 | print chr($_[0]) . chr($_[0]) . "OO\n"; # 401 73 | print chr($_[0]) . chr($_[0]) . "33\n"; # 402 74 | print chr($_[0]) . chr($_[0]) . "..\n"; # 403 75 | print chr($_[0]) . chr($_[0]) . " \n"; # 404 76 | print chr($_[0]) . chr($_[0]) . "様様\n"; # 405 77 | print chr($_[0]) . chr($_[0]) . "クク\n"; # 406 78 | print "B" . chr($_[0]) . chr($_[0]) . "B\n"; # 410 79 | print "O" . chr($_[0]) . chr($_[0]) . "O\n"; # 411 80 | print "3" . chr($_[0]) . chr($_[0]) . "3\n"; # 412 81 | print "." . chr($_[0]) . chr($_[0]) . ".\n"; # 413 82 | print " " . chr($_[0]) . chr($_[0]) . " \n"; # 414 83 | print "様" . chr($_[0]) . chr($_[0]) . "様\n"; # 415 84 | print "ク" . chr($_[0]) . chr($_[0]) . "ク\n"; # 416 85 | print "BB" . chr($_[0]) . chr($_[0]) . "\n"; # 420 86 | print "OO" . chr($_[0]) . chr($_[0]) . "\n"; # 421 87 | print "33" . chr($_[0]) . chr($_[0]) . "\n"; # 422 88 | print ".." . chr($_[0]) . chr($_[0]) . "\n"; # 423 89 | print " " . chr($_[0]) . chr($_[0]) . "\n"; # 424 90 | print "様様" . chr($_[0]) . chr($_[0]) . "\n"; # 425 91 | print "クク" . chr($_[0]) . chr($_[0]) . "\n"; # 426 92 | print "3B" . chr($_[0]) . "B\n"; # 480 93 | print "3B-" . chr($_[0]) . "\n"; # 481 94 | print chr($_[0]) . chr($_[0]) . chr($_[0]) . chr($_[0]) . "\n"; # 499 95 | print "BB" . chr($_[0]) . chr($_[0]) . "\t\n"; # 580 96 | print "\tBB" . chr($_[0]) . chr($_[0]) . "\n"; # 581 97 | print "BB-" . chr($_[0]) . chr($_[0]) . "\n"; # 582 98 | print "🙂👍" . chr($_[0]) . "❤™\n"; # 583 99 | print chr($_[0]) . chr($_[0]) . ".33\n"; # 584 100 | print "3B-" . chr($_[0]) . "B\n"; # 585 101 | print chr($_[0]) . chr($_[0]) . chr($_[0]) . chr($_[0]) . chr($_[0]) . 
"\n"; # 599 102 | } 103 | if(//){next}; # skip control characters 104 | if($F[2] eq "Cs"){next}; # skip surrogates 105 | if(/ First>/){$fi=hex("0x".$F[0]);next}; # generate blocks 106 | if(/ Last>/){$la=hex("0x".$F[0]);for($fi..$la){pr($_)};next}; 107 | pr(hex("0x".$F[0])) # generate individual characters 108 | ' UnicodeData.txt > _base-characters 109 | -------------------------------------------------------------------------------- /postgres-appliance/tests/locales_test/test_locales.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd "$(dirname "${BASH_SOURCE[0]}")" || exit 1 3 | 4 | # shellcheck disable=SC1091 5 | source ../test_utils.sh 6 | 7 | TEST_CONTAINER_NAME='spilo-test' 8 | TEST_IMAGE=( 9 | 'registry.opensource.zalan.do/acid/spilo-cdp-14' 10 | 'spilo' 11 | ) 12 | 13 | function main() { 14 | for i in 0 1; do 15 | rm_container "$TEST_CONTAINER_NAME" 16 | docker run --rm -d --privileged \ 17 | --name "$TEST_CONTAINER_NAME" \ 18 | -v "$PWD":/home/postgres/tests \ 19 | -e SPILO_PROVIDER=local -e USE_OLD_LOCALES=true \ 20 | "${TEST_IMAGE[$i]}" #USE_OLD_LOCALES takes no effect for cdp-14 21 | attempts=0 22 | while ! docker exec -i spilo-test su postgres -c "pg_isready"; do 23 | if [[ "$attempts" -ge 15 ]]; then 24 | docker logs "$TEST_CONTAINER_NAME" 25 | exit 1 26 | fi 27 | ((attempts++)) 28 | sleep 1 29 | done 30 | /bin/bash -x ./generate_data.sh "$TEST_CONTAINER_NAME" "/home/postgres/output${i}.txt" 31 | docker exec "$TEST_CONTAINER_NAME" mv "/home/postgres/output${i}.txt" "/home/postgres/tests" 32 | done 33 | 34 | diff -u output0.txt output1.txt > /dev/null || (echo "Outputs are different!" && exit 1) 35 | rm -f output0.txt output1.txt 36 | } 37 | 38 | trap 'rm_container $TEST_CONTAINER_NAME' QUIT TERM EXIT 39 | 40 | main 41 | -------------------------------------------------------------------------------- /postgres-appliance/tests/schema.sql: -------------------------------------------------------------------------------- 1 | CREATE EXTENSION pg_repack; /* the upgrade script must delete it before running pg_upgrade --check! */ 2 | 3 | CREATE DATABASE test_db; 4 | \c test_db 5 | 6 | CREATE UNLOGGED TABLE "bAr" ("bUz" INTEGER); 7 | ALTER TABLE "bAr" ALTER COLUMN "bUz" SET STATISTICS 500; 8 | INSERT INTO "bAr" SELECT * FROM generate_series(1, 100000); 9 | -------------------------------------------------------------------------------- /postgres-appliance/tests/test_utils.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if ! 
docker info &> /dev/null; then 4 | if podman info &> /dev/null; then 5 | alias docker=podman 6 | alias xargs='xargs ' # allows '| xargs docker' 7 | shopt -s expand_aliases 8 | else 9 | echo "docker/podman: command not found" 10 | exit 1 11 | fi 12 | fi 13 | 14 | set -a 15 | 16 | if [[ -t 2 ]]; then 17 | readonly RED="\033[1;31m" 18 | readonly RESET="\033[0m" 19 | readonly GREEN="\033[0;32m" 20 | else 21 | readonly RED="" 22 | readonly RESET="" 23 | readonly GREEN="" 24 | fi 25 | 26 | function log_info() { 27 | echo -e "${GREEN}$*${RESET}" 28 | } 29 | 30 | function log_error() { 31 | echo -e "${RED}$*${RESET}" 32 | exit 1 33 | } 34 | 35 | function next_minute() { 36 | date -d '1 minute' -u +'%F %T UTC' 2>/dev/null || date -v+1M -u +'%F %T UTC' 37 | } 38 | 39 | function next_hour() { 40 | date -d '1 hour' -u +'%F %T UTC' 2>/dev/null || date -v+1H -u +'%F %T UTC' 41 | } 42 | 43 | function start_containers() { 44 | docker-compose up -d 45 | } 46 | 47 | function stop_containers() { 48 | docker-compose rm -fs 49 | } 50 | 51 | function rm_container() { 52 | docker rm -f "$1" 53 | } 54 | 55 | function docker_exec() { 56 | declare -r cmd=${*: -1:1} 57 | docker exec "${@:1:$(($#-1))}" su postgres -c "$cmd" 58 | } 59 | 60 | function run_test() { 61 | "$@" || log_error "Test case $1 FAILED" 62 | echo -e "Test case $1 ${GREEN}PASSED${RESET}" 63 | } 64 | -------------------------------------------------------------------------------- /postgres-appliance/tests/timescaledb.sql: -------------------------------------------------------------------------------- 1 | \c test_db 2 | 3 | CREATE EXTENSION timescaledb; 4 | 5 | CREATE TABLE "fOo" (id bigint NOT NULL PRIMARY KEY); 6 | SELECT create_hypertable('"fOo"', 'id', chunk_time_interval => 100000); 7 | INSERT INTO "fOo" SELECT * FROM generate_series(1, 1000000); 8 | ALTER TABLE "fOo" ALTER COLUMN id SET STATISTICS 500; 9 | -------------------------------------------------------------------------------- /spilo_cmd/.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | dist/ 3 | *.egg-info/ 4 | .coverage 5 | .eggs/ 6 | coverage.xml 7 | junit.xml 8 | spilo.yaml 9 | */__pycache__ 10 | -------------------------------------------------------------------------------- /spilo_cmd/README.md: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | -------------------------------------------------------------------------------- /spilo_cmd/requirements.txt: -------------------------------------------------------------------------------- 1 | clickclick>=0.9 2 | boto>=2.37.0 3 | PyYAML 4 | stups>=0.6 5 | prettytable 6 | -------------------------------------------------------------------------------- /spilo_cmd/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | 6 | """ 7 | 8 | import sys 9 | import os 10 | import inspect 11 | from distutils.cmd import Command 12 | 13 | import setuptools 14 | from setuptools.command.test import test as TestCommand 15 | from setuptools import setup 16 | 17 | if sys.version_info < (3, 4, 0): 18 | sys.stderr.write('FATAL: STUPS Senza needs to be run with Python 3.4+\n') 19 | sys.exit(1) 20 | 21 | __location__ = os.path.join(os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe()))) 22 | 23 | 24 | def read_version(package): 25 | data = {} 26 | with open(os.path.join(package, '__init__.py'), 'r') as fd: 27 | 
exec(fd.read(), data) 28 | return data['__version__'] 29 | 30 | NAME = 'spilo' 31 | MAIN_PACKAGE = 'spilo' 32 | VERSION = read_version(MAIN_PACKAGE) 33 | DESCRIPTION = 'Spilo command line client' 34 | LICENSE = 'Apache License 2.0' 35 | URL = 'https://github.com/zalando/spilo' 36 | AUTHOR = 'Feike Steenbergen' 37 | EMAIL = 'feike.steenbergen@zalando.de' 38 | KEYWORDS = 'aws spilo PostgreSQL cluster tunnel connect' 39 | 40 | COVERAGE_XML = True 41 | COVERAGE_HTML = False 42 | JUNIT_XML = True 43 | 44 | # Add here all kinds of additional classifiers as defined under 45 | # https://pypi.python.org/pypi?%3Aaction=list_classifiers 46 | CLASSIFIERS = [ 47 | 'Development Status :: 4 - Beta', 48 | 'Environment :: Console', 49 | 'Intended Audience :: Developers', 50 | 'Intended Audience :: System Administrators', 51 | 'License :: OSI Approved :: Apache Software License', 52 | 'Operating System :: POSIX :: Linux', 53 | 'Programming Language :: Python', 54 | 'Programming Language :: Python :: 3.4', 55 | 'Programming Language :: Python :: Implementation :: CPython', 56 | ] 57 | 58 | CONSOLE_SCRIPTS = ['spilo = spilo.spilo:cli'] 59 | 60 | 61 | class PyTest(TestCommand): 62 | 63 | user_options = [('cov=', None, 'Run coverage'), ('cov-xml=', None, 'Generate junit xml report'), ('cov-html=', 64 | None, 'Generate junit html report'), ('junitxml=', None, 'Generate xml of test results')] 65 | 66 | def initialize_options(self): 67 | TestCommand.initialize_options(self) 68 | self.cov = None 69 | self.cov_xml = False 70 | self.cov_html = False 71 | self.junitxml = None 72 | 73 | def finalize_options(self): 74 | TestCommand.finalize_options(self) 75 | if self.cov is not None: 76 | self.cov = ['--cov', self.cov, '--cov-report', 'term-missing'] 77 | if self.cov_xml: 78 | self.cov.extend(['--cov-report', 'xml']) 79 | if self.cov_html: 80 | self.cov.extend(['--cov-report', 'html']) 81 | if self.junitxml is not None: 82 | self.junitxml = ['--junitxml', self.junitxml] 83 | 84 | def run_tests(self): 85 | try: 86 | import pytest 87 | except: 88 | raise RuntimeError('py.test is not installed, run: pip install pytest') 89 | params = {'args': self.test_args} 90 | if self.cov: 91 | params['args'] += self.cov 92 | params['plugins'] = ['cov'] 93 | if self.junitxml: 94 | params['args'] += self.junitxml 95 | params['args'] += ['--doctest-modules', MAIN_PACKAGE, '-s', '-vv'] 96 | errno = pytest.main(**params) 97 | sys.exit(errno) 98 | 99 | 100 | def sphinx_builder(): 101 | try: 102 | from sphinx.setup_command import BuildDoc 103 | except ImportError: 104 | 105 | class NoSphinx(Command): 106 | 107 | user_options = [] 108 | 109 | def initialize_options(self): 110 | raise RuntimeError('Sphinx documentation is not installed, run: pip install sphinx') 111 | 112 | return NoSphinx 113 | 114 | class BuildSphinxDocs(BuildDoc): 115 | 116 | def run(self): 117 | if self.builder == 'doctest': 118 | import sphinx.ext.doctest as doctest 119 | # Capture the DocTestBuilder class in order to return the total 120 | # number of failures when exiting 121 | ref = capture_objs(doctest.DocTestBuilder) 122 | BuildDoc.run(self) 123 | errno = ref[-1].total_failures 124 | sys.exit(errno) 125 | else: 126 | BuildDoc.run(self) 127 | 128 | return BuildSphinxDocs 129 | 130 | 131 | class ObjKeeper(type): 132 | 133 | instances = {} 134 | 135 | def __init__(cls, name, bases, dct): 136 | cls.instances[cls] = [] 137 | 138 | def __call__(cls, *args, **kwargs): 139 | cls.instances[cls].append(super(ObjKeeper, cls).__call__(*args, **kwargs)) 140 | return 
cls.instances[cls][-1] 141 | 142 | 143 | def capture_objs(cls): 144 | from six import add_metaclass 145 | module = inspect.getmodule(cls) 146 | name = cls.__name__ 147 | keeper_class = add_metaclass(ObjKeeper)(cls) 148 | setattr(module, name, keeper_class) 149 | cls = getattr(module, name) 150 | return keeper_class.instances[cls] 151 | 152 | 153 | def get_install_requirements(path): 154 | content = open(os.path.join(__location__, path)).read() 155 | return [req for req in content.split('\\n') if req != ''] 156 | 157 | 158 | def read(fname): 159 | return open(os.path.join(__location__, fname)).read() 160 | 161 | 162 | def setup_package(): 163 | # Assemble additional setup commands 164 | cmdclass = {} 165 | cmdclass['docs'] = sphinx_builder() 166 | cmdclass['doctest'] = sphinx_builder() 167 | cmdclass['test'] = PyTest 168 | 169 | # Some helper variables 170 | version = os.getenv('GO_PIPELINE_LABEL', VERSION) 171 | 172 | docs_path = os.path.join(__location__, 'docs') 173 | docs_build_path = os.path.join(docs_path, '_build') 174 | install_reqs = get_install_requirements('requirements.txt') 175 | 176 | command_options = {'docs': { 177 | 'project': ('setup.py', MAIN_PACKAGE), 178 | 'version': ('setup.py', version.split('-', 1)[0]), 179 | 'release': ('setup.py', version), 180 | 'build_dir': ('setup.py', docs_build_path), 181 | 'config_dir': ('setup.py', docs_path), 182 | 'source_dir': ('setup.py', docs_path), 183 | }, 'doctest': { 184 | 'project': ('setup.py', MAIN_PACKAGE), 185 | 'version': ('setup.py', version.split('-', 1)[0]), 186 | 'release': ('setup.py', version), 187 | 'build_dir': ('setup.py', docs_build_path), 188 | 'config_dir': ('setup.py', docs_path), 189 | 'source_dir': ('setup.py', docs_path), 190 | 'builder': ('setup.py', 'doctest'), 191 | }, 'test': {'test_suite': ('setup.py', 'tests'), 'cov': ('setup.py', MAIN_PACKAGE)}} 192 | if JUNIT_XML: 193 | command_options['test']['junitxml'] = 'setup.py', 'junit.xml' 194 | if COVERAGE_XML: 195 | command_options['test']['cov_xml'] = 'setup.py', True 196 | if COVERAGE_HTML: 197 | command_options['test']['cov_html'] = 'setup.py', True 198 | 199 | setup( 200 | name=NAME, 201 | version=version, 202 | url=URL, 203 | description=DESCRIPTION, 204 | author=AUTHOR, 205 | author_email=EMAIL, 206 | license=LICENSE, 207 | keywords=KEYWORDS, 208 | long_description=read('README.md'), 209 | classifiers=CLASSIFIERS, 210 | test_suite='tests', 211 | packages=setuptools.find_packages(exclude=['tests', 'tests.*']), 212 | install_requires=install_reqs, 213 | setup_requires=['flake8'], 214 | cmdclass=cmdclass, 215 | tests_require=['pytest-cov', 'pytest'], 216 | command_options=command_options, 217 | entry_points={'console_scripts': CONSOLE_SCRIPTS}, 218 | ) 219 | 220 | 221 | if __name__ == '__main__': 222 | setup_package() 223 | -------------------------------------------------------------------------------- /spilo_cmd/spilo/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.1' 2 | -------------------------------------------------------------------------------- /spilo_cmd/tests/pg_service.conf: -------------------------------------------------------------------------------- 1 | [mock] 2 | host=nowhere 3 | port=5433 4 | user=johnny 5 | -------------------------------------------------------------------------------- /spilo_cmd/tests/test_cli.py: -------------------------------------------------------------------------------- 1 | import collections 2 | 3 | from click.testing import CliRunner 4 | 5 | 
from spilo.spilo import cli, process_options, print_spilos, tunnel 6 | 7 | Spilo = collections.namedtuple('Spilo', 'stack_name, version, dns, elb, instances, vpc_id, stack') 8 | 9 | 10 | def test_cli(): 11 | cli 12 | 13 | 14 | def test_tunnel(): 15 | arguments = [] 16 | runner = CliRunner() 17 | result = runner.invoke(tunnel, arguments) 18 | assert 'Usage: tunnel [OPTIONS] CLUSTER' in result.output 19 | 20 | arguments = ['--list', True, 'abc'] 21 | result = runner.invoke(tunnel, arguments) 22 | assert result.output == '' 23 | 24 | options = ['--pg_service-file', 'tests/pg_service.conf', 'mock'] 25 | result = runner.invoke(tunnel, options) 26 | assert result.exit_code == -1 27 | assert 't3st' in str(result.exception) 28 | 29 | arguments = ['abc'] 30 | result = runner.invoke(tunnel, arguments) 31 | assert result.exit_code != 0 32 | 33 | 34 | def test_list(): 35 | pass 36 | 37 | 38 | def test_option_processing(): 39 | process_options(opts=None) 40 | process_options(opts={'loglevel': 'DEBUG', 'cluster': 'feike', 'odd_config_file': '~/.config/piu/piu.yaml'}) 41 | 42 | 43 | def test_print_spilos(): 44 | spilos = list() 45 | print_spilos(spilos) 46 | 47 | spilos.append(Spilo(None, None, None, None, None, None, None)) 48 | print_spilos(spilos) 49 | 50 | spilos.append(Spilo(None, None, None, None, None, None, None)) 51 | print_spilos(spilos) 52 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length=120 3 | --------------------------------------------------------------------------------