├── .dockerignore ├── .github └── workflows │ ├── build_branch.yaml │ └── publish_images.yaml ├── .gitignore ├── CHANGELOG.md ├── Dockerfile ├── LICENSE ├── Makefile ├── NOTICE ├── README.md ├── build_scripts ├── install_extensions ├── pg_version.sh ├── shared.sh ├── shared_install.sh ├── shared_versions.sh └── versions.yaml ├── cicd ├── install_checks ├── shared.sh ├── smoketest.sh └── version_info.sql ├── docker-entrypoint.sh ├── fetch_tag_digest ├── pgbackrest_entrypoint.sh ├── scripts ├── 010_install_timescaledb_toolkit.sh ├── augment_patroni_configuration.py ├── configure_spilo.py ├── on_role_change.sh ├── pgbackrest-rest.py ├── post_init.sh ├── timescaledb │ ├── after-create.sql │ └── after-update.sql └── tsdbadmin.sql ├── sources ├── sources.list.amd64 └── sources.list.arm64 └── timescaledb_entrypoint.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | .git/ 3 | .github/ 4 | -------------------------------------------------------------------------------- /.github/workflows/build_branch.yaml: -------------------------------------------------------------------------------- 1 | name: Build branch 2 | 3 | on: 4 | push: 5 | branches: 6 | - "*/**" 7 | paths-ignore: 8 | - ".github/workflows/publish*.yaml" 9 | - "*.md" 10 | 11 | concurrency: 12 | group: build-branch-${{ github.ref }} 13 | cancel-in-progress: true 14 | 15 | env: 16 | DOCKER_REPOSITORY: timescale/timescaledb-ha 17 | DOCKER_REGISTRY: docker.io 18 | PG_MAJOR: 17 19 | ALL_VERSIONS: "true" 20 | OSS_ONLY: "false" 21 | 22 | jobs: 23 | build-branch: 24 | name: Build and push branch 25 | runs-on: ${{ matrix.runs_on }} 26 | 27 | strategy: 28 | fail-fast: false 29 | matrix: 30 | platform: [ amd64, arm64 ] 31 | include: 32 | - platform: amd64 33 | runs_on: ubuntu-22.04 34 | - platform: arm64 35 | runs_on: cloud-image-runner-arm64 36 | 37 | steps: 38 | # The github runners have a lot of space in /mnt, but apparently not enough in /. This step removes about 13G. 
39 | - name: remove unneeded runner software 40 | run: | 41 | df -h 42 | du -chs /usr/share/dotnet /usr/local/lib/android /opt/microsoft || true 43 | sudo rm -fr /usr/share/dotnet /usr/local/lib/android /opt/microsoft || true 44 | sudo docker image prune --all --force || true 45 | df -h 46 | 47 | - name: Checkout code 48 | uses: actions/checkout@v4 49 | 50 | - name: Login to Docker Hub 51 | uses: docker/login-action@v3 52 | with: 53 | username: ${{ secrets.ORG_DOCKER_HUB_USERNAME }} 54 | password: ${{ secrets.ORG_DOCKER_HUB_ACCESS_TOKEN }} 55 | 56 | - name: Setup | Buildx 57 | uses: docker/setup-buildx-action@v3 58 | 59 | - name: Build 60 | env: 61 | PLATFORM: ${{ matrix.platform }} 62 | run: make build-sha 63 | 64 | - name: Check 65 | env: 66 | PLATFORM: ${{ matrix.platform }} 67 | run: make check-sha 68 | 69 | - name: Publish 70 | env: 71 | PLATFORM: ${{ matrix.platform }} 72 | run: make publish-sha 73 | 74 | publish-combined-manifest: 75 | name: Publish branch manifest 76 | needs: [ "build-branch" ] 77 | runs-on: ubuntu-latest 78 | 79 | steps: 80 | - name: Checkout code 81 | uses: actions/checkout@v4 82 | 83 | - name: Login to Docker Hub 84 | uses: docker/login-action@v3 85 | with: 86 | username: ${{ secrets.ORG_DOCKER_HUB_USERNAME }} 87 | password: ${{ secrets.ORG_DOCKER_HUB_ACCESS_TOKEN }} 88 | 89 | - name: Publish combined manifest for branch 90 | run: make publish-combined-sha 91 | -------------------------------------------------------------------------------- /.github/workflows/publish_images.yaml: -------------------------------------------------------------------------------- 1 | name: Publish images 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | push: 7 | branches: 8 | - master 9 | - main 10 | paths-ignore: 11 | - "*.md" 12 | 13 | schedule: 14 | - cron: '0 7 * * 2' 15 | 16 | concurrency: 17 | group: publish-ha-images-${{ github.ref }} 18 | cancel-in-progress: true 19 | 20 | env: 21 | DOCKER_REPOSITORY: timescale/timescaledb-ha 22 | DOCKER_REGISTRY: docker.io 23 | 24 | jobs: 25 | publish: 26 | name: Publish pg${{ matrix.pg_major }}${{ matrix.all }}${{ matrix.oss }} ${{ matrix.platform }} 27 | 28 | strategy: 29 | fail-fast: false 30 | matrix: 31 | platform: [ "amd64", "arm64" ] 32 | pg_major: [ "17", "16", "15", "14", "13" ] 33 | all_versions: [ "false", "true" ] 34 | oss_only: [ "false", "true" ] 35 | 36 | include: 37 | - oss_only: "true" 38 | oss: "-oss" 39 | - all_versions: "true" 40 | all: "-all" 41 | - platform: amd64 42 | runs_on: ubuntu-22.04 43 | - platform: arm64 44 | runs_on: cloud-image-runner-arm64 45 | 46 | runs-on: "${{ matrix.runs_on }}" 47 | 48 | steps: 49 | # The github runners have a lot of space in /mnt, but apparently not enough in /. This step removes about 13G. 50 | - name: remove unneeded runner software 51 | run: | 52 | df -h 53 | du -chs /usr/share/dotnet /usr/local/lib/android /opt/microsoft || true 54 | sudo rm -fr /usr/share/dotnet /usr/local/lib/android /opt/microsoft || true 55 | sudo docker image prune --all --force || true 56 | df -h 57 | 58 | - name: Checkout code 59 | uses: actions/checkout@v4 60 | 61 | - name: Login to Docker Hub 62 | uses: docker/login-action@v3 63 | with: 64 | username: ${{ secrets.ORG_DOCKER_HUB_USERNAME }} 65 | password: ${{ secrets.ORG_DOCKER_HUB_ACCESS_TOKEN }} 66 | 67 | - name: Setup | Docker Context 68 | run: if ! 
docker context use ha-builder; then docker context create ha-builder; fi 69 | 70 | - name: Setup | Buildx 71 | uses: docker/setup-buildx-action@v3 72 | with: 73 | endpoint: ha-builder 74 | 75 | - name: Build and publish (pg${{ matrix.pg_major }}${{ matrix.all }}${{ matrix.oss }} ${{ matrix.platform }}) 76 | id: build 77 | env: 78 | PLATFORM: ${{ matrix.platform }} 79 | PG_MAJOR: ${{ matrix.pg_major }} 80 | ALL_VERSIONS: ${{ matrix.all_versions }} 81 | OSS_ONLY: ${{ matrix.oss_only }} 82 | run: | 83 | GIT_REV="${GITHUB_REF#refs/tags/}" make publish-builder publish-release 84 | 85 | - name: export outputs 86 | run: | 87 | mkdir -p /tmp/outputs 88 | builder_id="${{ steps.build.outputs.builder_id }}" 89 | release_id="${{ steps.build.outputs.release_id }}" 90 | touch "/tmp/outputs/builder-$(echo "$builder_id" | cut -d: -f2)" 91 | touch "/tmp/outputs/release-$(echo "$release_id" | cut -d: -f2)" 92 | 93 | - name: upload outputs 94 | uses: actions/upload-artifact@v4 95 | with: 96 | name: outputs-${{ matrix.pg_major }}-${{ matrix.all_versions }}-${{ matrix.oss_only }}-${{ matrix.platform }} 97 | path: /tmp/outputs/* 98 | if-no-files-found: error 99 | retention-days: 1 100 | 101 | publish-combined-manifests: 102 | name: Publish manifest pg${{ matrix.pg_major }}${{ matrix.docker_tag_postfix }} 103 | needs: [ "publish" ] 104 | runs-on: ubuntu-latest 105 | strategy: 106 | fail-fast: false 107 | 108 | matrix: 109 | pg_major: [ "17", "16", "15", "14", "13" ] 110 | docker_tag_postfix: ["", "-all", "-oss", "-all-oss" ] 111 | include: 112 | - docker_tag_postfix: "" 113 | oss_only: "false" 114 | all_versions: "false" 115 | - docker_tag_postfix: "-all" 116 | oss_only: "false" 117 | all_versions: "true" 118 | - docker_tag_postfix: "-oss" 119 | oss_only: "true" 120 | all_versions: "false" 121 | - docker_tag_postfix: "-all-oss" 122 | oss_only: "true" 123 | all_versions: "true" 124 | 125 | steps: 126 | - name: Download arm64 outputs 127 | uses: actions/download-artifact@v4 128 | with: 129 | name: outputs-${{ matrix.pg_major }}-${{ matrix.all_versions }}-${{ matrix.oss_only }}-arm64 130 | path: /tmp/outputs 131 | pattern: '*' 132 | merge-multiple: true 133 | 134 | - name: Download amd64 outputs 135 | uses: actions/download-artifact@v4 136 | with: 137 | name: outputs-${{ matrix.pg_major }}-${{ matrix.all_versions }}-${{ matrix.oss_only }}-amd64 138 | path: /tmp/outputs 139 | pattern: '*' 140 | merge-multiple: true 141 | 142 | - name: Checkout code 143 | uses: actions/checkout@v4 144 | 145 | - name: Login to Docker Hub 146 | uses: docker/login-action@v3 147 | with: 148 | username: ${{ secrets.ORG_DOCKER_HUB_USERNAME }} 149 | password: ${{ secrets.ORG_DOCKER_HUB_ACCESS_TOKEN }} 150 | 151 | # QEMU for multiplatform, which should be quick enough for pulling version information out of the images 152 | - name: Set up QEMU 153 | uses: docker/setup-qemu-action@v3 154 | 155 | - name: Publish combined manifest for pg${{ matrix.pg_major }}${{ matrix.docker_tag_postfix }} 156 | env: 157 | PG_MAJOR: ${{ matrix.pg_major }} 158 | VERSION_TAG: pg${{ matrix.pg_major}}${{ matrix.docker_tag_postfix }}-builder 159 | DOCKER_TAG_POSTFIX: ${{ matrix.docker_tag_postfix }} 160 | run: make publish-manifests 161 | 162 | check: 163 | name: Check image pg${{ matrix.pg_major }}${{ matrix.docker_tag_postfix }} 164 | needs: [ "publish", "publish-combined-manifests" ] 165 | runs-on: ubuntu-latest 166 | strategy: 167 | fail-fast: false 168 | matrix: 169 | pg_major: [ "17", "16", "15", "14", "13" ] 170 | docker_tag_postfix: ["", "-all", "-oss", 
"-all-oss" ] 171 | 172 | steps: 173 | - name: Checkout code 174 | uses: actions/checkout@v4 175 | 176 | - name: Login to Docker Hub 177 | uses: docker/login-action@v3 178 | with: 179 | username: ${{ secrets.ORG_DOCKER_HUB_USERNAME }} 180 | password: ${{ secrets.ORG_DOCKER_HUB_ACCESS_TOKEN }} 181 | 182 | # QEMU for multiplatform, which should be quick enough for just the checks 183 | - name: Set up QEMU 184 | uses: docker/setup-qemu-action@v3 185 | 186 | - name: Check pg${{ matrix.pg_major }}${{ matrix.docker_tag_postfix }} 187 | env: 188 | PG_MAJOR: ${{ matrix.pg_major }} 189 | DOCKER_TAG_POSTFIX: ${{ matrix.docker_tag_postfix }} 190 | run: make get-image-config check 191 | 192 | dispatch-ha-image-published-event: 193 | name: Dispatch HA image published event 194 | needs: [ "check" ] 195 | if: ${{ github.event_name == 'push' }} 196 | runs-on: ubuntu-latest 197 | 198 | steps: 199 | - name: Dispatch event to Publish cloud images workflow 200 | run: | 201 | curl -H "Accept: application/vnd.github.everest-preview+json" \ 202 | -H "Authorization: token ${{ secrets.ORG_AUTOMATION_TOKEN }}" \ 203 | --request POST \ 204 | --data '{"event_type": "ha_image_published"}' \ 205 | https://api.github.com/repos/timescale/timescaledb-docker-cloud/dispatches 206 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .build* 2 | .idea/ 3 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | These are changes that will probably be included in the next release. 9 | 10 | ## [future release] 11 | 12 | * Include pgvectorscale 13 | * Include pgai 14 | 15 | ## [v1.7.1] - 2023-04-27 16 | 17 | * [Refactor base HA image](https://github.com/timescale/timescaledb-docker-ha/pull/355) 18 | * Include and default to [TimescaleDB 2.10.3](https://github.com/timescale/timescaledb/releases/tag/2.10.3) 19 | 20 | ## [v1.6.9] - 2023-04-21 21 | 22 | * Include and default to [TimescaleDB 2.10.2](https://github.com/timescale/timescaledb/releases/tag/2.10.2) 23 | 24 | ## [v1.6.5] - 2023-03-24 25 | 26 | * Include and default to [TimescaleDB 2.10.1](https://github.com/timescale/timescaledb/releases/tag/2.10.1) 27 | 28 | ## [v1.5.22] - 2023-01-24 29 | 30 | ## [v1.5.22] - 2023-01-24 31 | 32 | * Include and default to [TimescaleDB 2.9.2](https://github.com/timescale/timescaledb/releases/tag/2.9.2) 33 | 34 | ## [v1.5.21] - 2023-01-06 35 | 36 | * Upgrade Promscale extension to 0.8.0 37 | 38 | ## [v1.5.20] - 2023-01-02 39 | 40 | * Include and default to [TimescaleDB 2.9.1](https://github.com/timescale/timescaledb/releases/tag/2.9.1) 41 | 42 | ## [v1.5.19] - 2022-12-19 43 | 44 | * Include Toolkit [1.13.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.13.0) 45 | * Include and default to [TimescaleDB 2.9.0](https://github.com/timescale/timescaledb/releases/tag/2.9.0) 46 | * Reduce surface area for [leaking passwords](https://github.com/timescale/timescaledb-docker-ha/pull/338) 47 | 48 | ## [v1.5.18] - 2022-12-05 49 | 50 | * Include ts\_stat\_statements. 
51 | 52 | ## [v1.5.17] - 2022-11-21 53 | 54 | * Include Timescale-osm 55 | 56 | ## [v1.5.16] - 2022-11-21 57 | 58 | * Include Toolkit [1.12.1](https://github.com/timescale/timescaledb-docker-ha/pull/327) 59 | 60 | ## [v1.5.15] - 2022-11-10 61 | 62 | * Update patroni, [add fix for creating k8s svc](https://github.com/timescale/timescaledb-docker-ha/pull/319) 63 | * Minor PostgreSQL version upgrade (fetching latest distro packages): 14.6 and 13.9 64 | * Include Toolkit [1.12.0](https://github.com/timescale/timescaledb-docker-ha/pull/325) 65 | 66 | ## [v1.5.14] - 2022-11-04 67 | 68 | * Upgrade OpenSSL to 3.0.7 (fetching latest distro packages) 69 | 70 | ## [v1.5.13] - 2022-10-24 71 | 72 | * [Upgrade OOMGuard](https://github.com/timescale/timescaledb-docker-ha/pull/320) 73 | 74 | ## [v1.5.12] - 2022-10-11 75 | 76 | * Upgrade Promscale extension to 0.7.0 77 | 78 | ## [v1.5.11] - 2022-10-06 79 | 80 | * Include and default to [TimescaleDB 2.8.1](https://github.com/timescale/timescaledb/releases/tag/2.8.1) 81 | 82 | ## [v1.5.10] - 2022-09-29 83 | 84 | * Bump `hot_forge` to 0.1.39 for AWS Security Token Service 85 | 86 | ## [v1.5.9] - 2022-09-29 87 | 88 | * Switch from docker API v1 to v2 for determining immutable tag names 89 | 90 | ## [v1.5.8] - 2022-09-28 91 | 92 | * Upgrade TimescaleDB Toolkit extension to [1.11.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.11.0) 93 | * Include timescaledb\_cloudutils v1.1.7 94 | 95 | ## [v1.5.7] - 2022-09-02 96 | 97 | * Include [timescaledb-tune 0.14.1](https://github.com/timescale/timescaledb-tune/releases/tag/v0.14.1) 98 | 99 | ## [v1.5.6] - 2022-08-31 100 | 101 | * Include and default to [TimescaleDB 2.8.0](https://github.com/timescale/timescaledb/releases/tag/2.8.0) 102 | 103 | ## [v1.5.5] - 2022-08-24 104 | 105 | * Upgrade the Promscale extension to 0.6.0 106 | 107 | ## [v1.5.2] - 2022-08-24 108 | 109 | * Include patroni-k8s-sync in non-oss images 110 | 111 | ## [v1.5.1] - 2022-08-23 112 | 113 | * Upgrade TimescaleDB Toolkit extension to [1.10.1](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.10.1) 114 | 115 | ## [v1.5.0] - 2022-08-11 116 | 117 | * [PostgreSQL 14.5](https://www.postgresql.org/docs/release/14.5/) was now actually released 118 | * [PostgreSQL 13.8](https://www.postgresql.org/docs/release/13.8/) was now actually released 119 | 120 | ## [v1.4.9] - 2022-07-25 121 | 122 | * Include and default to [TimescaleDB 2.7.2](https://github.com/timescale/timescaledb/releases/tag/2.7.2) 123 | 124 | ## [v1.4.8] - 2022-07-19 125 | 126 | * Upgrade Promscale extension to 0.5.4 127 | * [Use binary packages for promscale and toolkit](https://github.com/timescale/timescaledb-docker-ha/pull/277) 128 | 129 | ## [v1.4.7] - 2022-07-14 130 | 131 | * [Update OOMGuard](https://github.com/timescale/timescaledb-docker-ha/pull/279) 132 | 133 | ## [v1.4.6] - 2022-07-07 134 | 135 | * Include and default to [TimescaleDB 2.7.1](https://github.com/timescale/timescaledb/releases/tag/2.7.1) 136 | * Upgrade TimescaleDB Toolkit extension to [1.8.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.8.0) 137 | 138 | ## [v1.4.5] - 2022-06-23 139 | 140 | * Upgrade Promscale extension to 0.5.2 141 | 142 | ## [v1.4.4] - 2022-06-17 143 | 144 | * [PostgreSQL 14.4](https://www.postgresql.org/docs/release/14.4/) was now actually released 145 | as debian packages 146 | 147 | ## [v1.4.3] - 2022-06-16 148 | 149 | This release reintroduces all minor versions of TimescaleDB that were dropped when 1.4.0 was 150 | released. 
We received multiple reports from users of this Docker Image that they rely on 151 | older (minor) versions of TimescaleDB. 152 | 153 | ## [v1.4.2] - 2022-06-14 154 | 155 | * [PostgreSQL 14.4](https://www.postgresql.org/docs/release/14.4/) was released 156 | 157 | ## [v1.4.1] - 2022-06-14 158 | 159 | ### Changed 160 | 161 | * Upgrade Promscale extension to version 0.5.1 162 | * Patroni was updated to [2.1.4](https://patroni.readthedocs.io/en/latest/releases.html#version-2-1-4) 163 | 164 | ## [v1.4.0] - 2022-06-09 165 | 166 | This release removes a lot of minor versions of TimescaleDB. We keep the following versions for 167 | compatibility with older Docker Images: 168 | 169 | * 1.7.5 - This is the final 1.x.x version of TimescaleDB for PostgreSQL 11 users. 170 | This version is only available for PostgreSQL 12. 171 | Having this version in the Docker Image allows this Image to be a stepping stone in a migration 172 | from PostgreSQL 11 and/or TimescaleDB 1.7.5, using `pg_dump` and `pg_restore` for example. 173 | * 2.6.1 - The final point release for the previous minor release 174 | * 2.7.0 - The latest TimescaleDB release 175 | 176 | Users that are currently running a TimescaleDB version that has been removed from this image 177 | are advised to update their TimescaleDB extension to 2.7.0 *prior* to using the newer Docker 178 | Image. 179 | 180 | The latest Docker Image that allows you to run the minor versions prior to 2.7.0 is: 181 | 182 | ```shell 183 | timescale/timescaledb-ha:pg14.3-ts2.7.0-p1 184 | ``` 185 | 186 | This release also deprecates versions of `timescaledb_toolkit`. The same advice applies to this 187 | extension as to the `timescaledb` extension: update it to 1.7.0 *prior* to using the 188 | newer Docker Image. 189 | 190 | ### Removed 191 | 192 | * TimescaleDB versions: 193 | * 2.1.0 194 | * 2.1.1 195 | * 2.2.0 196 | * 2.2.1 197 | * 2.3.0 198 | * 2.3.1 199 | * 2.4.0 200 | * 2.4.1 201 | * 2.4.2 202 | * 2.5.0 203 | * 2.5.1 204 | * 2.5.2 205 | * 2.6.0 206 | 207 | * TimescaleDB Toolkit versions: 208 | * forge-stable-1.3.1 209 | * 1.5.1-cloud 210 | 211 | ## [v1.3.4] - 2022-06-03 212 | 213 | * Include timescaledb\_cloudutils v1.1.6 214 | 215 | ## [v1.3.3] - 2022-05-24 216 | 217 | ### Changed 218 | 219 | * Include and default to [TimescaleDB 2.7.0](https://github.com/timescale/timescaledb/releases/tag/2.7.0) 220 | 221 | ## [v1.3.2] - 2022-05-20 222 | 223 | ### Changed 224 | 225 | * Ensure experimental Patroni image also supports PostgreSQL 12 and 13 226 | 227 | ## [v1.3.1] - 2022-05-20 228 | 229 | ### Changed 230 | 231 | * Upgrade TimescaleDB Toolkit extension to [1.7.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.7.0) 232 | * Upgrade Oom Guard to 1.2.0 233 | 234 | ## [v1.3.0-beta.0] - 2022-05-18 235 | 236 | ### Changed 237 | 238 | * Patroni has been updated to support a new static primary configuration pattern which 239 | is optimized to ensure that a single-node Patroni cluster is able to maintain maximum 240 | uptime.
241 | 242 | ## [v1.2.8] - 2022-05-12 243 | 244 | ### Changed 245 | 246 | * PostgreSQL [12.11](https://www.postgresql.org/docs/12/release-12-11.html), 247 | [13.7](https://www.postgresql.org/docs/13/release-13-7.html), and 248 | [14.3](https://www.postgresql.org/docs/14/release-14-3.html) have been released 249 | 250 | ## [v1.2.7] - 2022-05-12 251 | 252 | ### Changed 253 | 254 | * Install timescaledb_toolkit extension by default 255 | 256 | ## [v1.2.6] - 2022-05-11 257 | 258 | ### Changed 259 | 260 | * Upgrade promscale extension to version 0.5.0 261 | * Upgrade Timescale Cloudutils to 1.1.5 262 | 263 | ## [v1.2.5] - 2022-04-26 264 | 265 | ### Changed 266 | 267 | * Use Ubuntu 22.04 LTS as a base image instead of Ubuntu 21.10 268 | * Bump `hot_forge` to 0.1.37 269 | * Include Timescale Cloudutils 1.1.4 270 | 271 | ## [v1.2.4] - 2022-04-20 272 | 273 | ### Changed 274 | 275 | * Include Timescale Cloudutils 1.1.3 276 | 277 | ## [v1.2.3] - 2022-04-11 278 | 279 | ### Changed 280 | 281 | * Include and default to [TimescaleDB 2.6.1](https://github.com/timescale/timescaledb/releases/tag/2.6.1) 282 | * Include Cloudutils v1.1.2 283 | 284 | ## [v1.2.2] - 2022-04-06 285 | 286 | * Upgrade TimescaleDB Toolkit extension to [1.6.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.6.0) 287 | * pgBackRest is upgraded to [2.38](https://pgbackrest.org/release.html#2.38) 288 | 289 | ## [v1.2.1] - 2022-03-21 290 | 291 | ### Changed 292 | 293 | * Upgrade promscale extension to version 0.3.2 294 | 295 | ## [v1.2.0] - 2022-03-11 296 | 297 | Minor release bump as we change Ubuntu to 21.10, which includes a higher 298 | version of `glibc`. 299 | 300 | ### Changed 301 | 302 | * Use Ubuntu 21.10 as a base image instead of Ubuntu 21.04 303 | * Patroni was updated to 2.1.3 304 | 305 | ## [v1.1.9] - 2022-02-23 306 | 307 | ### Changed 308 | 309 | * ~Patroni was updated to [2.1.3]~ Due to packaging problems, Patroni was still at 2.1.2 for this release.
310 | 311 | ## [v1.1.8] - 2022-02-17 312 | 313 | ### Changed 314 | 315 | * Include and default to [TimescaleDB 2.6.0](https://github.com/timescale/timescaledb/releases/tag/2.6.0) 316 | 317 | ## [v1.1.7] - 2022-02-17 318 | 319 | ### Changed 320 | 321 | * Include Timescale Cloudutils 1.1.1 322 | * Include TimescaleDB Toolkit 1.5.1 323 | * Fix `search_path` for TimescaleDB < 2.5.2 324 | * Use Docker Secrets during building 325 | 326 | ## [v1.1.6] - 2022-02-10 327 | 328 | ### Changed 329 | 330 | * PostgreSQL [12.10](https://www.postgresql.org/docs/12/release-12-10.html), 331 | [13.6](https://www.postgresql.org/docs/13/release-13-6.html) have been released, and 332 | [14.2](https://www.postgresql.org/docs/14/release-14-2.html) have been released 333 | 334 | ## [v1.1.5] - 2022-02-10 335 | 336 | ### Changed 337 | 338 | * Include and default to [TimescaleDB 2.5.2](https://github.com/timescale/timescaledb/releases/tag/2.5.2) 339 | * Include PostgreSQL in the image labeled with PostgreSQL 14 to allow `pg_upgrade` from version 12 to version 14 340 | 341 | ## [v1.1.4] - 2022-02-08 342 | 343 | ### Changed 344 | 345 | * Use Rust 1.58.1 to allow 346 | [Rust 2021 edition](https://doc.rust-lang.org/edition-guide/rust-2021/index.html) 347 | projects to be included 348 | * Build fewer versions of Toolkit to improve build time 349 | * Switch to using Docker Secrets with the 350 | [`--secret`](https://docs.docker.com/engine/reference/commandline/build/#options) option 351 | this also requires the use of 352 | [Docker BuildKit](https://docs.docker.com/develop/develop-images/build_enhancements/) 353 | 354 | ## [v1.1.3] - 2022-02-07 355 | 356 | ### Added 357 | 358 | * Include [`pg_stat_monitor`](https://github.com/percona/pg_stat_monitor) 359 | 360 | ### Changed 361 | 362 | * Include Timescale Cloudutils 1.1.0, this includes support for PostgreSQL 14 363 | * Retain PostgreSQL 12 support in the builder/compiler images 364 | 365 | ## [v1.1.2] - 2021-12-17 366 | 367 | ### Changed 368 | 369 | * Include Timescale Cloudutils 1.0.3 370 | * Upgrade Oom Guard to 1.1.1 371 | * Include Hot Forge v0.1.36 372 | 373 | ### Added 374 | 375 | * [`pldebugger`](https://github.com/EnterpriseDB/pldebugger) is now included (from packages) 376 | 377 | ## [v1.1.1] - 2021-12-08 378 | 379 | ### Added 380 | 381 | * Include TimescaleDB 1.7.5 to allow users of TimescaleDB 1.x to keep using this 382 | image in combination with PostgreSQL 12 databases. 383 | 384 | ## [v1.1.0] - 2021-12-02 385 | 386 | This release marks the point where we no longer publish images containing 387 | PostgreSQL 11. 388 | 389 | As every Docker Image we release contains the PostgreSQL version of the tag, 390 | but also of the major PostgreSQL version before the tag, that means you can 391 | use the following images: 392 | 393 | * `pg13*`: Supports running PostgreSQL 13 and 12 394 | * `pg14*`: Supports running PostgreSQL 14 and 13 395 | 396 | For those that used to use the Docker Images tagged with pg12 with PostgreSQL 12 397 | you can now use the `pg13` tagged images. 398 | 399 | > NOTICE: the `pg13` images do have their PATH default to PostgreSQL 13 binaries, so 400 | be sure to configure the PATH environment variable correctly in the container 401 | that you use. 
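As a rough illustration (the image tag and `PATH` value below are examples only, not a documented invocation), overriding `PATH` when starting the container makes the PostgreSQL 12 binaries take precedence:

```shell
docker run -d --name timescaledb-pg12 \
  -e PATH="/usr/lib/postgresql/12/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
  timescale/timescaledb-ha:pg13-latest
```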
402 | 403 | For example, the `timescaledb-single` Helm Chart configures the path based 404 | upon the user input in [`values.yaml`](https://github.com/timescale/timescaledb-kubernetes/blob/a12dd47a2339ce1bbacde728f3eeb94309ce0e6f/charts/timescaledb-single/templates/statefulset-timescaledb.yaml#L253-L254) 405 | 406 | ### Removed 407 | 408 | * We no longer build images containing PostgreSQL 12 and PostgreSQL 11 409 | 410 | ### Added 411 | 412 | * We now also build PostgreSQL 14 Docker Images; they include PostgreSQL 14 and 13. 413 | 414 | ### Changed 415 | 416 | * Include and default to Timescale 2.5.1 417 | * Include Timescale Cloudutils 1.4.0 418 | * Upgrade promscale extension to version 0.3.0 419 | 420 | ## [v1.0.8] - 2021-11-11 421 | 422 | ### Changed 423 | 424 | * PostgreSQL [12.9](https://www.postgresql.org/docs/12/release-12-9.html) and [13.5](https://www.postgresql.org/docs/13/release-13-5.html) have been released 425 | * Include dependencies for native Raft support in Patroni: [PySyncObj](https://github.com/bakwc/PySyncObj) 426 | 427 | ## [v1.0.7] - 2021-10-27 428 | 429 | ### Changed 430 | 431 | * Include and default to Timescale 2.4.2 432 | * Include Timescale Cloudutils 1.0.2 433 | * Update Toolkit to 1.3.1 434 | 435 | ## [v1.0.4] - 2021-10-08 436 | 437 | ### Changed 438 | 439 | * Include Hot Forge v0.1.35 440 | 441 | ## [v1.0.3] - 2021-10-07 442 | 443 | ### Changed 444 | 445 | * Include `timescaledb_cloudutils` v1.0.1 446 | * Include Hot Forge v0.1.33 447 | 448 | ### Added 449 | 450 | * `pg_cron` 451 | 452 | ## [v1.0.2] - 2021-09-20 453 | 454 | ### Changed 455 | 456 | * Include and default to Timescale 2.4.2 457 | 458 | ## [v1.0.1] - 2021-09-17 459 | 460 | ### Fixed 461 | 462 | * Build of `timescaledb_cloudutils` 463 | 464 | ## [v1.0.0] - 2021-09-16 465 | 466 | This is a major release, as the base Docker Image has changed from Debian to Ubuntu. 467 | Our tests have not yet shown any issues with this update. We would advise anyone 468 | that consumes these images to test that the new images also work for them in their 469 | environment. 470 | 471 | As this Docker Image has been in production for a while, it seems awkward to still not 472 | be on version 1.0.0+; therefore, we mark this occasion by releasing version 1.0.0.
473 | 474 | ### Added 475 | 476 | * Installation of rust compiler inside the Dockerfile 477 | 478 | ### Changed 479 | 480 | * Base the Docker Image on `ubuntu` (21.04) instead of `rust:debian` 481 | * Bump `timescaledb_toolkit` to version [1.2.0](https://github.com/timescale/timescaledb-toolkit/releases/tag/1.2.0) 482 | 483 | ### Removed 484 | 485 | * Support for PostGIS 2.5 486 | 487 | ### Fixed 488 | 489 | ## [v0.4.29] - 2021-09-08 490 | 491 | ### Changed 492 | 493 | * Bump `hot_forge` to 0.1.32 494 | 495 | ## [v0.4.28] - 2021-09-07 496 | 497 | ### Changed 498 | 499 | * Bump `hot_forge` to 0.1.31 for publishing 500 | 501 | ## [v0.4.27] - 2021-09-07 502 | 503 | ### Changed 504 | 505 | * Bump `hot_forge` to 0.1.31 506 | 507 | ## [v0.4.26] - 2021-09-07 508 | 509 | ### Fixed 510 | 511 | * `timescaledb_cloudutils` now actually builds 512 | 513 | ### Changed 514 | 515 | ## [v0.4.25] - 2021-09-06 516 | 517 | ### Added 518 | 519 | * `timescaledb_cloudutils` for non-oss builds 520 | 521 | ### Changed 522 | 523 | * Bump `hot_forge` to 0.1.18 524 | 525 | ## [v0.4.24] - 2021-08-24 526 | 527 | ### Changed 528 | 529 | * Download precompiled hot-forge instead of building from source 530 | * Switch to rust Docker Image base (which is based on Debian itself) 531 | 532 | ## [v0.4.22] - 2021-08-19 533 | 534 | ### Changed 535 | 536 | * Include and default to Timescale 2.4.1 537 | 538 | ## [v0.4.19] - 2021-08-12 539 | 540 | ### Changed 541 | 542 | * PostgreSQL [12.8](https://www.postgresql.org/docs/12/release-12-8.html) and [13.4](https://www.postgresql.org/docs/13/release-13-4.html) have been released 543 | 544 | ## [v0.4.17] - 2021-08-02 545 | 546 | ### Fixed 547 | 548 | * Skip building timescaledb 2.4+ for PostgreSQL 11 549 | 550 | ## [v0.4.15] - 2021-08-02 551 | 552 | ### Changed 553 | 554 | * Include and default to Timescale 2.4.0 555 | 556 | ### Fixed 557 | 558 | * Silence warnings about missing Cargo files 559 | * Build process 560 | 561 | ## [v0.4.14] - 2021-07-29 562 | 563 | ### Fixed 564 | 565 | * Fix building some extensions for non-default Postgres version 566 | 567 | ## [v0.4.13] - 2021-07-06 568 | 569 | ### Changed 570 | 571 | * Include downgrade scripts if available 572 | 573 | ## [v0.4.12] - 2021-07-06 574 | 575 | ### Changed 576 | 577 | * Include and default to Timescale 2.3.1 578 | * Bump `hot_forge` to 0.1.20 579 | 580 | ## [v0.4.10] - 2021-07-05 581 | 582 | ### Changed 583 | 584 | * Bump `hot_forge` to 0.1.18 585 | * Upgrade promscale extension to version 0.2.0 586 | * Rename Analytics to Toolkit and up to 1.0.0 (#129) 587 | 588 | ## [v0.4.9] - 2021-06-28 589 | 590 | ### Changed 591 | 592 | * Bump `hot_forge` to 0.1.14 593 | 594 | ## [v0.4.8] - 2021-06-28 595 | 596 | ### Changed 597 | 598 | * Bump `hot_forge` to 0.1.13 599 | 600 | ## [v0.4.7] - 2021-06-23 601 | 602 | ### Added 603 | 604 | * `hot_forge`: A private Timescale Project allowing hot patching of containers 605 | 606 | ### Changed 607 | 608 | * Bump `timescale_analytics` to 0.3.0 609 | * Make all compiled extensions owned by `postgres`: Allows hot-patching 610 | 611 | ### Removed 612 | 613 | * `sqlite_fdw`: The potential use case switched to using `file_fdw` 614 | 615 | ## [v0.4.6] - 2021-05-25 616 | 617 | ### Changed 618 | 619 | * Include and default to Timescale 2.3.0 620 | 621 | ## [v0.4.5] - 2021-05-18 622 | 623 | ### Added 624 | 625 | * `gdb` and `gdbserver` to aid in debugging 626 | * [pg\_stat\_kcache](https://github.com/powa-team/pg_stat_kcache) extension: Gathers statistics about real reads and writes done by the 
filesystem layer 627 | 628 | ### Changed 629 | 630 | * Label Docker Image with all minor PostgreSQL versions 631 | 632 | ## [v0.4.4] - 2021-05-13 633 | 634 | ### Changed 635 | 636 | * PostgreSQL 12.7 and 13.3 [have been released](https://www.postgresql.org/about/news/postgresql-133-127-1112-1017-and-9622-released-2210/) 637 | 638 | ## [v0.4.3] - 2021-05-05 639 | 640 | ### Changed 641 | 642 | * Include and default to Timescale 2.2.1 643 | 644 | ## [v0.4.2] - 2021-04-13 645 | 646 | ### Changed 647 | 648 | * Include and default to Timescale 2.2.0 649 | 650 | ## [v0.4.1] - 2021-04-12 651 | 652 | ### Added 653 | 654 | * Bump [promscale\_extension](https://github.com/timescale/promscale_extension) to 0.1.2 and build for PostgreSQL 13 655 | 656 | ## [v0.4.0] - 2021-04-09 657 | 658 | ### Added 659 | 660 | * PostgreSQL 13 images 661 | * [pg\_repack](https://github.com/reorg/pg_repack) extension: Reorganize tables in PostgreSQL databases with minimal locks 662 | * [hypopg](https://github.com/HypoPG/hypopg) extension: adds support for hypothetical indexes 663 | 664 | ### Changed 665 | 666 | * [timescale\_analytics](https://github.com/timescale/timescale-analytics) was upgraded 667 | 668 | ### Removed 669 | 670 | * PostgreSQL 11 images 671 | 672 | ## [v0.3.6] - 2021-03-26 673 | 674 | ### Added 675 | 676 | * Allow additional extensions to be added to a running container 677 | 678 | If enabled, this allows one to create new extension libraries and new supporting files 679 | in their respective directories. 680 | Files that are part of the Docker Image are guarded against mutations, so only *new* files 681 | can be added. 682 | 683 | ### Changed 684 | 685 | * Include and default to Timescale 2.1.1 686 | * CI/CD has moved from GitLab to GitHub Actions 687 | * Images now get pushed to `timescale/timescaledb-ha` (used to be `timescaledev/timescaledb-ha`) 688 | * Built images also get labeled with the available TimescaleDB versions in the image, for example: 689 | "com.timescaledb.image.timescaledb.available_versions": "1.7.0,1.7.1,1.7.2,1.7.3,1.7.4,1.7.5,2.0.0,2.0.0-rc3,2.0.0-rc4,2.0.1,2.0.2,2.1.0" 690 | 691 | ### Removed 692 | 693 | * timescale-prometheus (superseded by [promscale](https://github.com/timescale/promscale)) 694 | * [pg\_prometheus](https://github.com/timescale/pg_prometheus): Was already excluded from being built for a long while 695 | 696 | ## [v0.3.4] - 2021-02-22 697 | 698 | ### Changed 699 | 700 | * Include Timescale 2.0.2 and 2.1.0 and default to 2.1.0 701 | 702 | ## [v0.3.3] - 2021-02-16 703 | 704 | ### Added 705 | 706 | * Include Extension [pg\_auth\_mon](https://github.com/RafiaSabih/pg_auth_mon) 707 | * Include Extension [logerrors](https://github.com/munakoiso/logerrors) 708 | 709 | ### Changed 710 | 711 | * TimescaleDB [1.7.5](https://github.com/timescale/timescaledb/releases/tag/1.7.5) was released 712 | 713 | ## [v0.3.2] - 2021-01-28 714 | 715 | ### Changed 716 | 717 | * TimescaleDB [2.0.1](https://github.com/timescale/timescaledb/releases/tag/2.0.1) was released 718 | 719 | ## [v0.3.1] - 2021-01-28 720 | 721 | ### This release failed the build and was never published 722 | 723 | ## [v0.3.0] - 2021-01-04 724 | 725 | ### Changed 726 | 727 | * Default to Timescale 2.0.0 728 | 729 | ## [v0.2.30] - 2020-12-21 730 | 731 | ### Changed 732 | 733 | * Include (but not default to) Timescale 2.0.0 734 | 735 | ## [v0.2.29] - 2020-12-13 736 | 737 | ### Changed 738 | 739 | * Include (but not default to) Timescale 2.0.0-rc4 740 | 741 | ## [v0.2.28] -
2020-11-13 742 | 743 | ### Changed 744 | 745 | * Include (but not default to) Timescale 2.0.0-rc3 746 | 747 | ## [v0.2.27] - 2020-11-13 748 | 749 | ### Changed 750 | 751 | * PostgreSQL 11.10 and 12.5 [have been released](https://www.postgresql.org/about/news/postgresql-131-125-1110-1015-9620-and-9524-released-2111/) 752 | 753 | ## [v0.2.26] - 2020-10-28 754 | 755 | ### Added 756 | 757 | * Include libraries for Timescale 2.0.0-rc2 758 | 759 | ## [v0.2.25] - 2020-09-29 760 | 761 | ### Added 762 | 763 | * Include [promscale](https://github.com/timescale/promscale_extension) extension 764 | 765 | ### Removed 766 | 767 | * Remove Rust build directories from the final image 768 | 769 | ## [v0.2.24] - 2020-09-07 770 | 771 | ### Changed 772 | 773 | * TimescaleDB [1.7.4](https://github.com/timescale/timescaledb/releases/tag/1.7.4) was released 774 | 775 | ## [v0.2.22] - 2020-09-07 776 | 777 | ### Added 778 | 779 | * Include [pgrouting](https://pgrouting.org/) in the Docker Image 780 | 781 | ### Changed 782 | 783 | * Timescale-Prometheus [0.1.0-beta.4](https://github.com/timescale/timescale-prometheus/releases/tag/0.1.0-beta.4) was released 784 | 785 | ## [v0.2.21] - 2020-08-28 786 | 787 | ### Added 788 | 789 | * Include [hll](https://github.com/citusdata/postgresql-hll) extension 790 | 791 | ### Changed 792 | 793 | * TimescaleDB [1.7.3](https://github.com/timescale/timescaledb/releases/tag/1.7.3) was released 794 | * Timescale-Prometheus [0.1.0-beta.3](https://github.com/timescale/timescale-prometheus/releases/tag/0.1.0-beta.3) was released 795 | 796 | ## [v0.2.20] - 2020-08-14 797 | 798 | ### Changed 799 | 800 | * PostgreSQL 11.9 and 12.4 are released 801 | 802 | ## [v0.2.19] - 2020-08-03 803 | 804 | ### Added 805 | 806 | * Upgrade `timescale_prometheus` to version `0.1.0-beta.1` 807 | 808 | ## [v0.2.18] - 2020-07-27 809 | 810 | ### Added 811 | 812 | * [`pgBouncer`](https://www.pgbouncer.org/) as part of the image 813 | 814 | ## [v0.2.17] - 2020-07-17 815 | 816 | ### Changed 817 | 818 | * `tsdb_admin` 0.1.1 was released 819 | 820 | ## [v0.2.16] - 2020-07-08 821 | 822 | ### Changed 823 | 824 | * TimescaleDB 1.7.2 was released 825 | 826 | ## [v0.2.15] - 2020-06-30 827 | 828 | ### Changed 829 | 830 | * Docker Image tag names, all images which are mutable are postfixed with `-latest` 831 | 832 | ## [v0.2.14] - 2020-06-24 833 | 834 | ### Fixed 835 | 836 | * Ensure builder is built for every new tagged release 837 | 838 | ## [v0.2.13] - 2020-06-23 839 | 840 | ### Added 841 | 842 | * Include `psutils` to allow some process troubleshooting inside the container 843 | * Include custom timescaledb scripts for pgextwlist 844 | * `lz4` support, which can be used by pgBackRest 845 | * `tsdb_admin` can be included in the image 846 | * `timescale_prometheus` is now included in the image 847 | 848 | ### Changed 849 | 850 | * GitLab CI/CD will now publish Docker images to Docker hub on version tags 851 | 852 | ## [v0.2.12] - 2020-05-19 853 | 854 | ### Changed 855 | 856 | * TimescaleDB 1.7.1 is released 857 | * PostgreSQL 11.8 and 12.3 are released 858 | 859 | ## [v0.2.11] - 2020-05-14 860 | 861 | These are changes that will probably be included in the next release. 
862 | 863 | ### Added 864 | 865 | * Include the [timescale-prometheus](https://github.com/timescale/timescale-prometheus) extension by default 866 | 867 | ### Changed 868 | 869 | * Allow restore from backup even when no master is running 870 | * Deprecate including the `pg_prometheus` extension, it is not built by default anymore 871 | * PostgreSQL minor patches are released 872 | 873 | ### Fixed 874 | 875 | * Backup parameters 876 | 877 | ## [v0.2.10] - 2020-04-20 878 | 879 | ### Added 880 | 881 | * Support PostgreSQL 12 882 | * Support for TimescaleDB 1.7 (PostgreSQL 11 & PostgreSQL 12) 883 | * Remove stale pidfile if it exists 884 | * Include `strace` for debugging 885 | 886 | ### Changed 887 | 888 | * Build 2 sets of Docker images in CI/CD (PostgreSQL 11 & PostgreSQL 12) 889 | 890 | ### Fixed 891 | 892 | * Fail build if a single item in a loop fails 893 | 894 | ### Removed 895 | 896 | * Some perl dependencies of `pgBackRest`, which are no longer needed 897 | as `pgBackRest` is now fully written in C 898 | 899 | ## [v0.2.9] - 2020-02-13 900 | 901 | ### Changed 902 | 903 | * PostgreSQL 11.7 was released 904 | * PostGIS is now included in all the Docker images 905 | 906 | This reduces the number of images that need to be built, maintained and supported 907 | 908 | #### Build process 909 | 910 | * Add Labels to the Docker images, in line with the Open Container Initiative 911 | [Annotations Rules](https://github.com/opencontainers/image-spec/blob/master/annotations.md#rules) for their Image Format Specification. 912 | 913 | These labels can be used to identify exact version information of TimescaleDB, PostgreSQL and some 914 | other extensions, as well as the default labels for `created`, `revision` and `source`. 915 | 916 | This deprecates adding the `scm-source.json` that was added to the Docker Images. 917 | * Improve build & release process 918 | 919 | ## [v0.2.8] - 2020-01-15 920 | 921 | ### Added 922 | 923 | * Create a additional Docker image including PostGIS 924 | 925 | ### Changed 926 | 927 | * TimescaleDB 1.6.0 was released 928 | 929 | ## [v0.2.7] - 2019-11-14 930 | 931 | ### Changed 932 | 933 | * PostgreSQL 11.6 was released 934 | * TimescaleDB 1.5.1 was released 935 | 936 | ## [v0.2.6] - 2019-11-06 937 | 938 | ### Changed 939 | 940 | * Reduce log output during installation of tsdbadmin scripts 941 | 942 | ## [v0.2.5] - 2019-10-31 943 | 944 | ### Added 945 | 946 | * Include pgextwlist to allow extension whitelisting 947 | * Possibility to build a Docker image for a given repository and/or tag 948 | * TimescaleDB 1.5.0 was released and is now included 949 | 950 | ## [v0.2.4] - 2019-10-29 951 | 952 | ### Added 953 | 954 | * pg_prometheus is now part of the Docker image 955 | 956 | ### Changed 957 | 958 | * Pass on all PostgreSQL parameters to Patroni 959 | 960 | ### Fixed 961 | 962 | * timescaledb-tune runs with the PG_MAJOR version 963 | 964 | ## [v0.2.3] - 2019-10-09 965 | 966 | ### Added 967 | 968 | * Install [tsdbadmin](https://github.com/timescale/savannah-tsdbadmin/) scripts into postgres database 969 | 970 | ## [v0.2.2] - 2019-09-11 971 | 972 | ### Changed 973 | 974 | * TimescaleDB 1.4.2 was released, rebuilding the Docker image to include that version 975 | 976 | ## [v0.2.1] - 2019-09-06 977 | 978 | ### Changed 979 | 980 | * The default command for the Dockerfile is now "postgres". This ensures we have the same interface as other Docker images out there. 
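As a sketch of what that shared interface looks like (the tag and settings below are illustrative only), server parameters can be appended after the `postgres` command, just as with the upstream PostgreSQL images:

```shell
docker run -d timescale/timescaledb-ha:pg13-latest \
  postgres -c max_connections=200 -c shared_buffers=1GB
```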
981 | 982 | ## [v0.2.0] - 2019-08-30 983 | 984 | ### Added 985 | 986 | * Allow PostgreSQL compile time customizations to be made. 987 | 988 | Some environments benefit from being able to change things like `NAMEDATALEN`. 989 | 990 | * Makefile to aid in building the Docker image 991 | * GitLab CI/CD configuration to trigger automated builds 992 | * Entrypoint for `pgBackRest` 993 | * The TimescaleDB extension is added to the `template1` and `postgres` databases 994 | * Git context is injected into the Docker image 995 | 996 | ### Changed 997 | 998 | * Default entrypoint is `docker_entrypoint.sh`. 999 | 1000 | This enables the Docker image to also be used in a non-kubernetes environment, allowing 1001 | developers to run the exact same software as production environments. 1002 | 1003 | * Default Docker repository names 1004 | * Failure of first backup does not fail the database initialization 1005 | 1006 | ### Removed 1007 | 1008 | * Removed many packages to reduce Docker image size without breaking TimescaleDB 1009 | 1010 | ### Fixed 1011 | 1012 | * Only configure a Patroni namespace if a `POD_NAMESPACE` is set 1013 | 1014 | ## [v0.1.0] - 2019-08-30 1015 | 1016 | This is the first stable release of the TimescaleDB HA Docker image. 1017 | It was built from the [TimescaleDB Operator](https://github.com/timescale/timescaledb-operator/tree/v0.1.0) before 1018 | this repository was split away from it. 1019 | 1020 | ### Added 1021 | 1022 | * A Docker image based on Debian buster 1023 | 1024 | The basic components of the Docker image are: 1025 | 1026 | * [TimescaleDB](https://github.com/timescale/timescaledb), all recent releases 1027 | * [PostgreSQL](https://github.com/postgres/postgres) 1028 | * [Patroni](https://github.com/zalando/patroni) 1029 | * [pgBackRest](https://github.com/pgbackrest/pgbackrest) 1030 | 1031 | This Docker image can be used in the same way as the (smaller) public 1032 | [TimescaleDB Docker](https://github.com/timescale/timescaledb-docker) image; 1033 | however, this image has HA built in, which leverages Patroni to do 1034 | auto failover of PostgreSQL if needed. 1035 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ## The purpose of this Dockerfile is to build an image that contains: 2 | ## - timescale from (internal) sources 3 | ## - many PostgreSQL extensions 4 | ## - patroni for High Availability 5 | ## - Barman Cloud for CloudNativePG compatibility 6 | ## - spilo to allow the github.com/zalando/postgres-operator to be compatible 7 | ## - pgBackRest to allow good backups 8 | 9 | ## We have many base images to choose from (alpine, bitnami), but as we're adding a lot 10 | ## of tools to the image anyway, the end result is that we would only 11 | ## reduce the final Docker image by single digit MB's, which is insignificant 12 | ## in relation to the total image size. 13 | ## By choosing a very basic base image, we do keep full control over every part 14 | ## of the build steps. This Dockerfile contains every piece of magic we want. 15 | 16 | ## To allow us to use specific glibc 2.33+ features, we need to find a way 17 | ## to run glibc 2.33. Running multiple glibc versions inside the same 18 | ## container is something we'd like to avoid: we've seen multiple glibc 19 | ## related bugs in our lifetime, and adding multiple glibc versions in the mix 20 | ## would make debugging harder.
21 | 22 | ## Debian (and rust:debian) has served us well in the past; however, even Debian's 23 | ## latest release (bullseye, August 2021) cannot give us glibc 2.33. 24 | ## Ubuntu, however, does give us glibc 2.33 - as Ubuntu is based upon Debian, 25 | ## the changes required are not that big for this Docker Image. Most of the 26 | ## tools we use will be the same across the board, as most of our tools are 27 | ## installed from external repositories. 28 | ARG DOCKER_FROM=ubuntu:22.04 29 | FROM ${DOCKER_FROM} AS builder 30 | 31 | # By including multiple versions of PostgreSQL we can use the same Docker image, 32 | # regardless of the major PostgreSQL Version. It also allows us to support (eventually) 33 | # pg_upgrade from one major version to another, 34 | # so we need all the postgres & timescale libraries for all versions 35 | ARG PG_VERSIONS="17 16 15 14 13" 36 | ARG PG_MAJOR=17 37 | 38 | ENV DEBIAN_FRONTEND=noninteractive 39 | 40 | # We need full control over the running user, including the UID, therefore we 41 | # create the postgres user as the first thing on our list 42 | RUN adduser --home /home/postgres --uid 1000 --disabled-password --gecos "" postgres 43 | 44 | RUN echo 'APT::Install-Recommends "false";' >> /etc/apt/apt.conf.d/01norecommend 45 | RUN echo 'APT::Install-Suggests "false";' >> /etc/apt/apt.conf.d/01norecommend 46 | 47 | # Ubuntu will throttle downloads, which can slow things down so much that we can't complete the build. Since we're 48 | # building in AWS, use their mirrors. arm64 and amd64 use different sources though 49 | COPY sources /tmp/sources 50 | RUN set -eux; \ 51 | source="/tmp/sources/sources.list.$(dpkg --print-architecture)"; \ 52 | mv /etc/apt/sources.list /etc/apt/sources.list.dist; \ 53 | cp "$source" /etc/apt/sources.list; \ 54 | rm -fr /tmp/sources 55 | 56 | # Make sure we're as up-to-date as possible, and install the highest-level dependencies 57 | RUN set -eux; \ 58 | apt-get update; \ 59 | apt-get upgrade -y; \ 60 | apt-get install -y ca-certificates curl gnupg1 gpg gpg-agent locales lsb-release wget unzip 61 | 62 | RUN mkdir -p /build/scripts 63 | RUN chmod 777 /build 64 | WORKDIR /build/ 65 | 66 | RUN curl -Ls https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor --output /usr/share/keyrings/postgresql.keyring 67 | RUN set -eux; \ 68 | for t in deb deb-src; do \ 69 | echo "$t [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/postgresql.keyring] https://apt.postgresql.org/pub/repos/apt/ $(lsb_release -s -c)-pgdg main" >> /etc/apt/sources.list.d/pgdg.list; \ 70 | echo "$t [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/postgresql.keyring] https://apt-archive.postgresql.org/pub/repos/apt $(lsb_release -s -c)-pgdg-archive main" >> /etc/apt/sources.list.d/pgdg.list; \ 71 | done 72 | 73 | # timescaledb-tune, as well as timescaledb-parallel-copy 74 | RUN curl -Ls https://packagecloud.io/timescale/timescaledb/gpgkey | gpg --dearmor --output /usr/share/keyrings/timescaledb.keyring 75 | RUN echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/timescaledb.keyring] https://packagecloud.io/timescale/timescaledb/ubuntu/ $(lsb_release -cs) main" > /etc/apt/sources.list.d/timescaledb.list 76 | 77 | # The following tools are required for some of the processes we (TimescaleDB) regularly 78 | # run inside the containers that use this Docker Image 79 | # awscli is useful in many situations, for example, to list backup buckets etc 80 | RUN set -eux; \ 81 | apt-get update; \ 82 | apt-get upgrade -y; \ 83 |
apt-get install -y \ 84 | less jq strace procps awscli vim-tiny gdb gdbserver dumb-init daemontools \ 85 | postgresql-common pgbouncer pgbackrest lz4 libpq-dev libpq5 pgtop libnss-wrapper gosu \ 86 | pg-activity lsof htop; \ 87 | curl -Lso /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_"$(dpkg --print-architecture)"; \ 88 | chmod 755 /usr/local/bin/yq 89 | 90 | # pgbackrest-exporter 91 | ARG PGBACKREST_EXPORTER_VERSION="0.18.0" 92 | RUN set -eux; \ 93 | arch="$(arch)"; [ "$arch" = aarch64 ] && arch=arm64; pkg="pgbackrest_exporter_${PGBACKREST_EXPORTER_VERSION}_linux_${arch}"; \ 94 | curl --silent \ 95 | --location \ 96 | --output /tmp/pkg.deb \ 97 | "https://github.com/woblerr/pgbackrest_exporter/releases/download/v${PGBACKREST_EXPORTER_VERSION}/${pkg}.deb"; \ 98 | cd /tmp; \ 99 | dpkg -i ./pkg.deb; \ 100 | rm -rfv /tmp/pkg.deb 101 | 102 | # pgbouncer-exporter 103 | ARG PGBOUNCER_EXPORTER_VERSION="0.9.0" 104 | RUN set -eux; \ 105 | pkg="pgbouncer_exporter-${PGBOUNCER_EXPORTER_VERSION}.linux-$(dpkg --print-architecture)"; \ 106 | curl --silent \ 107 | --location \ 108 | --output /tmp/pkg.tgz \ 109 | "https://github.com/prometheus-community/pgbouncer_exporter/releases/download/v${PGBOUNCER_EXPORTER_VERSION}/${pkg}.tar.gz"; \ 110 | cd /tmp; \ 111 | tar xvzf /tmp/pkg.tgz "$pkg"/pgbouncer_exporter; \ 112 | mv -v /tmp/"$pkg"/pgbouncer_exporter /usr/local/bin/pgbouncer_exporter; \ 113 | rm -rfv /tmp/pkg.tgz /tmp/"$pkg" 114 | 115 | # forbid creation of a main cluster when package is installed 116 | RUN sed -ri 's/#(create_main_cluster) .*$/\1 = false/' /etc/postgresql-common/createcluster.conf 117 | 118 | # The next 2 instructions (ENV + RUN) are directly copied from https://github.com/rust-lang/docker-rust/blob/master/stable/bullseye/Dockerfile 119 | ENV RUSTUP_HOME=/usr/local/rustup \ 120 | CARGO_HOME=/usr/local/cargo \ 121 | PATH=/usr/local/cargo/bin:$PATH \ 122 | RUST_VERSION=1.85.0 123 | 124 | RUN set -eux; \ 125 | dpkgArch="$(dpkg --print-architecture)"; \ 126 | case "${dpkgArch##*-}" in \ 127 | amd64) rustArch='x86_64-unknown-linux-gnu'; rustupSha256='6aeece6993e902708983b209d04c0d1dbb14ebb405ddb87def578d41f920f56d' ;; \ 128 | armhf) rustArch='armv7-unknown-linux-gnueabihf'; rustupSha256='3c4114923305f1cd3b96ce3454e9e549ad4aa7c07c03aec73d1a785e98388bed' ;; \ 129 | arm64) rustArch='aarch64-unknown-linux-gnu'; rustupSha256='1cffbf51e63e634c746f741de50649bbbcbd9dbe1de363c9ecef64e278dba2b2' ;; \ 130 | i386) rustArch='i686-unknown-linux-gnu'; rustupSha256='0a6bed6e9f21192a51f83977716466895706059afb880500ff1d0e751ada5237' ;; \ 131 | *) echo >&2 "unsupported architecture: ${dpkgArch}"; exit 1 ;; \ 132 | esac; \ 133 | url="https://static.rust-lang.org/rustup/archive/1.27.1/${rustArch}/rustup-init"; \ 134 | wget "$url"; \ 135 | echo "${rustupSha256} *rustup-init" | sha256sum -c -; \ 136 | chmod +x rustup-init; \ 137 | ./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION --default-host ${rustArch}; \ 138 | rm rustup-init; \ 139 | chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \ 140 | rustup --version; \ 141 | cargo --version; \ 142 | rustc --version 143 | 144 | # Setup locales, and make sure we have a en_US.UTF-8 locale available 145 | RUN set -eux; \ 146 | find /usr/share/i18n/charmaps/ -type f ! -name UTF-8.gz -delete; \ 147 | find /usr/share/i18n/locales/ -type f ! -name en_US ! -name en_GB ! -name i18n* ! -name iso14651_t1 ! -name iso14651_t1_common ! 
-name 'translit_*' -delete; \ 148 | echo 'en_US.UTF-8 UTF-8' > /usr/share/i18n/SUPPORTED; \ 149 | localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 150 | 151 | # We install pip3, as we need it for some of the extensions. This will install a lot of dependencies, all marked as auto to help with cleanup later 152 | RUN apt-get install -y python3 python3-pip 153 | 154 | # using uv with pgai reduces size of dependencies 155 | RUN python3 -m pip install uv 156 | 157 | # We install some build dependencies and mark the installed packages as auto-installed, 158 | # this will cause the cleanup to get rid of all of these packages 159 | ENV BUILD_PACKAGES="binutils cmake devscripts equivs gcc git gpg gpg-agent libc-dev libc6-dev libkrb5-dev libperl-dev libssl-dev lsb-release make patchutils python2-dev python3-dev wget libsodium-dev" 160 | RUN apt-get install -y ${BUILD_PACKAGES} 161 | RUN apt-mark auto ${BUILD_PACKAGES} 162 | 163 | # https://salsa.debian.org/postgresql/postgresql/-/commit/b995beb3cd1c2b8834605007227b3cedab6462e4 164 | # This looks like a build-/test-only dependency, and they expect us to use the real tzdata when actually running. 165 | # TODO: keep watching this to see if they remove the limitation. If the old tzdata becomes unavailable, we'll have to 166 | # do something more drastic. 167 | RUN apt-get install -y --allow-downgrades tzdata="2022a-*" 168 | 169 | COPY --chown=postgres:postgres build_scripts /build/scripts/ 170 | 171 | # We install the PostgreSQL build dependencies and mark the installed packages as auto-installed. 172 | RUN set -eux; \ 173 | for pg in ${PG_VERSIONS}; do \ 174 | mk-build-deps "postgresql-${pg}" && apt-get install -y ./postgresql-${pg}-build-deps*.deb && apt-mark auto postgresql-${pg}-build-deps || exit 1; \ 175 | done 176 | 177 | # TODO: There's currently a build-dependency problem related to tzdata, remove this when it's resolved 178 | RUN apt-get install -y tzdata 179 | 180 | RUN set -eux; \ 181 | packages=""; \ 182 | for pg in ${PG_VERSIONS}; do \ 183 | export FULL_VERSION="$(/build/scripts/pg_version.sh ${pg})*" ; \ 184 | packages="$packages postgresql-client-${pg}=${FULL_VERSION} postgresql-${pg}=${FULL_VERSION} postgresql-server-dev-${pg}=${FULL_VERSION} postgresql-${pg}-dbgsym=${FULL_VERSION} \ 185 | postgresql-plpython3-${pg}=${FULL_VERSION} postgresql-plperl-${pg}=${FULL_VERSION} postgresql-${pg}-pgextwlist postgresql-${pg}-hll \ 186 | postgresql-${pg}-pgrouting postgresql-${pg}-repack postgresql-${pg}-hypopg postgresql-${pg}-unit \ 187 | postgresql-${pg}-pg-stat-kcache postgresql-${pg}-cron postgresql-${pg}-pldebugger postgresql-${pg}-pgpcre \ 188 | postgresql-${pg}-pglogical postgresql-${pg}-wal2json postgresql-${pg}-pgq3 postgresql-${pg}-pg-qualstats \ 189 | postgresql-${pg}-pgaudit postgresql-${pg}-ip4r postgresql-${pg}-pgtap postgresql-${pg}-semver postgresql-${pg}-orafce \ 190 | postgresql-${pg}-pgvector postgresql-${pg}-h3 postgresql-${pg}-rum"; \ 191 | done; \ 192 | apt-get install -y $packages 193 | 194 | ARG POSTGIS_VERSIONS="3" 195 | RUN set -ex; \ 196 | if [ -n "${POSTGIS_VERSIONS}" ]; then \ 197 | for postgisv in ${POSTGIS_VERSIONS}; do \ 198 | for pg in ${PG_VERSIONS}; do \ 199 | apt-get install -y postgresql-${pg}-postgis-${postgisv}; \ 200 | done; \ 201 | done; \ 202 | fi 203 | 204 | # Add a couple of 3rd party extension managers to make extension additions easier 205 | RUN set -eux; \ 206 | apt-get install -y pgxnclient 207 | 208 | ## Add pgsodium extension dependencies 209 | RUN set -eux; \ 210 | apt-get
install -y libsodium23 211 | 212 | RUN set -eux; \ 213 | for pg in ${PG_VERSIONS}; do \ 214 | for pkg in pg_uuidv7 pgsodium; do \ 215 | PATH="/usr/lib/postgresql/${pg}/bin:$PATH" pgxnclient install --pg_config "/usr/lib/postgresql/${pg}/bin/pg_config" "$pkg"; \ 216 | done; \ 217 | done 218 | 219 | # the strip command is due to the vectors.so size: 450mb before stripping, 12mb after 220 | ARG PGVECTO_RS 221 | RUN set -ex; \ 222 | if [ -n "${PGVECTO_RS}" ]; then \ 223 | for pg in ${PG_VERSIONS}; do \ 224 | # pgvecto.rs only supports PostgreSQL 14+ 225 | if [ $pg -ge 14 ]; then \ 226 | curl --silent \ 227 | --location \ 228 | --output /tmp/vectors.deb \ 229 | "https://github.com/tensorchord/pgvecto.rs/releases/download/v${PGVECTO_RS}/vectors-pg${pg}_${PGVECTO_RS}_$(dpkg --print-architecture).deb" && \ 230 | dpkg -i /tmp/vectors.deb && \ 231 | rm -rfv /tmp/vectors.deb && \ 232 | strip --strip-unneeded "/usr/lib/postgresql/${pg}/lib/vectors.so"; \ 233 | fi; \ 234 | done; \ 235 | fi 236 | 237 | # Some Patroni prerequisites 238 | # This needs to be done after the PostgreSQL packages have been installed, 239 | # to ensure we have the preferred libpq installations etc. 240 | RUN apt-get install -y python3-etcd python3-requests python3-pystache python3-kubernetes python3-pysyncobj patroni 241 | 242 | # Barman cloud 243 | # Required for CloudNativePG compatibility 244 | RUN pip3 install --no-cache-dir 'barman[cloud,azure,snappy,google]' 245 | 246 | RUN apt-get install -y timescaledb-tools 247 | 248 | ## Entrypoints as they are from the Timescale image and its default upstream repositories. 249 | ## This ensures the default interface (entrypoint) matches that of the github.com/timescale/timescaledb-docker image, 250 | ## which allows this Docker Image to be a drop-in replacement for those Docker Images. 251 | ARG GITHUB_TIMESCALEDB_DOCKER_REF=main 252 | ARG GITHUB_DOCKERLIB_POSTGRES_REF=master 253 | 254 | RUN set -ex; \ 255 | cd /build; \ 256 | git clone https://github.com/timescale/timescaledb-docker; \ 257 | cd timescaledb-docker; \ 258 | git checkout ${GITHUB_TIMESCALEDB_DOCKER_REF}; \ 259 | cp -a docker-entrypoint-initdb.d /docker-entrypoint-initdb.d/; \ 260 | ln -s /usr/bin/timescaledb-tune /usr/local/bin/timescaledb-tune 261 | 262 | # Add custom entrypoint to install timescaledb_toolkit 263 | COPY scripts/010_install_timescaledb_toolkit.sh /docker-entrypoint-initdb.d/ 264 | 265 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 266 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh; \ 267 | ln -s /usr/local/bin/docker-entrypoint.sh /docker-entrypoint.sh 268 | 269 | # The following allows *new* files to be created, so that extensions can be added to a running container. 270 | # Existing files are still owned by root and have their sticky bit (the 1 in the 1775 permission mode) set, 271 | # and therefore cannot be overwritten or removed by the unprivileged (postgres) user.
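# As a rough illustration of what mode 1775 means here (a hypothetical shell session inside a
# running container, not a build step): the postgres user can add new files to these directories,
# but cannot remove or replace the root-owned files that shipped with the image, e.g.:
#   postgres$ touch "$(pg_config --pkglibdir)/my_extension.so"   # allowed: group postgres may create new files
#   postgres$ rm "$(pg_config --pkglibdir)/plpgsql.so"           # denied: the sticky bit protects root-owned files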
272 | # This ensures the following: 273 | # - libraries and supporting files that have been installed *before this step* are immutable 274 | # - libraries and supporting files that have been installed *after this step* are mutable 275 | # - files owned by postgres can be overwritten in a running container 276 | # - new files can be added to the directories mentioned here 277 | RUN set -ex; \ 278 | for pg in ${PG_VERSIONS}; do \ 279 | for dir in /usr/share/doc \ 280 | "$(/usr/lib/postgresql/${pg}/bin/pg_config --sharedir)/extension" \ 281 | "$(/usr/lib/postgresql/${pg}/bin/pg_config --pkglibdir)" \ 282 | "$(/usr/lib/postgresql/${pg}/bin/pg_config --bindir)" \ 283 | "$(/usr/lib/postgresql/${pg}/bin/pg_config --includedir-server)/extension"; do \ 284 | install --directory "${dir}" --group postgres --mode 1775; \ 285 | find "${dir}" -type d -exec install --directory {} --group postgres --mode 1775 \;; \ 286 | done; \ 287 | done 288 | 289 | RUN for file in $(find /usr/share/postgresql -name 'postgresql.conf.sample'); do \ 290 | # We want timescaledb to be loaded in this image by every created cluster 291 | sed -r -i "s/[#]*\s*(shared_preload_libraries)\s*=\s*'(.*)'/\1 = 'timescaledb,\2'/;s/,'/'/" $file \ 292 | # We need to listen on all interfaces, otherwise PostgreSQL is not accessible 293 | && echo "listen_addresses = '*'" >> $file; \ 294 | done 295 | 296 | RUN chown -R postgres:postgres /usr/local/cargo 297 | 298 | # required to install dbgsym packages 299 | RUN set -ex; \ 300 | chgrp -R postgres /usr/lib/debug; \ 301 | chmod -R g+w /usr/lib/debug 302 | 303 | ## Prepare pgai, needs a separate directory 304 | RUN install -o postgres -g postgres -m 0750 -d /usr/local/lib/pgai 305 | 306 | USER postgres 307 | 308 | ENV MAKEFLAGS=-j4 309 | 310 | # pgai is an extension for artificial intelligence workloads 311 | ARG PGAI_VERSION 312 | RUN set -ex; \ 313 | if [ "${PG_MAJOR}" -gt 15 ] && [ -n "${PGAI_VERSION}" ]; then \ 314 | git clone --branch "${PGAI_VERSION}" https://github.com/timescale/pgai.git /build/pgai; \ 315 | cd /build/pgai; \ 316 | for pg in ${PG_VERSIONS}; do \ 317 | if [ "$pg" -gt 15 ]; then \ 318 | PG_BIN=$(/usr/lib/postgresql/${pg}/bin/pg_config --bindir) PG_MAJOR=${pg} ./projects/extension/build.py install all; \ 319 | fi; \ 320 | done; \ 321 | fi 322 | 323 | 324 | # pg_stat_monitor is a Query Performance Monitoring tool for PostgreSQL 325 | # https://github.com/percona/pg_stat_monitor 326 | ARG PG_STAT_MONITOR 327 | RUN set -ex; \ 328 | if [ -n "${PG_STAT_MONITOR}" ]; then \ 329 | git clone https://github.com/percona/pg_stat_monitor /build/pg_stat_monitor; \ 330 | cd /build/pg_stat_monitor; \ 331 | git checkout "${PG_STAT_MONITOR}"; \ 332 | git reset HEAD --hard; \ 333 | for pg in ${PG_VERSIONS}; do \ 334 | PATH="/usr/lib/postgresql/${pg}/bin:${PATH}" make USE_PGXS=1 clean; \ 335 | PATH="/usr/lib/postgresql/${pg}/bin:${PATH}" make USE_PGXS=1 all; \ 336 | PATH="/usr/lib/postgresql/${pg}/bin:${PATH}" make USE_PGXS=1 install; \ 337 | done; \ 338 | fi 339 | 340 | # pg_auth_mon is an extension to monitor authentication attempts 341 | # It is also useful to determine whether the DB is actively used 342 | # https://github.com/RafiaSabih/pg_auth_mon 343 | ARG PG_AUTH_MON 344 | RUN set -ex; \ 345 | if [ -n "${PG_AUTH_MON}" ]; then \ 346 | git clone https://github.com/RafiaSabih/pg_auth_mon /build/pg_auth_mon; \ 347 | cd /build/pg_auth_mon; \ 348 | git checkout "${PG_AUTH_MON}"; \ 349 | for pg in ${PG_VERSIONS}; do \ 350 | git reset HEAD --hard; \ 351 | 
PATH="/usr/lib/postgresql/${pg}/bin:${PATH}" make clean; \ 352 | PATH="/usr/lib/postgresql/${pg}/bin:${PATH}" make install; \ 353 | done; \ 354 | fi 355 | 356 | # logerrors is an extension to count the number of errors logged by postgrs, grouped by the error codes 357 | # https://github.com/munakoiso/logerrors 358 | ARG PG_LOGERRORS 359 | RUN set -ex; \ 360 | if [ -n "${PG_LOGERRORS}" ]; then \ 361 | git clone https://github.com/munakoiso/logerrors /build/logerrors; \ 362 | cd /build/logerrors; \ 363 | git checkout "${PG_LOGERRORS}"; \ 364 | for pg in ${PG_VERSIONS}; do \ 365 | git reset HEAD --hard; \ 366 | PATH="/usr/lib/postgresql/${pg}/bin:${PATH}" make clean; \ 367 | PATH="/usr/lib/postgresql/${pg}/bin:${PATH}" make install; \ 368 | done; \ 369 | fi 370 | 371 | # INSTALL_METHOD will show up in the telemetry, which makes it easier to identify these installations 372 | ARG INSTALL_METHOD=docker-ha 373 | ARG OSS_ONLY 374 | 375 | # RUST_RELEASE for some packages passes this to --profile 376 | ARG RUST_RELEASE=release 377 | 378 | # split the extension builds into two steps to allow caching of successful steps 379 | ARG GITHUB_REPO=timescale/timescaledb 380 | ARG TIMESCALEDB_VERSIONS 381 | RUN set -ex; \ 382 | OSS_ONLY="${OSS_ONLY}" \ 383 | GITHUB_REPO="${GITHUB_REPO}" \ 384 | TIMESCALEDB_VERSIONS="${TIMESCALEDB_VERSIONS}" \ 385 | /build/scripts/install_extensions timescaledb 386 | 387 | # install all rust packages in the same step to allow it to optimize for cargo-pgx installs 388 | ARG TOOLKIT_VERSIONS 389 | RUN set -ex; \ 390 | OSS_ONLY="${OSS_ONLY}" \ 391 | RUST_RELEASE="${RUST_RELEASE}" \ 392 | TOOLKIT_VERSIONS="${TOOLKIT_VERSIONS}" \ 393 | /build/scripts/install_extensions rust 394 | 395 | ARG PGVECTORSCALE_VERSIONS 396 | RUN set -ex; \ 397 | OSS_ONLY="${OSS_ONLY}" \ 398 | RUST_RELEASE="${RUST_RELEASE}" \ 399 | PGVECTORSCALE_VERSIONS="${PGVECTORSCALE_VERSIONS}" \ 400 | /build/scripts/install_extensions pgvectorscale 401 | 402 | USER root 403 | 404 | # All the tools that were built in the previous steps have their ownership set to postgres 405 | # to allow mutability. To allow one to build this image with the default privileges (owned by root) 406 | # one can set the ALLOW_ADDING_EXTENSIONS argument to anything but "true". 407 | ARG ALLOW_ADDING_EXTENSIONS=true 408 | RUN set -eu; \ 409 | if [ "${ALLOW_ADDING_EXTENSIONS}" != "true" ]; then \ 410 | for pg in ${PG_VERSIONS}; do \ 411 | for dir in /usr/share/doc "$(/usr/lib/postgresql/${pg}/bin/pg_config --sharedir)/extension" "$(/usr/lib/postgresql/${pg}/bin/pg_config --pkglibdir)" "$(/usr/lib/postgresql/${pg}/bin/pg_config --bindir)"; do \ 412 | chown -R root:root "{dir}"; \ 413 | done; \ 414 | done; \ 415 | fi 416 | 417 | RUN apt-get clean 418 | 419 | ARG PG_MAJOR 420 | ENTRYPOINT ["/docker-entrypoint.sh"] 421 | CMD ["postgres"] 422 | 423 | ## TimescaleDB entrypoints and configuration scripts 424 | ## Within a k8s context, we expect the ENTRYPOINT/CMD to always be explicitly specified 425 | COPY timescaledb_entrypoint.sh / 426 | ## Backwards compatibility, some older deployments use patroni_entrypoint.sh 427 | RUN ln -s /timescaledb_entrypoint.sh /patroni_entrypoint.sh 428 | COPY pgbackrest_entrypoint.sh / 429 | ## Some patroni callbacks are configured by default by the operator. 
430 | COPY scripts /scripts/ 431 | 432 | ## The mount being used by the Zalando postgres-operator is /home/postgres/pgdata 433 | ## for Patroni to do it's work it will sometimes move an old/invalid data directory 434 | ## inside the parent directory; therefore we need a subdirectory inside the mount 435 | 436 | ENV PGROOT=/home/postgres \ 437 | PGDATA=/home/postgres/pgdata/data \ 438 | PGLOG=/home/postgres/pg_log \ 439 | PGSOCKET=/home/postgres/pgdata \ 440 | BACKUPROOT=/home/postgres/pgdata/backup \ 441 | PGBACKREST_CONFIG=/home/postgres/pgdata/backup/pgbackrest.conf \ 442 | PGBACKREST_STANZA=poddb \ 443 | PATH=/usr/lib/postgresql/${PG_MAJOR}/bin:${PATH} \ 444 | LC_ALL=C.UTF-8 \ 445 | LANG=C.UTF-8 \ 446 | PAGER="" 447 | 448 | ## The Zalando postgres-operator has strong opinions about the HOME directory of postgres, 449 | ## whereas we do not. Make the operator happy then 450 | RUN usermod postgres --home "${PGROOT}" --move-home 451 | 452 | ## The /etc/supervisor/conf.d directory is a very Spilo (Zalando postgres-operator) oriented directory. 453 | ## However, to make things work the user postgres currently needs to have write access to this directory 454 | ## The /var/lib/postgresql/data is used as PGDATA by alpine/bitnami, which makes it useful to have it be owned by Postgres 455 | RUN install -o postgres -g postgres -m 0750 -d "${PGROOT}" "${PGLOG}" "${PGDATA}" "${BACKUPROOT}" /etc/supervisor/conf.d /scripts /var/lib/postgresql 456 | 457 | ## Making sure that pgbackrest is pointing to the right file 458 | RUN rm /etc/pgbackrest.conf && ln -s "${PGBACKREST_CONFIG}" /etc/pgbackrest.conf 459 | 460 | ## Some configurations allow daily csv files, with foreign data wrappers pointing to the files. 461 | ## to make this work nicely, they need to exist though 462 | RUN for i in $(seq 0 7); do touch "${PGLOG}/postgresql-$i.log" "${PGLOG}/postgresql-$i.csv"; done 463 | 464 | ## Fix permissions 465 | RUN set -e; \ 466 | chown -R postgres:postgres "${PGLOG}" "${PGROOT}" "${PGDATA}" /var/run/postgresql/; \ 467 | chown -R postgres:postgres /var/log/pgbackrest/ /var/lib/pgbackrest /var/spool/pgbackrest; \ 468 | chmod -x /usr/lib/postgresql/*/lib/*.so; \ 469 | chmod 1777 /var/run/postgresql; \ 470 | chmod 755 "${PGROOT}" 471 | 472 | # return /etc/apt/sources.list back to a non-AWS version for anybody that wants to use this image elsewhere 473 | RUN set -eux; \ 474 | mv -f /etc/apt/sources.list /etc/apt/sources.list.aws; \ 475 | mv -f /etc/apt/sources.list.dist /etc/apt/sources.list 476 | 477 | # DOCKER_FROM needs re-importing as any args from before FROM only apply to FROM 478 | ARG DOCKER_FROM 479 | ARG BUILDER_URL 480 | ARG RELEASE_URL 481 | RUN /build/scripts/install_extensions versions > /.image_config; \ 482 | echo "OSS_ONLY=\"$OSS_ONLY\"" >> /.image_config; \ 483 | echo "PG_LOGERRORS=\"${PG_LOGERRORS}\"" >> /.image_config; \ 484 | echo "PG_STAT_MONITOR=\"${PG_STAT_MONITOR}\"" >> /.image_config; \ 485 | echo "PGVECTO_RS=\"${PGVECTO_RS}\"" >> /.image_config; \ 486 | echo "POSTGIS_VERSIONS=\"${POSTGIS_VERSIONS}\"" >> /.image_config; \ 487 | echo "PG_AUTH_MON=\"${PG_AUTH_MON}\"" >> /.image_config; \ 488 | echo "PGBOUNCER_EXPORTER_VERSION=\"${PGBOUNCER_EXPORTER_VERSION}\"" >> /.image_config; \ 489 | echo "PGBACKREST_EXPORTER_VERSION=\"${PGBACKREST_EXPORTER_VERSION}\"" >> /.image_config; \ 490 | echo "PGAI_VERSION=\"${PGAI_VERSION}\"" >> /.image_config; \ 491 | echo "PGVECTORSCALE_VERSIONS=\"${PGVECTORSCALE_VERSIONS}\"" >> /.image_config; \ 492 | echo "PG_MAJOR=\"${PG_MAJOR}\"" >> /.image_config; \ 493 
| echo "PG_VERSIONS=\"${PG_VERSIONS}\"" >> /.image_config; \ 494 | echo "FROM=\"${DOCKER_FROM}\"" >> /.image_config; \ 495 | echo "RELEASE_URL=\"${RELEASE_URL}\"" >> /.image_config; \ 496 | echo "BUILDER_URL=\"${BUILDER_URL}\"" >> /.image_config; \ 497 | echo "BUILD_DATE=\"$(date -Iseconds)\"" >> /.image_config 498 | 499 | 500 | 501 | WORKDIR /home/postgres 502 | EXPOSE 5432 8008 8081 503 | USER postgres 504 | 505 | # This is run during the image build process so that the build will fail and the results won't be pushed 506 | # to the registry if there's a problem. It's run independently during CI so the output can be used in the GH summary 507 | # so you don't have to trawl through the huge amount of logs to find the output. 508 | COPY --chown=postgres:postgres cicd /cicd/ 509 | RUN /cicd/install_checks -v 510 | 511 | FROM builder AS trimmed 512 | 513 | USER root 514 | 515 | ENV BUILD_PACKAGES="binutils cmake devscripts equivs gcc git gpg gpg-agent libc-dev libc6-dev libkrb5-dev libperl-dev libssl-dev lsb-release make patchutils python2-dev python3-dev wget libsodium-dev" 516 | 517 | RUN set -ex; \ 518 | apt-get purge -y ${BUILD_PACKAGES}; \ 519 | apt-get autoremove -y; \ 520 | apt-get clean; \ 521 | rm -rf /var/lib/apt/lists/* \ 522 | /var/cache/debconf/* \ 523 | /usr/share/doc \ 524 | /usr/share/man \ 525 | /usr/share/locale/?? \ 526 | /usr/share/locale/??_?? \ 527 | /home/postgres/.pgx \ 528 | /build/ \ 529 | /usr/local/rustup \ 530 | /usr/local/cargo \ 531 | /cicd; \ 532 | find /var/log -type f -exec truncate --size 0 {} \; 533 | 534 | USER postgres 535 | 536 | 537 | ## Create a smaller Docker image from the builder image 538 | FROM scratch AS release 539 | COPY --from=trimmed / / 540 | 541 | ARG PG_MAJOR 542 | 543 | ENV PGROOT=/home/postgres \ 544 | PGDATA=/home/postgres/pgdata/data \ 545 | PGLOG=/home/postgres/pg_log \ 546 | PGSOCKET=/home/postgres/pgdata \ 547 | BACKUPROOT=/home/postgres/pgdata/backup \ 548 | PGBACKREST_CONFIG=/home/postgres/pgdata/backup/pgbackrest.conf \ 549 | PGBACKREST_STANZA=poddb \ 550 | PATH=/usr/lib/postgresql/${PG_MAJOR}/bin:${PATH} \ 551 | LC_ALL=C.UTF-8 \ 552 | LANG=C.UTF-8 \ 553 | PAGER="" 554 | 555 | # https://github.com/docker-library/postgres/commit/bfc5d81c8f5647c690f452dc558e64fddb1802f6 556 | # We set the default STOPSIGNAL to SIGINT, which corresponds to what PostgreSQL 557 | # calls "Fast Shutdown mode" wherein new connections are disallowed and any 558 | # in-progress transactions are aborted, allowing PostgreSQL to stop cleanly and 559 | # flush tables to disk, which is the best compromise available to avoid data 560 | # corruption. 561 | # 562 | # Users who know their applications do not keep open long-lived idle connections 563 | # may way to use a value of SIGTERM instead, which corresponds to "Smart 564 | # Shutdown mode" in which any existing sessions are allowed to finish and the 565 | # server stops when all sessions are terminated. 566 | # 567 | # See https://www.postgresql.org/docs/17/server-shutdown.html for more details 568 | # about available PostgreSQL server shutdown signals. 569 | # 570 | # See also https://www.postgresql.org/docs/17/server-start.html for further 571 | # justification of this as the default value, namely that the example (and 572 | # shipped) systemd service files use the "Fast Shutdown mode" for service 573 | # termination. 
574 | # 575 | STOPSIGNAL SIGINT 576 | # 577 | # An additional setting that is recommended for all users regardless of this 578 | # value is the runtime "--stop-timeout" (or your orchestrator/runtime's 579 | # equivalent) for controlling how long to wait between sending the defined 580 | # STOPSIGNAL and sending SIGKILL (which is likely to cause data corruption). 581 | # 582 | # The default in most runtimes (such as Docker) is 10 seconds, and the 583 | # documentation at https://www.postgresql.org/docs/17/server-start.html notes 584 | # that even 90 seconds may not be long enough in many instances. 585 | 586 | WORKDIR /home/postgres 587 | EXPOSE 5432 8008 8081 588 | USER postgres 589 | 590 | ENTRYPOINT ["/docker-entrypoint.sh"] 591 | CMD ["postgres"] 592 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL = bash 2 | .SHELLFLAGS = -ec 3 | .ONESHELL: 4 | .DELETE_ON_ERROR: 5 | 6 | all: help 7 | 8 | PG_MAJOR?=17 9 | # All PG_VERSIONS binaries/libraries will be included in the Dockerfile 10 | # specifying multiple versions will allow things like pg_upgrade etc to work. 
11 | PG_VERSIONS?= 12 | 13 | # Additional PostgreSQL extensions we want to include with specific version/commit tags 14 | PGAI_VERSION?=extension-0.10.1 15 | PGVECTORSCALE_VERSIONS?=all 16 | POSTGIS_VERSIONS?=3 17 | PG_AUTH_MON?=v3.0 18 | PG_STAT_MONITOR?=2.1.0 19 | PG_LOGERRORS?=v2.1.3 20 | PGVECTO_RS?=0.4.0 21 | TIMESCALEDB_VERSIONS?=all 22 | TOOLKIT_VERSIONS?=all 23 | PGBOUNCER_EXPORTER_VERSION?=0.9.0 24 | PGBACKREST_EXPORTER_VERSION?=0.18.0 25 | 26 | # This is used to build the docker --platform, so pick amd64 or arm64 27 | PLATFORM?=amd64 28 | 29 | DOCKER_TAG_POSTFIX?= 30 | ALL_VERSIONS?=false 31 | OSS_ONLY?=false 32 | 33 | # If you're using ephemeral runners, then we want to use the cache, otherwise we don't want caching so that 34 | # we always get updated upstream packages 35 | USE_DOCKER_CACHE?=true 36 | ifeq ($(strip $(USE_DOCKER_CACHE)),true) 37 | DOCKER_CACHE := 38 | else 39 | DOCKER_CACHE := --no-cache 40 | endif 41 | 42 | ifeq ($(ALL_VERSIONS),true) 43 | DOCKER_TAG_POSTFIX := $(strip $(DOCKER_TAG_POSTFIX))-all 44 | ifeq ($(PG_MAJOR),17) 45 | PG_VERSIONS := 17 16 15 14 13 46 | else ifeq ($(PG_MAJOR),16) 47 | PG_VERSIONS := 16 15 14 13 48 | else ifeq ($(PG_MAJOR),15) 49 | PG_VERSIONS := 15 14 13 50 | else ifeq ($(PG_MAJOR),14) 51 | PG_VERSIONS := 14 13 52 | else ifeq ($(PG_MAJOR),13) 53 | PG_VERSIONS := 13 54 | else ifeq ($(PG_MAJOR),12) 55 | $(error pg12 is no longer supported) 56 | endif 57 | else 58 | PG_VERSIONS := $(PG_MAJOR) 59 | endif 60 | 61 | ifeq ($(OSS_ONLY),true) 62 | DOCKER_TAG_POSTFIX := $(strip $(DOCKER_TAG_POSTFIX))-oss 63 | endif 64 | 65 | DOCKER_FROM?=ubuntu:22.04 66 | DOCKER_EXTRA_BUILDARGS?= 67 | DOCKER_REGISTRY?=localhost:5000 68 | DOCKER_REPOSITORY?=timescale/timescaledb-ha 69 | DOCKER_PUBLISH_URL?=$(DOCKER_REGISTRY)/$(DOCKER_REPOSITORY) 70 | 71 | DOCKER_BUILDER_URL=$(DOCKER_PUBLISH_URL):pg$(PG_MAJOR)$(DOCKER_TAG_POSTFIX)-builder 72 | DOCKER_BUILDER_ARCH_URL=$(DOCKER_PUBLISH_URL):pg$(PG_MAJOR)$(DOCKER_TAG_POSTFIX)-builder-$(PLATFORM) 73 | DOCKER_RELEASE_URL=$(DOCKER_PUBLISH_URL):pg$(PG_MAJOR)$(DOCKER_TAG_POSTFIX) 74 | DOCKER_RELEASE_ARCH_URL=$(DOCKER_PUBLISH_URL):pg$(PG_MAJOR)$(DOCKER_TAG_POSTFIX)-$(PLATFORM) 75 | CICD_URL=$(DOCKER_PUBLISH_URL):cicd-$(shell printf "%.7s" "$(GITHUB_SHA)") 76 | CICD_ARCH_URL=$(CICD_URL)-$(PLATFORM) 77 | 78 | GITHUB_STEP_SUMMARY?=/dev/null 79 | GITHUB_OUTPUT?=/dev/null 80 | 81 | # These parameters control which entrypoints we add to the scripts 82 | GITHUB_DOCKERLIB_POSTGRES_REF=master 83 | GITHUB_TIMESCALEDB_DOCKER_REF=main 84 | 85 | ALLOW_ADDING_EXTENSIONS?=true 86 | 87 | # These variables have to do with this Docker repository 88 | GIT_REMOTE=$(shell git config --get remote.origin.url | sed 's/.*@//g') 89 | GIT_STATUS=$(shell git status --porcelain | paste -sd "," -) 90 | GIT_REV?=$(shell git rev-parse HEAD) 91 | 92 | INSTALL_METHOD?=docker-ha 93 | 94 | # These variables have to do with what software we pull in from github for timescaledb 95 | GITHUB_REPO?=timescale/timescaledb 96 | 97 | # We need dynamic variables here, that is why we do not use $(shell awk ...) 98 | VERSION_INFO = /tmp/outputs/version_info 99 | VAR_PGMAJORMINOR="$$(awk -F '=' '/postgresql.version=/ {print $$2}' $(VERSION_INFO) 2>/dev/null)" 100 | VAR_TSVERSION="$$(awk -F '=' '/timescaledb.version=/ {print $$2}' $(VERSION_INFO) 2>/dev/null)" 101 | VAR_TSMAJOR="$$(awk -F '[.=]' '/timescaledb.version=/ {print $$3 "." 
$$4}' $(VERSION_INFO))" 102 | 103 | # In these steps we do some introspection to find out some details of the versions 104 | # that are inside the Docker image. As we use the Ubuntu packages, we do not know until 105 | # after we have built the image, what patch version of PostgreSQL, or PostGIS is installed. 106 | # 107 | # We will then attach this information as OCI labels to the final Docker image 108 | # docker buildx build does a push to export it, so it doesn't exist in the regular local registry yet 109 | VERSION_TAG?= 110 | ifeq ($(VERSION_TAG),) 111 | VERSION_TAG := pg$(PG_MAJOR)$(DOCKER_TAG_POSTFIX)-builder-$(PLATFORM) 112 | $(VERSION_INFO): builder 113 | endif 114 | VERSION_IMAGE := $(DOCKER_PUBLISH_URL):$(VERSION_TAG) 115 | 116 | # The purpose of publishing the images under many tags, is to provide 117 | # some choice to the user as to their appetite for volatility. 118 | # 119 | # 1. timescale/timescaledb-ha:pg17 120 | # 2. timescale/timescaledb-ha:pg17-ts2.19 121 | # 3. timescale/timescaledb-ha:pg17.3-ts2.19 122 | # 4. timescale/timescaledb-ha:pg17.3-ts2.19.0 123 | 124 | $(VERSION_INFO): 125 | docker rm --force builder_inspector >&/dev/null || true 126 | docker run --rm -d --name builder_inspector -e PGDATA=/tmp/pgdata --user=postgres "$(VERSION_IMAGE)" sleep 300 127 | docker cp ./cicd "builder_inspector:/cicd/" 128 | docker exec builder_inspector /cicd/smoketest.sh || (docker logs -n100 builder_inspector && exit 1) 129 | mkdir -p /tmp/outputs 130 | docker cp builder_inspector:/tmp/version_info.log "$(VERSION_INFO)" 131 | docker rm --force builder_inspector || true 132 | 133 | # We require the use of buildkit, as we use the --secret arguments for docker build 134 | export DOCKER_BUILDKIT = 1 135 | 136 | # We label all the Docker Images with the versions of PostgreSQL, TimescaleDB and some other extensions 137 | # afterwards, by using introspection, as minor versions may differ even when using the same 138 | # Dockerfile 139 | DOCKER_BUILD_COMMAND=docker build \ 140 | $(DOCKER_CACHE) \ 141 | --platform "linux/$(PLATFORM)" \ 142 | --pull \ 143 | --progress=plain \ 144 | --build-arg DOCKER_FROM="$(DOCKER_FROM)" \ 145 | --build-arg ALLOW_ADDING_EXTENSIONS="$(ALLOW_ADDING_EXTENSIONS)" \ 146 | --build-arg GITHUB_DOCKERLIB_POSTGRES_REF="$(GITHUB_DOCKERLIB_POSTGRES_REF)" \ 147 | --build-arg GITHUB_REPO="$(GITHUB_REPO)" \ 148 | --build-arg GITHUB_TIMESCALEDB_DOCKER_REF="$(GITHUB_TIMESCALEDB_DOCKER_REF)" \ 149 | --build-arg INSTALL_METHOD="$(INSTALL_METHOD)" \ 150 | --build-arg PGAI_VERSION="$(PGAI_VERSION)" \ 151 | --build-arg PGVECTORSCALE_VERSIONS="$(PGVECTORSCALE_VERSIONS)" \ 152 | --build-arg PG_AUTH_MON="$(PG_AUTH_MON)" \ 153 | --build-arg PG_LOGERRORS="$(PG_LOGERRORS)" \ 154 | --build-arg PG_MAJOR=$(PG_MAJOR) \ 155 | --build-arg PG_STAT_MONITOR="$(PG_STAT_MONITOR)" \ 156 | --build-arg PG_VERSIONS="$(PG_VERSIONS)" \ 157 | --build-arg POSTGIS_VERSIONS=$(POSTGIS_VERSIONS) \ 158 | --build-arg OSS_ONLY="$(OSS_ONLY)" \ 159 | --build-arg TIMESCALEDB_VERSIONS="$(TIMESCALEDB_VERSIONS)" \ 160 | --build-arg TOOLKIT_VERSIONS="$(TOOLKIT_VERSIONS)" \ 161 | --build-arg PGVECTO_RS="$(PGVECTO_RS)" \ 162 | --build-arg RELEASE_URL="$(DOCKER_RELEASE_URL)" \ 163 | --build-arg BUILDER_URL="$(DOCKER_BUILDER_URL)" \ 164 | --build-arg PGBOUNCER_EXPORTER_VERSION=$(PGBOUNCER_EXPORTER_VERSION) \ 165 | --build-arg PGBACKREST_EXPORTER_VERSION=$(PGBACKREST_EXPORTER_VERSION) \ 166 | --label com.timescaledb.image.install_method=$(INSTALL_METHOD) \ 167 | --label org.opencontainers.image.created="$$(date 
-Iseconds -u)" \ 168 | --label org.opencontainers.image.revision="$(GIT_REV)" \ 169 | --label org.opencontainers.image.source="$(GIT_REMOTE)" \ 170 | --label org.opencontainers.image.vendor=Timescale \ 171 | $(DOCKER_EXTRA_BUILDARGS) \ 172 | . 173 | 174 | # We provide the fast target as the first (=default) target, as it will skip installing 175 | # many optional extensions, and it will only install a single timescaledb (master) version. 176 | # This is basically useful for developers of this repository, to allow fast feedback cycles. 177 | .PHONY: fast 178 | fast: DOCKER_EXTRA_BUILDARGS= --build-arg GITHUB_TAG=master 179 | fast: ALL_VERSIONS=false 180 | fast: PG_AUTH_MON= 181 | fast: PG_LOGERRORS= 182 | fast: PGAI_VERSION= 183 | fast: PG_VERSIONS=17 184 | fast: POSTGIS_VERSIONS= 185 | fast: TOOLKIT_VERSIONS= 186 | fast: PGVECTORSCALE_VERSIONS= 187 | fast: build 188 | 189 | .PHONY: latest 190 | latest: ALL_VERSIONS=false 191 | latest: TIMESCALEDB_VERSIONS=latest 192 | latest: TOOLKIT_VERSIONS=latest 193 | latest: PGVECTORSCALE_VERSIONS=latest 194 | latest: build 195 | 196 | prune: # docker system prune -af 197 | docker system prune -af 198 | 199 | ifeq ($(USE_DOCKER_CACHE),false) 200 | builder: prune 201 | endif 202 | 203 | .PHONY: get-image-config 204 | get-image-config: 205 | docker run --platform "linux/$(PLATFORM)" --rm $(DOCKER_RELEASE_URL) cat /.image_config 206 | 207 | .PHONY: builder 208 | builder: # build the `builder` target image 209 | builder: DOCKER_EXTRA_BUILDARGS=--target builder 210 | builder: 211 | $(DOCKER_BUILD_COMMAND) --tag "$(DOCKER_BUILDER_ARCH_URL)" 212 | 213 | .PHONY: publish-builder 214 | publish-builder: # build and publish the `builder` target image 215 | publish-builder: builder $(VERSION_INFO) 216 | docker push "$(DOCKER_BUILDER_ARCH_URL)" 217 | echo "builder_id=$$(docker inspect "$(DOCKER_BUILDER_ARCH_URL)" | jq -r '.[].RepoDigests[0]')" | tee -a "$(GITHUB_OUTPUT)" 218 | 219 | # The prepare step does not build the final image, as we need to use introspection 220 | # to find out what versions of software are installed in this image 221 | .PHONY: release 222 | release: # build the `release` target image 223 | release: DOCKER_EXTRA_BUILDARGS=--target release 224 | release: $(VERSION_INFO) 225 | $(DOCKER_BUILD_COMMAND) --tag "$(DOCKER_RELEASE_ARCH_URL)" \ 226 | $$(awk -F '=' '{printf "--label com.timescaledb.image."$$1"="$$2" "}' $(VERSION_INFO)) 227 | 228 | publish-release: # build and publish the `release` target image 229 | publish-release: release 230 | docker push "$(DOCKER_RELEASE_ARCH_URL)" 231 | echo "release_id=$$(docker inspect "$(DOCKER_RELEASE_ARCH_URL)" | jq -r '.[].RepoDigests[0]')" | tee -a "$(GITHUB_OUTPUT)" 232 | 233 | .PHONY: build-sha 234 | build-sha: # build a specific git commit 235 | build-sha: DOCKER_EXTRA_BUILDARGS=--target release 236 | build-sha: is_ci 237 | ifeq ($(strip $(GITHUB_SHA)),) 238 | $(error GITHUB_SHA is empty, is this running in github actions?) 
239 | endif 240 | $(DOCKER_BUILD_COMMAND) --tag "$(CICD_ARCH_URL)" 241 | 242 | .PHONY: publish-sha 243 | publish-sha: # push the specific git commit image 244 | publish-sha: is_ci 245 | docker push "$(CICD_ARCH_URL)" 246 | 247 | .PHONY: build-tag 248 | build-tag: DOCKER_TAG_POSTFIX?=$(GITHUB_TAG) 249 | build-tag: release 250 | 251 | .PHONY: build-oss 252 | build-oss: # build an OSS-only image 253 | build-oss: OSS_ONLY=true 254 | build-oss: DOCKER_TAG_POSTFIX=-oss 255 | build-oss: 256 | $(DOCKER_BUILD_COMMAND) 257 | 258 | .PHONY: build 259 | build: # build a local docker image 260 | build: DOCKER_TAG_POSTFIX=-local 261 | build: 262 | $(DOCKER_BUILD_COMMAND) 263 | 264 | .PHONY: publish-combined-builder-manifest 265 | publish-combined-builder-manifest: $(VERSION_INFO) # publish a combined builder image manifest 266 | @set -x 267 | images=() 268 | for image in $$(cd /tmp/outputs && echo builder-* | sed 's/builder-/sha256:/g'); do 269 | images+=("--amend" "$(DOCKER_PUBLISH_URL)@$$image") 270 | done 271 | cat $(VERSION_INFO) || true 272 | echo "Creating manifest $(DOCKER_BUILDER_URL) that includes $(DOCKER_BUILDER_URL)-amd64 and $(DOCKER_BUILDER_URL)-arm64 for pg $(VAR_PGMAJORMINOR)}" 273 | for tag in pg$(PG_MAJOR) pg$(VAR_PGMAJORMINOR); do 274 | url="$(DOCKER_PUBLISH_URL):$$tag$(DOCKER_TAG_POSTFIX)-builder" 275 | docker manifest rm "$$url" || true 276 | docker manifest create "$$url" "$${images[@]}" 277 | docker manifest push "$$url" 278 | echo "Pushed $$url ($${images[@]})" | tee -a "$(GITHUB_STEP_SUMMARY)" 279 | done 280 | 281 | .PHONY: publish-combined-manifest 282 | publish-combined-manifest: # publish the main combined manifest that includes amd64 and arm64 images 283 | publish-combined-manifest: $(VERSION_INFO) 284 | @set -x 285 | images=() 286 | for image in $$(cd /tmp/outputs && echo release-* | sed 's/release-/sha256:/g'); do 287 | images+=("--amend" "$(DOCKER_PUBLISH_URL)@$$image") 288 | done 289 | cat $(VERSION_INFO) || true 290 | echo "Creating manifest $(DOCKER_RELEASE_URL) that includes $(DOCKER_RELEASE_URL)-amd64 and $(DOCKER_RELEASE_URL)-arm64 for pg $(VAR_PGMAJORMINOR)" 291 | for tag in pg$(PG_MAJOR) pg$(PG_MAJOR)-ts$(VAR_TSMAJOR) pg$(VAR_PGMAJORMINOR)-ts$(VAR_TSVERSION); do 292 | url="$(DOCKER_PUBLISH_URL):$$tag$(DOCKER_TAG_POSTFIX)" 293 | docker manifest rm "$$url" || true 294 | docker manifest create "$$url" "$${images[@]}" 295 | docker manifest push "$$url" 296 | echo "Pushed $$url ($${images[@]})" | tee -a "$(GITHUB_STEP_SUMMARY)" 297 | done 298 | 299 | .PHONY: publish-manifests 300 | publish-manifests: # publish the combined manifests for the builder and the release images 301 | publish-manifests: publish-combined-builder-manifest publish-combined-manifest 302 | 303 | .PHONY: publish-combined-sha 304 | publish-combined-sha: is_ci # publish a combined image manifest for a CICD branch build 305 | @echo "Creating manifest $(CICD_URL) that includes $(CICD_URL)-amd64 and $(CICD_URL)-arm64" 306 | amddigest_image="$$(./fetch_tag_digest $(CICD_URL)-amd64)" 307 | armdigest_image="$$(./fetch_tag_digest $(CICD_URL)-arm64)" 308 | echo "AMD: $$amddigest_image ARM: $$armdigest_image" 309 | docker manifest rm "$(CICD_URL)" >& /dev/null || true 310 | docker manifest create "$(CICD_URL)" --amend "$$amddigest_image" --amend "$$armdigest_image" 311 | docker manifest push "$(CICD_URL)" 312 | echo "pushed $(CICD_URL)" 313 | echo "Pushed $(CICD_URL) (amd:$$amddigest_image, arm:$$armdigest_image)" >> "$(GITHUB_STEP_SUMMARY)" 314 | 315 | CHECK_NAME=ha-check 316 | .PHONY: check 317 | check: # 
check images to see if they have all the requested content 318 | @set -x 319 | for arch in amd64 arm64; do 320 | key="$$(mktemp -u XXXXXX)" 321 | check_name="$(CHECK_NAME)-$$key" 322 | echo "### Checking $$arch $(DOCKER_RELEASE_URL)" >> $(GITHUB_STEP_SUMMARY) 323 | docker rm --force "$$check_name" >&/dev/null || true 324 | docker run \ 325 | --platform linux/"$$arch" \ 326 | --pull always \ 327 | -d \ 328 | --name "$$check_name" \ 329 | -e PGDATA=/tmp/pgdata \ 330 | --user=postgres \ 331 | "$(DOCKER_RELEASE_URL)" sleep 300 332 | docker exec -u root "$$check_name" mkdir -p /cicd/scripts 333 | docker exec -u root "$$check_name" chown -R postgres: /cicd 334 | tar -cf - -C ./cicd . | docker exec -i "$$check_name" tar -C /cicd -x 335 | tar -cf - -C ./build_scripts . | docker exec -i "$$check_name" tar -C /cicd/scripts -x 336 | docker exec -e GITHUB_STEP_SUMMARY="/tmp/step_summary-$$key" -e CI="$(CI)" "$$check_name" /cicd/install_checks -v || { docker logs -n100 "$$check_name"; exit 1; } 337 | docker exec "$$check_name" cat "/tmp/step_summary-$$key" >> "$(GITHUB_STEP_SUMMARY)" 2>&1 338 | docker rm --force "$$check_name" >&/dev/null || true 339 | done 340 | 341 | .PHONY: check-sha 342 | check-sha: # check a specific git commit-based image 343 | @echo "### Checking $(CICD_ARCH_URL)" >> $(GITHUB_STEP_SUMMARY) 344 | case "$(CICD_ARCH_URL)" in 345 | *-amd64) arch=amd64;; 346 | *-arm64) arch=arm64;; 347 | *) echo "unknown architecture for $(CICD_ARCH_URL)" >&2; exit 1;; 348 | esac 349 | key="$$(mktemp -u XXXXXX)" 350 | check_name="$(CHECK_NAME)-$$key" 351 | docker rm --force "$$check_name" >&/dev/null || true 352 | docker run \ 353 | --platform linux/"$$arch" \ 354 | -d \ 355 | --name "$$check_name" \ 356 | -e PGDATA=/tmp/pgdata \ 357 | --user=postgres \ 358 | "$(CICD_ARCH_URL)" sleep 300 359 | docker exec -u root "$$check_name" mkdir -p /cicd/scripts 360 | docker exec -u root "$$check_name" chown -R postgres: /cicd 361 | tar -cf - -C ./cicd . | docker exec -i "$$check_name" tar -C /cicd -x 362 | tar -cf - -C ./build_scripts . | docker exec -i "$$check_name" tar -C /cicd/scripts -x 363 | docker exec -e GITHUB_STEP_SUMMARY="/tmp/step_summary-$$key" -e CI="$(CI)" "$$check_name" /cicd/install_checks -v || { docker logs -n100 "$$check_name"; exit 1; } 364 | docker exec -i "$$check_name" cat "/tmp/step_summary-$$key" >> "$(GITHUB_STEP_SUMMARY)" 2>&1 365 | docker rm --force "$$check_name" >&/dev/null || true 366 | 367 | .PHONY: is_ci 368 | is_ci: 369 | @if [ "$${CI}" != "true" ]; then echo "environment variable CI is not set to \"true\", are you running this in Github Actions?"; exit 1; fi 370 | 371 | .PHONY: list-images 372 | list-images: # list local images 373 | docker images --filter "label=com.timescaledb.image.install_method=$(INSTALL_METHOD)" --filter "dangling=false" 374 | 375 | HELP_TARGET_DEPTH ?= \# 376 | help: # Show how to get started & what targets are available 377 | @printf "This is a list of all the make targets that you can run, e.g. $(BOLD)make check$(NORMAL)\n\n" 378 | @awk -F':+ |$(HELP_TARGET_DEPTH)' '/^[0-9a-zA-Z._%-]+:+.+$(HELP_TARGET_DEPTH).+$$/ { printf "$(GREEN)%-20s\033[0m %s\n", $$1, $$3 }' $(MAKEFILE_LIST) 379 | @echo 380 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | TimescaleDB (TM) Docker images and configuration for Kubernetes 2 | 3 | Copyright (c) 2019-2020 Timescale, Inc. All Rights Reserved. 
4 | 
5 | Licensed under the Apache License, Version 2.0 (the "License");
6 | you may not use this file except in compliance with the License.
7 | You may obtain a copy of the License at
8 | 
9 | http://www.apache.org/licenses/LICENSE-2.0
10 | 
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # TimescaleDB Docker image for Kubernetes
2 | 
3 | This directory contains everything that allows us to create a Docker image with the following pieces of software:
4 | 
5 | - PostgreSQL
6 | - Some PostgreSQL extensions, most notably PostGIS
7 | - TimescaleDB, multiple versions
8 | - pgBackRest
9 | - scripts to make it all work in a Kubernetes context
10 | 
11 | Currently, our base image is Ubuntu, as we require glibc 2.33+.
12 | 
13 | The CI pipeline currently pushes the resulting images to: https://hub.docker.com/r/timescale/timescaledb-ha
14 | 
15 | ## Build images
16 | 
17 | To build an image, run the following make target:
18 | 
19 | ```console
20 | make
21 | ```
22 | 
23 | As building the whole image takes a considerable amount of time, the default target only installs a single timescaledb version:
24 | the head of the `master` branch of github.com/timescale/timescaledb.
25 | 
26 | For a full build, run:
27 | 
28 | ```console
29 | make build
30 | ```
31 | 
32 | Or, if you only want to exclude Timescale License code, you can use the following command:
33 | 
34 | ```console
35 | make build-oss
36 | ```
37 | 
38 | > For more information about licensing, please read our [blog post](https://blog.timescale.com/blog/how-we-are-building-an-open-source-business-a7701516a480/) about the subject.
39 | 
40 | By default, the Docker image contains many extensions, including [TimescaleDB](https://github.com/timescale/timescaledb) and [PostGIS](https://postgis.net/).
41 | You can override which versions of the extensions are built by setting environment variables, for example:
42 | 
43 | ```console
44 | # Build without any PostGIS
45 | POSTGIS_VERSIONS="" make build
46 | ```
47 | 
48 | For further environment variables that can be set, we point you to the [Makefile](Makefile) itself.
49 | 
50 | To change the versions of timescaledb, pgvectorscale, or toolkit that are built, update `build_scripts/versions.yaml`.
51 | 
52 | ## Verify your work
53 | 
54 | For every commit pushed to this repository, a Docker Image is built and, if the build succeeds, pushed.
55 | The tag of this Docker Image will be `cicd-<short commit sha>-amd64`,
56 | for example, for commit `baddcafe...`, the tag will look like:
57 | ```text
58 | timescale/timescaledb-ha:cicd-baddcaf-amd64
59 | ```
60 | 
61 | #### Find out tag using commandline
62 | 
63 | Assuming your current working directory is on the same commit as the one you pushed:
64 | 
65 | ```console
66 | echo "timescale/timescaledb-ha:cicd-$(git rev-parse HEAD | cut -c 1-7)"
67 | ```
68 | 
69 | #### Find tag using GitHub Web interface
70 | 
71 | - Actions
72 | - Click on the **Build branch** Workflow for your commit/branch
73 | - Look at the `Build and push branch summary` for the tag
74 | 
75 | ##### Example output
76 | 
77 | ```text
78 | Checking docker.io/timescale/timescaledb-ha:cicd-8578fce-amd64
79 | 
80 | amd64: the base image was built 93 seconds ago
81 | ...
82 | ```
83 | 
84 | In the above example, your Docker tag is `cicd-8578fce-amd64` and your full image URL is:
85 | 
86 | ```text
87 | docker.io/timescale/timescaledb-ha:cicd-8578fce-amd64
88 | ```
89 | 
90 | ## Test your Docker Image
91 | 
92 | ```console
93 | docker run --rm -ti -e POSTGRES_PASSWORD=smoketest docker.io/timescale/timescaledb-ha:cicd-baddcaf-amd64
94 | ```
95 | 
96 | ## Versioning and Releases
97 | 
98 | ### Release Process
99 | 
100 | Between releases, we keep track of notable changes in CHANGELOG.md.
101 | 
102 | When we want to make a release, we update CHANGELOG.md to contain the release notes for the planned release in a section for
103 | the proposed release number. This update is the commit that will be tagged as the actual release, which ensures that each release
104 | contains a copy of its own release notes.
105 | 
106 | We should also copy the release notes to the Github releases page, but CHANGELOG.md is the primary place to keep the release notes.
107 | 
108 | The release commit should be tagged with a signed tag:
109 | 
110 | ```console
111 | git tag -s vx.x.x
112 | git push --tags
113 | ```
114 | 
115 | If you use the release notes in the tag commit message, they will automatically appear in the Github release. On the Github releases
116 | page click `Draft a new release` and then type your tag in the drop down containing `@master`. The release will automatically be created
117 | using the tag commit text.
118 | 
119 | ### Publish the images to Docker Hub and other registries
120 | 
121 | The images will be published under quite a few aliases; for example, for PostgreSQL 15.2 and TimescaleDB 2.10.3, the following images will be built and pushed/overwritten:
122 | 
123 | - timescale/timescaledb-ha:pg15
124 | - timescale/timescaledb-ha:pg15-all
125 | - timescale/timescaledb-ha:pg15-ts2.10
126 | - timescale/timescaledb-ha:pg15-ts2.10-all
127 | - timescale/timescaledb-ha:pg15.2-ts2.10.3
128 | - timescale/timescaledb-ha:pg15.2-ts2.10.3-all
129 | 
130 | For `OSS_ONLY` builds, the following tags will be published:
131 | - timescale/timescaledb-ha:pg15-oss
132 | - timescale/timescaledb-ha:pg15-all-oss
133 | - timescale/timescaledb-ha:pg15-ts2.10-oss
134 | - timescale/timescaledb-ha:pg15-ts2.10-all-oss
135 | - timescale/timescaledb-ha:pg15.2-ts2.10.3-oss
136 | - timescale/timescaledb-ha:pg15.2-ts2.10.3-all-oss
137 | 
138 | The `-all` portion of the tags specifies that the image contains not only pg15, but also versions 13 and 14. Otherwise, only
139 | the single version of PostgreSQL is included in the image.
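#### Inspect a published image

To double-check which versions ended up inside a published image, one option is to read the `/.image_config` file that the build writes into every image; the `pg17` tag below is used purely as an example, and this assumes you have Docker available locally:

```console
docker run --rm docker.io/timescale/timescaledb-ha:pg17 cat /.image_config
```

The Makefile's `get-image-config` target prints the same file for the configured `DOCKER_RELEASE_URL`.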
140 | 
--------------------------------------------------------------------------------
/build_scripts/install_extensions:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | SCRIPT_DIR="${BASH_SOURCE[0]%/*}"
4 | . "$SCRIPT_DIR"/shared.sh
5 | 
6 | # This takes the requested toolkit and other extensions, figures out what versions of cargo-pgx/pgrx those
7 | # versions want, and then builds from the oldest pgx/pgrx to the newest so that `cargo install cargo-pgx/pgrx --version`
8 | # commands are only run once. Once cargo-pgrx has been installed, run the installation for the extensions,
9 | # and then continue through the next cargo-pgrx version. At the end, we're left with the latest version of
10 | # cargo-pgrx so that the builder image can be used to test master/main branches.
11 | 
12 | # we use the following variables to decide if/what to install:
13 | # - TIMESCALEDB_VERSIONS, TOOLKIT_VERSIONS, PGVECTORSCALE_VERSIONS
14 | 
15 | [ "$1" = -n ] && { DRYRUN=true; shift; }
16 | 
17 | [ "$OSS_ONLY" = true ] && log "installing extensions for OSS_ONLY"
18 | 
19 | what="$1"
20 | [ -z "$what" ] && what=all
21 | if [[ ! "$what" =~ ^(versions|timescaledb|rust|all|pgvectorscale|toolkit)$ ]]; then
22 | echo "usage: $0 [-n] [versions|timescaledb|rust|all|pgvectorscale|toolkit]" >&2
23 | exit 1
24 | fi
25 | 
26 | case "$what" in
27 | versions)
28 | # this outputs the expanded versions
29 | echo "TIMESCALEDB_VERSIONS=\"$TIMESCALEDB_VERSIONS\""
30 | echo "TOOLKIT_VERSIONS=\"$TOOLKIT_VERSIONS\""
31 | echo "PGVECTORSCALE_VERSIONS=\"$PGVECTORSCALE_VERSIONS\""
32 | ;;
33 | 
34 | timescaledb | all)
35 | for ver in $TIMESCALEDB_VERSIONS; do
36 | install_timescaledb "$ver"
37 | done
38 | if [ "$DRYRUN" != true ]; then timescaledb_post_install; fi
39 | ;;& # fallthrough to get rust as well if we're called with 'all'
40 | 
41 | pgvectorscale | all)
42 | for ver in $PGVECTORSCALE_VERSIONS; do
43 | install_pgvectorscale "$ver"
44 | done
45 | ;;& # fallthrough to get rust as well if we're called with 'all'
46 | 
47 | rust | all) install_rust_extensions;;
48 | 
49 | toolkit) install_rust_extensions;;
50 | 
51 | esac
52 | 
--------------------------------------------------------------------------------
/build_scripts/pg_version.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | MAJOR="${1}"
4 | if [ -z "${MAJOR}" ]; then
5 | echo "missing major version"
6 | exit 2
7 | fi
8 | 
9 | PINNED=$(yq ".postgres_versions.${MAJOR}" /build/scripts/versions.yaml)
10 | if [ "${PINNED}" = "null" ]; then
11 | echo "could not find ${MAJOR} pinned version"
12 | exit 2
13 | fi
14 | 
15 | echo "${PINNED}"
16 | 
--------------------------------------------------------------------------------
/build_scripts/shared.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | set -e -o pipefail
4 | 
5 | ORIGINAL_PATH="$PATH"
6 | 
7 | log() {
8 | echo "$ARCH: $*" >&2
9 | }
10 | 
11 | error() {
12 | echo "** $ARCH: ERROR: $* **" >&2
13 | }
14 | 
15 | git_clone() {
16 | local src="$1" dst=/build/"$2" err
17 | 
18 | [ -d "$dst"/.git ] && return 0
19 | git clone "$src" "$dst"
20 | err=$?
21 | if [ $err -ne 0 ]; then
22 | error "error cloning $dst ($err)"
23 | return $err
24 | fi
25 | log "git cloned to $dst"
26 | return 0
27 | }
28 | 
29 | git_checkout() {
30 | local repo=/build/"$1" tag="$2" err
31 | 
32 | git -C "$repo" checkout -f "$tag"
33 | err=$?
34 | if [ $err -ne 0 ]; then 35 | error "error checking out $tag for $repo ($err)" 36 | return $err 37 | fi 38 | git -C "$repo" clean -f -d -x 39 | err=$? 40 | if [ $err -ne 0 ]; then 41 | error "error checking out $tag for $repo ($err)" 42 | return $err 43 | fi 44 | return 0 45 | } 46 | 47 | cargo_installed() { 48 | if type -p cargo; then 49 | return 0 50 | fi 51 | return 1 52 | } 53 | 54 | cargo_pgx_installed() { 55 | if ! cargo_installed; then 56 | return 1 57 | fi 58 | if test cargo pgx --version >&/dev/null; then 59 | return 0 60 | fi 61 | return 1 62 | } 63 | 64 | cargo_pgrx_installed() { 65 | if ! cargo_installed; then 66 | return 1 67 | fi 68 | if test cargo pgrx --version >&/dev/null; then 69 | return 0 70 | fi 71 | return 1 72 | } 73 | 74 | cargo_pgx_version() { 75 | if ! cargo_pgx_installed; then 76 | return 1 77 | fi 78 | 79 | local current_pgx=uninstalled 80 | if test cargo pgx --version >&/dev/null; then 81 | current_pgx="$(cargo pgx --version | awk '{print $2}')" 82 | fi 83 | if [ "$current_pgx" != "uninstalled" ]; then 84 | echo "$current_pgx" 85 | fi 86 | return 0 87 | } 88 | 89 | cargo_pgrx_version() { 90 | if ! cargo_pgrx_installed; then 91 | return 1 92 | fi 93 | 94 | local current_pgrx=uninstalled 95 | if test cargo pgrx --version >&/dev/null; then 96 | current_pgrx="$(cargo pgrx --version | awk '{print $2}')" 97 | fi 98 | if [ "$current_pgrx" != "uninstalled" ]; then 99 | echo "$current_pgrx" 100 | fi 101 | return 0 102 | } 103 | 104 | require_cargo_pgx_version() { 105 | local version="$1" err 106 | [ -z "$version" ] && return 1 107 | 108 | if ! cargo_installed; then 109 | error "cargo is not available, cannot install cargo-pgx" 110 | return 1 111 | fi 112 | if ! cargo_pgx_installed; then 113 | cargo install cargo-pgx --version "=$version" 114 | err=$? 115 | if [ $err -ne 0 ]; then 116 | error "failed installing cargo-pgx-$version ($err)" 117 | return $err 118 | fi 119 | log "installed cargo-pgx-$version" 120 | fi 121 | 122 | local current_version 123 | current_version="$(cargo_pgx_version)" 124 | if [[ -z "$current_version" || "$current_version" != "$version" ]]; then 125 | cargo install cargo-pgx --version "=$version" 126 | err=$? 127 | if [ $err -ne 0 ]; then 128 | error "failed installing cargo-pgx-$version ($err)" 129 | return $err 130 | fi 131 | log "installed cargo-pgx-$version" 132 | fi 133 | return 0 134 | } 135 | 136 | require_cargo_pgrx_version() { 137 | local version="$1" err 138 | [ -z "$version" ] && return 1 139 | 140 | if ! cargo_installed; then 141 | error "cargo is not available, cannot install cargo-pgrx" 142 | return 1 143 | fi 144 | if ! cargo_pgrx_installed; then 145 | cargo install cargo-pgrx --version "=$version" 146 | err=$? 147 | if [ $err -ne 0 ]; then 148 | error "failed installing cargo-pgrx-$version ($err)" 149 | return $err 150 | fi 151 | log "installed cargo-pgrx-$version" 152 | fi 153 | 154 | local current_version 155 | current_version="$(cargo_pgrx_version)" 156 | if [[ -z "$current_version" || "$current_version" != "$version" ]]; then 157 | cargo install cargo-pgrx --version "=$version" 158 | err=$? 159 | if [ $err -ne 0 ]; then 160 | error "failed installing cargo-pgrx-$version ($err)" 161 | return $err 162 | fi 163 | log "installed cargo-pgrx-$version" 164 | fi 165 | return 0 166 | } 167 | 168 | available_pg_versions() { 169 | # this allows running out-of-container with dry-run to test script logic 170 | if [[ "$DRYRUN" = true && ! 
-d /usr/lib/postgresql ]]; then 171 | echo 13 14 15 16 17 172 | else 173 | (cd /usr/lib/postgresql && ls) 174 | fi 175 | } 176 | 177 | cargo_pgrx_cmd() { 178 | local pgrx_version="$1" 179 | if [[ "$pgrx_version" =~ ^0\.[0-7]\. ]]; then echo "pgx"; else echo "pgrx"; fi 180 | } 181 | 182 | cargo_pgrx_init() { 183 | local pgrx_version="$1" pg_ver="$2" pg_versions pgrx_cmd 184 | pgrx_cmd="$(cargo_pgrx_cmd "$pgrx_version")" 185 | 186 | if [ "$pgrx_cmd" = pgx ]; then 187 | if ! require_cargo_pgx_version "$pgrx_version"; then 188 | error "failed requiring cargo-pgx-$pgrx_version ($?)" 189 | return 1 190 | fi 191 | else 192 | if ! require_cargo_pgrx_version "$pgrx_version"; then 193 | error "failed requiring cargo-pgrx-$pgrx_version ($?)" 194 | return 1 195 | fi 196 | fi 197 | 198 | if [[ -z "$pg_ver" || "$pg" -ge 15 && "$pgrx_version" =~ ^0\.[0-5]\.* ]]; then 199 | pg_versions="$(available_pg_versions)" 200 | else 201 | pg_versions="$pg_ver" 202 | fi 203 | args=() 204 | for pg in $pg_versions; do 205 | # pgrx only got the pg15 feature in 0.6.0 206 | [[ "$pgrx_version" =~ ^0\.[0-5]\.* && $pg -ge 15 ]] && continue 207 | 208 | args+=("--pg${pg}" "/usr/lib/postgresql/${pg}/bin/pg_config") 209 | done 210 | rm -f "/home/postgres/.$pgrx_cmd/config.toml" 211 | cargo "$pgrx_cmd" init "${args[@]}" 212 | err=$? 213 | if [ $err -ne 0 ]; then 214 | error "failed cargo $pgrx_cmd init ${args[*]} ($err)" 215 | return $err 216 | fi 217 | # The build script in Toolkit versions <= 1.19.0 assumes that there aren't any spaces between pgXX and = 218 | sed -i 's/ = /=/g' /home/postgres/.$pgrx_cmd/config.toml 219 | return 0 220 | } 221 | 222 | find_deb() { 223 | local name="$1" version="$2" pkg 224 | pkg="$(apt-cache search "$name" 2>/dev/null| awk '{print $1}' | grep -v -- "-dbgsym")" 225 | if [ -n "$pkg" ]; then 226 | # we have a base package, do we have the requested version too? 227 | deb_version="$(apt-cache show "$pkg" 2>/dev/null | awk '/^Version:/ {print $2}' | grep -v forge | grep "$version" | head -n 1 || true)" 228 | if [[ -n "$pkg" && -n "$deb_version" ]]; then 229 | echo "$pkg" "$deb_version" 230 | return 231 | fi 232 | fi 233 | } 234 | 235 | install_deb() { 236 | local pkg="$1" version="$2" err 237 | local tmpdir="/tmp/deb-$pkg.$$" 238 | [ -n "$version" ] && version="=$version" 239 | 240 | mkdir "$tmpdir" 241 | ( 242 | cd "$tmpdir" 243 | apt-get download "$pkg""$version" 244 | dpkg --install --log="$tmpdir"/dpkg.log --admindir="$tmpdir" --force-depends --force-not-root --force-overwrite "$pkg"_*.deb 245 | err=$? 246 | if [ $err -ne 0 ]; then 247 | error "failed installing debian package $pkg$version ($err)" 248 | exit $err 249 | fi 250 | exit 0 251 | ) 252 | err=$? 253 | rm -rf "$tmpdir" 254 | if [ $err -eq 0 ]; then log "installed debian package $pkg$version"; fi 255 | return $err 256 | } 257 | 258 | # This is where we set arch/pg/extension version support checks, used by install and cicd 259 | [ -s "$SCRIPT_DIR/shared_versions.sh" ] && . "$SCRIPT_DIR"/shared_versions.sh 260 | 261 | # This is where the actual installation functions are 262 | [ -s "$SCRIPT_DIR/shared_install.sh" ] && . 
"$SCRIPT_DIR"/shared_install.sh 263 | 264 | require_supported_arch 265 | -------------------------------------------------------------------------------- /build_scripts/shared_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # these are the functions that perform the actual installations/builds of the extensions 4 | 5 | install_timescaledb() { 6 | local version="$1" pg pkg=timescaledb unsupported_reason oss_only="" 7 | [ "$OSS_ONLY" = true ] && oss_only="-DAPACHE_ONLY=1" 8 | 9 | for pg in $(available_pg_versions); do 10 | unsupported_reason="$(supported_timescaledb "$pg" "$version")" 11 | if [ -n "$unsupported_reason" ]; then 12 | log "$pkg-$version: $unsupported_reason" 13 | continue 14 | fi 15 | 16 | if [ "$version" = main -a "$pg" -lt 14 ]; then 17 | log "$pkg-$version: unsupported for < pg14" 18 | continue 19 | fi 20 | 21 | log "building $pkg-$version for pg$pg" 22 | 23 | [[ "$DRYRUN" = true ]] && continue 24 | 25 | PATH="/usr/lib/postgresql/$pg/bin:${ORIGINAL_PATH}" 26 | git_clone "https://github.com/${GITHUB_REPO}" "$pkg" || continue 27 | git_checkout $pkg "$version" || continue 28 | ( 29 | set -e 30 | cd /build/$pkg 31 | 32 | [ "$version" = "2.2.0" ] && sed -i 's/RelWithDebugInfo/RelWithDebInfo/g' CMakeLists.txt 33 | 34 | ./bootstrap \ 35 | -DTAP_CHECKS=OFF \ 36 | -DWARNINGS_AS_ERRORS=off \ 37 | -DCMAKE_BUILD_TYPE=RelWithDebInfo \ 38 | -DREGRESS_CHECKS=OFF \ 39 | -DGENERATE_DOWNGRADE_SCRIPT=ON \ 40 | -DPROJECT_INSTALL_METHOD="${INSTALL_METHOD}" \ 41 | ${oss_only} 42 | 43 | cd build 44 | 45 | make 46 | 47 | # https://github.com/timescale/timescaledb/commit/531f7ed8b16e4d1a99021d3d2b843bbc939798e3 48 | [ "$version" = "2.5.2" ] && sed -i 's/pg_temp./_timescaledb_internal./g' sql/**/*.sql 49 | 50 | make install 51 | 52 | if [ "$OSS_ONLY" = true ]; then 53 | log "removing timescaledb-tsl due to OSS_ONLY" 54 | rm -f /usr/lib/postgresql/"$pg"/lib/timescaledb-tsl-* 55 | fi 56 | ) 57 | err=$? 
58 | if [ $err -eq 0 ]; then
59 | log "installed $pkg-$version for pg$pg"
60 | else
61 | error "failed building $pkg-$version for pg$pg ($err)"
62 | fi
63 | done
64 | PATH="$ORIGINAL_PATH"
65 | }
66 | 
67 | install_toolkit() {
68 | local rust_release cargo_pgrx_version="$1" version="$2" pg pkg=toolkit dpkg deb_version unsupported_reason pgrx_cmd
69 | [ -n "$RUST_RELEASE" ] && rust_release=release || rust_release=debug
70 | pgrx_cmd="$(cargo_pgrx_cmd "$cargo_pgrx_version")"
71 | 
72 | if [ "$OSS_ONLY" = true ]; then
73 | log "skipped toolkit-$version due to OSS_ONLY"
74 | return
75 | fi
76 | 
77 | for pg in $(available_pg_versions); do
78 | unsupported_reason="$(supported_toolkit "$pg" "$version")"
79 | if [ -n "$unsupported_reason" ]; then
80 | log "$pkg-$version: $unsupported_reason"
81 | continue
82 | fi
83 | 
84 | read -rs dpkg deb_version <<< "$(find_deb "timescaledb-toolkit-postgresql-$pg" "$version")"
85 | if [[ -n "$dpkg" && -n "$deb_version" ]]; then
86 | [[ "$DRYRUN" = true ]] && { log "would install debian package $dpkg-$deb_version (cargo-$pgrx_cmd: $cargo_pgrx_version)"; continue; }
87 | if install_deb "$dpkg" "$deb_version"; then continue; fi
88 | log "failed installing $dpkg $deb_version"
89 | else
90 | log "couldn't find debian package for timescaledb-toolkit-postgresql-$pg $version"
91 | fi
92 | 
93 | log "building $pkg-$version for pg$pg (cargo-$pgrx_cmd: $cargo_pgrx_version)"
94 | 
95 | [ "$DRYRUN" = true ] && continue
96 | 
97 | PATH="/usr/lib/postgresql/$pg/bin:${ORIGINAL_PATH}"
98 | cargo_pgrx_init "$cargo_pgrx_version" "$pg" || continue
99 | git_clone https://github.com/timescale/timescaledb-toolkit.git $pkg || continue
100 | git_checkout $pkg "$version" || continue
101 | (
102 | cd /build/$pkg || exit 1
103 | CARGO_TARGET_DIR_NAME=target ./tools/build "-pg$pg" -profile "$rust_release" install || { echo "failed toolkit build for pg$pg, $pkg-$version"; exit 1; }
104 | )
105 | err=$?
106 | if [ $err -eq 0 ]; then 107 | log "installed $pkg-$version for pg$pg" 108 | else 109 | error "failed building $pkg-$version for pg$pg ($err)" 110 | fi 111 | done 112 | PATH="$ORIGINAL_PATH" 113 | } 114 | 115 | timescaledb_post_install() { 116 | local pg 117 | # https://github.com/timescale/timescaledb/commit/6dddfaa54e8f29e3ea41dab2fe7d9f3e37cd3aae 118 | for pg in $(available_pg_versions); do 119 | for file in "/usr/share/postgresql/$pg/extension/timescaledb--"*.sql; do 120 | cat >>"${file}" <<"__SQL__" 121 | DO $dynsql$ 122 | DECLARE 123 | alter_sql text; 124 | BEGIN 125 | 126 | SET local search_path to 'pg_catalog'; 127 | 128 | FOR alter_sql IN 129 | SELECT 130 | format( 131 | $$ALTER FUNCTION %I.%I(%s) SET search_path = 'pg_catalog'$$, 132 | nspname, 133 | proname, 134 | pg_catalog.pg_get_function_identity_arguments(pp.oid) 135 | ) 136 | FROM 137 | pg_depend 138 | JOIN 139 | pg_extension ON (oid=refobjid) 140 | JOIN 141 | pg_proc pp ON (objid=pp.oid) 142 | JOIN 143 | pg_namespace pn ON (pronamespace=pn.oid) 144 | JOIN 145 | pg_language pl ON (prolang=pl.oid) 146 | LEFT JOIN LATERAL ( 147 | SELECT * FROM unnest(proconfig) WHERE unnest LIKE 'search_path=%' 148 | ) sp(search_path) ON (true) 149 | WHERE 150 | deptype='e' 151 | AND extname='timescaledb' 152 | AND extversion < '2.5.2' 153 | AND lanname NOT IN ('c', 'internal') 154 | AND prokind = 'f' 155 | -- Only those functions/procedures that do not yet have their search_path fixed 156 | AND search_path IS NULL 157 | AND proname != 'time_bucket' 158 | ORDER BY 159 | search_path 160 | LOOP 161 | EXECUTE alter_sql; 162 | END LOOP; 163 | 164 | -- And for the sql time_bucket functions we prefer to *not* set the search_path to 165 | -- allow inlining of these functions 166 | WITH sql_time_bucket_fn AS ( 167 | SELECT 168 | pp.oid 169 | FROM 170 | pg_depend 171 | JOIN 172 | pg_extension ON (oid=refobjid) 173 | JOIN 174 | pg_proc pp ON (objid=pp.oid) 175 | JOIN 176 | pg_namespace pn ON (pronamespace=pn.oid) 177 | JOIN 178 | pg_language pl ON (prolang=pl.oid) 179 | WHERE 180 | deptype = 'e' 181 | AND extname='timescaledb' 182 | AND extversion < '2.5.2' 183 | AND lanname = 'sql' 184 | AND proname = 'time_bucket' 185 | AND prokind = 'f' 186 | AND prosrc NOT LIKE '%OPERATOR(pg_catalog.%' 187 | ) 188 | UPDATE 189 | pg_proc 190 | SET 191 | prosrc = regexp_replace(prosrc, '([-+]{1})', ' OPERATOR(pg_catalog.\1) ', 'g') 192 | FROM 193 | sql_time_bucket_fn AS s 194 | WHERE 195 | s.oid = pg_proc.oid; 196 | END; 197 | $dynsql$; 198 | __SQL__ 199 | done # for file 200 | done # for pg 201 | } 202 | 203 | install_pgvectorscale() { 204 | local version="$1" pg pkg=pgvectorscale unsupported_reason arch_deb="$ARCH" 205 | if [ "$arch_deb" = aarch64 ]; then 206 | arch_deb=arm64 207 | fi 208 | 209 | for pg in $(available_pg_versions); do 210 | unsupported_reason="$(supported_pgvectorscale "$pg" "$version")" 211 | if [ -n "$unsupported_reason" ]; then 212 | log "$pkg-$version: $unsupported_reason" 213 | continue 214 | fi 215 | 216 | log "building $pkg-$version for pg$pg" 217 | 218 | [[ "$DRYRUN" = true ]] && continue 219 | 220 | ( 221 | set -ex 222 | 223 | rm -rf /build/pgvectorscale 224 | mkdir /build/pgvectorscale 225 | cd /build/pgvectorscale 226 | 227 | curl --silent \ 228 | --fail \ 229 | --location \ 230 | --output artifact.zip \ 231 | "https://github.com/timescale/pgvectorscale/releases/download/$version/pgvectorscale-$version-pg${pg}-${arch_deb}.zip" 232 | 233 | unzip artifact.zip 234 | dpkg --install --log=/build/pgvectorscale/dpkg.log 
--admindir=/build/pgvectorscale/ --force-depends --force-not-root --force-overwrite pgvectorscale*${arch_deb}.deb 235 | ) 236 | done 237 | } -------------------------------------------------------------------------------- /build_scripts/shared_versions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check to make sure these extensions are available in all pg versions 4 | PG_WANTED_EXTENSIONS="pglogical wal2json pgextwlist pgrouting pg-stat-kcache cron pldebugger hypopg unit repack hll \ 5 | pgpcre h3 h3_postgis orafce ip4r pg_uuidv7 pgvector pgaudit pgsodium" 6 | 7 | WANTED_PACKAGES="patroni pgbackrest timescaledb-tools" 8 | 9 | WANTED_FILES="/usr/bin/timescaledb-tune /usr/local/bin/yq /usr/local/bin/pgbouncer_exporter \ 10 | /usr/bin/pgbackrest_exporter" 11 | 12 | # These functions return "" if the combination of architecture, pg version, and package version are supported, 13 | # otherwise it returns a reason string. Both the cicd/install_checks, and the install_extensions scripts use 14 | # this to decide what should be built/included. 15 | 16 | ARCH="$(arch)" 17 | # standardize architecture names 18 | if [ "$ARCH" = arm64 ]; then 19 | ARCH=aarch64 20 | elif [ "$ARCH" = x86_64 ]; then 21 | ARCH=amd64 22 | fi 23 | 24 | if [ -s /build/scripts/versions.yaml ]; then 25 | VERSION_DATA="$(< /build/scripts/versions.yaml)" 26 | elif [ -s /cicd/scripts/versions.yaml ]; then 27 | VERSION_DATA="$(< /cicd/scripts/versions.yaml)" 28 | elif [ -s versions.yaml ]; then 29 | VERSION_DATA="$(< versions.yaml)" 30 | else 31 | error "could not locate versions.yaml" 32 | exit 1 33 | fi 34 | 35 | DEFAULT_PG_MIN="$(yq .default-pg-min <<< "$VERSION_DATA")" 36 | [ -z "$DEFAULT_PG_MIN" ] && { error "default-pg-min is required in versions.yaml"; exit 1; } 37 | DEFAULT_PG_MAX="$(yq .default-pg-max <<< "$VERSION_DATA")" 38 | [ -z "$DEFAULT_PG_MAX" ] && { error "default-pg-max is required in versions.yaml"; exit 1; } 39 | 40 | pkg_versions() { 41 | local pkg="$1" 42 | yq ".$pkg | keys | .[]" <<<"$VERSION_DATA" | xargs 43 | } 44 | 45 | # expand the list of requested package versions (usage: $1=pkg $2=single argument with the contents of the environment 46 | # variable containing the requested versions) 47 | requested_pkg_versions() { 48 | local pkg="$1" envvar="$2" 49 | local -a versions 50 | readarray -t versions <<< "$envvar" 51 | case "${#versions[@]}" in 52 | 0) return;; 53 | 1) case "${versions[0]}" in 54 | all) pkg_versions "$pkg"; return;; 55 | latest) latest_pkg_version "$pkg"; return;; 56 | esac;; 57 | esac 58 | echo "$envvar" 59 | } 60 | 61 | latest_pkg_version() { 62 | local pkg="$1" 63 | local -a versions 64 | readarray -t versions <<< "$(yq ".$pkg | keys | .[]" <<<"$VERSION_DATA")" 65 | echo "${versions[-1]}" 66 | } 67 | 68 | # locate the cargo-pgrx key from versions.yaml 69 | pkg_cargo_pgrx_version() { 70 | local pkg="$1" ver="$2" cargopgrx 71 | 72 | cargopgrx="$(yq ".$pkg | pick([\"$ver\"]) | .[].cargo-pgrx" <<<"$VERSION_DATA")" 73 | if [ "$cargopgrx" = null ]; then return; else echo "$cargopgrx"; fi 74 | } 75 | 76 | # install the rust extensions ordered from oldest required cargo-pgrx to newest to keep 77 | # the number of rebuilds for cargo-pgrx to a minimum 78 | install_rust_extensions() { 79 | local cargopgrx sorted_pgrx_versions 80 | declare -A pgrx_versions=() 81 | 82 | for ver in $TOOLKIT_VERSIONS; do 83 | cargopgrx="$(pkg_cargo_pgrx_version "toolkit" "$ver")" 84 | if [ -z "$cargopgrx" ]; then 85 | error "no cargo-pgrx version found 
for toolkit-$ver" 86 | continue 87 | fi 88 | pgrx_versions[$cargopgrx]+=" toolkit-$ver" 89 | done 90 | 91 | sorted_pgrx_versions="$(for pgrx_ver in "${!pgrx_versions[@]}"; do echo "$pgrx_ver"; done | sort -Vu)" 92 | for pgrx_ver in $sorted_pgrx_versions; do 93 | ext_versions="$(for ext_ver in ${pgrx_versions[$pgrx_ver]}; do echo "$ext_ver"; done | sort -Vu)" 94 | for ext_ver in $ext_versions; do 95 | ext="$(echo "$ext_ver" | cut -d- -f1)" 96 | ver="$(echo "$ext_ver" | cut -d- -f2-)" 97 | case "$ext" in 98 | toolkit) install_toolkit "$pgrx_ver" "$ver";; 99 | esac 100 | done 101 | done 102 | } 103 | 104 | version_is_supported() { 105 | local pkg="$1" pg="$2" ver="$3" pdata pgmin pgmax arch 106 | pg="$(major_version_only "$pg")" 107 | local -a pgversions 108 | 109 | pdata="$(yq ".$pkg | pick([\"$ver\"]) | .[]" <<<"$VERSION_DATA")" 110 | if [ "$pdata" = null ]; then 111 | echo "not found in versions.yaml" 112 | return 113 | fi 114 | 115 | arch="$(yq .arch <<<"$pdata")" 116 | if [ "$arch" = null ]; then arch="both"; fi 117 | if [[ "$arch" != "both" && "$arch" != "$ARCH" ]]; then echo "unsupported arch $ARCH"; return; fi 118 | 119 | pgmin="$(yq .pg-min <<<"$pdata")" 120 | pgmin="$(major_version_only "$pgmin")" 121 | if [ "$pgmin" = null ]; then pgmin="$DEFAULT_PG_MIN"; fi 122 | if [ "$pg" -lt "$pgmin" ]; then echo "pg$pg is too old"; return; fi 123 | 124 | pgmax="$(yq .pg-max <<<"$pdata")" 125 | pgmax="$(major_version_only "$pgmax")" 126 | if [ "$pgmax" = null ]; then pgmax="$DEFAULT_PG_MAX"; fi 127 | if [ "$pg" -gt "$pgmax" ]; then echo "pg$pg is too new"; return; fi 128 | 129 | pdata="$(yq .pg[] <<<"$pdata")" 130 | if [ -n "$pdata" ]; then 131 | local found=false 132 | readarray -t pgversions <<<"$pdata" 133 | for pgv in "${pgversions[@]}"; do 134 | if [ "$pgv" = "$pg" ]; then found=true; break; fi 135 | done 136 | if [ "$found" = "false" ]; then echo "does not support pg$pg"; return; fi 137 | fi 138 | } 139 | 140 | # Ensure PG version matching is performed only based upon MAJOR version. 141 | # 142 | # This is a bit simplistic, but the system will otherwise break when 143 | # building a version of PG which specifies a minor or patch version. 144 | major_version_only() { 145 | echo -n "${1%%.*}" # Trim the longest matching pattern `.*` (bash pattern, not regex). 
146 | }
147 | 
148 | supported_timescaledb() {
149 | local pg="$1" ver="$2"
150 | 
151 | # just attempt the build for main/master/or other branch build
152 | if [[ "$ver" = main || "$ver" = master || "$ver" =~ [a-z_-]*/[A-Za-z0-9_-]* ]]; then
153 | return
154 | fi
155 | 
156 | version_is_supported timescaledb "$pg" "$ver"
157 | }
158 | 
159 | supported_toolkit() {
160 | local pg="$1" ver="$2"
161 | 
162 | # just attempt the build for main/master/or other branch build
163 | if [[ "$ver" = main || "$ver" = master || "$ver" =~ [a-z_-]*/[A-Za-z0-9_-]* ]]; then
164 | return
165 | fi
166 | 
167 | version_is_supported toolkit "$pg" "$ver"
168 | }
169 | 
170 | supported_pgvectorscale() {
171 | local pg="$1" ver="$2"
172 | 
173 | # just attempt the build for main/master/or other branch build
174 | if [[ "$ver" = main || "$ver" = master || "$ver" =~ [a-z_-]*/[A-Za-z0-9_-]* ]]; then
175 | return
176 | fi
177 | 
178 | version_is_supported pgvectorscale "$pg" "$ver"
179 | }
180 | 
181 | require_supported_arch() {
182 | if [[ "$ARCH" != amd64 && "$ARCH" != aarch64 ]]; then
183 | echo "unsupported architecture: $ARCH" >&2
184 | exit 1
185 | fi
186 | }
187 | 
188 | TIMESCALEDB_VERSIONS="$(requested_pkg_versions timescaledb "$TIMESCALEDB_VERSIONS")"
189 | TOOLKIT_VERSIONS="$(requested_pkg_versions toolkit "$TOOLKIT_VERSIONS")"
190 | PGVECTORSCALE_VERSIONS="$(requested_pkg_versions pgvectorscale "$PGVECTORSCALE_VERSIONS")"
191 | 
--------------------------------------------------------------------------------
/build_scripts/versions.yaml:
--------------------------------------------------------------------------------
1 | # We need to describe various things for each version of each extension:
2 | # 1. which version of cargo-pgrx is required
3 | # 2. which version of pg is supported
4 | 
5 | # extension:
6 | # version:
7 | # key/values
8 | #
9 | # Specify which version of cargo-pgrx is required for this extension
10 | # versions < 0.8.0 were called pgx, but the scripts automatically pick
11 | # the correct name based on the version.
12 | # cargo-pgrx: 
13 | #
14 | # Specify which version of postgresql this version supports:
15 | # pg-min: (defaults to 13 if not specified)
16 | # pg-max: (defaults to 15 if not specified)
17 | # pg: [ 13, 14, 15 ] (pick specific versions to build this extension with)
18 | #
19 | # arch: 
20 | 
21 | default-pg-min: 13
22 | default-pg-max: 15
23 | default-cargo-pgx: 0.6.1
24 | default-arch: both
25 | 
26 | ## This section contains the specific pg versions that will be installed.
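## (illustrative reading of the map below: each key is a PostgreSQL major version and the value is the
## exact point release that gets installed, e.g. 17 -> 17.5)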
27 | ## Please notice that pg 12 is EOL now and has no more updates, which is why it was removed: 28 | ## from https://www.postgresql.org/about/news/postgresql-174-168-1512-1417-and-1320-released-3018/ 29 | postgres_versions: 30 | 17: 17.5 31 | 16: 16.9 32 | 15: 15.13 33 | 14: 14.18 34 | 13: 13.21 35 | 36 | timescaledb: 37 | 2.1.0: 38 | pg: [13] 39 | 2.1.1: 40 | pg-max: 13 41 | 2.2.0: 42 | pg-max: 13 43 | 2.2.1: 44 | pg-max: 13 45 | 2.3.0: 46 | pg-max: 13 47 | 2.3.1: 48 | pg-max: 13 49 | 2.4.0: 50 | pg-max: 13 51 | 2.4.1: 52 | pg-max: 13 53 | 2.4.2: 54 | pg-max: 13 55 | 2.5.0: 56 | pg-max: 14 57 | 2.5.1: 58 | pg-max: 14 59 | 2.5.2: 60 | pg-max: 14 61 | 2.6.0: 62 | pg-max: 14 63 | 2.6.1: 64 | pg-max: 14 65 | 2.7.0: 66 | pg-max: 14 67 | 2.7.1: 68 | pg-max: 14 69 | 2.7.2: 70 | pg-max: 14 71 | 2.8.0: 72 | pg-max: 14 73 | 2.8.1: 74 | pg-max: 14 75 | 2.9.0: 76 | 2.9.1: 77 | 2.9.2: 78 | 2.9.3: 79 | 2.10.0: 80 | 2.10.1: 81 | 2.10.2: 82 | 2.10.3: 83 | 2.11.0: 84 | 2.11.1: 85 | 2.11.2: 86 | 2.12.0: 87 | pg-min: 13 88 | 2.12.1: 89 | pg-min: 13 90 | 2.12.2: 91 | pg-min: 13 92 | 2.13.0: 93 | pg-min: 13 94 | pg-max: 16 95 | 2.13.1: 96 | pg-min: 13 97 | pg-max: 16 98 | 2.14.0: 99 | pg-min: 13 100 | pg-max: 16 101 | 2.14.1: 102 | pg-min: 13 103 | pg-max: 16 104 | 2.14.2: 105 | pg-min: 13 106 | pg-max: 16 107 | 2.15.0: 108 | pg-min: 13 109 | pg-max: 16 110 | 2.15.1: 111 | pg-min: 13 112 | pg-max: 16 113 | 2.15.2: 114 | pg-min: 13 115 | pg-max: 16 116 | 2.15.3: 117 | pg-min: 13 118 | pg-max: 16 119 | 2.16.0: 120 | pg-min: 14 121 | pg-max: 16 122 | 2.16.1: 123 | pg-min: 14 124 | pg-max: 16 125 | 2.17.0: 126 | pg-min: 14 127 | pg-max: 17 128 | 2.17.1: 129 | pg-min: 14 130 | pg-max: 17 131 | 2.17.2: 132 | pg-min: 14 133 | pg-max: 17 134 | 2.18.0: 135 | pg-min: 14 136 | pg-max: 17 137 | 2.18.1: 138 | pg-min: 14 139 | pg-max: 17 140 | 2.18.2: 141 | pg-min: 14 142 | pg-max: 17 143 | 2.19.0: 144 | pg-min: 14 145 | pg-max: 17 146 | 2.19.1: 147 | pg-min: 14 148 | pg-max: 17 149 | 2.19.2: 150 | pg-min: 14 151 | pg-max: 17 152 | 2.19.3: 153 | pg-min: 14 154 | pg-max: 17 155 | 2.20.0: 156 | pg-min: 15 157 | pg-max: 17 158 | 2.20.1: 159 | pg-min: 15 160 | pg-max: 17 161 | 2.20.2: 162 | pg-min: 15 163 | pg-max: 17 164 | 165 | 166 | toolkit: 167 | 1.18.0: 168 | cargo-pgrx: 0.10.2 169 | pg-max: 16 170 | 1.19.0: 171 | cargo-pgrx: 0.12.8 172 | pg-max: 17 173 | 1.21.0: 174 | cargo-pgrx: 0.12.9 175 | pg-min: 15 176 | pg-max: 17 177 | 178 | pgvectorscale: 179 | 0.2.0: 180 | pg-min: 15 181 | pg-max: 16 182 | 0.3.0: 183 | pg-min: 15 184 | pg-max: 16 185 | 0.4.0: 186 | pg-min: 13 187 | pg-max: 17 188 | 0.5.0: 189 | pg-min: 13 190 | pg-max: 17 191 | 0.5.1: 192 | pg-min: 13 193 | pg-max: 17 194 | 0.6.0: 195 | pg-min: 13 196 | pg-max: 17 197 | 0.7.0: 198 | pg-min: 13 199 | pg-max: 17 200 | 0.7.1: 201 | pg-min: 13 202 | pg-max: 17 203 | -------------------------------------------------------------------------------- /cicd/install_checks: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Check for the things that are supposed to be installed in the image 4 | 5 | SCRIPT_DIR="${BASH_SOURCE[0]%/*}" 6 | . 
"$SCRIPT_DIR"/shared.sh 7 | 8 | [ "$1" = -v ] && { VERBOSE=1; shift; } 9 | 10 | for pg in $(available_pg_versions); do 11 | cat /dev/null > "$EXTVERSIONS".pg"$pg" 12 | done 13 | 14 | # make sure the image we're checking is at most 8 hours old, since we're doing a pull from the repository 15 | # for the check, this should make sure we're getting the one we just built 16 | check_base_age 28800 17 | 18 | # Check for packages that aren't pg-version-specific 19 | check_packages 20 | 21 | # Check for specific files, generally things that were installed without package management 22 | check_files 23 | 24 | for pg in $(available_pg_versions); do 25 | check_base_components "$pg" "/usr/lib/postgresql/$pg/lib" 26 | done 27 | 28 | if [ -n "$GITHUB_STEP_SUMMARY" ]; then 29 | ext_version_table >> "$GITHUB_STEP_SUMMARY" 30 | fi 31 | 32 | exit $EXIT_STATUS 33 | -------------------------------------------------------------------------------- /cicd/shared.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e -o pipefail 4 | 5 | if [ ! -s /.image_config ]; then 6 | echo "no, or empty /.image_config found" 7 | exit 1 8 | fi 9 | . /.image_config 10 | 11 | if [ -s /build/scripts/shared_versions.sh ]; then 12 | . /build/scripts/shared_versions.sh 13 | elif [ -s /cicd/scripts/shared_versions.sh ]; then 14 | . /cicd/scripts/shared_versions.sh 15 | else 16 | echo "couldn't find shared_version.sh in /build/scripts, or in /cicd/scripts" 17 | exit 1 18 | fi 19 | 20 | VERBOSE="" 21 | EXIT_STATUS=0 22 | 23 | log() { 24 | local msg 25 | msg="$ARCH: $*" 26 | if [ -n "$GITHUB_STEP_SUMMARY" ]; then echo "$msg" >> "$GITHUB_STEP_SUMMARY"; fi 27 | if [ ! "$VERBOSE" ]; then return; fi 28 | echo "$msg" 29 | } 30 | 31 | error() { 32 | local msg 33 | msg="$ARCH: ERROR: $*" 34 | if [ -n "$GITHUB_STEP_SUMMARY" ]; then echo "**${msg}**" >> "$GITHUB_STEP_SUMMARY"; fi 35 | echo "$msg" >&2 36 | # shellcheck disable=SC2034 # EXIT_STATUS is used by callers, not us 37 | EXIT_STATUS=1 38 | } 39 | 40 | available_pg_versions() { 41 | (cd /usr/lib/postgresql && ls) 42 | } 43 | 44 | check_base_age() { 45 | # only check the BUILD_DATE during CI 46 | if [ "$CI" != true ]; then return 0; fi 47 | local age_threshold="$1" 48 | 49 | build_seconds="$(date -d"$BUILD_DATE" +%s)" 50 | now="$(date +%s)" 51 | age=$((now-build_seconds)) 52 | if [ $age -gt "$age_threshold" ]; then 53 | error "the base image is too old ($age seconds old)" 54 | else 55 | log "the base image was built $age seconds ago" 56 | fi 57 | } 58 | 59 | check_base_components() { 60 | local pg="$1" lib="$2" 61 | 62 | check_timescaledb "$pg" "$lib" 63 | check_pgvectorscale "$pg" "$lib" 64 | check_toolkit "$pg" "$lib" 65 | check_oss_extensions "$pg" "$lib" 66 | check_others "$pg" "$lib" 67 | } 68 | 69 | check_timescaledb() { 70 | local pg="$1" lib="$2" unsupported_reason found=false 71 | 72 | if [ -z "$TIMESCALEDB_VERSIONS" ]; then 73 | error "no timescaledb versions requested, why are we here?" 74 | return 1 75 | fi 76 | 77 | # record an empty version so we'll get an empty table row if we don't have any versions 78 | record_ext_version timescaledb "$pg" "" 79 | 80 | if [ ! 
-s "$lib/timescaledb.so" ]; then 81 | error "no timescaledb loader found for pg$pg" 82 | fi 83 | 84 | for ver in $TIMESCALEDB_VERSIONS; do 85 | if [[ "$ver" = master || "$ver" = main ]]; then 86 | continue 87 | fi 88 | if [[ -s "$lib/timescaledb-$ver.so" ]]; then 89 | if [ "$OSS_ONLY" = true ]; then 90 | if [ -s "$lib/timescaledb-tsl-$ver.so" ]; then 91 | error "found non-OSS timescaledb-tsl-$ver for pg$pg" 92 | else 93 | found=true 94 | record_ext_version timescaledb "$pg" "$ver" 95 | fi 96 | else 97 | if [ -s "$lib/timescaledb-tsl-$ver.so" ]; then 98 | found=true 99 | record_ext_version timescaledb "$pg" "$ver" 100 | else 101 | error "found timescaledb-$ver, but not tsl-$ver for pg$pg" 102 | fi 103 | fi 104 | else 105 | unsupported_reason="$(supported_timescaledb "$pg" "$ver")" 106 | if [ -n "$unsupported_reason" ]; then 107 | log "skipped: timescaledb-$ver: $unsupported_reason" 108 | else 109 | error "timescaledb-$ver not found for pg$pg" 110 | fi 111 | fi 112 | done 113 | 114 | if [ "$found" = false ]; then error "failed to find any timescaledb extensions for pg$pg"; fi 115 | } 116 | 117 | check_oss_extensions() { 118 | if [ "$OSS_ONLY" != true ]; then return 0; fi 119 | 120 | local pg="$1" lib="$2" 121 | for pattern in timescaledb_toolkit; do 122 | files="$(find "$lib" -maxdepth 1 -name "${pattern}*")" 123 | if [ -n "$files" ]; then error "found $pattern files for pg$pg when OSS_ONLY is true"; fi 124 | done 125 | } 126 | 127 | check_toolkit() { 128 | if [ -z "$TOOLKIT_VERSIONS" ]; then return; fi 129 | local pg="$1" lib="$2" found=false 130 | 131 | if [ "$OSS_ONLY" = true ]; then 132 | # we don't do anything here as we depend on `check_oss_extensions` to flag on inappropriate versions 133 | return 134 | fi 135 | 136 | # record an empty version so we'll get an empty table row if we don't have any versions 137 | record_ext_version toolkit "$pg" "" 138 | 139 | for ver in $TOOLKIT_VERSIONS; do 140 | if [[ "$ver" = master || "$ver" = main ]]; then 141 | log "skipping looking for toolkit-$ver" 142 | continue 143 | fi 144 | 145 | if [ -s "$lib/timescaledb_toolkit-$ver.so" ]; then 146 | found=true 147 | record_ext_version toolkit "$pg" "$ver" 148 | else 149 | unsupported_reason="$(supported_toolkit "$pg" "$ver")" 150 | if [ -n "$unsupported_reason" ]; then 151 | log "skipped: toolkit-$ver: $unsupported_reason" 152 | else 153 | error "toolkit-$ver not found for pg$pg" 154 | fi 155 | fi 156 | done 157 | 158 | if [ "$found" = false ]; then error "no toolkit versions found for pg$pg"; fi 159 | } 160 | 161 | check_pgvectorscale() { 162 | if [ -z "$PGVECTORSCALE_VERSIONS" ]; then return; fi 163 | local pg="$1" lib="$2" found=false 164 | 165 | # record an empty version so we'll get an empty table row if we don't have any versions 166 | record_ext_version pgvectorscale "$pg" "" 167 | 168 | for ver in $PGVECTORSCALE_VERSIONS; do 169 | if [[ "$ver" = master || "$ver" = main ]]; then 170 | log "skipping looking for vectorscale-$ver" 171 | continue 172 | fi 173 | 174 | if [ -s "$lib/vectorscale-$ver.so" ]; then 175 | found=true 176 | record_ext_version pgvectorscale "$pg" "$ver" 177 | else 178 | unsupported_reason="$(supported_pgvectorscale "$pg" "$ver")" 179 | if [ -n "$unsupported_reason" ]; then 180 | log "skipped: pgvectorscale-$ver: $unsupported_reason" 181 | else 182 | error "pgvectorscale-$ver not found for pg$pg" 183 | fi 184 | fi 185 | done 186 | 187 | if [[ "$found" = false && "$pg" -ge 13 && "$pg" -le 17 ]]; then error "no pgvectorscale versions found for pg$pg"; fi 188 | } 189 | 190 | # this 
checks for other extensions that should always exist 191 | check_others() { 192 | local pg="$1" lib="$2" version status 193 | 194 | record_ext_version logerrors "$pg" "" 195 | if [ -n "$PG_LOGERRORS" ]; then 196 | if [ -s "$lib/logerrors.so" ]; then 197 | record_ext_version logerrors "$pg" "$PG_LOGERRORS" 198 | else 199 | error "logerrors not found for pg$pg" 200 | fi 201 | fi 202 | 203 | record_ext_version pg_stat_monitor "$pg" "" 204 | if [ -n "$PG_STAT_MONITOR" ]; then 205 | if [ -s "$lib/pg_stat_monitor.so" ]; then 206 | record_ext_version pg_stat_monitor "$pg" "$PG_STAT_MONITOR" 207 | else 208 | error "pg_stat_monitor not found for pg$pg" 209 | fi 210 | fi 211 | 212 | record_ext_version pgvector "$pg" "" 213 | if [ -n "$PGVECTOR" ]; then 214 | if [ -s "$lib/vector.so" ]; then 215 | record_ext_version pgvector "$pg" "$PGVECTOR" 216 | else 217 | error "pgvector not found for pg$pg" 218 | fi 219 | fi 220 | 221 | record_ext_version ai "$pg" "" 222 | if [[ -n "$PGAI_VERSION" && "$pg" -gt 15 ]]; then 223 | # pgai has no .so file 224 | pgai_control="$(/usr/lib/postgresql/${pg}/bin/pg_config --sharedir)/extension/ai.control" 225 | if [ -f "$pgai_control" ]; then 226 | record_ext_version ai "$pg" "$PGAI_VERSION" 227 | else 228 | error "ai not found for pg$pg" 229 | fi 230 | fi 231 | 232 | record_ext_version pgvecto.rs "$pg" "" 233 | # TODO: pgvecto.rs hasn't released a pg17 compatible version yet, check https://github.com/tensorchord/pgvecto.rs/releases 234 | if [[ -n "$PGVECTO_RS" && "$pg" -gt 13 && "$pg" -lt 17 ]]; then 235 | if [ -s "$lib/vectors.so" ]; then 236 | record_ext_version pgvecto.rs "$pg" "$PGVECTO_RS" 237 | else 238 | error "pgvecto.rs not found for pg$pg" 239 | fi 240 | fi 241 | 242 | record_ext_version pg_auth_mon "$pg" "" 243 | if [ -n "$PG_AUTH_MON" ]; then 244 | if [ -s "$lib/pg_auth_mon.so" ]; then 245 | record_ext_version pg_auth_mon "$pg" "$PG_AUTH_MON" 246 | else 247 | error "pg_auth_mon not found for pg$pg" 248 | fi 249 | fi 250 | 251 | record_ext_version postgis "$pg" "" 252 | if [ -n "$POSTGIS_VERSIONS" ]; then 253 | for ver in $POSTGIS_VERSIONS; do 254 | IFS=\| read -rs version status <<< "$(dpkg-query -W -f '${version}|${status}' "postgresql-$pg-postgis-$ver")" 255 | if [ "$status" = "install ok installed" ]; then 256 | record_ext_version postgis "$pg" "$version" 257 | else 258 | error "pg$pg extension postgis-$ver not found: $status" 259 | fi 260 | done 261 | fi 262 | 263 | for extname in $PG_WANTED_EXTENSIONS; do 264 | record_ext_version "$extname" "$pg" "" 265 | IFS=\| read -rs version status <<< "$(dpkg-query -W -f '${version}|${status}' "postgresql-$pg-$extname" 2>/dev/null)" 266 | if [ "$status" = "install ok installed" ]; then 267 | record_ext_version "$extname" "$pg" "$version" 268 | else 269 | # it's not a debian package, but is it still installed via other means? 
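# Illustrative fallback (hypothetical path): an extension built from source leaves no dpkg record,
# but its shared library is still present, e.g. /usr/lib/postgresql/17/lib/pgextwlist.so, which is
# what the file check below looks for before declaring the extension missing.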
270 | if [ -f "$lib/${extname}.so" ]; then 271 | record_ext_version "$extname" "$pg" "unknown" 272 | else 273 | ls "$lib" 274 | error "pg$pg extension $extname not found: $status (and not at $lib/${extname}.so" 275 | fi 276 | fi 277 | done 278 | } 279 | 280 | check_packages() { 281 | local pkg 282 | for pkg in $WANTED_PACKAGES; do 283 | IFS=\| read -rs version status <<< "$(dpkg-query -W -f '${version}|${status}' "$pkg")" 284 | if [ "$status" = "install ok installed" ]; then 285 | log "found package $pkg-$version" 286 | else 287 | error "package $pkg not found: $status" 288 | fi 289 | done 290 | } 291 | 292 | check_files() { 293 | local file 294 | for file in $WANTED_FILES; do 295 | if [ -f "$file" ]; then 296 | log "found file $file" 297 | else 298 | error "file $file is missing" 299 | fi 300 | done 301 | } 302 | 303 | EXTVERSIONS="$(mktemp -t extversions.$ARCH.XXXX)" 304 | cleanup() { 305 | rm -f "$EXTVERSIONS".* >&/dev/null 306 | } 307 | trap cleanup ERR EXIT 308 | 309 | record_ext_version() { 310 | local pkg="$1" pg="$2" version="$3" 311 | echo "$pkg|$version" >> "$EXTVERSIONS".pg"$pg" 312 | } 313 | 314 | sort_keys() { 315 | for k in "$@"; do echo "$k"; done | xargs -n 1 | sort -ifVu | xargs 316 | } 317 | 318 | ext_version_table() { 319 | local -A pkgs 320 | local versions 321 | echo "#### Installed PG extensions for $ARCH:" 322 | for pg in $(available_pg_versions); do 323 | echo "| PG$pg Extension | Versions |" 324 | echo "|:-|:-|" 325 | 326 | pkgs=() 327 | while read -r line; do 328 | IFS=\| read -rs pkg version <<< "$line" 329 | pkgs["$pkg"]+=" $version" 330 | done < "$EXTVERSIONS".pg"$pg" 331 | 332 | for pkg in $(sort_keys "${!pkgs[@]}"); do 333 | versions="$(sort_keys "${pkgs[$pkg]}")" 334 | echo "| $pkg | ${versions// /, } |" 335 | done 336 | 337 | echo 338 | done 339 | } 340 | -------------------------------------------------------------------------------- /cicd/smoketest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | SCRIPTDIR="$(dirname "$0")" 6 | 7 | initdb 8 | 9 | SHARED_PRELOAD_LIBRARIES="timescaledb" 10 | EXTENSION_DIR="$(pg_config --sharedir)/extension" 11 | 12 | echo "shared_preload_libraries='${SHARED_PRELOAD_LIBRARIES}'" >> "${PGDATA}/postgresql.conf" 13 | pg_ctl start 14 | 15 | while ! 
pg_isready; do 16 | sleep 0.2 17 | done 18 | 19 | psql -d postgres -f - <<__SQL__ 20 | ALTER SYSTEM set log_statement to 'all'; 21 | SELECT pg_reload_conf(); 22 | 23 | CREATE EXTENSION timescaledb; 24 | 25 | \set ECHO queries 26 | SELECT 27 | format('CREATE EXTENSION IF NOT EXISTS %I CASCADE', name) 28 | FROM 29 | pg_catalog.pg_available_extensions 30 | WHERE 31 | name IN ('timescaledb_toolkit', 'postgis') 32 | ORDER BY 33 | name 34 | \gexec 35 | 36 | __SQL__ 37 | 38 | psql -AtXq -f "${SCRIPTDIR}/version_info.sql" > /tmp/version_info.log 39 | pg_ctl stop -m immediate 40 | exit 0 41 | -------------------------------------------------------------------------------- /cicd/version_info.sql: -------------------------------------------------------------------------------- 1 | \set patroni `patroni --version | awk '{print $2}'` 2 | \set pgbackrest `pgbackrest version | awk '{print $2}'` 3 | \set pgall `for dir in /usr/lib/postgresql/*; do $dir/bin/psql --version | awk '/psql/ {print $3}'; done` 4 | 5 | WITH versions(name, version) AS ( 6 | SELECT 7 | format('%s.version', name), 8 | default_version 9 | FROM 10 | pg_available_extensions 11 | WHERE 12 | name IN ('timescaledb', 'postgis', 'pg_prometheus', 'timescale_prometheus_extra', 'vectorscale', 'ai', 'timescaledb_toolkit', 'timescale_analytics') 13 | UNION ALL 14 | SELECT 15 | 'postgresql.version', 16 | format('%s.%s', (v::int/10000), (v::int%1000)) 17 | FROM 18 | current_setting('server_version_num') AS sub(v) 19 | UNION ALL 20 | SELECT 21 | 'patroni.version', 22 | :'patroni' 23 | UNION ALL 24 | SELECT 25 | 'pgBackRest.version', 26 | :'pgbackrest' 27 | UNION ALL 28 | SELECT 29 | 'timescaledb.available_versions', 30 | string_agg(version, ',' ORDER BY version) 31 | FROM 32 | pg_available_extension_versions 33 | WHERE 34 | name = 'timescaledb' 35 | UNION ALL 36 | SELECT 37 | 'vectorscale.available_versions', 38 | string_agg(version, ',' ORDER BY version) 39 | FROM 40 | pg_available_extension_versions 41 | WHERE 42 | name = 'vectorscale' 43 | UNION ALL 44 | SELECT 45 | 'timescaledb_toolkit.available_versions', 46 | string_agg(version, ',' ORDER BY version) 47 | FROM 48 | pg_available_extension_versions 49 | WHERE 50 | name = 'timescaledb_toolkit' 51 | UNION ALL 52 | SELECT 53 | 'postgresql.available_versions', 54 | string_agg(version, ',' ORDER BY version) 55 | FROM 56 | regexp_split_to_table(:'pgall', '\n') AS rstt(version) 57 | ) 58 | SELECT 59 | format('%s=%s', name, version) 60 | FROM 61 | versions 62 | WHERE 63 | version != ''; 64 | -------------------------------------------------------------------------------- /docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This file started out as: 4 | # https://github.com/docker-library/postgres/blob/41bd7bf3f487e6dc0036fd73efaff6ccb6fbbacd/15/bullseye/docker-entrypoint.sh 5 | 6 | set -Eeo pipefail 7 | # TODO swap to -Eeuo pipefail above (after handling all potentially-unset variables) 8 | 9 | # usage: file_env VAR [DEFAULT] 10 | # ie: file_env 'XYZ_DB_PASSWORD' 'example' 11 | # (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of 12 | # "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature) 13 | file_env() { 14 | local var="$1" 15 | local fileVar="${var}_FILE" 16 | local def="${2:-}" 17 | if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then 18 | printf >&2 'error: both %s and %s are set (but are exclusive)\n' "$var" "$fileVar" 19 | exit 1 20 | fi 21 | local val="$def" 22 | if [ "${!var:-}" ]; 
then 23 | val="${!var}" 24 | elif [ "${!fileVar:-}" ]; then 25 | val="$(< "${!fileVar}")" 26 | fi 27 | export "$var"="$val" 28 | unset "$fileVar" 29 | } 30 | 31 | # check to see if this file is being run or sourced from another script 32 | _is_sourced() { 33 | # https://unix.stackexchange.com/a/215279 34 | [ "${#FUNCNAME[@]}" -ge 2 ] \ 35 | && [ "${FUNCNAME[0]}" = '_is_sourced' ] \ 36 | && [ "${FUNCNAME[1]}" = 'source' ] 37 | } 38 | 39 | permission_wait_loop() { 40 | # This wait loop is due to macos's docker implementation, where the filesystem syncing can be too slow for 41 | # postgres to see the real ownership of the $PGDATA directory right away. We check for 10s to see if the ownership 42 | # changes to what we expect, and if it doesn't, we just continue and let postgres deal with it (or not) 43 | local uid gid tries=40; uid="$(id -u)"; gid="$(id -g)" 44 | [ "$(stat -c%u:%g "$PGDATA")" = "$uid:$gid" ] && return 45 | 46 | echo -n "Waiting for permissions on $PGDATA ($(stat -c%u:%g "$PGDATA") -> $uid:$gid)" 47 | while [[ "$(stat -c%u:%g "$PGDATA")" != "$uid:$gid" && $tries -gt 0 ]]; do 48 | echo -n . 49 | sleep 0.25 50 | ((tries--)) 51 | done 52 | echo 53 | } 54 | 55 | setup_nss_wrapper() { 56 | local uid gid; uid="$(id -u)"; gid="$(id -g)" 57 | local pg_uid pg_gid; IFS=: read -sr _ _ pg_uid pg_gid _ <<<"$(getent passwd "$POSTGRES_USER")" 58 | 59 | if [[ "$uid" -ne "$pg_uid" || "$gid" -ne "$pg_gid" ]]; then 60 | # we're running under docker with `--user`, and it doesn't match the `postgres` user, so we have to update 61 | # "initdb" is particular about the current user existing in "/etc/passwd", so we use "nss_wrapper" to fake that if necessary 62 | # see https://github.com/docker-library/postgres/pull/253, https://github.com/docker-library/postgres/issues/359, https://cwrap.org/nss_wrapper.html 63 | # see if we can find a suitable "libnss_wrapper.so" (https://salsa.debian.org/sssd-team/nss-wrapper/-/commit/b9925a653a54e24d09d9b498a2d913729f7abb15) 64 | local wrapper 65 | for wrapper in {/usr,}/lib{/*,}/libnss_wrapper.so; do 66 | if [ -s "$wrapper" ]; then 67 | NSS_WRAPPER_PASSWD="$(mktemp /tmp/passwd.XXXXXX)" 68 | NSS_WRAPPER_GROUP="$(mktemp /tmp/group.XXXXXX)" 69 | export LD_PRELOAD="$wrapper" NSS_WRAPPER_PASSWD NSS_WRAPPER_GROUP 70 | printf 'postgres:x:%s:%s:PostgreSQL:%s:/bin/false\n' "$uid" "$gid" "$PGDATA" > "$NSS_WRAPPER_PASSWD" 71 | printf 'postgres:x:%s:\n' "$gid" > "$NSS_WRAPPER_GROUP" 72 | echo "using nss-wrapper" 73 | break 74 | fi 75 | done 76 | fi 77 | } 78 | 79 | # used to create initial postgres directories and if run as root, ensure ownership to the "postgres" user 80 | docker_create_db_directories() { 81 | local uid; uid="$(id -u)" 82 | 83 | [ ! -d "$PGDATA" ] && mkdir -p "$PGDATA" 84 | 85 | # ignore failure since there are cases where we can't chmod (and PostgreSQL might fail later anyhow - it's picky about permissions of this directory) 86 | chmod 700 "$PGDATA" 2>/dev/null || : 87 | 88 | # ignore failure since it will be fine when using the image provided directory; see also https://github.com/docker-library/postgres/pull/289 89 | mkdir -p /var/run/postgresql 2>/dev/null || : 90 | chmod 1777 /var/run/postgresql 2>/dev/null || : 91 | 92 | # Create the transaction log directory before initdb is run so the directory is owned by the correct user 93 | if [ -n "${POSTGRES_INITDB_WALDIR:-}" ]; then 94 | mkdir -p "$POSTGRES_INITDB_WALDIR" 95 | if [ "$uid" -eq 0 ]; then 96 | find "$POSTGRES_INITDB_WALDIR" \! 
-user postgres -exec chown postgres '{}' + || : 97 | fi 98 | chmod 700 "$POSTGRES_INITDB_WALDIR" || : 99 | fi 100 | 101 | # allow the container to be started with `--user` 102 | if [ "$uid" -eq 0 ]; then 103 | find "$PGDATA" \! -user postgres -exec chown postgres: '{}' + || : 104 | find /var/run/postgresql \! -user postgres -exec chown postgres: '{}' + || : 105 | fi 106 | } 107 | 108 | # initialize empty PGDATA directory with new database via 'initdb' 109 | # arguments to `initdb` can be passed via POSTGRES_INITDB_ARGS or as arguments to this function 110 | # `initdb` automatically creates the "postgres", "template0", and "template1" dbnames 111 | # this is also where the database user is created, specified by `POSTGRES_USER` env 112 | docker_init_database_dir() { 113 | if [ -n "${POSTGRES_INITDB_WALDIR:-}" ]; then 114 | set -- --waldir "$POSTGRES_INITDB_WALDIR" "$@" 115 | fi 116 | 117 | permission_wait_loop 118 | 119 | # --pwfile refuses to handle a properly-empty file (hence the "\n"): https://github.com/docker-library/postgres/issues/1025 120 | eval 'initdb --username="$POSTGRES_USER" --pwfile=<(printf "%s\n" "$POSTGRES_PASSWORD") '"$POSTGRES_INITDB_ARGS"' "$@"' 121 | } 122 | 123 | # print large warning if POSTGRES_PASSWORD is long 124 | # error if both POSTGRES_PASSWORD is empty and POSTGRES_HOST_AUTH_METHOD is not 'trust' 125 | # print large warning if POSTGRES_HOST_AUTH_METHOD is set to 'trust' 126 | # assumes database is not set up, ie: [ -z "$DATABASE_ALREADY_EXISTS" ] 127 | docker_verify_minimum_env() { 128 | # check password first so we can output the warning before postgres 129 | # messes it up 130 | if [ "${#POSTGRES_PASSWORD}" -ge 100 ]; then 131 | cat >&2 <<-'EOWARN' 132 | 133 | WARNING: The supplied POSTGRES_PASSWORD is 100+ characters. 134 | 135 | This will not work if used via PGPASSWORD with "psql". 136 | 137 | https://www.postgresql.org/message-id/flat/E1Rqxp2-0004Qt-PL%40wrigleys.postgresql.org (BUG #6412) 138 | https://github.com/docker-library/postgres/issues/507 139 | 140 | EOWARN 141 | fi 142 | if [ -z "$POSTGRES_PASSWORD" ] && [ 'trust' != "$POSTGRES_HOST_AUTH_METHOD" ]; then 143 | # The - option suppresses leading tabs but *not* spaces. :) 144 | cat >&2 <<-'EOE' 145 | Error: Database is uninitialized and superuser password is not specified. 146 | You must specify POSTGRES_PASSWORD to a non-empty value for the 147 | superuser. For example, "-e POSTGRES_PASSWORD=password" on "docker run". 148 | 149 | You may also use "POSTGRES_HOST_AUTH_METHOD=trust" to allow all 150 | connections without a password. This is *not* recommended. 151 | 152 | See PostgreSQL documentation about "trust": 153 | https://www.postgresql.org/docs/current/auth-trust.html 154 | EOE 155 | exit 1 156 | fi 157 | if [ 'trust' = "$POSTGRES_HOST_AUTH_METHOD" ]; then 158 | cat >&2 <<-'EOWARN' 159 | ******************************************************************************** 160 | WARNING: POSTGRES_HOST_AUTH_METHOD has been set to "trust". This will allow 161 | anyone with access to the Postgres port to access your database without 162 | a password, even if POSTGRES_PASSWORD is set. See PostgreSQL 163 | documentation about "trust": 164 | https://www.postgresql.org/docs/current/auth-trust.html 165 | In Docker's default configuration, this is effectively any other 166 | container on the same system. 167 | 168 | It is not recommended to use POSTGRES_HOST_AUTH_METHOD=trust. Replace 169 | it with "-e POSTGRES_PASSWORD=password" instead to set a password in 170 | "docker run". 
171 | ******************************************************************************** 172 | EOWARN 173 | fi 174 | } 175 | 176 | # usage: docker_process_init_files [file [file [...]]] 177 | # ie: docker_process_init_files /always-initdb.d/* 178 | # process initializer files, based on file extensions and permissions 179 | docker_process_init_files() { 180 | # psql here for backwards compatibility "${psql[@]}" 181 | psql=( docker_process_sql ) 182 | 183 | printf '\n' 184 | local f 185 | for f; do 186 | case "$f" in 187 | *.sh) 188 | # https://github.com/docker-library/postgres/issues/450#issuecomment-393167936 189 | # https://github.com/docker-library/postgres/pull/452 190 | if [ -x "$f" ]; then 191 | printf '%s: running %s\n' "$0" "$f" 192 | "$f" 193 | else 194 | printf '%s: sourcing %s\n' "$0" "$f" 195 | . "$f" 196 | fi 197 | ;; 198 | *.sql) printf '%s: running %s\n' "$0" "$f"; docker_process_sql -f "$f"; printf '\n' ;; 199 | *.sql.gz) printf '%s: running %s\n' "$0" "$f"; gunzip -c "$f" | docker_process_sql; printf '\n' ;; 200 | *.sql.xz) printf '%s: running %s\n' "$0" "$f"; xzcat "$f" | docker_process_sql; printf '\n' ;; 201 | *.sql.zst) printf '%s: running %s\n' "$0" "$f"; zstd -dc "$f" | docker_process_sql; printf '\n' ;; 202 | *) printf '%s: ignoring %s\n' "$0" "$f" ;; 203 | esac 204 | printf '\n' 205 | done 206 | } 207 | 208 | # Execute sql script, passed via stdin (or -f flag of pqsl) 209 | # usage: docker_process_sql [psql-cli-args] 210 | # ie: docker_process_sql --dbname=mydb <<<'INSERT ...' 211 | # ie: docker_process_sql -f my-file.sql 212 | # ie: docker_process_sql > "$PGDATA/pg_hba.conf" 276 | } 277 | 278 | # start socket-only postgresql server for setting up or running scripts 279 | # all arguments will be passed along as arguments to `postgres` (via pg_ctl) 280 | docker_temp_server_start() { 281 | if [ "$1" = 'postgres' ]; then 282 | shift 283 | fi 284 | 285 | # internal start of server in order to allow setup using psql client 286 | # does not listen on external TCP/IP and waits until start finishes 287 | set -- "$@" -c listen_addresses='' -p "${PGPORT:-5432}" 288 | 289 | PGUSER="${PGUSER:-$POSTGRES_USER}" \ 290 | pg_ctl -D "$PGDATA" \ 291 | -o "$(printf '%q ' "$@")" \ 292 | -w start 293 | } 294 | 295 | # stop postgresql server after done setting up user and running scripts 296 | docker_temp_server_stop() { 297 | PGUSER="${PGUSER:-postgres}" \ 298 | pg_ctl -D "$PGDATA" -m fast -w stop 299 | } 300 | 301 | # check arguments for an option that would cause postgres to stop 302 | # return true if there is one 303 | _pg_want_help() { 304 | local arg 305 | for arg; do 306 | case "$arg" in 307 | # postgres --help | grep 'then exit' 308 | # leaving out -C on purpose since it always fails and is unhelpful: 309 | # postgres: could not access the server configuration file "/var/lib/postgresql/data/postgresql.conf": No such file or directory 310 | -'?'|--help|--describe-config|-V|--version) 311 | return 0 312 | ;; 313 | esac 314 | done 315 | return 1 316 | } 317 | 318 | _main() { 319 | local uid; uid="$(id -u)" 320 | 321 | # if first arg looks like a flag, assume we want to run postgres server 322 | if [ "${1:0:1}" = '-' ]; then 323 | set -- postgres "$@" 324 | fi 325 | 326 | if [ "$1" = 'postgres' ] && ! 
_pg_want_help "$@"; then 327 | docker_setup_env 328 | 329 | # setup data directories and permissions (when run as root) 330 | docker_create_db_directories 331 | if [ "$uid" -eq 0 ]; then 332 | # then restart script as postgres user 333 | exec gosu postgres "$BASH_SOURCE" "$@" 334 | fi 335 | 336 | [ "$uid" -ne 0 ] && setup_nss_wrapper 337 | 338 | # only run initialization on an empty data directory 339 | if [ -z "$DATABASE_ALREADY_EXISTS" ]; then 340 | docker_verify_minimum_env 341 | 342 | # check dir permissions to reduce likelihood of half-initialized database 343 | ls /docker-entrypoint-initdb.d/ > /dev/null 344 | 345 | docker_init_database_dir 346 | pg_setup_hba_conf "$@" 347 | 348 | # PGPASSWORD is required for psql when authentication is required for 'local' connections via pg_hba.conf and is otherwise harmless 349 | # e.g. when '--auth=md5' or '--auth-local=md5' is used in POSTGRES_INITDB_ARGS 350 | export PGPASSWORD="${PGPASSWORD:-$POSTGRES_PASSWORD}" 351 | docker_temp_server_start "$@" 352 | 353 | docker_setup_db 354 | docker_process_init_files /docker-entrypoint-initdb.d/* 355 | 356 | docker_temp_server_stop 357 | unset PGPASSWORD 358 | 359 | cat <<-'EOM' 360 | 361 | PostgreSQL init process complete; ready for start up. 362 | 363 | EOM 364 | else 365 | cat <<-'EOM' 366 | 367 | PostgreSQL Database directory appears to contain a database; Skipping initialization 368 | 369 | EOM 370 | fi 371 | 372 | # Note- this permission wait loop is not a duplicate- due to the timing issue with docker, only one might not 373 | # be good enough 374 | permission_wait_loop 375 | fi 376 | 377 | exec "$@" 378 | } 379 | 380 | if ! _is_sourced; then 381 | _main "$@" 382 | fi 383 | -------------------------------------------------------------------------------- /fetch_tag_digest: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | # fetch the SHA digest of a tag 6 | # 7 | # example input: docker.io/timescale/timescaledb-ha:pg15-multi-builder-amd64 8 | 9 | url="$1" 10 | if [ -z "$url" ]; then 11 | echo "usage: $0 " >&2 12 | exit 1 13 | fi 14 | 15 | registry=docker.io # default 16 | tag="${url##*:}" 17 | path="${url%%:$tag}" 18 | slashes="${url//[^\/]}" 19 | 20 | case "${#slashes}" in 21 | 0) echo "the given url requires at least one / character" >&2 22 | exit 1;; 23 | 1) # nothing to do 24 | ;; 25 | 2) registry="${path%%/*}" 26 | if [ "$registry" != docker.io ]; then 27 | echo "this script only supports docker.io registries ($registry from $url)" >&2 28 | exit 1 29 | fi 30 | path="${path##$registry/}";; 31 | *) echo "invalid url ($url)" >&2 32 | exit 1;; 33 | esac 34 | 35 | namespace="${path%%/*}" 36 | repo="${path##*/}" 37 | 38 | digest="$(curl -s "https://hub.docker.com/v2/namespaces/$namespace/repositories/$repo/tags/$tag" | jq -r ".digest")" 39 | if [ "${#digest}" -ne 71 ]; then 40 | echo "invalid digest found: $digest" >&2 41 | exit 1 42 | fi 43 | 44 | echo "$registry/$namespace/$repo@$digest" 45 | -------------------------------------------------------------------------------- /pgbackrest_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function log { 4 | echo "$(date '+%Y-%m-%d %H:%M:%S') - bootstrap - $1" 5 | } 6 | 7 | [ -z "${PGB_REPO1_S3_KEY_SECRET}" ] && { 8 | log "Environment variable PGB_REPO1_S3_KEY_SECRET is not set, you should fully configure this container" 9 | exit 1 10 | } 11 | 12 | # The pgBackRest configuration needs to be shared by all 
containers in the pod 13 | # at some point in the future we may fetch it from an s3-bucket or some environment configuration, 14 | # however, for now we store the file in a mounted volume that is accessible to all pods. 15 | umask 0077 16 | mkdir -p "$(dirname "${PGBACKREST_CONFIG}")" 17 | cat > "${PGBACKREST_CONFIG}" <<__EOT__ 18 | [global] 19 | process-max=4 20 | 21 | repo1-type=s3 22 | repo1-path=${PGB_REPO1_PATH} 23 | repo1-cipher-type=none 24 | repo1-retention-diff=2 25 | repo1-retention-full=2 26 | repo1-s3-bucket=${PGB_REPO1_S3_BUCKET} 27 | repo1-s3-endpoint=s3.amazonaws.com 28 | repo1-s3-key=${PGB_REPO1_S3_KEY} 29 | repo1-s3-key-secret=${PGB_REPO1_S3_KEY_SECRET} 30 | repo1-s3-region=us-east-2 31 | start-fast=y 32 | 33 | 34 | [poddb] 35 | pg1-port=5432 36 | pg1-host-user=${POSTGRES_USER:-postgres} 37 | pg1-path=${PGDATA} 38 | pg1-socket-path=${PGSOCKET} 39 | 40 | recovery-option=standby_mode=on 41 | recovery-option=recovery_target_timeline=latest 42 | recovery-option=recovery_target_action=shutdown 43 | 44 | 45 | [global:archive-push] 46 | compress-level=3 47 | __EOT__ 48 | 49 | while ! pg_isready -h "${PGSOCKET}" -q; do 50 | log "Waiting for PostgreSQL to become available" 51 | sleep 3 52 | done 53 | 54 | pgbackrest check || { 55 | log "Creating pgBackrest stanza" 56 | pgbackrest stanza-create --log-level-stderr=info || exit 1 57 | } 58 | 59 | log "Starting pgBackrest api to listen for backup requests" 60 | exec python3 /scripts/pgbackrest-rest.py --stanza=poddb --loglevel=debug 61 | -------------------------------------------------------------------------------- /scripts/010_install_timescaledb_toolkit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | create_sql=`mktemp` 4 | 5 | # Checks to support bitnami image with same scripts so they stay in sync 6 | if [ ! -z "${BITNAMI_IMAGE_VERSION:-}" ]; then 7 | if [ -z "${POSTGRES_USER:-}" ]; then 8 | POSTGRES_USER=${POSTGRESQL_USERNAME} 9 | fi 10 | 11 | if [ -z "${POSTGRES_DB:-}" ]; then 12 | POSTGRES_DB=${POSTGRESQL_DATABASE} 13 | fi 14 | 15 | if [ -z "${PGDATA:-}" ]; then 16 | PGDATA=${POSTGRESQL_DATA_DIR} 17 | fi 18 | fi 19 | 20 | if [ -z "${POSTGRESQL_CONF_DIR:-}" ]; then 21 | POSTGRESQL_CONF_DIR=${PGDATA} 22 | fi 23 | 24 | cat <${create_sql} 25 | CREATE EXTENSION IF NOT EXISTS timescaledb_toolkit CASCADE; 26 | EOF 27 | 28 | # create extension timescaledb_toolkit in initial databases 29 | psql -U "${POSTGRES_USER}" postgres -f ${create_sql} 30 | psql -U "${POSTGRES_USER}" template1 -f ${create_sql} 31 | 32 | if [ "${POSTGRES_DB:-postgres}" != 'postgres' ]; then 33 | psql -U "${POSTGRES_USER}" "${POSTGRES_DB}" -f ${create_sql} 34 | fi 35 | -------------------------------------------------------------------------------- /scripts/augment_patroni_configuration.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | """ 4 | This is a hack to get around Issue: https://github.com/zalando/postgres-operator/issues/574 5 | 6 | This script will be deprecated as soon as we configure Patroni fully from k8s. Until that time 7 | the configure_spilo.py script is used, with its valuable output and its quirks. 
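An illustrative example of the merge semantics implemented below (hypothetical values): merging
{'postgresql': {'parameters': {'max_connections': 200}}} into a configuration that already contains
{'postgresql': {'parameters': {'shared_buffers': '1GB'}}} keeps shared_buffers and adds/overrides
max_connections; values from the source win, sibling keys in the destination are preserved.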
8 | """ 9 | import yaml 10 | import os 11 | import sys 12 | 13 | TSDB_DEFAULTS = """ 14 | postgresql: 15 | parameters: 16 | logging_collector: 'off' 17 | log_destination: 'stderr' 18 | create_replica_methods: 19 | - pgbackrest 20 | - basebackup 21 | pgbackrest: 22 | command: '/usr/bin/pgbackrest --stanza=poddb --delta restore --log-level-stderr=info' 23 | keep_data: True 24 | no_params: True 25 | no_master: True 26 | bootstrap: 27 | dcs: 28 | postgresql: 29 | recovery_conf: 30 | recovery_target_timeline: latest 31 | standby_mode: 'on' 32 | restore_command: 'pgbackrest --stanza=poddb archive-get %f "%p"' 33 | """ 34 | 35 | 36 | def merge(source, destination): 37 | """Merge source into destination. 38 | 39 | Values from source override those of destination""" 40 | for key, value in source.items(): 41 | if isinstance(value, dict): 42 | # get node or create one 43 | node = destination.setdefault(key, {}) 44 | merge(value, node) 45 | else: 46 | destination[key] = value 47 | 48 | return destination 49 | 50 | 51 | if __name__ == '__main__': 52 | if len(sys.argv) == 1: 53 | print("Usage: {0} ".format(sys.argv[0])) 54 | sys.exit(2) 55 | with open(sys.argv[1], 'r+') as f: 56 | # Not all postgresql parameters that are set in the SPILO_CONFIGURATION environment variables 57 | # are overridden by the configure_spilo.py script. 58 | # 59 | # Therefore, what we do is: 60 | # 61 | # 1. We run configure_spilo.py to generate a sane configuration 62 | # 2. We override that configuration with our sane TSDB_DEFAULTS 63 | # 3. We override that configuration with our explicitly passed on settings 64 | 65 | tsdb_defaults = yaml.safe_load(TSDB_DEFAULTS) or {} 66 | spilo_generated_configuration = yaml.safe_load(f) or {} 67 | operator_generated_configuration = yaml.safe_load(os.environ.get('SPILO_CONFIGURATION', '{}')) or {} 68 | 69 | final_configuration = merge(operator_generated_configuration, merge(tsdb_defaults, spilo_generated_configuration)) 70 | 71 | # This namespace used in etcd/consul 72 | # Other provisions are also available, but this ensures no naming collisions 73 | # for deployments in separate Kubernetes Namespaces will occur 74 | # https://github.com/zalando/patroni/blob/master/docs/ENVIRONMENT.rst#globaluniversal 75 | if 'etcd' in final_configuration and os.getenv('POD_NAMESPACE'): 76 | final_configuration['namespace'] = os.getenv('POD_NAMESPACE') 77 | 78 | f.seek(0) 79 | yaml.dump(final_configuration, f, default_flow_style=False) 80 | f.truncate() 81 | -------------------------------------------------------------------------------- /scripts/on_role_change.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cat < /dev/tcp/localhost/8081" 2>/dev/null && break 30 | done 31 | 32 | log "Triggering backup" 33 | curl -i -X POST http://localhost:8081/backups 34 | 35 | # We always exit 0 this script, otherwise the database initialization fails. 36 | exit 0 37 | -------------------------------------------------------------------------------- /scripts/timescaledb/after-create.sql: -------------------------------------------------------------------------------- 1 | -- The pre_restore and post_restore function can only be successfully executed by a very highly privileged 2 | -- user. To ensure the database owner can also execute these functions, we have to alter them 3 | -- from SECURITY INVOKER to SECURITY DEFINER functions. Setting the search_path explicitly is good practice 4 | -- for SECURITY DEFINER functions. 
5 | -- As this function does have high impact, we do not want anyone to be able to execute the function, 6 | -- but only the database owner. 7 | ALTER FUNCTION @extschema@.timescaledb_pre_restore() SET search_path = pg_catalog,pg_temp SECURITY DEFINER; 8 | ALTER FUNCTION @extschema@.timescaledb_post_restore() SET search_path = pg_catalog,pg_temp SECURITY DEFINER; 9 | REVOKE EXECUTE ON FUNCTION @extschema@.timescaledb_pre_restore() FROM public; 10 | REVOKE EXECUTE ON FUNCTION @extschema@.timescaledb_post_restore() FROM public; 11 | GRANT EXECUTE ON FUNCTION @extschema@.timescaledb_pre_restore() TO @database_owner@; 12 | GRANT EXECUTE ON FUNCTION @extschema@.timescaledb_post_restore() TO @database_owner@; 13 | 14 | -- To reduce the errors seen on pg_restore we grant access to timescaledb internal tables 15 | DO $$DECLARE r record; 16 | BEGIN 17 | FOR r IN SELECT tsch from unnest(ARRAY['_timescaledb_internal', '_timescaledb_config', '_timescaledb_catalog', '_timescaledb_cache']) tsch 18 | LOOP 19 | EXECUTE 'ALTER DEFAULT PRIVILEGES IN SCHEMA ' || quote_ident(r.tsch) || ' GRANT ALL PRIVILEGES ON TABLES TO @database_owner@'; 20 | EXECUTE 'ALTER DEFAULT PRIVILEGES IN SCHEMA ' || quote_ident(r.tsch) || ' GRANT ALL PRIVILEGES ON SEQUENCES TO @database_owner@'; 21 | EXECUTE 'GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA ' || quote_ident(r.tsch) || ' TO @database_owner@'; 22 | EXECUTE 'GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA ' || quote_ident(r.tsch) || ' TO @database_owner@'; 23 | EXECUTE 'GRANT USAGE, CREATE ON SCHEMA ' || quote_ident(r.tsch) || ' TO @database_owner@'; 24 | END LOOP; 25 | END$$; 26 | 27 | -------------------------------------------------------------------------------- /scripts/timescaledb/after-update.sql: -------------------------------------------------------------------------------- 1 | after-create.sql -------------------------------------------------------------------------------- /scripts/tsdbadmin.sql: -------------------------------------------------------------------------------- 1 | set log_statement=none; set log_min_duration_statement=-1; BEGIN; 2 | -- Source: sql/00_preparation.sql 3 | CREATE EXTENSION IF NOT EXISTS pgcrypto; 4 | 5 | DO LANGUAGE plpgsql 6 | $$ 7 | BEGIN 8 | IF to_regrole('tsdbowner') IS NULL 9 | THEN 10 | CREATE ROLE tsdbowner NOLOGIN; 11 | END IF; 12 | IF to_regrole('tsdbadmin') IS NULL 13 | THEN 14 | CREATE ROLE tsdbadmin; 15 | END IF; 16 | GRANT tsdbowner TO tsdbadmin WITH admin option; 17 | END; 18 | $$; 19 | 20 | CREATE SCHEMA IF NOT EXISTS tsdbadmin AUTHORIZATION tsdbowner; 21 | 22 | -- Default privileges for public are quite liberal in PostgreSQL, therefore 23 | -- we revoke USAGE from public on the schema 24 | -- (comparable to the execute bit on a directory on a UNIX system). 25 | -- The second barrier is to ensure that functions that are created are not 26 | -- allowed to be executed by public, but only by roles that have been (indirectly) 27 | -- granted the tsdb_administrator role. 28 | REVOKE USAGE ON SCHEMA tsdbadmin FROM public; 29 | ALTER DEFAULT PRIVILEGES IN SCHEMA tsdbadmin REVOKE ALL ON FUNCTIONS FROM public; 30 | ALTER DEFAULT PRIVILEGES IN SCHEMA tsdbadmin GRANT EXECUTE ON FUNCTIONS TO tsdbadmin; 31 | -- Source: sql/10_assert_admin.sql 32 | /* 33 | assert_admin will do nothing if the current role is directly or indirectly 34 | a grantee of the username role with the admin option. Otherwise it will raise an exception. 
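For example (with a hypothetical role name):

    SELECT tsdbadmin.assert_admin('bob');

returns silently when the calling role holds "bob" WITH ADMIN OPTION (directly, through a chain of
admin grants, or because the caller is a superuser), and otherwise raises SQLSTATE 42501.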
35 | 36 | The reason to use exceptions here is to offer a similar user experience to the regular 37 | way of administering users, i.e. DROP USER abc normally raises a sqlstate 42501 and 38 | the application can handle that error. The same goes for these functions: they will throw 39 | the permission denied error. 40 | */ 41 | CREATE OR REPLACE FUNCTION tsdbadmin.assert_admin( 42 | username name 43 | ) 44 | RETURNS void 45 | LANGUAGE plpgsql 46 | SET search_path TO 'pg_catalog' 47 | AS $function$ 48 | DECLARE 49 | -- We need to ensure we get the correct role. As we may be called from Security Definer functions, we should 50 | -- be looking at the session_user; however, if that has been overruled by a SET ROLE, we should take 51 | -- the current role. 52 | admin_role name := cast(CASE current_setting('role') WHEN 'none' THEN session_user ELSE current_setting('role')::name END AS name); 53 | BEGIN 54 | -- This will fail fast if the username does not exist 55 | PERFORM cast(username as regrole); 56 | 57 | -- A superuser is allowed to do everything, so we'll just allow it outright in that situation 58 | IF (SELECT rolsuper FROM pg_roles WHERE rolname = admin_role) 59 | THEN 60 | RETURN; 61 | END IF; 62 | 63 | /* 64 | We should never assert admin over a superuser or a createrole user if we're not a superuser. 65 | 66 | This is purely a second line of defense - the user to be administered should never have 67 | superuser, but errors can be made, for example: 68 | tsdbadmin was granted a role with the superuser attribute 69 | a role that is managed by tsdbadmin was inadvertently given the createrole attribute 70 | 71 | The exploit would be that you could then reset the password for such a user and therefore authenticate 72 | using that user and exploit the superuser/createrole attributes for your own (limited) account. 73 | 74 | Therefore we are explicit about rejecting asserting admin 75 | over any role with the superuser or createrole attributes. 76 | */ 77 | IF (SELECT rolsuper or rolcreaterole FROM pg_roles WHERE rolname = username) 78 | THEN 79 | RAISE EXCEPTION USING 80 | ERRCODE = '42501', 81 | MESSAGE = 'must be superuser to alter superusers/createrole users'; 82 | END IF; 83 | 84 | -- We want to find out if the user that is going to be created/altered/dropped is allowed to be 85 | -- administered by the admin_role. 86 | -- We allow this to be the case if admin_role has been granted username (WITH ADMIN OPTION), 87 | -- or if there exists a chain of grants WITH ADMIN that leads to the admin_role. 88 | -- To accomplish this, we use a recursive query, which walks through all the roles that 89 | -- have received the grants. 90 | -- The final step of the query is to figure out if admin_role is actually in this generated list. 91 | -- 92 | -- Note: PostgreSQL itself does not allow recursive grants, so this recursive CTE 93 | -- construct is guaranteed to be finite.
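    -- To trace the query with the documentation's example chain (GRANT child TO parent WITH ADMIN OPTION,
    -- GRANT parent TO grandparent WITH ADMIN OPTION): for username = 'child' the non-recursive term
    -- returns parent, the recursive term then adds grandparent, and the final membership check
    -- therefore passes when admin_role is either of them.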
94 | IF EXISTS ( 95 | WITH RECURSIVE admin_parents AS ( 96 | SELECT 97 | member 98 | FROM 99 | pg_auth_members 100 | WHERE 101 | roleid = username::regrole 102 | AND admin_option = true 103 | UNION ALL 104 | SELECT 105 | grandparent.member 106 | FROM 107 | pg_auth_members grandparent 108 | JOIN 109 | admin_parents ON (grandparent.roleid = admin_parents.member) 110 | AND grandparent.admin_option = true 111 | ) 112 | SELECT 113 | FROM 114 | admin_parents 115 | WHERE 116 | member = admin_role::regrole) 117 | THEN 118 | RETURN; 119 | END IF; 120 | 121 | -- We raise an exception by default, to ensure whatever the flow is, if we get to this part of the function 122 | -- no assertion should succeed. 123 | RAISE EXCEPTION USING 124 | ERRCODE = '42501', 125 | MESSAGE = format('user %s does not have admin option on role "%s"', admin_role::regrole::text, username); 126 | END; 127 | $function$; 128 | -- Source: sql/11_assert_password_requirements.sql 129 | /* 130 | The reason to use exceptions here is to offer a similar user experience to the regular 131 | way of enforcing password requirements, for example when using the passwordcheck 132 | extension, you'll get the following error: 133 | 134 | ERROR: 22023: password is too short 135 | 136 | */ 137 | CREATE OR REPLACE FUNCTION tsdbadmin.assert_password_requirements( 138 | password text 139 | ) 140 | RETURNS void 141 | LANGUAGE plpgsql 142 | SET search_path TO 'pg_catalog' 143 | SET log_statement TO 'none' -- We do not want any function handling passwords to be logged 144 | AS $function$ 145 | DECLARE 146 | error_message text; 147 | sqlstate_code text; 148 | minimum_password_length int := 8; 149 | BEGIN 150 | IF length(password) < minimum_password_length 151 | THEN 152 | RAISE EXCEPTION USING 153 | ERRCODE = '28LEN', 154 | MESSAGE = format('New password has length %s, minimum length is %s', length(password), minimum_password_length); 155 | END IF; 156 | EXCEPTION WHEN OTHERS THEN 157 | -- We want to rethrow errors that occured, but we want to remove the context, 158 | -- as the context may contain the password, so we rethrow without that context. 
159 | GET STACKED DIAGNOSTICS 160 | error_message = MESSAGE_TEXT, 161 | sqlstate_code = RETURNED_SQLSTATE; 162 | RAISE EXCEPTION USING 163 | ERRCODE = sqlstate_code, 164 | MESSAGE = error_message; 165 | END; 166 | $function$; 167 | -- Source: sql/30_reset_password.sql 168 | CREATE OR REPLACE FUNCTION tsdbadmin.reset_password( 169 | INOUT username name, 170 | INOUT password text DEFAULT NULL, 171 | password_length integer DEFAULT NULL, 172 | password_encryption text DEFAULT NULL 173 | ) 174 | RETURNS record 175 | LANGUAGE plpgsql 176 | SECURITY DEFINER 177 | SET search_path TO 'pg_catalog' 178 | SET log_statement TO 'none' -- We do not want any function handling passwords to be logged 179 | SET log_min_duration_statement TO '-1' 180 | SET pg_stat_statements.track_utility TO 'off' 181 | AS $function$ 182 | DECLARE 183 | minimum_password_length int := 8; 184 | error_message text; 185 | sqlstate_code text; 186 | prev_password_encryption text; 187 | BEGIN 188 | PERFORM tsdbadmin.assert_admin(username); 189 | 190 | -- We're only setting the defaults here, to allow upstream function calls to 191 | -- use NULL as a signal to use the default values 192 | password_length := coalesce(password_length, 16); 193 | 194 | IF password IS NULL 195 | THEN 196 | SELECT substr(encode(random_bytes, 'base64'), 1, password_length) 197 | INTO password 198 | FROM gen_random_bytes(1024) AS s(random_bytes); 199 | END IF; 200 | 201 | PERFORM tsdbadmin.assert_password_requirements(password); 202 | 203 | prev_password_encryption := current_setting('password_encryption'); 204 | password_encryption := coalesce(password_encryption, prev_password_encryption); 205 | PERFORM set_config('password_encryption'::text, password_encryption, true); 206 | 207 | EXECUTE format('ALTER USER %I WITH ENCRYPTED PASSWORD %L', username, password); 208 | 209 | PERFORM set_config('password_encryption'::text, prev_password_encryption, true); 210 | 211 | EXCEPTION WHEN OTHERS THEN 212 | -- We want to rethrow errors that occured, but we want to remove the context, 213 | -- as the context may contain the password, so we rethrow without that context. 
214 | GET STACKED DIAGNOSTICS 215 | error_message = MESSAGE_TEXT, 216 | sqlstate_code = RETURNED_SQLSTATE; 217 | RAISE EXCEPTION USING 218 | ERRCODE = sqlstate_code, 219 | MESSAGE = error_message; 220 | END; 221 | $function$; 222 | 223 | DO LANGUAGE plpgsql 224 | $$ 225 | DECLARE 226 | pgcrypto_namespace oid := (SELECT extnamespace FROM pg_extension WHERE extname='pgcrypto'); 227 | BEGIN 228 | EXECUTE format('ALTER FUNCTION tsdbadmin.reset_password SET search_path TO pg_catalog, %s;', pgcrypto_namespace::regnamespace); 229 | 230 | /* We ensure the dependency we created on pgcrypto.gen_random_bytes is part of the catalogs 231 | 232 | ERROR: 2BP01: cannot drop extension pgcrypto because other objects depend on it 233 | DETAIL: function reset_password(name,text,integer,text) depends on function pgcrypto.gen_random_bytes(integer) 234 | 235 | */ 236 | INSERT INTO pg_catalog.pg_depend (classid, objid, objsubid, refclassid, refobjid, refobjsubid, deptype) 237 | SELECT 238 | 'pg_catalog.pg_proc'::regclass, 239 | 'tsdbadmin.reset_password'::regproc, 240 | 0, 241 | 'pg_catalog.pg_proc'::regclass, 242 | format('%s.gen_random_bytes', pgcrypto_namespace::regnamespace)::regproc, 243 | 0, 244 | 'n' 245 | ; 246 | END; 247 | $$; 248 | -- Source: sql/33_alter_user.sql 249 | CREATE OR REPLACE FUNCTION tsdbadmin.alter_user( 250 | INOUT username name, 251 | createdb boolean DEFAULT NULL, 252 | inherit boolean DEFAULT NULL, 253 | login boolean DEFAULT NULL, 254 | connection_limit integer DEFAULT NULL, 255 | valid_until timestamp with time zone DEFAULT NULL, 256 | new_name name DEFAULT NULL, 257 | password text DEFAULT NULL, 258 | password_encryption text DEFAULT NULL 259 | ) 260 | RETURNS name 261 | LANGUAGE plpgsql 262 | SECURITY DEFINER 263 | SET search_path TO 'pg_catalog' 264 | SET log_statement TO 'none' -- We do not want any function handling passwords to be logged 265 | AS $function$ 266 | DECLARE 267 | error_message text; 268 | sqlstate_code text; 269 | statement text; 270 | role_r pg_catalog.pg_roles; 271 | BEGIN 272 | PERFORM tsdbadmin.assert_admin(username); 273 | 274 | -- Changing a username clears any md5 passwords, therefore 275 | -- we should change the username before setting a potentially new password 276 | IF new_name IS NOT NULL AND new_name != username 277 | THEN 278 | EXECUTE format('ALTER USER %I RENAME TO %I', username, new_name); 279 | username := new_name; 280 | END IF; 281 | 282 | IF password IS NOT NULL THEN 283 | PERFORM tsdbadmin.reset_password(username, password, password_encryption => password_encryption); 284 | END IF; 285 | 286 | EXECUTE format('ALTER USER %I WITH %s %s %s %s %s', 287 | username, 288 | CASE WHEN createdb THEN 'CREATEDB' WHEN NOT createdb THEN 'NOCREATEDB' END, 289 | CASE WHEN inherit THEN 'INHERIT' WHEN NOT inherit THEN 'NOINHERIT' END, 290 | CASE WHEN login THEN 'LOGIN' WHEN NOT login THEN 'NOLOGIN' END, 291 | CASE WHEN connection_limit IS NOT NULL THEN format('CONNECTION LIMIT %s', connection_limit) END, 292 | CASE WHEN valid_until IS NOT NULL THEN format('VALID UNTIL %L', valid_until) END); 293 | 294 | EXCEPTION WHEN OTHERS THEN 295 | -- We want to rethrow errors that occurred, but we want to remove the context, 296 | -- as the context may contain the password, so we rethrow without that context.
297 | GET STACKED DIAGNOSTICS 298 | error_message = MESSAGE_TEXT, 299 | sqlstate_code = RETURNED_SQLSTATE; 300 | RAISE EXCEPTION USING 301 | ERRCODE = sqlstate_code, 302 | MESSAGE = error_message; 303 | END; 304 | $function$; 305 | 306 | 314 | -- Source: sql/36_create_user.sql 315 | CREATE OR REPLACE FUNCTION tsdbadmin.create_user( 316 | INOUT username name, 317 | createdb boolean DEFAULT false, 318 | inherit boolean DEFAULT true, 319 | login boolean DEFAULT true, 320 | connection_limit integer DEFAULT NULL, 321 | valid_until timestamp with time zone DEFAULT NULL, 322 | if_not_exists boolean DEFAULT false, 323 | INOUT password text DEFAULT NULL, 324 | password_length integer DEFAULT 16, 325 | password_encryption text DEFAULT NULL, 326 | OUT created boolean 327 | ) 328 | RETURNS record 329 | LANGUAGE plpgsql 330 | SECURITY DEFINER 331 | SET search_path TO 'pg_catalog' 332 | SET log_statement TO 'none' -- We do not want any function handling passwords to be logged 333 | AS $function$ 334 | DECLARE 335 | error_message text; 336 | sqlstate_code text; 337 | BEGIN 338 | created := false; 339 | IF to_regrole(username) IS NULL OR if_not_exists = false 340 | THEN 341 | EXECUTE format('CREATE USER %I', username); 342 | EXECUTE format('GRANT %I TO %I WITH ADMIN OPTION', username, CASE current_setting('role') WHEN 'none' THEN session_user ELSE current_setting('role') END); 343 | SELECT rp.password 344 | INTO password 345 | FROM tsdbadmin.reset_password(username, password, password_length => password_length, password_encryption => password_encryption) AS rp; 346 | created := true; 347 | ELSIF password IS NOT NULL 348 | THEN 349 | SELECT rp.password 350 | INTO password 351 | FROM tsdbadmin.reset_password(username, password, password_encryption => password_encryption) AS rp; 352 | END IF; 353 | 354 | PERFORM tsdbadmin.alter_user( 355 | username => username, 356 | password => null, 357 | createdb => createdb, 358 | inherit => inherit, 359 | login => login, 360 | connection_limit => connection_limit, 361 | valid_until => valid_until 362 | ); 363 | EXCEPTION WHEN OTHERS THEN 364 | -- We want to rethrow errors that occurred, but we want to remove the context, 365 | -- as the context may contain the password, so we rethrow without that context. 366 | GET STACKED DIAGNOSTICS 367 | error_message = MESSAGE_TEXT, 368 | sqlstate_code = RETURNED_SQLSTATE; 369 | RAISE EXCEPTION USING 370 | ERRCODE = sqlstate_code, 371 | MESSAGE = error_message; 372 | END; 373 | $function$; 374 | -- Source: sql/38_drop_user.sql 375 | CREATE OR REPLACE FUNCTION tsdbadmin.drop_user( 376 | INOUT username name, 377 | if_exists boolean DEFAULT false 378 | ) 379 | RETURNS name 380 | LANGUAGE plpgsql 381 | SECURITY DEFINER 382 | SET search_path to 'pg_catalog' 383 | AS $function$ 384 | BEGIN 385 | IF if_exists AND to_regrole(username) IS NULL 386 | THEN 387 | RAISE NOTICE USING 388 | ERRCODE = '00000', 389 | MESSAGE = format('role "%s" does not exist, skipping', username); 390 | RETURN; 391 | END IF; 392 | 393 | PERFORM tsdbadmin.assert_admin(username); 394 | 395 | EXECUTE format('DROP USER %I', username); 396 | END; 397 | $function$ 398 | ; 399 | COMMENT ON SCHEMA tsdbadmin IS $comment$ 400 | # tsdbadmin 401 | 402 | tsdbadmin allows users to create, alter and drop users in a PostgreSQL instance. 403 | It does so by providing some utility functions in the `postgres` database.
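For example, a role that has been granted `tsdbowner` can manage a user end to end (`jdoe` is only a placeholder name here; each function is documented in detail below):

```sql
SELECT * FROM tsdbadmin.create_user('jdoe');                  -- returns a generated password
SELECT * FROM tsdbadmin.alter_user('jdoe', createdb => true);
SELECT * FROM tsdbadmin.reset_password('jdoe');
SELECT * FROM tsdbadmin.drop_user('jdoe');
```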
404 | 405 | This can also be achieved by giving the user the `CREATEROLE` privilege; however, 406 | that privilege extends to *all roles* that are not `superuser` roles, 407 | which does not allow for very fine-grained control over roles and users. 408 | 409 | By using these functions, any user that has the `tsdbowner` role granted 410 | will be able to: 411 | 412 | * Create new roles 413 | * Alter roles that it created* 414 | * Drop roles that it created* 415 | 416 | *: Or roles that have been created by a role that it created, recursively 417 | 418 | This in turn allows you to delegate the management of roles to separate users, 419 | without them being able to manage each other's roles. 420 | 421 | # Functions 422 | These functions allow you to fully administer your roles. 423 | 424 | * [alter_user](#alter_user) 425 | * [create_user](#create_user) 426 | * [drop_user](#drop_user) 427 | * [reset_password](#reset_password) 428 | 429 | And these functions are utility functions that are also installed: 430 | 431 | * [assert_admin](#assert_admin) 432 | * [assert_password_requirements](#assert_password_requirements) 433 | 434 | # User facing functions 435 | 436 | ## alter_user() 437 | alter_user changes the attributes of a PostgreSQL role. 438 | 439 | Users can only alter those users for which they have been granted the 440 | `WITH ADMIN OPTION`, either directly or through a chain of such grants. 441 | When users are created using the [create_user](#create_user) function, the 442 | `WITH ADMIN OPTION` is automatically granted to the creator. 443 | 444 | For example, user `grandparent` has created the users `child1` and `child2` using 445 | the `create_user` function. 446 | Afterwards, `child1` has created the user `grandchild1` and `child2` has created 447 | the user `grandchild2`. 448 | 449 | * `grandchild1` can now be altered by both `child1` and `grandparent` 450 | * `grandchild2` can now be altered by both `child2` and `grandparent` 451 | * `grandchild1` cannot be altered by `child2` 452 | * `grandchild2` cannot be altered by `child1` 453 | 454 | ### Arguments 455 | | Name | Description | Example | Default | 456 | |:--|:--|:--|:--| 457 | | username | User or role name to be altered | `jdoe` | | 458 | | createdb | `CREATEDB` parameter¹ | `false` | `NULL` (no change) | 459 | | inherit | `INHERIT` parameter¹ | `true` | `NULL` (no change) | 460 | | login | `LOGIN` parameter¹ | `true` | `NULL` (no change) | 461 | | connection_limit | `CONNECTION LIMIT` parameter¹ | `10` | `NULL` (no change) | 462 | | valid_until | `VALID UNTIL` parameter¹ | `2020-02-01 10:00` | `NULL` (no change) | 463 | | new_name | rename to `new_name`¹ | `johndoe` | `NULL` (no change) | 464 | | password | `PASSWORD` parameter¹ | `g6yuAFCz9Yv5ZMA` | `NULL` (no change) | 465 | | password_encryption | The hashing algorithm² | `scram-sha-256` | `NULL` (default) | 466 | 467 | 1. [ALTER ROLE](https://www.postgresql.org/docs/current/sql-alterrole.html) documentation 468 | 2.
[password_encryption]( 469 | https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-PASSWORD-ENCRYPTION) 470 | documentation 471 | 472 | ### Return value 473 | | Name | Description | Example | 474 | |:--|:--|:--| 475 | | username | The user that was altered | `johndoe` | 476 | 477 | ### Alter User Examples 478 | 479 | **Allow `jdoe` to create databases** 480 | ```sql 481 | SELECT * FROM tsdbadmin.alter_user('jdoe', createdb => true); 482 | ``` 483 | **Rename `jdoe` to `johndoe`** 484 | ```sql 485 | SELECT * FROM tsdbadmin.alter_user('jdoe', new_name => 'johndoe'); 486 | ``` 487 | 488 | ## create_user() 489 | create_user adds a new role to a PostgreSQL database cluster. 490 | 491 | Every user that has privileges to execute `create_user` can create new database 492 | roles. Roles created through this function are granted to the creator with the 493 | [admin option](https://www.postgresql.org/docs/current/sql-grant.html#SQL-GRANT-DESCRIPTION-ROLES). 494 | 495 | 496 | If no password is provided, a new password will be generated using the 497 | [reset_password](#reset_password) function. 498 | 499 | ### Arguments 500 | | Name | Description | Example | Default | 501 | |:--|:--|:--|:--| 502 | | username | User or role name to be created | `jdoe` | | 503 | | createdb | `CREATEDB` parameter¹ | `true` | `false` | 504 | | inherit | `INHERIT` parameter¹ | `false` | `true` | 505 | | login | `LOGIN` parameter¹ | `false` | `true` | 506 | | connection_limit | `CONNECTION LIMIT` parameter¹ | `10` | `NULL` (no limit) | 507 | | valid_until | `VALID UNTIL` parameter¹ | `2020-02-01` | `NULL` (no limit) | 508 | | if_not_exists | If set, does not raise error if user exists | `true` | `false` | 509 | | password | `PASSWORD` parameter¹ | `g6yuAFCz9Yv5ZMA` | `NULL` (auto-generate) | 510 | | password_length | Set length of auto-generated password | 32 | 16 | 511 | | password_encryption | The hashing algorithm² | `scram-sha-256` | `NULL` (default) | 512 | 513 | 1. [CREATE ROLE](https://www.postgresql.org/docs/current/sql-createrole.html) 514 | documentation 515 | 2. [password_encryption]( 516 | https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-PASSWORD-ENCRYPTION) 517 | documentation 518 | 519 | ### Return values 520 | | Name | Description | Example | 521 | |:--|:--|:--| 522 | | username | The user that was created | `jdoe` | 523 | | password | The password that was set for this user | `lY0WYsa3KI00Myg` | 524 | 525 | ## drop_user() 526 | 527 | drop_user removes the specified role. 528 | 529 | Users can only drop those users for which they have been granted the 530 | `WITH ADMIN OPTION`, either directly or through a chain of such grants. 531 | When users are created using the [create_user](#create_user) function, the 532 | `WITH ADMIN OPTION` is automatically granted to the creator. 533 | 534 | ### Arguments 535 | | Name | Description | Example | Default | 536 | |:--|:--|:--|:--| 537 | | username | User or role name to be dropped | `jdoe` | | 538 | | if_exists | If set, does not raise error if user does not exist | `true` | `false` | 539 | 540 | ### Return value 541 | | Name | Description | Example | 542 | |:--|:--|:--| 543 | | username | The user that was dropped | `jdoe` | 544 | 545 | ## reset_password() 546 | 547 | reset_password changes the password of the specified user. If no password is specified, 548 | it will auto-generate one.
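For example, to force a specific hashing algorithm for the new password and then confirm the format of the stored verifier (the `pg_authid` lookup is purely illustrative and requires superuser):

```sql
SELECT * FROM tsdbadmin.reset_password('jdoe', password_encryption => 'scram-sha-256');
-- the stored verifier now starts with 'SCRAM-SHA-256$':
SELECT rolpassword LIKE 'SCRAM-SHA-256$%' FROM pg_authid WHERE rolname = 'jdoe';
```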
549 | 550 | The context of the `ALTER ROLE` statement that will (re)set the password is changed so 551 | that the statement containing the password will not be logged, regardless of the 552 | current session value of the [`log_statement`]( 553 | https://www.postgresql.org/docs/current/runtime-config-logging.html#GUC-LOG-STATEMENT) 554 | parameter. 555 | 556 | This function relies on the `gen_random_bytes` 557 | function of the [`pgcrypto`](https://www.postgresql.org/docs/current/pgcrypto.html) 558 | extension to generate new passwords, which is documented to generate cryptographically 559 | strong random bytes. 560 | 561 | The currently supported values for `password_encryption` are those accepted by PostgreSQL itself, for example `scram-sha-256` and `md5`. 562 | ### Arguments 563 | | Name | Description | Example | Default | 564 | |:--|:--|:--|:--| 565 | | username | User or role name whose password is to be reset | `jdoe` | | 566 | | password | If set, set this password | `g6yuAFCz9Yv5ZMA` | `NULL` (auto-generate) | 567 | | password_length | Set length of auto-generated password | 32 | 16 | 568 | | password_encryption | The hashing algorithm¹ | `scram-sha-256` | `NULL` (default) | 569 | 570 | 1. [password_encryption]( 571 | https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-PASSWORD-ENCRYPTION) 572 | documentation 573 | 574 | ### Return values 575 | | Name | Description | Example | 576 | |:--|:--|:--| 577 | | username | The user whose password was (re)set | `jdoe` | 578 | | password | The password that was set for this user | `lY0WYsa3KI00Myg` | 579 | 580 | ### Reset Password examples 581 | 582 | **Generate new password for jdoe** 583 | ```sql 584 | SELECT * FROM tsdbadmin.reset_password('jdoe'); 585 | ``` 586 | **Set new password for jdoe** 587 | ```sql 588 | SELECT * FROM tsdbadmin.reset_password('jdoe', password => 'ThisIsNotAStrongPassword'); 589 | ``` 590 | 591 | **Reset password for jdoe, with `md5` hashing algorithm** 592 | ```sql 593 | SELECT * FROM tsdbadmin.reset_password('jdoe', password_encryption => 'md5'); 594 | ``` 595 | 596 | # Utility functions 597 | 598 | ## assert_admin() 599 | 600 | > WARNING: This function does not return a value; on assertion failure it raises an exception. 601 | 602 | assert_admin will do nothing if the current role is directly or indirectly a grantee 603 | of the `username` role with the admin option. Otherwise it will raise an exception. 604 | 605 | ### Arguments 606 | | Name | Description | Example | Default | 607 | |:--|:--|:--|:--| 608 | | username | User or role name to be verified | `jdoe` | | 609 | 610 | ### Assert Admin Examples 611 | 612 | * Role `grandparent` is member `WITH ADMIN` of `parent` 613 | * Role `grandparent` is member `WITH ADMIN` of `aunt` 614 | * Role `parent` is member `WITH ADMIN` of `child` 615 | 616 | **`grandparent` is admin of `child`** 617 | ```sql 618 | SET ROLE 'grandparent'; 619 | SELECT tsdbadmin.assert_admin('child'); 620 | assert_admin 621 | -------------- 622 | 623 | (1 row) 624 | ``` 625 | 626 | **`aunt` is not admin of `child`** 627 | ```sql 628 | SET ROLE 'aunt'; 629 | SELECT tsdbadmin.assert_admin('child'); 630 | ERROR: user aunt does not have admin option on role "child" 631 | ``` 632 | 633 | ## assert_password_requirements() 634 | 635 | > WARNING: This function does not return a value; on assertion failure it raises an exception. 636 | 637 | assert_password_requirements will do nothing if the password passes validation; otherwise it will 638 | raise an exception. 639 | 640 | Currently implemented requirements: 641 | 642 | 1.
Password Length should be 8 characters or more 643 | 644 | ### Arguments 645 | | Name | Description | Example | Default | 646 | |:--|:--|:--|:--| 647 | | password | The password to verify | `ttd8FXLMCKatAfl` | | 648 | $comment$; 649 | COMMIT; 650 | -------------------------------------------------------------------------------- /sources/sources.list.amd64: -------------------------------------------------------------------------------- 1 | # Since we're building in AWS, this should keep package fetches from getting throttled 2 | # 3 | # amd64: 4 | 5 | deb http://us-east-1.ec2.archive.ubuntu.com/ubuntu/ jammy main restricted 6 | deb http://us-east-1.ec2.archive.ubuntu.com/ubuntu/ jammy-updates main restricted 7 | deb http://us-east-1.ec2.archive.ubuntu.com/ubuntu/ jammy universe 8 | deb http://us-east-1.ec2.archive.ubuntu.com/ubuntu/ jammy-updates universe 9 | deb http://us-east-1.ec2.archive.ubuntu.com/ubuntu/ jammy multiverse 10 | deb http://us-east-1.ec2.archive.ubuntu.com/ubuntu/ jammy-updates multiverse 11 | deb http://us-east-1.ec2.archive.ubuntu.com/ubuntu/ jammy-backports main restricted universe multiverse 12 | deb http://security.ubuntu.com/ubuntu jammy-security main restricted 13 | deb http://security.ubuntu.com/ubuntu jammy-security universe 14 | deb http://security.ubuntu.com/ubuntu jammy-security multiverse 15 | -------------------------------------------------------------------------------- /sources/sources.list.arm64: -------------------------------------------------------------------------------- 1 | # Since we're building in AWS, this should keep package fetches from getting throttled 2 | # 3 | # arm64: 4 | 5 | deb http://us-east-1.ec2.ports.ubuntu.com/ubuntu-ports/ jammy main restricted 6 | deb http://us-east-1.ec2.ports.ubuntu.com/ubuntu-ports/ jammy-updates main restricted 7 | deb http://us-east-1.ec2.ports.ubuntu.com/ubuntu-ports/ jammy universe 8 | deb http://us-east-1.ec2.ports.ubuntu.com/ubuntu-ports/ jammy-updates universe 9 | deb http://us-east-1.ec2.ports.ubuntu.com/ubuntu-ports/ jammy multiverse 10 | deb http://us-east-1.ec2.ports.ubuntu.com/ubuntu-ports/ jammy-updates multiverse 11 | deb http://us-east-1.ec2.ports.ubuntu.com/ubuntu-ports/ jammy-backports main restricted universe multiverse 12 | deb http://ports.ubuntu.com/ubuntu-ports jammy-security main restricted 13 | deb http://ports.ubuntu.com/ubuntu-ports jammy-security universe 14 | deb http://ports.ubuntu.com/ubuntu-ports jammy-security multiverse 15 | -------------------------------------------------------------------------------- /timescaledb_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function log { 4 | echo "$(date '+%Y-%m-%d %H:%M:%S') - bootstrap - $1" 5 | } 6 | 7 | # Making sure the data directory exists and has the right permission 8 | install -m 0700 -d "${PGDATA}" 9 | 10 | # pgBackRest container 11 | [ "${K8S_SIDECAR}" == "pgbackrest" ] && exec /pgbackrest_entrypoint.sh 12 | 13 | # Spilo is the original Docker image containing Patroni. The image uses 14 | # some scripts to convert a SPILO_CONFIGURATION into a configuration for Patroni. 15 | # At some point, we want to probably get rid of this script and do all this ourselves. 16 | # For now, if the environment variable is set, we consider that a feature flag to use 17 | # the original Spilo configuration script 18 | [ ! 
-z "${SPILO_CONFIGURATION}" ] && { 19 | python3 /scripts/configure_spilo.py patroni patronictl certificate 20 | 21 | # The current postgres-operator does not pass on all the variables set by the Custom Resource. 22 | # We need a bit of extra work to be done 23 | # Issue: https://github.com/zalando/postgres-operator/issues/574 24 | python3 /scripts/augment_patroni_configuration.py /home/postgres/postgres.yml 25 | } 26 | 27 | if [ -f "${PGDATA}/postmaster.pid" ]; then 28 | # the postmaster will refuse to start if the pid of the pidfile is currently 29 | # in use by the same OS user. This protection mechanism however is not strict 30 | # enough in a container environment, as we only have the pids in our own namespace. 31 | # The Volume containing the data directory could accidentally be mounted 32 | # inside multiple containers, so relying on visibility of the pid is not enough. 33 | # 34 | # There is only 1 way for us to communicate to the other postmaster (in another container?) 35 | # on the same $PGDATA: by removing the pidfile. 36 | # 37 | # The other postmaster will shutdown immediately as soon as it determines that its 38 | # pidfile has been removed. This is a Very Good Thing: it prevents multiple postmasters 39 | # on the same directory, even in a container environment. 40 | # See also https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=7e2a18) 41 | # 42 | # The downside of this change is that it will delay the startup of a crashed container; 43 | # as we're dealing with data, we'll choose correctness over uptime in this instance. 44 | log "Removing stale pidfile ..." 45 | rm "${PGDATA}/postmaster.pid" 46 | log "Sleeping a little to ensure no other postmaster is running anymore" 47 | sleep 65 48 | fi 49 | 50 | exec patroni /home/postgres/postgres.yml 51 | --------------------------------------------------------------------------------