├── .dockerignore ├── .editorconfig ├── .github ├── dependabot.yml └── workflows │ └── docker-publish.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── compose_example ├── Dockerfile ├── docker-compose.yml ├── pgadmin_entrypoint.sh ├── powa-web.conf └── servers.json ├── conf.sh ├── docker-compose.test.yml └── tests ├── Dockerfile ├── create_extensions.sql └── wait-for-postgres.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | # ignore .git and .cache folders 2 | .git 3 | .cache 4 | .dccache 5 | 6 | # ignore all markdown files (md) except those that start with README, but including README-secret.md 7 | *.md 8 | !README*.md 9 | README-secret.md 10 | 11 | # ignore secrets 12 | secrets 13 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # https://editorconfig.org/ 2 | 3 | root = true 4 | 5 | [*] 6 | charset = utf-8 7 | end_of_line = lf 8 | indent_size = 4 9 | indent_style = tab 10 | insert_final_newline = true 11 | trim_trailing_whitespace = true 12 | 13 | [*.{conf,html,xml,json,yaml,yml}] 14 | indent_size = 2 15 | indent_style = space 16 | 17 | [*.{md,csv}] 18 | trim_trailing_whitespace = false 19 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: docker 4 | directory: / 5 | schedule: 6 | interval: weekly 7 | groups: 8 | img-dependencies: 9 | patterns: 10 | - "*" 11 | 12 | - package-ecosystem: github-actions 13 | directory: / 14 | schedule: 15 | interval: weekly 16 | groups: 17 | ci-dependencies: 18 | patterns: 19 | - "*" 20 | -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow uses actions that are not certified by GitHub. 2 | # They are provided by a third-party and are governed by 3 | # separate terms of service, privacy policy, and support 4 | # documentation. 
5 | 6 | name: Create and publish a Docker image 7 | 8 | on: 9 | push: 10 | branches: ['master'] 11 | pull_request: 12 | branches: ['master'] 13 | schedule: 14 | # postgis/postgis update schedule is '15 5 * * 1' 15 | # https://github.com/postgis/docker-postgis/blob/master/.github/workflows/main.yml 16 | - cron: '15 7 * * 1' 17 | 18 | env: 19 | REGISTRY: ghcr.io 20 | TEST_TAG: ${{ github.repository }}:test 21 | COMPOSE_FILE: ./docker-compose.test.yml 22 | LATEST_TAG: 17-3.5 23 | 24 | jobs: 25 | build-and-push-image: 26 | runs-on: ubuntu-latest 27 | permissions: 28 | contents: read 29 | packages: write 30 | pull-requests: read 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | postgres: [17, 16, 15, 14, 13] 36 | postgis: ['3.5'] 37 | 38 | env: 39 | BASE_IMAGE_TAG: ${{ matrix.postgres }}-${{ matrix.postgis }} 40 | 41 | steps: 42 | - name: Checkout repository 43 | uses: actions/checkout@v4 44 | 45 | - name: Set up QEMU 46 | uses: docker/setup-qemu-action@v3 47 | 48 | - name: Set up Docker Buildx 49 | uses: docker/setup-buildx-action@v3 50 | 51 | - name: Extract metadata (tags, labels) for Docker 52 | id: meta 53 | uses: docker/metadata-action@v5 54 | with: 55 | images: ${{ env.REGISTRY }}/${{ github.repository }} 56 | tags: ${{ env.BASE_IMAGE_TAG }} 57 | 58 | - name: Build and export to Docker 59 | uses: docker/build-push-action@v6 60 | with: 61 | context: . 62 | load: true 63 | no-cache: true 64 | pull: true 65 | tags: ${{ env.TEST_TAG }} 66 | labels: ${{ steps.meta.outputs.labels }} 67 | build-args: | 68 | BASE_IMAGE_TAG=${{ env.BASE_IMAGE_TAG }} 69 | 70 | - name: Build test stack 71 | env: 72 | IMAGE_NAME: ${{ env.TEST_TAG }} 73 | run: docker compose up --build --no-start 74 | 75 | - name: Run tests 76 | env: 77 | IMAGE_NAME: ${{ env.TEST_TAG }} 78 | run: docker compose start sut 79 | 80 | - name: Log in to container registry 81 | if: ${{ github.event_name != 'pull_request' }} 82 | uses: docker/login-action@v3 83 | with: 84 | registry: ${{ env.REGISTRY }} 85 | username: ${{ github.actor }} 86 | password: ${{ secrets.GITHUB_TOKEN }} 87 | 88 | - name: Push to registry 89 | if: ${{ github.event_name != 'pull_request' }} 90 | uses: docker/build-push-action@v6 91 | with: 92 | context: . 
93 | push: true 94 | tags: ${{ steps.meta.outputs.tags }} 95 | labels: ${{ steps.meta.outputs.labels }} 96 | build-args: | 97 | BASE_IMAGE_TAG=${{ env.BASE_IMAGE_TAG }} 98 | 99 | - name: Login to Docker Hub 100 | if: ${{ github.event_name != 'pull_request' }} 101 | uses: docker/login-action@v3 102 | with: 103 | username: ${{ secrets.DOCKERHUB_USERNAME }} 104 | password: ${{ secrets.DOCKERHUB_TOKEN }} 105 | 106 | - name: Push to Docker Hub 107 | if: ${{ github.event_name != 'pull_request' }} 108 | uses: akhilerm/tag-push-action@v2.2.0 109 | with: 110 | src: ${{ steps.meta.outputs.tags }} 111 | dst: | 112 | docker.io/${{ github.repository }}:${{ env.BASE_IMAGE_TAG }} 113 | 114 | - name: Push latest tag to both registries 115 | if: ${{ (github.event_name != 'pull_request') && (env.BASE_IMAGE_TAG == env.LATEST_TAG) }} 116 | uses: akhilerm/tag-push-action@v2.2.0 117 | with: 118 | src: ${{ steps.meta.outputs.tags }} 119 | dst: | 120 | ${{ env.REGISTRY }}/${{ github.repository }}:latest 121 | docker.io/${{ github.repository }}:latest 122 | 123 | - name: Update Docker Hub description 124 | if: ${{ github.event_name != 'pull_request' }} 125 | uses: peter-evans/dockerhub-description@v4 126 | with: 127 | username: ${{ secrets.DOCKERHUB_USERNAME }} 128 | password: ${{ secrets.DOCKERHUB_PASSWORD }} 129 | repository: ${{ github.repository }} 130 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | secrets 2 | .dccache 3 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BASE_IMAGE_TAG=latest 2 | 3 | FROM postgis/postgis:$BASE_IMAGE_TAG AS base-image 4 | 5 | 6 | 7 | 8 | FROM base-image AS basic-deps 9 | 10 | RUN apt-get update && \ 11 | apt-get install -y --no-install-recommends \ 12 | ca-certificates \ 13 | curl 14 | 15 | 16 | 17 | 18 | FROM basic-deps AS powa-scripts 19 | 20 | WORKDIR /tmp/powa 21 | RUN (curl --fail -LOJ "https://raw.githubusercontent.com/powa-team/powa-podman/master/powa-archivist/$PG_MAJOR/setup_powa-archivist.sh" || \ 22 | curl --fail -LOJ "https://raw.githubusercontent.com/powa-team/powa-podman/master/powa-archivist-git/setup_powa-archivist.sh") && \ 23 | (curl --fail -LOJ "https://raw.githubusercontent.com/powa-team/powa-podman/master/powa-archivist/$PG_MAJOR/install_all_powa_ext.sql" || \ 24 | curl --fail -LOJ "https://raw.githubusercontent.com/powa-team/powa-podman/master/powa-archivist-git/install_all_powa_ext.sql") 25 | 26 | 27 | 28 | 29 | FROM basic-deps AS common-deps 30 | 31 | # /var/lib/apt/lists/ still has the indexes from parent stage, so there's no need to run apt-get update again. 32 | # (unless the parent stage cache is not invalidated...) 33 | RUN apt-get install -y --no-install-recommends \ 34 | build-essential \ 35 | cmake \ 36 | postgresql-server-dev-$PG_MAJOR 37 | 38 | 39 | 40 | 41 | FROM common-deps AS build-timescaledb 42 | 43 | WORKDIR /tmp/timescaledb 44 | RUN apt-get install -y --no-install-recommends libkrb5-dev && \ 45 | URL_END=$(case "$PG_MAJOR" in ("12") echo "tag/2.11.2";; ("13") echo "tag/2.15.3";; (*) echo "latest";; esac) && \ 46 | ASSET_NAME=$(basename $(curl -LIs -o /dev/null -w %{url_effective} https://github.com/timescale/timescaledb/releases/${URL_END})) && \ 47 | curl --fail -L "https://github.com/timescale/timescaledb/archive/${ASSET_NAME}.tar.gz" | tar -zx --strip-components=1 -C . 
&& \ 48 | ./bootstrap 49 | WORKDIR /tmp/timescaledb/build 50 | RUN make -j$(nproc) && \ 51 | make install 52 | 53 | 54 | 55 | 56 | FROM common-deps AS pgxn 57 | 58 | RUN apt-get install -y --no-install-recommends pgxnclient && \ 59 | pgxn install --verbose ddlx && \ 60 | pgxn install --verbose json_accessors && \ 61 | pgxn install --verbose parray_gin && \ 62 | pgxn install --verbose permuteseq && \ 63 | pgxn install --verbose pg_jobmon && \ 64 | pgxn install --verbose pg_rowalesce && \ 65 | pgxn install --verbose pg_uuidv7 && \ 66 | pgxn install --verbose pg_xenophile && \ 67 | pgxn install --verbose pg_xxhash && \ 68 | pgxn install --verbose pgmq && \ 69 | pgxn install --verbose pgsql_tweaks && \ 70 | pgxn install --verbose temporal_tables 71 | 72 | 73 | 74 | 75 | FROM common-deps AS build-pguint 76 | 77 | WORKDIR /tmp/pguint 78 | RUN ASSET_NAME=$(basename $(curl -LIs -o /dev/null -w %{url_effective} https://github.com/petere/pguint/releases/latest)) && \ 79 | curl --fail -L "https://github.com/petere/pguint/archive/${ASSET_NAME}.tar.gz" | tar -zx --strip-components=1 -C . && \ 80 | make && \ 81 | make install 82 | 83 | 84 | 85 | 86 | FROM common-deps AS build-sqlite_fdw 87 | 88 | WORKDIR /tmp/sqlite_fdw 89 | RUN apt-get install -y --no-install-recommends libsqlite3-dev && \ 90 | ASSET_NAME=$(basename $(curl -LIs -o /dev/null -w %{url_effective} https://github.com/pgspider/sqlite_fdw/releases/latest)) && \ 91 | curl --fail -L "https://github.com/pgspider/sqlite_fdw/archive/${ASSET_NAME}.tar.gz" | tar -zx --strip-components=1 -C . && \ 92 | make USE_PGXS=1 && \ 93 | make USE_PGXS=1 install 94 | 95 | 96 | 97 | 98 | FROM base-image AS final-stage 99 | 100 | RUN apt-get update && \ 101 | apt-get install -y --no-install-recommends \ 102 | # runtime requirement for using spatialite with sqlite_fdw 103 | libsqlite3-mod-spatialite \ 104 | pgagent \ 105 | postgresql-$PG_MAJOR-age \ 106 | postgresql-$PG_MAJOR-asn1oid \ 107 | postgresql-$PG_MAJOR-credcheck \ 108 | postgresql-$PG_MAJOR-cron \ 109 | postgresql-$PG_MAJOR-debversion \ 110 | postgresql-$PG_MAJOR-dirtyread \ 111 | postgresql-$PG_MAJOR-extra-window-functions \ 112 | postgresql-$PG_MAJOR-first-last-agg \ 113 | postgresql-$PG_MAJOR-h3 \ 114 | postgresql-$PG_MAJOR-hll \ 115 | postgresql-$PG_MAJOR-hypopg \ 116 | postgresql-$PG_MAJOR-icu-ext \ 117 | postgresql-$PG_MAJOR-ip4r \ 118 | postgresql-$PG_MAJOR-jsquery \ 119 | postgresql-$PG_MAJOR-mobilitydb \ 120 | postgresql-$PG_MAJOR-mysql-fdw \ 121 | postgresql-$PG_MAJOR-numeral \ 122 | postgresql-$PG_MAJOR-ogr-fdw \ 123 | postgresql-$PG_MAJOR-oracle-fdw \ 124 | postgresql-$PG_MAJOR-orafce \ 125 | postgresql-$PG_MAJOR-partman \ 126 | postgresql-$PG_MAJOR-periods \ 127 | postgresql-$PG_MAJOR-pg-fact-loader \ 128 | postgresql-$PG_MAJOR-pg-hint-plan \ 129 | postgresql-$PG_MAJOR-pg-permissions \ 130 | postgresql-$PG_MAJOR-pg-qualstats \ 131 | postgresql-$PG_MAJOR-pg-rrule \ 132 | postgresql-$PG_MAJOR-pg-stat-kcache \ 133 | postgresql-$PG_MAJOR-pg-track-settings \ 134 | postgresql-$PG_MAJOR-pg-wait-sampling \ 135 | postgresql-$PG_MAJOR-pgaudit \ 136 | postgresql-$PG_MAJOR-pgauditlogtofile \ 137 | postgresql-$PG_MAJOR-pgfincore \ 138 | postgresql-$PG_MAJOR-pgl-ddl-deploy \ 139 | postgresql-$PG_MAJOR-pglogical \ 140 | postgresql-$PG_MAJOR-pglogical-ticker \ 141 | postgresql-$PG_MAJOR-pgmemcache \ 142 | postgresql-$PG_MAJOR-pgmp \ 143 | postgresql-$PG_MAJOR-pgpcre \ 144 | postgresql-$PG_MAJOR-pgq-node \ 145 | postgresql-$PG_MAJOR-pgrouting \ 146 | postgresql-$PG_MAJOR-pgrouting-scripts \ 147 | 
postgresql-$PG_MAJOR-pgsphere \ 148 | postgresql-$PG_MAJOR-pgtap \ 149 | postgresql-$PG_MAJOR-pgvector \ 150 | postgresql-$PG_MAJOR-pldebugger \ 151 | postgresql-$PG_MAJOR-plpgsql-check \ 152 | postgresql-$PG_MAJOR-plprofiler \ 153 | postgresql-$PG_MAJOR-plproxy \ 154 | postgresql-$PG_MAJOR-plsh \ 155 | postgresql-$PG_MAJOR-pointcloud \ 156 | postgresql-$PG_MAJOR-powa \ 157 | postgresql-$PG_MAJOR-prefix \ 158 | postgresql-$PG_MAJOR-prioritize \ 159 | postgresql-$PG_MAJOR-q3c \ 160 | postgresql-$PG_MAJOR-rational \ 161 | postgresql-$PG_MAJOR-repack \ 162 | postgresql-$PG_MAJOR-roaringbitmap \ 163 | postgresql-$PG_MAJOR-rum \ 164 | postgresql-$PG_MAJOR-semver \ 165 | postgresql-$PG_MAJOR-set-user \ 166 | postgresql-$PG_MAJOR-show-plans \ 167 | postgresql-$PG_MAJOR-similarity \ 168 | postgresql-$PG_MAJOR-squeeze \ 169 | postgresql-$PG_MAJOR-tablelog \ 170 | postgresql-$PG_MAJOR-tdigest \ 171 | postgresql-$PG_MAJOR-tds-fdw \ 172 | postgresql-$PG_MAJOR-toastinfo \ 173 | postgresql-$PG_MAJOR-unit \ 174 | postgresql-$PG_MAJOR-wal2json \ 175 | postgresql-plperl-$PG_MAJOR \ 176 | postgresql-plpython3-$PG_MAJOR && \ 177 | if [ "$PG_MAJOR" -ge 14 ]; then \ 178 | apt-get install -y --no-install-recommends postgresql-$PG_MAJOR-pgfaceting; \ 179 | fi && \ 180 | apt-get purge -y --auto-remove && \ 181 | rm -rf /var/lib/apt/lists/* 182 | 183 | COPY --from=powa-scripts \ 184 | /tmp/powa/setup_powa-archivist.sh \ 185 | /docker-entrypoint-initdb.d/setup_powa-archivist.sh 186 | COPY --from=powa-scripts \ 187 | /tmp/powa/install_all_powa_ext.sql \ 188 | /usr/local/src/install_all_powa_ext.sql 189 | 190 | COPY --from=pgxn \ 191 | /usr/share/postgresql/$PG_MAJOR/extension/ \ 192 | /usr/share/postgresql/$PG_MAJOR/extension/ 193 | COPY --from=pgxn \ 194 | /usr/lib/postgresql/$PG_MAJOR/lib \ 195 | /usr/lib/postgresql/$PG_MAJOR/lib 196 | 197 | COPY --from=build-timescaledb \ 198 | /usr/share/postgresql/$PG_MAJOR/extension/timescaledb* \ 199 | /usr/share/postgresql/$PG_MAJOR/extension/ 200 | COPY --from=build-timescaledb \ 201 | /usr/lib/postgresql/$PG_MAJOR/lib/timescaledb* \ 202 | /usr/lib/postgresql/$PG_MAJOR/lib/ 203 | 204 | COPY --from=build-pguint \ 205 | /usr/share/postgresql/$PG_MAJOR/extension/uint* \ 206 | /usr/share/postgresql/$PG_MAJOR/extension/ 207 | COPY --from=build-pguint \ 208 | /usr/lib/postgresql/$PG_MAJOR/lib/uint* \ 209 | /usr/lib/postgresql/$PG_MAJOR/lib/ 210 | 211 | COPY --from=build-sqlite_fdw \ 212 | /usr/share/postgresql/$PG_MAJOR/extension/sqlite_fdw* \ 213 | /usr/share/postgresql/$PG_MAJOR/extension/ 214 | COPY --from=build-sqlite_fdw \ 215 | /usr/lib/postgresql/$PG_MAJOR/lib/bitcode/sqlite_fdw.index.bc \ 216 | /usr/lib/postgresql/$PG_MAJOR/lib/bitcode/sqlite_fdw.index.bc 217 | COPY --from=build-sqlite_fdw \ 218 | /usr/lib/postgresql/$PG_MAJOR/lib/bitcode/sqlite_fdw \ 219 | /usr/lib/postgresql/$PG_MAJOR/lib/bitcode/sqlite_fdw 220 | COPY --from=build-sqlite_fdw \ 221 | /usr/lib/postgresql/$PG_MAJOR/lib/sqlite_fdw.so \ 222 | /usr/lib/postgresql/$PG_MAJOR/lib/sqlite_fdw.so 223 | 224 | COPY ./conf.sh /docker-entrypoint-initdb.d/z_conf.sh 225 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Ivan Donisete Lonel 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including 
without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | #### PostgreSQL image based on [postgis/postgis](https://hub.docker.com/r/postgis/postgis), with quite a few added extensions 2 | 3 | [![ivanlonel/postgis-with-extensions][docker-pulls-image]][docker-hub-url] 4 | [![ivanlonel/postgis-with-extensions][github-last-commit-image]][github-url] 5 | [![ivanlonel/postgis-with-extensions][github-workflow-status-image]][github-url] 6 | 7 | Tag labels follow the pattern `X-Y.Z`, where `X` is the *major* Postgres version (starting from version 13) and `Y.Z` is the *major.minor* PostGIS version. 8 | 9 | The `latest` tag currently corresponds to `17-3.5`. 10 | 11 | ## Usage 12 | 13 | To run a basic container serving a Postgres database with all of the extensions listed below available: 14 | 15 | ```bash 16 | docker run -e POSTGRES_PASSWORD=mysecretpassword -d ivanlonel/postgis-with-extensions 17 | ``` 18 | 19 | [Here](https://github.com/ivanlonel/postgis-with-extensions/tree/master/compose_example) is a sample docker-compose stack definition, which includes a [powa-web](https://hub.docker.com/r/powateam/powa-web) container and a [pgadmin](https://hub.docker.com/r/dpage/pgadmin4) container. The Postgres container is built from a Dockerfile that extends this image by running `localedef`, ensuring Postgres uses the locale specified in docker-compose.yml. 20 | 21 | For more detailed instructions on how to start and control your Postgres container, see the documentation for the `postgres` image [here](https://hub.docker.com/_/postgres/).
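A quick way to confirm the container is up and the bundled extensions are visible is to query `pg_available_extensions` from inside it. This is only a minimal sketch, not a script from this repository; it assumes the container was started with the `docker run` command above and uses the default `postgres` superuser, so adjust names and credentials to your setup.

```bash
# Find the container started from this image (assumes a single such container is running).
CONTAINER_ID=$(docker ps -q --filter ancestor=ivanlonel/postgis-with-extensions | head -n 1)

# Count the extensions PostgreSQL can see, then enable one of them as a smoke test.
docker exec -it "$CONTAINER_ID" psql -U postgres -c 'SELECT count(*) FROM pg_available_extensions;'
docker exec -it "$CONTAINER_ID" psql -U postgres -c 'CREATE EXTENSION IF NOT EXISTS postgis;'
```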
22 | 23 | ## Available extensions 24 | 25 | - [age](https://github.com/apache/age) 26 | - [asn1oid](https://github.com/df7cb/pgsql-asn1oid) 27 | - [credcheck](https://github.com/MigOpsRepos/credcheck) 28 | - [ddlx](https://github.com/lacanoid/pgddl) 29 | - [extra_window_functions](https://github.com/xocolatl/extra_window_functions) 30 | - [first_last_agg](https://github.com/wulczer/first_last_agg) 31 | - [h3-pg](https://github.com/zachasme/h3-pg) 32 | - [hll](https://github.com/citusdata/postgresql-hll) 33 | - [hypopg](https://github.com/HypoPG/hypopg) 34 | - [icu_ext](https://github.com/dverite/icu_ext) 35 | - [ip4r](https://github.com/RhodiumToad/ip4r) 36 | - [json_accessors](https://github.com/theirix/json_accessors) 37 | - [jsquery](https://github.com/postgrespro/jsquery) 38 | - [MobilityDB](https://github.com/MobilityDB/MobilityDB) 39 | - [mysql_fdw](https://github.com/EnterpriseDB/mysql_fdw) 40 | - [numeral](https://github.com/df7cb/postgresql-numeral) 41 | - [ogr_fdw](https://github.com/pramsey/pgsql-ogr-fdw) 42 | - [oracle_fdw](https://github.com/laurenz/oracle_fdw) 43 | - [orafce](https://github.com/orafce/orafce) 44 | - [parray_gin](https://github.com/theirix/parray_gin) 45 | - [periods](https://github.com/xocolatl/periods) 46 | - [permuteseq](https://github.com/dverite/permuteseq) 47 | - [pg_cron](https://github.com/citusdata/pg_cron) 48 | - [pg_dirtyread](https://github.com/df7cb/pg_dirtyread) 49 | - [pg_fact_loader](https://github.com/enova/pg_fact_loader) 50 | - [pg_hint_plan](https://github.com/ossc-db/pg_hint_plan) 51 | - [pg_jobmon](https://github.com/omniti-labs/pg_jobmon) 52 | - [pg_partman](https://github.com/pgpartman/pg_partman) 53 | - [pg_permissions](https://github.com/cybertec-postgresql/pg_permissions) 54 | - [pg_qualstats](https://github.com/powa-team/pg_qualstats) 55 | - [pg_rational](https://github.com/begriffs/pg_rational) 56 | - [pg_repack](https://github.com/reorg/pg_repack) 57 | - [pg_roaringbitmap](https://github.com/ChenHuajun/pg_roaringbitmap) 58 | - [pg_rowalesce](https://github.com/bigsmoke/pg_rowalesce) 59 | - [pg_rrule](https://github.com/petropavel13/pg_rrule) 60 | - [pg_show_plans](https://github.com/cybertec-postgresql/pg_show_plans) 61 | - [pg_similarity](https://github.com/eulerto/pg_similarity) 62 | - [pg_squeeze](https://github.com/cybertec-postgresql/pg_squeeze) 63 | - [pg_stat_kcache](https://github.com/powa-team/pg_stat_kcache) 64 | - [pg_track_settings](https://github.com/rjuju/pg_track_settings) 65 | - [pg_uuidv7](https://github.com/fboulnois/pg_uuidv7) 66 | - [pg_wait_sampling](https://github.com/postgrespro/pg_wait_sampling) 67 | - [pg_xenophile](https://github.com/bigsmoke/pg_xenophile) 68 | - [pg_xxhash](https://github.com/hatarist/pg_xxhash) 69 | - [pgagent](https://github.com/pgadmin-org/pgagent) 70 | - [pgaudit](https://github.com/pgaudit/pgaudit) 71 | - [pgauditlogtofile](https://github.com/fmbiete/pgauditlogtofile) 72 | - [pgfaceting](https://github.com/cybertec-postgresql/pgfaceting) 73 | - [pgfincore](https://github.com/klando/pgfincore) 74 | - [pgl_ddl_deploy](https://github.com/enova/pgl_ddl_deploy) 75 | - [pglogical](https://github.com/2ndQuadrant/pglogical) 76 | - [pglogical_ticker](https://github.com/enova/pglogical_ticker) 77 | - [pgmemcache](https://github.com/ohmu/pgmemcache) 78 | - [pgmp](https://github.com/dvarrazzo/pgmp) 79 | - [pgmq](https://github.com/tembo-io/pgmq) 80 | - [pgpcre](https://github.com/petere/pgpcre) 81 | - [pgq](https://github.com/pgq/pgq) 82 | - [pgq_node](https://github.com/pgq/pgq-node) 83 | - 
[pgrouting](https://github.com/pgRouting/pgrouting) 84 | - [pgsphere](https://github.com/postgrespro/pgsphere) 85 | - [pgsql_tweaks](https://github.com/sjstoelting/pgsql-tweaks) 86 | - [pgtap](https://github.com/theory/pgtap) 87 | - [pguint](https://github.com/petere/pguint) 88 | - [pgvector](https://github.com/pgvector/pgvector) 89 | - [PL/Perl](https://www.postgresql.org/docs/current/plperl.html) 90 | - [PL/Proxy](https://github.com/plproxy/plproxy) 91 | - [PL/Python](https://www.postgresql.org/docs/current/plpython.html) 92 | - [PL/sh](https://github.com/petere/plsh) 93 | - [pldebugger (pldbgapi)](https://github.com/EnterpriseDB/pldebugger) 94 | - [plpgsql_check](https://github.com/okbob/plpgsql_check) 95 | - [plProfiler](https://github.com/bigsql/plprofiler) 96 | - [pointcloud](https://github.com/pgpointcloud/pointcloud) 97 | - [postgis](https://github.com/postgis/postgis) 98 | - [postgresql-debversion](https://salsa.debian.org/postgresql/postgresql-debversion) 99 | - [powa (archivist)](https://github.com/powa-team/powa-archivist) 100 | - [prefix](https://github.com/dimitri/prefix) 101 | - [prioritize](https://github.com/schmiddy/pg_prioritize) 102 | - [q3c](https://github.com/segasai/q3c) 103 | - [rum](https://github.com/postgrespro/rum) 104 | - [semver](https://github.com/theory/pg-semver) 105 | - [set_user](https://github.com/pgaudit/set_user) 106 | - [sqlite_fdw](https://github.com/pgspider/sqlite_fdw) 107 | - [table_log](https://github.com/credativ/table_log) 108 | - [tdigest](https://github.com/tvondra/tdigest) 109 | - [tds_fdw](https://github.com/tds-fdw/tds_fdw) 110 | - [temporal_tables](https://github.com/arkhipov/temporal_tables) 111 | - [timescaledb](https://github.com/timescale/timescaledb) 112 | - [toastinfo](https://github.com/credativ/toastinfo) 113 | - [unit](https://github.com/df7cb/postgresql-unit) 114 | - [wal2json](https://github.com/eulerto/wal2json) 115 | 116 | [docker-hub-url]: https://hub.docker.com/r/ivanlonel/postgis-with-extensions/ 117 | [github-url]: https://github.com/ivanlonel/postgis-with-extensions/ 118 | [docker-pulls-image]: https://img.shields.io/docker/pulls/ivanlonel/postgis-with-extensions.svg?style=flat 119 | [github-last-commit-image]: https://img.shields.io/github/last-commit/ivanlonel/postgis-with-extensions.svg?style=flat 120 | [github-workflow-status-image]: https://img.shields.io/github/actions/workflow/status/ivanlonel/postgis-with-extensions/docker-publish.yml?branch=master 121 | -------------------------------------------------------------------------------- /compose_example/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BASE_IMAGE_TAG=latest 2 | 3 | FROM ivanlonel/postgis-with-extensions:$BASE_IMAGE_TAG 4 | 5 | ARG LOCALE 6 | ARG ENCODING=UTF-8 7 | 8 | # See "Locale Customization" in https://github.com/docker-library/docs/blob/master/postgres/README.md 9 | RUN localedef -i $LOCALE -c -f $ENCODING -A /usr/share/locale/locale.alias $LOCALE.$ENCODING 10 | -------------------------------------------------------------------------------- /compose_example/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | 3 | postgres: 4 | # image: ivanlonel/postgis-with-extensions:17-3.5 5 | build: 6 | context: . 
# Use image from Dockerfile in current dir 7 | args: 8 | - BASE_IMAGE_TAG=17-3.5 9 | - LOCALE=pt_BR 10 | - ENCODING=UTF-8 11 | command: 12 | - postgres 13 | - -c 14 | - logging_collector=on 15 | - -c 16 | - log_filename=postgresql-%a.log 17 | - -c 18 | - log_rotation_age=1440 19 | - -c 20 | - log_truncate_on_rotation=on 21 | - -c 22 | - "datestyle=ISO, DMY" 23 | - -c 24 | - effective_cache_size=2048MB 25 | - -c 26 | - shared_buffers=256MB 27 | - -c 28 | - work_mem=8MB 29 | - -c 30 | - maintenance_work_mem=256MB 31 | - -c 32 | - track_counts=on 33 | - -c 34 | - autovacuum=on 35 | restart: always 36 | ports: 37 | - "5432:5432" 38 | shm_size: 2gb 39 | stop_grace_period: 2m30s 40 | environment: 41 | # LC_COLLATE=C makes string comparison (and operations that depend on it, like sorting) faster, 42 | # because it's just byte-to-byte comparison (no complex locale rules) 43 | # LC_CTYPE=C would make Postgres features that use ctype.h 44 | # (e.g. upper(), lower(), initcap(), ILIKE, citext) work as expected only for 45 | # characters in the US-ASCII range (up to codepoint 0x7F in Unicode). 46 | - LANG=pt_BR.utf8 47 | - LC_COLLATE=C 48 | - POSTGRES_PASSWORD_FILE=/run/secrets/postgres-passwd 49 | - TZ=America/Sao_Paulo 50 | volumes: 51 | - postgres_data:/var/lib/postgresql/data 52 | networks: 53 | - pgnetwork 54 | secrets: 55 | - postgres-passwd 56 | 57 | powa-web: 58 | image: powateam/powa-web 59 | restart: always 60 | ports: 61 | - "8888:8888" 62 | environment: 63 | - TZ=America/Sao_Paulo 64 | volumes: 65 | - ./powa-web.conf:/etc/powa-web.conf 66 | networks: 67 | - pgnetwork 68 | depends_on: 69 | - postgres 70 | 71 | pgadmin: 72 | image: dpage/pgadmin4:latest 73 | restart: always 74 | ports: 75 | - "80:80" 76 | entrypoint: /pgadmin_entrypoint.sh 77 | environment: 78 | - PGADMIN_DEFAULT_EMAIL_FILE=/run/secrets/pgadmin-email 79 | - PGADMIN_DEFAULT_PASSWORD_FILE=/run/secrets/pgadmin-passwd 80 | - TZ=America/Sao_Paulo 81 | volumes: 82 | - pgadmin4_data:/var/lib/pgadmin 83 | - ./pgadmin_entrypoint.sh:/pgadmin_entrypoint.sh 84 | - ./servers.json:/pgadmin4/servers.json 85 | networks: 86 | - pgnetwork 87 | depends_on: 88 | - postgres 89 | secrets: 90 | - pgadmin-email 91 | - pgadmin-passwd 92 | 93 | volumes: 94 | postgres_data: 95 | pgadmin4_data: 96 | 97 | secrets: 98 | postgres-passwd: 99 | file: secrets/postgres-passwd.txt 100 | pgadmin-email: 101 | file: secrets/pgadmin-email.txt 102 | pgadmin-passwd: 103 | file: secrets/pgadmin-passwd.txt 104 | 105 | networks: 106 | pgnetwork: 107 | driver: bridge 108 | ipam: 109 | # defining static IP range for network, so it can be referenced in pg_hba.conf 110 | driver: default 111 | config: 112 | - subnet: 172.24.240.0/24 113 | -------------------------------------------------------------------------------- /compose_example/pgadmin_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -Eeuo pipefail 3 | 4 | # Currently, the original entrypoint script only reads credentials from the environment.
5 | # This alternative entrypoint allows reading them from files. 6 | 7 | file_env() { 8 | local var="$1" 9 | local fileVar="${var}_FILE" 10 | local def="${2:-}" 11 | 12 | if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then 13 | echo >&2 "error: both $var and $fileVar are set (but are exclusive)" 14 | exit 1 15 | fi 16 | local val="$def" 17 | if [ "${!var:-}" ]; then 18 | val="${!var}" 19 | elif [ "${!fileVar:-}" ]; then 20 | val="$(< "${!fileVar}")" 21 | fi 22 | export "$var"="$val" 23 | unset "$fileVar" 24 | } 25 | 26 | file_env "PGADMIN_DEFAULT_EMAIL" 27 | file_env "PGADMIN_DEFAULT_PASSWORD" 28 | 29 | source /entrypoint.sh 30 | -------------------------------------------------------------------------------- /compose_example/powa-web.conf: -------------------------------------------------------------------------------- 1 | servers={ 2 | '1': { 3 | 'host': 'localhost', 4 | 'port': '5432', 5 | 'database': 'powa' 6 | } 7 | } 8 | cookie_secret="I didn't understand how exactly this is used, but it's required." 9 | -------------------------------------------------------------------------------- /compose_example/servers.json: -------------------------------------------------------------------------------- 1 | { 2 | "Servers": { 3 | "1": { 4 | "Name": "PostgreSQL", 5 | "Group": "Servers", 6 | "Host": "postgres", 7 | "Port": 5432, 8 | "MaintenanceDB": "postgres", 9 | "Username": "postgres", 10 | "SSLMode": "prefer" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /conf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -Eeuo pipefail 3 | 4 | # The inner sed finds the line number of the last match for the regex ^\s*shared_preload_libraries\s*= 5 | # The outer sed, operating on that line alone, extracts the text between single quotes after the equals sign 6 | PREVIOUS_PRELOAD_LIBRARIES=$(sed -nE "$(sed -n '/^\s*shared_preload_libraries\s*=/ =' ${PGDATA}/postgresql.conf | tail -n 1) s/^\s*shared_preload_libraries\s*=\s*'(.*?)'/\1/p" ${PGDATA}/postgresql.conf) 7 | 8 | NEW_PRELOAD_LIBRARIES="credcheck,pg_cron,pg_partman_bgw,pg_show_plans,pg_similarity,pg_squeeze,pg_stat_statements,pg_stat_kcache,pg_wait_sampling,pgaudit,pgauditlogtofile,pglogical,pglogical_ticker,pgmemcache,plprofiler,plugin_debugger,postgis-3,set_user,timescaledb" 9 | 10 | cat >> ${PGDATA}/postgresql.conf << EOT 11 | listen_addresses = '*' 12 | 13 | shared_preload_libraries = '$(echo "$PREVIOUS_PRELOAD_LIBRARIES,$NEW_PRELOAD_LIBRARIES" | sed 's/^,//')' 14 | 15 | # pg_cron 16 | cron.database_name = '${PG_CRON_DB:-${POSTGRES_DB:-${POSTGRES_USER:-postgres}}}' 17 | 18 | # pg_partman 19 | pg_partman_bgw.dbname = '${PG_PARTMAN_DB:-${POSTGRES_DB:-${POSTGRES_USER:-postgres}}}' 20 | 21 | # MobilityDB recommendation 22 | max_locks_per_transaction = 128 23 | timescaledb.telemetry_level = off 24 | 25 | # pglogical 26 | wal_level = 'logical' 27 | max_worker_processes = 10 # one per database needed on provider node 28 | # one per node needed on subscriber node 29 | max_replication_slots = 10 # one per node needed on provider node 30 | max_wal_senders = 10 # one per node needed on provider node 31 | track_commit_timestamp = on # needed for last/first update wins conflict resolution 32 | EOT 33 | -------------------------------------------------------------------------------- /docker-compose.test.yml: -------------------------------------------------------------------------------- 1 | services: 2 | postgres: 3 | image: $IMAGE_NAME 4 |
environment: 5 | - POSTGRES_PASSWORD=postgres 6 | - TZ=America/Sao_Paulo 7 | - PG_CRON_DB=test 8 | restart: unless-stopped 9 | networks: 10 | - pgtest_network 11 | ports: 12 | - "5433:5432" 13 | 14 | sut: 15 | build: ./tests 16 | command: 17 | - "/tests/wait-for-postgres.sh" 18 | - "psql" 19 | - "postgresql://postgres:postgres@postgres:5432/postgres" 20 | - "-a" 21 | - "-f" 22 | - "/tests/create_extensions.sql" 23 | restart: 'no' 24 | depends_on: 25 | - postgres 26 | networks: 27 | - pgtest_network 28 | 29 | networks: 30 | pgtest_network: 31 | driver: bridge 32 | -------------------------------------------------------------------------------- /tests/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM governmentpaas/psql 2 | 3 | COPY . /tests 4 | RUN chmod +x /tests/wait-for-postgres.sh 5 | -------------------------------------------------------------------------------- /tests/create_extensions.sql: -------------------------------------------------------------------------------- 1 | \set VERBOSITY verbose 2 | \set ON_ERROR_STOP on 3 | 4 | CREATE DATABASE test; 5 | \c test 6 | 7 | 8 | SELECT version(); 9 | SELECT * FROM pg_available_extensions ORDER BY name; 10 | 11 | 12 | -- https://github.com/postgis/postgis 13 | CREATE EXTENSION IF NOT EXISTS address_standardizer; 14 | CREATE EXTENSION IF NOT EXISTS postgis; 15 | CREATE EXTENSION IF NOT EXISTS postgis_topology; 16 | CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder CASCADE; -- requires fuzzystrmatch 17 | CREATE EXTENSION IF NOT EXISTS postgis_raster; 18 | CREATE EXTENSION IF NOT EXISTS postgis_sfcgal; 19 | 20 | SELECT PostGIS_Full_Version(); 21 | 22 | 23 | -- https://github.com/apache/age 24 | CREATE EXTENSION age; 25 | LOAD 'age'; 26 | 27 | BEGIN; 28 | 29 | SET search_path = ag_catalog, "$user", public; 30 | 31 | SELECT create_graph('graph_name'); 32 | 33 | -- create vertices 34 | SELECT * FROM cypher('graph_name', $$CREATE (:label {property:"Node A"})$$) as (v agtype); 35 | SELECT * FROM cypher('graph_name', $$CREATE (:label {property:"Node B"})$$) as (v agtype); 36 | 37 | -- create an edge between two nodes and set its properties 38 | SELECT * FROM cypher( 39 | 'graph_name', 40 | $$ 41 | MATCH (a:label), (b:label) 42 | WHERE a.property = 'Node A' AND b.property = 'Node B' 43 | CREATE (a)-[e:RELTYPE {property:a.property + '<->' + b.property}]->(b) 44 | RETURN e 45 | $$ 46 | ) as (e agtype); 47 | 48 | -- query the connected nodes 49 | SELECT * from cypher( 50 | 'graph_name', 51 | $$ 52 | MATCH (V)-[R]-(V2) 53 | RETURN V,R,V2 54 | $$ 55 | ) as (V agtype, R agtype, V2 agtype); 56 | 57 | ROLLBACK; 58 | 59 | 60 | -- https://github.com/df7cb/pgsql-asn1oid 61 | CREATE EXTENSION IF NOT EXISTS asn1oid; 62 | SELECT '1.3.6.1.4.1'::asn1oid; 63 | 64 | 65 | -- https://github.com/MigOpsRepos/credcheck 66 | \set ON_ERROR_STOP off 67 | 68 | SET credcheck.username_min_length = 4; 69 | CREATE USER abc WITH PASSWORD 'pass'; 70 | 71 | SET credcheck.password_min_special = 1; 72 | CREATE USER abcd WITH PASSWORD 'pass'; 73 | 74 | SET credcheck.password_contain_username = on; 75 | SET credcheck.password_ignore_case = on; 76 | CREATE USER abcd$ WITH PASSWORD 'ABCD$xyz'; 77 | 78 | \set ON_ERROR_STOP on 79 | 80 | 81 | -- https://github.com/lacanoid/pgddl 82 | CREATE EXTENSION IF NOT EXISTS ddlx SCHEMA pg_catalog; 83 | SELECT ddlx_create(oid) FROM pg_database WHERE datname=current_database(); 84 | 85 | 86 | -- https://salsa.debian.org/postgresql/postgresql-debversion 87 | CREATE EXTENSION IF NOT EXISTS 
debversion; 88 | 89 | SELECT v::debversion 90 | FROM unnest(ARRAY[ 91 | '4.1.5-2', 92 | '4.0.2-1', 93 | '4.1.4-1', 94 | '4.1.5-1', 95 | '4.2.0-1', 96 | '4.1.4-2', 97 | '4.1.5-2.01', 98 | '4.1.99-a2-1', 99 | '5.2.1-2', 100 | '5.0.0-3', 101 | '5.1.98.2-2', 102 | '3.1.4-1', 103 | '5.2.3-1', 104 | '0:5.2.2-1', 105 | '0:5.2.4-1', 106 | '1:3.2.3-1' 107 | ]) AS v; 108 | 109 | 110 | -- https://github.com/xocolatl/extra_window_functions 111 | CREATE EXTENSION IF NOT EXISTS extra_window_functions; 112 | 113 | BEGIN; 114 | 115 | CREATE TEMP TABLE things ( 116 | part integer NOT NULL, 117 | ord integer NOT NULL, 118 | val integer 119 | ); 120 | 121 | COPY things FROM stdin; 122 | 1 1 64664 123 | 1 2 8779 124 | 1 3 14005 125 | 1 4 57699 126 | 1 5 98842 127 | 1 6 88563 128 | 1 7 70453 129 | 1 8 82824 130 | 1 9 62453 131 | 2 1 \N 132 | 2 2 51714 133 | 2 3 17096 134 | 2 4 41605 135 | 2 5 15366 136 | 2 6 87359 137 | 2 7 98990 138 | 2 8 34982 139 | 2 9 3343 140 | 3 1 21903 141 | 3 2 24605 142 | 3 3 6242 143 | 3 4 24947 144 | 3 5 79535 145 | 3 6 66903 146 | 3 7 42269 147 | 3 8 31143 148 | 3 9 \N 149 | 4 1 \N 150 | 4 2 49723 151 | 4 3 23958 152 | 4 4 80796 153 | 4 5 \N 154 | 4 6 41066 155 | 4 7 72991 156 | 4 8 33734 157 | 4 9 \N 158 | 5 1 \N 159 | 5 2 \N 160 | 5 3 \N 161 | 5 4 \N 162 | 5 5 \N 163 | 5 6 \N 164 | 5 7 \N 165 | 5 8 \N 166 | 5 9 \N 167 | \. 168 | 169 | /* FLIP_FLOP */ 170 | SELECT part, ord, val, 171 | flip_flop(val % 2 = 0) OVER w AS flip_flop_1, 172 | flip_flop(val % 2 = 0, val % 2 = 1) OVER w AS flip_flop_2 173 | FROM things 174 | WINDOW w AS (PARTITION BY part ORDER BY ord ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) 175 | ORDER BY part, ord; 176 | 177 | /* LAG */ 178 | SELECT part, ord, val, 179 | lag(val) OVER w AS lag, 180 | lag_ignore_nulls(val) OVER w AS lag_in, 181 | lag_ignore_nulls(val, 2) OVER w AS lag_in_off, 182 | lag_ignore_nulls(val, 2, -9999999) OVER w AS lag_in_off_d 183 | FROM things 184 | WINDOW w AS (PARTITION BY part ORDER BY ord ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) 185 | ORDER BY part, ord; 186 | 187 | /* LEAD */ 188 | SELECT part, ord, val, 189 | lead(val) OVER w AS lead, 190 | lead_ignore_nulls(val) OVER w AS lead_in, 191 | lead_ignore_nulls(val, 2) OVER w AS lead_in_off, 192 | lead_ignore_nulls(val, 2, 9999999) OVER w AS lead_in_off_d 193 | FROM things 194 | WINDOW w AS (PARTITION BY part ORDER BY ord ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) 195 | ORDER BY part, ord; 196 | 197 | /* FIRST_VALUE */ 198 | SELECT part, ord, val, 199 | first_value(val) OVER w AS fv, 200 | first_value_ignore_nulls(val) OVER w AS fv_in, 201 | first_value_ignore_nulls(val, 9999999) OVER w AS fv_in_d 202 | FROM things 203 | WINDOW w AS (PARTITION BY part ORDER BY ord ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) 204 | ORDER BY part, ord; 205 | 206 | /* LAST_VALUE */ 207 | SELECT part, ord, val, 208 | last_value(val) OVER w AS lv, 209 | last_value_ignore_nulls(val) OVER w AS lv_in, 210 | last_value_ignore_nulls(val, -9999999) OVER w AS lv_in_d 211 | FROM things 212 | WINDOW w AS (PARTITION BY part ORDER BY ord ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) 213 | ORDER BY part, ord; 214 | 215 | /* NTH_VALUE */ 216 | SELECT part, ord, val, 217 | nth_value(val, 3) OVER w AS nth, 218 | nth_value_ignore_nulls(val, 3) OVER w AS nth_in 219 | FROM things 220 | WINDOW w AS (PARTITION BY part ORDER BY ord ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) 221 | ORDER BY part, ord; 222 | 223 | SELECT part, ord, val, 224 | nth_value(val, 3) OVER w AS nth, 225 | nth_value_from_last(val, 3) OVER w AS nth_fl 226 | FROM things 
227 | WINDOW w AS (PARTITION BY part ORDER BY ord ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) 228 | ORDER BY part, ord; 229 | 230 | SELECT part, ord, val, 231 | nth_value_from_last(val, 3) OVER w AS nth_fl, 232 | nth_value_from_last_ignore_nulls(val, 3) OVER w AS nth_fl_in 233 | FROM things 234 | WINDOW w AS (PARTITION BY part ORDER BY ord ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) 235 | ORDER BY part, ord; 236 | 237 | ROLLBACK; 238 | 239 | 240 | -- https://github.com/wulczer/first_last_agg 241 | CREATE EXTENSION IF NOT EXISTS first_last_agg; 242 | SELECT last(x order by y) FROM (VALUES (1, 3), (2, 1), (3, 2)) AS v(x, y); 243 | SELECT first(x order by y) FROM (VALUES (1, 3), (2, 1), (3, 2)) AS v(x, y); 244 | 245 | 246 | -- https://github.com/zachasme/h3-pg 247 | CREATE EXTENSION IF NOT EXISTS h3; 248 | CREATE EXTENSION IF NOT EXISTS h3_postgis; 249 | SELECT h3_lat_lng_to_cell(ST_Point(-46.629055, -23.559378), 6); 250 | SELECT ST_AsText(h3_cell_to_boundary_geometry('86a8100c7ffffff')); 251 | 252 | 253 | -- https://github.com/citusdata/postgresql-hll 254 | CREATE EXTENSION IF NOT EXISTS hll; 255 | SELECT hll_empty(); 256 | 257 | 258 | -- https://github.com/HypoPG/hypopg 259 | CREATE EXTENSION IF NOT EXISTS hypopg; 260 | 261 | BEGIN; 262 | 263 | CREATE TABLE hypo AS SELECT id, 'line ' || id AS val FROM generate_series(1,10000) id; 264 | EXPLAIN SELECT * FROM hypo WHERE id = 1; 265 | 266 | SELECT * FROM hypopg_create_index('CREATE INDEX ON hypo (id)'); 267 | EXPLAIN SELECT * FROM hypo WHERE id = 1; 268 | 269 | ROLLBACK; 270 | 271 | 272 | -- https://github.com/dverite/icu_ext 273 | CREATE EXTENSION IF NOT EXISTS icu_ext; 274 | SELECT * FROM icu_locales_list() where name like 'pt%'; 275 | 276 | 277 | -- https://github.com/RhodiumToad/ip4r 278 | CREATE EXTENSION IF NOT EXISTS ip4r; 279 | SELECT ipaddress '255.255.255.255' / 31; 280 | SELECT ipaddress 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff' / 127; 281 | 282 | 283 | -- https://github.com/theirix/json_accessors 284 | CREATE EXTENSION IF NOT EXISTS json_accessors; 285 | 286 | select json_get_text('{"foo":"qq", "bar": true}', 'foo'); 287 | select json_get_boolean('{"foo":"qq", "bar": true}', 'bar'); 288 | select json_get_int('{"baz": 42, "boo": 42.424242}', 'baz'); 289 | select json_get_bigint('{"baz": 9223372036854, "boo": 42.424242}', 'baz'); 290 | select json_get_numeric('{"baz": 42, "boo": 42.424242}', 'boo'); 291 | select json_get_text('{"foo":"qq", "bar": true}', 'noneofthese') is null; 292 | select json_get_text('{"foo":null, "bar": true}', 'foo') is null; 293 | select json_get_timestamp('{"foo":"qq", "bar": "2009-12-01 01:23:45"}', 'bar'); 294 | 295 | select json_array_to_text_array('["foo", "bar"]'); 296 | select json_array_to_boolean_array('[true, false]'); 297 | select json_array_to_int_array('[42, 43]'); 298 | select json_array_to_bigint_array('[42, 9223372036854]'); 299 | select json_array_to_numeric_array('[42.4242,43.4343]'); 300 | select json_array_to_timestamp_array('["2009-12-01 01:23:45", "2012-12-01 01:23:45"]'); 301 | select json_get_text_array('{"foo":"qq", "bar": ["baz1", "baz2", "baz3"]}', 'bar'); 302 | 303 | select json_get_boolean_array('{"foo":"qq", "bar": [true, false]}', 'bar'); 304 | select json_get_int_array('{"foo":"qq", "bar": [42, 43]}', 'bar'); 305 | select json_get_bigint_array('{"foo":"qq", "bar": [42, 9223372036854]}', 'bar'); 306 | select json_get_numeric_array('{"foo":"qq", "bar": [42.4242,43.4343]}', 'bar'); 307 | select json_get_timestamp_array('{"foo":"qq", "bar": ["2009-12-01 01:23:45", "2012-12-01 
01:23:45"]}', 'bar'); 308 | 309 | select json_get_object('{"foo":"qq", "bar": ["baz1", "baz2", "baz3"]}', 'foo'); 310 | select json_get_object('{"foo":"qq", "bar": ["baz1", "baz2", "baz3"]}', 'bar'); 311 | select json_get_object('{"foo":"qq", "bar": {"baz1": "baz2"}}', 'bar'); 312 | select json_array_to_object_array('[{"foo":42}, {"bar":[]}]'); 313 | select json_get_object_keys('{"foo":"qq", "bar": ["baz1", "baz2", "baz3"]}'); 314 | 315 | 316 | -- https://github.com/postgrespro/jsquery 317 | CREATE EXTENSION IF NOT EXISTS jsquery; 318 | SELECT 319 | '{"x": true}' @@ 'x IS boolean'::jsquery, 320 | '{"x": 0.1}' @@ 'x IS numeric'::jsquery, 321 | '{"a": {"b": 1}}' @@ 'a IS object'::jsquery, 322 | '{"a": ["xxx"]}' @@ 'a IS array AND a.#: IS string'::jsquery, 323 | '["xxx"]' @@ '$ IS array'::jsquery, 324 | '{"points": [{"x": 1, "y": 2}, {"x": 3.9, "y": 0.5}]}' @@ 'points.#:(x IS numeric AND y IS numeric)'::jsquery; 325 | 326 | 327 | -- https://github.com/MobilityDB/MobilityDB 328 | BEGIN; -- both btree_gist (used by periods and powa) and MobilityDB create an operator <-> with the same argument types 329 | 330 | CREATE EXTENSION IF NOT EXISTS mobilitydb; 331 | 332 | SELECT bigintset '{1,2,3}'; 333 | SELECT asText(floatset '{1.12345678, 2.123456789}', 6); 334 | SELECT set(ARRAY [date '2000-01-01', '2000-01-02', '2000-01-03']); 335 | SELECT set(ARRAY [timestamptz '2000-01-01', '2000-01-02', '2000-01-03']); 336 | SELECT set(ARRAY[geometry 'Point(1 1)', 'Point(2 2)', 'Point(3 3)']); 337 | SELECT memSize(dateset '{2000-01-01, 2000-01-02, 2000-01-03}'); 338 | SELECT span(tstzset '{2000-01-01, 2000-01-02, 2000-01-03}'); 339 | SELECT shiftScale(intset '{1}', 4, 4); 340 | 341 | SELECT asText(floatspan '[1.12345678, 2.123456789]', 6); 342 | SELECT span(timestamptz '2000-01-01', '2000-01-02'); 343 | SELECT span(timestamptz '2000-01-01', '2000-01-01', true, true); 344 | SELECT range(datespan '[2000-01-01,2000-01-02)'); 345 | SELECT span(daterange'(2000-01-01,2000-01-03)'); 346 | SELECT span(date '2000-01-01'); 347 | SELECT date '2000-01-01'::datespan; 348 | SELECT range(tstzspan '[2000-01-01,2000-01-02)'); 349 | SELECT span(tstzrange'(2000-01-01,2000-01-02)'); 350 | SELECT span(timestamptz '2000-01-01'); 351 | SELECT timestamptz '2000-01-01'::tstzspan; 352 | SELECT intspan '[1,2]'; 353 | SELECT intspan '(1,2]'; 354 | 355 | SELECT bigintspanset '{[1,2),[3,4),[5,6)}'; 356 | SELECT spanset_cmp(datespanset '{[2000-01-01,2000-01-01]}', datespanset '{[2000-01-01,2000-01-02),[2000-01-03,2000-01-04),[2000-01-05,2000-01-06)}'); 357 | SELECT round(floatspanset '{[1.12345,2.12345),[3.12345,4.12345),[5.12345,6.12345)}', 2); 358 | SELECT shift(intspanset '{[1,2),[3,4),[5,6)}', 2); 359 | SELECT shiftScale(tstzspanset '{[2000-01-01,2000-01-02),(2000-01-03,2000-01-04),(2000-01-05,2000-01-06)}', '5 min', '1 hour'); 360 | 361 | ROLLBACK; 362 | 363 | 364 | -- https://github.com/EnterpriseDB/mysql_fdw 365 | CREATE EXTENSION IF NOT EXISTS mysql_fdw; 366 | 367 | BEGIN; 368 | 369 | CREATE SERVER mysql_server 370 | FOREIGN DATA WRAPPER mysql_fdw 371 | OPTIONS (host '127.0.0.1', port '3306'); 372 | 373 | CREATE FOREIGN TABLE mysql_table ( 374 | id integer, 375 | title text 376 | ) SERVER mysql_server OPTIONS (dbname 'db', table_name 'the_table'); 377 | 378 | ROLLBACK; 379 | 380 | 381 | -- https://github.com/df7cb/postgresql-numeral 382 | CREATE EXTENSION IF NOT EXISTS numeral; 383 | SELECT 'thirty'::numeral + 'twelve'::numeral as sum; 384 | 385 | 386 | -- https://github.com/pramsey/pgsql-ogr-fdw 387 | CREATE EXTENSION IF NOT EXISTS 
ogr_fdw; 388 | 389 | CREATE TABLE apostles ( 390 | fid integer primary key GENERATED ALWAYS AS IDENTITY, 391 | geom geometry(point, 4326), 392 | joined integer, 393 | name text, 394 | height numeric, 395 | born date, 396 | clock time, 397 | ts timestamp 398 | ); 399 | 400 | INSERT INTO apostles (name, geom, joined, height, born, clock, ts) VALUES 401 | ('Peter', 'SRID=4326;POINT(30.31 59.93)', 1, 1.6, '1912-01-10', '10:10:01', '1912-01-10 10:10:01'), 402 | ('Andrew', 'SRID=4326;POINT(-2.8 56.34)', 2, 1.8, '1911-02-11', '10:10:02', '1911-02-11 10:10:02'), 403 | ('James', 'SRID=4326;POINT(-79.23 42.1)', 3, 1.72, '1910-03-12', '10:10:03', '1910-03-12 10:10:03'), 404 | ('John', 'SRID=4326;POINT(13.2 47.35)', 4, 1.45, '1909-04-01', '10:10:04', '1909-04-01 10:10:04'), 405 | ('Philip', 'SRID=4326;POINT(-75.19 40.69)', 5, 1.65, '1908-05-02', '10:10:05', '1908-05-02 10:10:05'), 406 | ('Bartholomew', 'SRID=4326;POINT(-62 18)', 6, 1.69, '1907-06-03', '10:10:06', '1907-06-03 10:10:06'), 407 | ('Thomas', 'SRID=4326;POINT(-80.08 35.88)', 7, 1.68, '1906-07-04', '10:10:07', '1906-07-04 10:10:07'), 408 | ('Matthew', 'SRID=4326;POINT(-73.67 20.94)', 8, 1.65, '1905-08-05', '10:10:08', '1905-08-05 10:10:08'), 409 | ('James Alpheus', 'SRID=4326;POINT(-84.29 34.07)', 9, 1.78, '1904-09-06', '10:10:09', '1904-09-06 10:10:09'), 410 | ('Thaddaeus', 'SRID=4326;POINT(79.13 10.78)', 10, 1.88, '1903-10-07', '10:10:10', '1903-10-07 10:10:10'), 411 | ('Simon', 'SRID=4326;POINT(-85.97 41.75)', 11, 1.61, '1902-11-08', '10:10:11', '1902-11-08 10:10:11'), 412 | ('Judas Iscariot', 'SRID=4326;POINT(35.7 32.4)', 12, 1.71, '1901-12-09', '10:10:12', '1901-12-09 10:10:12'); 413 | 414 | CREATE SERVER wraparound 415 | FOREIGN DATA WRAPPER ogr_fdw 416 | OPTIONS (datasource 'Pg:dbname=test user=postgres', format 'PostgreSQL'); 417 | 418 | CREATE FOREIGN TABLE apostles_fdw ( 419 | fid integer, 420 | geom geometry(point, 4326), 421 | joined integer, 422 | name text, 423 | height numeric, 424 | born date, 425 | clock time, 426 | ts timestamp 427 | ) SERVER wraparound OPTIONS (layer 'apostles'); 428 | 429 | SELECT * FROM apostles_fdw; 430 | 431 | DROP TABLE apostles; 432 | DROP SERVER wraparound CASCADE; 433 | 434 | 435 | -- https://github.com/laurenz/oracle_fdw 436 | CREATE EXTENSION IF NOT EXISTS oracle_fdw; 437 | 438 | BEGIN; 439 | 440 | CREATE SERVER oradb 441 | FOREIGN DATA WRAPPER oracle_fdw 442 | OPTIONS (dbserver '//dbserver.mydomain.com:1521/ORADB'); 443 | 444 | CREATE FOREIGN TABLE oratab ( 445 | id integer OPTIONS (key 'true') NOT NULL, 446 | title text OPTIONS (strip_zeros 'true') 447 | ) SERVER oradb OPTIONS (schema 'ORAUSER', table 'ORATAB'); 448 | 449 | ROLLBACK; 450 | 451 | 452 | -- https://github.com/orafce/orafce 453 | CREATE EXTENSION IF NOT EXISTS orafce; 454 | SELECT oracle.add_months(oracle.date'2021-05-31 10:12:12', 1); 455 | 456 | 457 | -- https://github.com/theirix/parray_gin 458 | CREATE EXTENSION IF NOT EXISTS parray_gin; 459 | 460 | BEGIN; 461 | CREATE TABLE parray_gin_test_table(id integer GENERATED ALWAYS AS IDENTITY, val text[]); 462 | CREATE INDEX test_val_idx on parray_gin_test_table using gin (val parray_gin_ops); 463 | ROLLBACK; 464 | 465 | 466 | -- https://github.com/xocolatl/periods 467 | CREATE EXTENSION IF NOT EXISTS periods CASCADE; -- requires btree_gist 468 | SELECT * FROM periods.periods; 469 | 470 | 471 | -- https://github.com/dverite/permuteseq 472 | CREATE EXTENSION IF NOT EXISTS permuteseq; 473 | 474 | BEGIN; 475 | 476 | CREATE SEQUENCE s MINVALUE -10000 MAXVALUE 15000; 477 | 478 | \set 
secret_key 123456789012345 479 | 480 | SELECT permute_nextval('s'::regclass, :secret_key) FROM generate_series(-10000, -9990); 481 | SELECT reverse_permute('s'::regclass, -545, :secret_key); 482 | SELECT range_encrypt_element(91919191919, 1e10::bigint, 1e11::bigint, :secret_key); 483 | SELECT range_decrypt_element(83028080992, 1e10::bigint, 1e11::bigint, :secret_key); 484 | 485 | ROLLBACK; 486 | 487 | 488 | -- https://github.com/citusdata/pg_cron 489 | CREATE EXTENSION IF NOT EXISTS pg_cron; 490 | SELECT cron.schedule('nightly-vacuum', '0 3 * * *', 'VACUUM'); 491 | SELECT cron.unschedule('nightly-vacuum'); 492 | 493 | 494 | -- https://github.com/df7cb/pg_dirtyread 495 | CREATE EXTENSION IF NOT EXISTS pg_dirtyread; 496 | 497 | BEGIN; 498 | 499 | -- Create table and disable autovacuum 500 | CREATE TABLE foo (bar bigint, baz text); 501 | ALTER TABLE foo SET ( 502 | autovacuum_enabled = false, 503 | toast.autovacuum_enabled = false 504 | ); 505 | 506 | INSERT INTO foo VALUES (1, 'Test'), (2, 'New Test'); 507 | DELETE FROM foo WHERE bar = 1; 508 | 509 | SELECT * FROM pg_dirtyread('foo') as t(bar bigint, baz text); 510 | 511 | ROLLBACK; 512 | 513 | 514 | -- https://github.com/enova/pg_fact_loader 515 | CREATE EXTENSION IF NOT EXISTS pg_fact_loader; 516 | SELECT fact_loader.worker(); 517 | SELECT * FROM fact_loader.subscription(); 518 | SELECT * FROM fact_loader.subscription_rel(); 519 | 520 | 521 | -- https://github.com/ossc-db/pg_hint_plan 522 | LOAD 'pg_hint_plan'; 523 | 524 | BEGIN; 525 | 526 | CREATE TEMP TABLE t1 AS 527 | SELECT 3*id AS id, random() 528 | FROM generate_series(1, 200000) AS t(id); 529 | ALTER TABLE t1 ADD PRIMARY KEY (id); 530 | 531 | CREATE TEMP TABLE t2 AS 532 | SELECT id, random() 533 | FROM generate_series(1, 600000) AS t(id); 534 | ALTER TABLE t2 ADD PRIMARY KEY (id); 535 | 536 | CREATE TEMP TABLE t3 AS 537 | SELECT 2*id AS id, random() 538 | FROM generate_series(1, 300000) AS t(id); 539 | ALTER TABLE t3 ADD PRIMARY KEY (id); 540 | 541 | ANALYZE t1, t2, t3; 542 | 543 | EXPLAIN (costs off, timing off) 544 | SELECT * 545 | FROM t1 546 | JOIN t2 USING (id) 547 | JOIN t3 USING (id); 548 | 549 | EXPLAIN (costs off, timing off) 550 | /*+ Leading((t3 (t2 t1))) NestLoop(t1 t2 t3) */ 551 | SELECT * 552 | FROM t1 553 | JOIN t2 USING (id) 554 | JOIN t3 USING (id); 555 | 556 | ROLLBACK; 557 | 558 | 559 | -- https://github.com/omniti-labs/pg_jobmon 560 | CREATE EXTENSION IF NOT EXISTS pg_jobmon CASCADE; -- requires dblink 561 | SELECT * FROM check_job_status(); 562 | 563 | 564 | -- https://github.com/pgpartman/pg_partman 565 | CREATE SCHEMA partman; 566 | CREATE EXTENSION pg_partman SCHEMA partman; 567 | 568 | CREATE TABLE public.employees ( 569 | id integer, 570 | fname text, 571 | lname text, 572 | dob date NOT NULL, 573 | joined date NOT NULL 574 | ) PARTITION BY RANGE (joined); 575 | 576 | CREATE TABLE partman.employees_template (LIKE public.employees); 577 | ALTER TABLE partman.employees_template ADD PRIMARY KEY (id); 578 | 579 | SELECT partman.create_parent( 580 | 'public.employees', 581 | p_control := 'joined', 582 | p_type := CASE WHEN current_setting('server_version_num')::int >= 140000 THEN 'range' ELSE 'native' END, 583 | p_interval := CASE WHEN current_setting('server_version_num')::int >= 140000 THEN '1 year' ELSE 'yearly' END, 584 | p_template_table := 'partman.employees_template', 585 | p_premake := 2, 586 | p_start_partition := (CURRENT_TIMESTAMP + '1 hour'::interval)::text 587 | ); 588 | 589 | CREATE OR REPLACE VIEW v_part_employees AS 590 | SELECT 
c.oid::pg_catalog.regclass, c.relkind, pg_catalog.pg_get_expr(c.relpartbound, c.oid) 591 | FROM pg_catalog.pg_class c 592 | WHERE EXISTS ( 593 | SELECT FROM pg_catalog.pg_inherits i 594 | JOIN pg_catalog.pg_class c0 595 | ON c0.oid = i.inhparent 596 | JOIN pg_catalog.pg_namespace n 597 | ON n.oid = c0.relnamespace 598 | WHERE i.inhrelid = c.oid 599 | AND n.nspname = 'public' 600 | AND c0.relname = 'employees' 601 | ) 602 | ORDER BY pg_catalog.pg_get_expr(c.relpartbound, c.oid) = 'DEFAULT', c.oid::pg_catalog.regclass::pg_catalog.text; 603 | 604 | SELECT * FROM v_part_employees; 605 | 606 | INSERT INTO public.employees (id ,fname,lname,dob ,joined) 607 | VALUES( 608 | generate_series(1,10000), 609 | (array['Oswald', 'Henry', 'Bob', 'Vennie'])[floor(random() * 4 + 1)], 610 | (array['Leo', 'Jack', 'Den', 'Daisy' ,'Woody'])[floor(random() * 5 + 1)], 611 | '1995-01-01'::date + trunc(random() * 366 * 3)::int, 612 | '2023-01-01'::date + trunc(random() * 366 * 3)::int 613 | ); 614 | 615 | CALL partman.partition_data_proc ('public.employees'); 616 | VACUUM ANALYZE public.employees; 617 | 618 | SELECT * FROM v_part_employees; 619 | 620 | UPDATE partman.part_config SET premake = '4' WHERE parent_table ='public.employees'; 621 | 622 | CALL partman.run_maintenance_proc(); 623 | 624 | SELECT * FROM v_part_employees; 625 | 626 | DROP VIEW v_part_employees; 627 | DROP TABLE partman.employees_template; 628 | DROP TABLE public.employees; 629 | 630 | 631 | -- https://github.com/cybertec-postgresql/pg_permissions 632 | CREATE EXTENSION IF NOT EXISTS pg_permissions; 633 | SELECT * FROM database_permissions LIMIT 5; 634 | SELECT * FROM schema_permissions LIMIT 5; 635 | SELECT * FROM table_permissions LIMIT 5; 636 | SELECT * FROM view_permissions LIMIT 5; 637 | SELECT * FROM column_permissions LIMIT 5; 638 | SELECT * FROM function_permissions LIMIT 5; 639 | SELECT * FROM sequence_permissions LIMIT 5; 640 | 641 | 642 | -- https://github.com/powa-team/pg_qualstats 643 | CREATE EXTENSION IF NOT EXISTS pg_qualstats; 644 | 645 | SELECT * FROM pg_qualstats; 646 | 647 | SELECT v 648 | FROM json_array_elements(pg_qualstats_index_advisor(min_filter => 50)->'indexes') v 649 | ORDER BY v::text COLLATE "C"; 650 | 651 | 652 | -- https://github.com/begriffs/pg_rational 653 | CREATE EXTENSION IF NOT EXISTS pg_rational; 654 | SELECT 0.263157894737::float::rational; 655 | 656 | 657 | -- https://github.com/reorg/pg_repack 658 | CREATE EXTENSION IF NOT EXISTS pg_repack; 659 | SELECT repack.version(), repack.version_sql(); 660 | 661 | 662 | -- https://github.com/bigsmoke/pg_rowalesce 663 | DO $$ 664 | BEGIN 665 | IF current_setting('server_version_num')::int >= 140000 THEN 666 | CREATE EXTENSION IF NOT EXISTS pg_rowalesce CASCADE; 667 | CALL test__pg_rowalesce(); 668 | END IF; 669 | END $$; 670 | 671 | 672 | -- https://github.com/petropavel13/pg_rrule 673 | CREATE EXTENSION IF NOT EXISTS pg_rrule; 674 | SELECT get_freq('FREQ=WEEKLY;INTERVAL=1;WKST=MO;UNTIL=20200101T045102Z'::rrule); 675 | SELECT get_byday('FREQ=WEEKLY;INTERVAL=1;WKST=MO;UNTIL=20200101T045102Z;BYDAY=MO,TH,SU'::rrule); 676 | SELECT * FROM 677 | unnest( 678 | get_occurrences( 679 | 'FREQ=WEEKLY;INTERVAL=1;WKST=MO;UNTIL=20200101T045102Z;BYDAY=SA;BYHOUR=10;BYMINUTE=51;BYSECOND=2'::rrule, 680 | '2019-12-07 10:51:02+00'::timestamp 681 | ) 682 | ); 683 | 684 | 685 | -- https://github.com/cybertec-postgresql/pg_show_plans 686 | CREATE EXTENSION IF NOT EXISTS pg_show_plans; 687 | SELECT * FROM pg_show_plans; 688 | 689 | 690 | -- https://github.com/eulerto/pg_similarity 691 | 
CREATE EXTENSION IF NOT EXISTS pg_similarity; 692 | 693 | BEGIN; 694 | 695 | CREATE TEMP TABLE foo (a text); 696 | CREATE TEMP TABLE bar (b text); 697 | 698 | INSERT INTO foo 699 | VALUES('Euler'),('Oiler'),('Euler Taveira de Oliveira'),('Maria Taveira dos Santos'),('Carlos Santos Silva'); 700 | INSERT INTO bar 701 | VALUES('Euler T. de Oliveira'),('Euller'),('Oliveira, Euler Taveira'),('Sr. Oliveira'); 702 | 703 | SELECT a, b, cosine(a,b), jaro(a, b), euclidean(a, b), qgram(a, b), lev(a, b) FROM foo, bar; 704 | 705 | ROLLBACK; 706 | 707 | 708 | -- https://github.com/postgrespro/pgsphere 709 | CREATE EXTENSION IF NOT EXISTS pg_sphere; 710 | 711 | SELECT set_sphere_output('DEG'); 712 | SELECT npoints( spoly '{(10d,0d),(10d,1d),(15d,0d),(5d,-5d)}'); 713 | SELECT area(spoly '{(0d,0d),(0d,90d),(90d,0d)}')/(4.0*pi()); 714 | SELECT '<(180d,-90d),1.0d>'::scircle ~ spoly '{(0d,-89d),(90d,-89d),(180d,-89d),(270d,-89d)}'; 715 | 716 | SELECT set_sphere_output('DMS'); 717 | SELECT 180.0*dist('<( 0h 2m 30s , 10d 0m 0s), 0.1d>'::scircle,'<( 0h 2m 30s , -10d 0m 0s),0.1d>'::scircle)/pi(); 718 | SELECT scircle('(0d,-90d)'::spoint); 719 | 720 | SELECT set_sphere_output('RAD'); 721 | SELECT dist('( 0h 2m 30s , 95d 0m 0s)'::spoint,'( 12h 2m 30s , 85d 0m 0s)'::spoint); 722 | SELECT long('(24h 2m 30s ,-85d 0m 0s)'::spoint); 723 | SELECT lat('( 0h 2m 30s ,85d 0m 0s)'::spoint); 724 | SELECT spoint(7.28318530717958623 , 0.00); 725 | 726 | 727 | -- https://github.com/cybertec-postgresql/pg_squeeze 728 | CREATE EXTENSION IF NOT EXISTS pg_squeeze; 729 | SELECT * FROM squeeze.tables; 730 | SELECT squeeze.start_worker(); 731 | SELECT squeeze.stop_worker(); 732 | 733 | 734 | -- https://github.com/powa-team/pg_stat_kcache 735 | CREATE EXTENSION IF NOT EXISTS pg_stat_kcache CASCADE; -- requires pg_stat_statements 736 | SELECT * FROM pg_stat_kcache(); 737 | 738 | 739 | -- https://github.com/rjuju/pg_track_settings 740 | CREATE EXTENSION IF NOT EXISTS pg_track_settings; 741 | SELECT pg_track_settings_snapshot(); 742 | 743 | 744 | -- https://github.com/fboulnois/pg_uuidv7 745 | CREATE EXTENSION IF NOT EXISTS pg_uuidv7; 746 | SELECT uuid_generate_v7(); 747 | 748 | 749 | -- https://github.com/postgrespro/pg_wait_sampling 750 | CREATE EXTENSION IF NOT EXISTS pg_wait_sampling; 751 | WITH t as (SELECT sum(0) FROM pg_wait_sampling_current) 752 | SELECT sum(0) FROM generate_series(1, 2), t; 753 | 754 | 755 | -- https://github.com/bigsmoke/pg_xenophile 756 | DO $$ 757 | BEGIN 758 | IF current_setting('server_version_num')::int >= 140000 THEN 759 | CREATE EXTENSION IF NOT EXISTS pg_xenophile CASCADE; 760 | CALL xeno.test__l10n_table(); 761 | END IF; 762 | END $$; 763 | 764 | 765 | -- https://github.com/pgadmin-org/pgagent 766 | CREATE EXTENSION IF NOT EXISTS pgagent; 767 | 768 | /* Create pgAgent job - https://karatejb.blogspot.com/2020/04/postgresql-pgagent-scheduling-agent.html */ 769 | DO $$ 770 | DECLARE 771 | jid integer; 772 | scid integer; 773 | BEGIN 774 | -- Creating a new job 775 | INSERT INTO pgagent.pga_job( 776 | jobjclid, jobname, jobdesc, jobhostagent, jobenabled 777 | ) VALUES ( 778 | 1::integer, 'Routine Clean'::text, ''::text, ''::text, true 779 | ) RETURNING jobid INTO jid; 780 | 781 | -- Steps 782 | -- Inserting a step (jobid: NULL) 783 | INSERT INTO pgagent.pga_jobstep ( 784 | jstjobid, jstname, jstenabled, jstkind, 785 | jstconnstr, jstdbname, jstonerror, 786 | jstcode, jstdesc 787 | ) VALUES ( 788 | jid, 'Clean_News'::text, true, 's'::character(1), 789 | 'host=localhost port=5432 dbname=postgres 
connect_timeout=10 user=''postgres'''::text, ''::name, 'f'::character(1), 790 | 'DELETE FROM public."News"'::text, ''::text 791 | ) ; 792 | 793 | -- Schedules 794 | -- Inserting a schedule 795 | INSERT INTO pgagent.pga_schedule( 796 | jscjobid, jscname, jscdesc, jscenabled, 797 | jscstart, jscend, jscminutes, jschours, jscweekdays, jscmonthdays, jscmonths 798 | ) VALUES ( 799 | jid, 'Daily'::text, ''::text, true, 800 | '2020-04-24 06:14:44+00'::timestamp with time zone, '2020-04-30 05:51:17+00'::timestamp with time zone, 801 | -- Minutes 802 | ARRAY[false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false]::boolean[], 803 | -- Hours 804 | ARRAY[false,false,false,false,false,false,false,false,false,false,false,true,false,false,false,false,false,false,false,false,false,false,false,false]::boolean[], 805 | -- Week days 806 | ARRAY[false,false,false,false,false,false,false]::boolean[], 807 | -- Month days 808 | ARRAY[false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false]::boolean[], 809 | -- Months 810 | ARRAY[false,false,false,false,false,false,false,false,false,false,false,false]::boolean[] 811 | ) RETURNING jscid INTO scid; 812 | END 813 | $$; 814 | 815 | SELECT * from pgagent."pga_job"; 816 | SELECT * from pgagent."pga_jobstep"; 817 | SELECT * from pgagent."pga_schedule"; 818 | 819 | /* Delete pgAgent job - https://karatejb.blogspot.com/2020/04/postgresql-pgagent-scheduling-agent.html */ 820 | DO $$ 821 | DECLARE 822 | jname VARCHAR(50) :='Routine Clean'; 823 | jid INTEGER; 824 | BEGIN 825 | 826 | SELECT "jobid" INTO jid from pgagent."pga_job" 827 | WHERE "jobname"=jname; 828 | 829 | DELETE FROM pgagent."pga_schedule" 830 | WHERE "jscjobid"=jid; 831 | 832 | DELETE FROM pgagent.pga_jobstep 833 | WHERE "jstjobid"=jid; 834 | 835 | DELETE FROM pgagent."pga_job" 836 | WHERE "jobid"=jid; 837 | 838 | END 839 | $$; 840 | 841 | 842 | -- https://github.com/pgaudit/pgaudit 843 | CREATE EXTENSION IF NOT EXISTS pgaudit; 844 | SET pgaudit.log = 'all, -misc'; 845 | SET pgaudit.log_level = notice; 846 | 847 | -- https://github.com/fmbiete/pgauditlogtofile 848 | CREATE EXTENSION IF NOT EXISTS pgauditlogtofile; 849 | SHOW pgaudit.log_directory; 850 | SHOW pgaudit.log_filename; 851 | SHOW pgaudit.log_rotation_age; 852 | 853 | 854 | -- https://github.com/klando/pgfincore 855 | CREATE EXTENSION IF NOT EXISTS pgfincore; 856 | SELECT * FROM pgsysconf_pretty(); 857 | 858 | 859 | -- https://github.com/enova/pgl_ddl_deploy 860 | CREATE EXTENSION IF NOT EXISTS pgl_ddl_deploy; 861 | 862 | --Setup permissions 863 | SELECT pgl_ddl_deploy.add_role(oid) FROM pg_roles WHERE rolname in('app_owner', 'replication_role'); 864 | 865 | --Setup configs 866 | INSERT INTO pgl_ddl_deploy.set_configs (set_name, include_schema_regex, lock_safe_deployment, allow_multi_statements) 867 | VALUES ('default', '.*', true, true), ('insert_update', '.*happy.*', true, true); 868 | 869 | 870 | -- https://github.com/2ndQuadrant/pglogical 871 | CREATE EXTENSION IF NOT EXISTS pglogical; 872 | SELECT pglogical.pglogical_version(), pglogical.pglogical_version_num(); 873 | 874 | 875 | -- 
https://github.com/enova/pglogical_ticker 876 | CREATE EXTENSION IF NOT EXISTS pglogical_ticker; 877 | SELECT pglogical_ticker.deploy_ticker_tables(); 878 | 879 | 880 | -- https://github.com/ohmu/pgmemcache 881 | CREATE EXTENSION IF NOT EXISTS pgmemcache; 882 | SELECT memcache_flush_all(); 883 | SELECT memcache_stats(); 884 | 885 | 886 | -- https://github.com/dvarrazzo/pgmp 887 | CREATE EXTENSION IF NOT EXISTS pgmp; 888 | SELECT 10.1::numeric::mpq; 889 | SELECT 9223372036854775807::mpz; 890 | 891 | 892 | -- https://github.com/tembo-io/pgmq 893 | CREATE EXTENSION IF NOT EXISTS pgmq; 894 | SELECT pgmq.create('my_queue'); 895 | SELECT * from pgmq.send(queue_name => 'my_queue', msg => '{"foo": "bar1"}'); 896 | SELECT * from pgmq.send(queue_name => 'my_queue', msg => '{"foo": "bar2"}', delay => 1); 897 | SELECT * FROM pgmq.read(queue_name => 'my_queue', vt => 30, qty => 2); 898 | SELECT * FROM pgmq.read(queue_name => 'my_queue', vt => 30, qty => 1); 899 | SELECT * FROM pgmq.pop('my_queue'); 900 | SELECT pgmq.archive(queue_name => 'my_queue', msg_id => 2); 901 | SELECT pgmq.send_batch( 902 | queue_name => 'my_queue', 903 | msgs => ARRAY['{"foo": "bar3"}','{"foo": "bar4"}','{"foo": "bar5"}']::jsonb[] 904 | ); 905 | SELECT pgmq.archive(queue_name => 'my_queue', msg_ids => ARRAY[3, 4, 5]); 906 | SELECT * FROM pgmq.a_my_queue; 907 | SELECT pgmq.send('my_queue', '{"foo": "bar6"}'); 908 | SELECT pgmq.delete('my_queue', 6); 909 | SELECT pgmq.drop_queue('my_queue'); 910 | 911 | 912 | -- https://github.com/petere/pgpcre 913 | CREATE EXTENSION IF NOT EXISTS pgpcre; 914 | SELECT 'foo' ~ pcre 'fo+'; 915 | SELECT pcre 'fo+' ~ 'foo'; 916 | 917 | 918 | -- https://github.com/pgq/pgq 919 | CREATE EXTENSION IF NOT EXISTS pgq; 920 | SELECT pgq.create_queue('testqueue1'); 921 | 922 | -- https://github.com/pgq/pgq-node 923 | CREATE EXTENSION IF NOT EXISTS pgq_node; 924 | SELECT * FROM pgq_node.get_queue_locations('testqueue1'); 925 | 926 | SELECT pgq.drop_queue('testqueue1'); 927 | 928 | 929 | -- https://github.com/pgRouting/pgrouting 930 | CREATE EXTENSION IF NOT EXISTS pgrouting CASCADE; 931 | 932 | BEGIN; 933 | 934 | CREATE TABLE edge_table ( 935 | id BIGSERIAL, 936 | dir character varying, 937 | source BIGINT, 938 | target BIGINT, 939 | cost FLOAT, 940 | reverse_cost FLOAT, 941 | capacity BIGINT, 942 | reverse_capacity BIGINT, 943 | category_id INTEGER, 944 | reverse_category_id INTEGER, 945 | x1 FLOAT, 946 | y1 FLOAT, 947 | x2 FLOAT, 948 | y2 FLOAT, 949 | the_geom geometry 950 | ); 951 | 952 | INSERT INTO edge_table ( 953 | category_id, reverse_category_id, 954 | cost, reverse_cost, 955 | capacity, reverse_capacity, 956 | x1, y1, 957 | x2, y2 958 | ) VALUES 959 | (3, 1, 1, 1, 80, 130, 2, 0, 2, 1), 960 | (3, 2, -1, 1, -1, 100, 2, 1, 3, 1), 961 | (2, 1, -1, 1, -1, 130, 3, 1, 4, 1), 962 | (2, 4, 1, 1, 100, 50, 2, 1, 2, 2), 963 | (1, 4, 1, -1, 130, -1, 3, 1, 3, 2), 964 | (4, 2, 1, 1, 50, 100, 0, 2, 1, 2), 965 | (4, 1, 1, 1, 50, 130, 1, 2, 2, 2), 966 | (2, 1, 1, 1, 100, 130, 2, 2, 3, 2), 967 | (1, 3, 1, 1, 130, 80, 3, 2, 4, 2), 968 | (1, 4, 1, 1, 130, 50, 2, 2, 2, 3), 969 | (1, 2, 1, -1, 130, -1, 3, 2, 3, 3), 970 | (2, 3, 1, -1, 100, -1, 2, 3, 3, 3), 971 | (2, 4, 1, -1, 100, -1, 3, 3, 4, 3), 972 | (3, 1, 1, 1, 80, 130, 2, 3, 2, 4), 973 | (3, 4, 1, 1, 80, 50, 4, 2, 4, 3), 974 | (3, 3, 1, 1, 80, 80, 4, 1, 4, 2), 975 | (1, 2, 1, 1, 130, 100, 0.5, 3.5, 1.999999999999,3.5), 976 | (4, 1, 1, 1, 50, 130, 3.5, 2.3, 3.5,4); 977 | 978 | UPDATE edge_table 979 | SET the_geom = st_makeline(st_point(x1,y1), st_point(x2,y2)), 980 | 
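-- derive the direction flag from pgRouting's sign convention: a negative cost
-- marks an edge as not traversable in that direction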
dir = CASE 981 | WHEN (cost>0 AND reverse_cost>0) THEN 'B' -- both ways 982 | WHEN (cost>0 AND reverse_cost<0) THEN 'FT' -- direction of the LINESSTRING 983 | WHEN (cost<0 AND reverse_cost>0) THEN 'TF' -- reverse direction of the LINESTRING 984 | ELSE '' -- unknown 985 | END; 986 | 987 | SELECT pgr_createTopology('edge_table',0.001); 988 | 989 | SELECT pgr_analyzegraph('edge_table', 0.001); 990 | SELECT pgr_nodeNetwork('edge_table', 0.001); 991 | 992 | ROLLBACK; 993 | 994 | 995 | -- https://github.com/theory/pgtap 996 | CREATE EXTENSION IF NOT EXISTS pgtap; 997 | SELECT * FROM no_plan(); 998 | SELECT ok(TRUE); 999 | SELECT * FROM finish(); 1000 | 1001 | 1002 | -- https://github.com/sjstoelting/pgsql-tweaks 1003 | CREATE EXTENSION IF NOT EXISTS pgsql_tweaks; 1004 | 1005 | SELECT is_date('2018-01-01'), is_date('2018-02-31'), is_date('01.01.2018', 'DD.MM.YYYY'); 1006 | SELECT is_time('14:33:55.456574'), is_time('25:33:55.456574'), is_time('14.33.55,456574', 'HH24.MI.SS,US'); 1007 | SELECT is_timestamp('2018-01-01 00:00:00'), is_timestamp('01.01.2018 00:00:00', 'DD.MM.YYYY HH24.MI.SS'); 1008 | SELECT is_real('123.456'), is_real('123,456'), is_double_precision('123.456'), is_double_precision('123,456'); 1009 | SELECT is_numeric('123'), is_numeric('1 2'), is_bigint('9876543210'), is_integer('98765'), is_smallint('321'); 1010 | SELECT is_boolean('yes'), is_boolean('false'), is_boolean('NO'), is_boolean('TRUE'), is_boolean('1'), is_boolean('F'); 1011 | SELECT is_json('{"review": {"date": "1970-12-30", "votes": 10, "rating": 5, "helpful_votes": 0}, "product": {"id": "1551803542", "group": "Book", "title": "Start and Run a Coffee Bar (Start & Run a)", "category": "Business & Investing", "sales_rank": 11611, "similar_ids": ["0471136174", "0910627312", "047112138X", "0786883561", "0201570483"], "subcategory": "General"}, "customer_id": "AE22YDHSBFYIP"}'); 1012 | SELECT is_jsonb('{"review": {"date": "1970-12-30", "votes": 10, "rating": 5, "helpful_votes": 0}, "product": {"id": "1551803542", "group": "Book", "title": "Start and Run a Coffee Bar (Start & Run a)", "category": "Business & Investing", "sales_rank": 11611, "similar_ids": ["0471136174", "0910627312", "047112138X", "0786883561", "0201570483"], "subcategory": "General"}, "customer_id": "AE22YDHSBFYIP"}'); 1013 | SELECT is_empty_b(''), is_empty_b(NULL), is_empty_b('NULL'); -- just "is_empty" if pgtap is not created 1014 | SELECT is_hex('a1b0'), is_hex('a1b0c3c3c3c4b5d3'), hex2bigint('a1b0'); 1015 | SELECT sha256('test-string'::bytea); 1016 | SELECT pg_size_pretty(pg_schema_size('public')); 1017 | SELECT is_encoding('ÀÁÂÃÄÅÇÈÉÊËÌÍÎÏÑÒÓÔÕÖÙÚÛÜÝ', 'LATIN1'), is_encoding('àáâãäåçèéêëìíîïñòóôõöùúûüýÿ', 'LATIN1', 'UTF8'); 1018 | SELECT return_not_part_of_encoding('ağbƵcğeƵ', 'latin1'); 1019 | SELECT to_unix_timestamp('2018-01-01 00:00:00+01'); 1020 | SELECT array_trim(ARRAY['2018-11-11 11:00:00 MEZ',NULL,'2018-11-11 11:00:00 MEZ']::TIMESTAMP WITH TIME ZONE[], TRUE); 1021 | 1022 | 1023 | -- https://github.com/EnterpriseDB/pldebugger 1024 | CREATE EXTENSION IF NOT EXISTS pldbgapi; 1025 | SELECT * FROM pldbg_get_proxy_info(); 1026 | SELECT pldbg_create_listener(); 1027 | 1028 | 1029 | -- https://github.com/okbob/plpgsql_check 1030 | CREATE EXTENSION IF NOT EXISTS plpgsql_check; 1031 | 1032 | SELECT p.proname, tgrelid::regclass, cf.* 1033 | FROM pg_proc p 1034 | JOIN pg_trigger t ON t.tgfoid = p.oid 1035 | JOIN pg_language l ON p.prolang = l.oid 1036 | JOIN pg_namespace n ON p.pronamespace = n.oid, 1037 | LATERAL plpgsql_check_function(p.oid, t.tgrelid) 
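-- lateral call: run plpgsql_check over each PL/pgSQL trigger function, restricted
-- below to schema public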
cf 1038 | WHERE n.nspname = 'public' and l.lanname = 'plpgsql'; 1039 | 1040 | 1041 | -- https://www.postgresql.org/docs/current/plperl.html 1042 | CREATE EXTENSION IF NOT EXISTS plperl; 1043 | CREATE OR REPLACE FUNCTION concat_array_elements(text[]) RETURNS TEXT AS $$ 1044 | my $arg = shift; 1045 | my $result = ""; 1046 | return undef if (!defined $arg); 1047 | 1048 | # as an array reference 1049 | for (@$arg) { 1050 | $result .= $_; 1051 | } 1052 | 1053 | # also works as a string 1054 | $result .= $arg; 1055 | 1056 | return $result; 1057 | $$ LANGUAGE plperl; 1058 | 1059 | SELECT concat_array_elements(ARRAY['PL','/','Perl']); 1060 | 1061 | 1062 | -- https://github.com/bigsql/plprofiler 1063 | CREATE EXTENSION IF NOT EXISTS plprofiler; 1064 | SELECT pl_profiler_version(), pl_profiler_versionstr(); 1065 | 1066 | 1067 | -- https://github.com/plproxy/plproxy 1068 | CREATE EXTENSION IF NOT EXISTS plproxy; 1069 | 1070 | CREATE OR REPLACE FUNCTION get_cluster_version(cluster_name text) 1071 | RETURNS INTEGER 1072 | LANGUAGE plpgsql 1073 | AS $$ 1074 | BEGIN 1075 | IF cluster_name = 'testcluster' THEN 1076 | RETURN 5; 1077 | END IF; 1078 | IF cluster_name = 'badcluster' THEN 1079 | RETURN 5; 1080 | END IF; 1081 | RAISE EXCEPTION 'no such cluster: %', cluster_name; 1082 | END; 1083 | $$; 1084 | 1085 | 1086 | -- https://www.postgresql.org/docs/current/plpython.html 1087 | CREATE EXTENSION IF NOT EXISTS plpython3u; 1088 | CREATE EXTENSION IF NOT EXISTS hstore_plpython3u CASCADE; 1089 | CREATE EXTENSION IF NOT EXISTS ltree_plpython3u CASCADE; 1090 | CREATE EXTENSION IF NOT EXISTS jsonb_plpython3u; 1091 | 1092 | CREATE OR REPLACE FUNCTION py_test(val1 hstore[], val2 ltree) 1093 | RETURNS SETOF jsonb 1094 | LANGUAGE plpython3u 1095 | TRANSFORM FOR TYPE hstore, FOR TYPE jsonb, FOR TYPE ltree 1096 | AS $$ 1097 | import sys 1098 | 1099 | with plpy.subtransaction(): 1100 | plpy.info('UPDATE tbl SET {} = {} WHERE key = {}'.format( 1101 | plpy.quote_ident('Test Column'), 1102 | plpy.quote_nullable(None), 1103 | plpy.quote_literal('test value') 1104 | )) 1105 | 1106 | plpy.warning(f'Python version: {sys.version}') 1107 | 1108 | return val1 + [{'ltree': repr(val2)}] 1109 | $$; 1110 | 1111 | SELECT py_test(array['foo=>bar, baz=>NULL'::hstore, 'qux=>0'], 'aa.bb.cc'::ltree); 1112 | 1113 | 1114 | -- https://github.com/petere/plsh 1115 | CREATE EXTENSION IF NOT EXISTS plsh; 1116 | CREATE FUNCTION concat_plsh(text, text) RETURNS text AS ' 1117 | #!/bin/sh 1118 | echo "$1$2" 1119 | ' LANGUAGE plsh; 1120 | SELECT concat_plsh('It ', 'works!'); 1121 | 1122 | 1123 | -- https://github.com/pgpointcloud/pointcloud 1124 | CREATE EXTENSION IF NOT EXISTS pointcloud; 1125 | CREATE EXTENSION IF NOT EXISTS pointcloud_postgis; 1126 | 1127 | INSERT INTO pointcloud_formats (pcid, srid, schema) VALUES (1, 4326, 1128 | ' 1129 | 1131 | 1132 | 1 1133 | 4 1134 | X coordinate as a long integer. You must use the 1135 | scale and offset information of the header to 1136 | determine the double value. 1137 | X 1138 | int32_t 1139 | 0.01 1140 | 1141 | 1142 | 2 1143 | 4 1144 | Y coordinate as a long integer. You must use the 1145 | scale and offset information of the header to 1146 | determine the double value. 1147 | Y 1148 | int32_t 1149 | 0.01 1150 | 1151 | 1152 | 3 1153 | 4 1154 | Z coordinate as a long integer. You must use the 1155 | scale and offset information of the header to 1156 | determine the double value. 
1157 | Z 1158 | int32_t 1159 | 0.01 1160 | 1161 | 1162 | 4 1163 | 2 1164 | The intensity value is the integer representation 1165 | of the pulse return magnitude. This value is optional 1166 | and system specific. However, it should always be 1167 | included if available. 1168 | Intensity 1169 | uint16_t 1170 | 1 1171 | 1172 | 1173 | dimensional 1174 | 1175 | '); 1176 | 1177 | SELECT ST_AsText(PC_MakePoint(1, ARRAY[-127, 45, 124.0, 4.0])::geometry); 1178 | 1179 | 1180 | -- https://github.com/powa-team/powa-archivist 1181 | CREATE EXTENSION IF NOT EXISTS powa; 1182 | SELECT * FROM powa_functions; 1183 | 1184 | 1185 | -- https://github.com/dimitri/prefix 1186 | CREATE EXTENSION IF NOT EXISTS prefix; 1187 | SELECT '123'::prefix_range @> '123456'; 1188 | 1189 | 1190 | -- https://github.com/schmiddy/pg_prioritize 1191 | CREATE EXTENSION IF NOT EXISTS prioritize; 1192 | SELECT get_backend_priority(pg_backend_pid()); 1193 | 1194 | 1195 | -- https://github.com/segasai/q3c 1196 | CREATE EXTENSION IF NOT EXISTS q3c; 1197 | SELECT q3c_version(); 1198 | SELECT q3c_ang2ipix(0, 0); 1199 | 1200 | 1201 | -- https://github.com/ChenHuajun/pg_roaringbitmap 1202 | CREATE EXTENSION IF NOT EXISTS roaringbitmap; 1203 | SELECT '{ 1 , -2 , 555555 , -4 ,2147483647,-2147483648}'::roaringbitmap; 1204 | SET roaringbitmap.output_format='array'; 1205 | SELECT '\x3a30000000000000'::roaringbitmap; 1206 | SELECT roaringbitmap('{1,-2,-3}') & roaringbitmap('{-3,-4,5}'); 1207 | SELECT roaringbitmap('{1,2,3}') | roaringbitmap('{3,4,5}'); 1208 | SELECT roaringbitmap('{1,2,3}') | 6; 1209 | SELECT 1 | roaringbitmap('{1,2,3}'); 1210 | SELECT roaringbitmap('{}') # roaringbitmap('{3,4,5}'); 1211 | SELECT roaringbitmap('{1,2,3}') - roaringbitmap('{}'); 1212 | SELECT roaringbitmap('{-1,-2,3}') - -1; 1213 | SELECT roaringbitmap('{-2,-1,0,1,2,3,2147483647,-2147483648}') << 4294967296; 1214 | SELECT roaringbitmap('{-2,-1,0,1,2,3,2147483647,-2147483648}') >> -2; 1215 | SELECT roaringbitmap('{1,2,3}') @> roaringbitmap('{3,2}'); 1216 | SELECT roaringbitmap('{1,2,3}') @> 1; 1217 | SELECT roaringbitmap('{1,-3}') <@ roaringbitmap('{-3,1,1000}'); 1218 | SELECT 6 <@ roaringbitmap('{}'); 1219 | SELECT roaringbitmap('{1,2,3}') && roaringbitmap('{3,4,5}'); 1220 | SELECT roaringbitmap('{}') = roaringbitmap('{}'); 1221 | SELECT roaringbitmap('{1,2,3}') <> roaringbitmap('{3,1,2}'); 1222 | SELECT rb_build('{1,-2,555555,-4,2147483647,-2147483648}'::int[]); 1223 | SELECT rb_to_array('{-1,2,555555,-4}'::roaringbitmap); 1224 | SELECT rb_is_empty('{}'); 1225 | SELECT rb_cardinality('{1,10,100}'); 1226 | SELECT rb_max('{1,10,100,2147483647,-2147483648,-1}'); 1227 | SELECT rb_min('{1,10,100,2147483647,-2147483648,-1}'); 1228 | SELECT rb_iterate('{1,10,100,2147483647,-2147483648,-1}'); 1229 | 1230 | 1231 | -- https://github.com/cybertec-postgresql/pgfaceting 1232 | CREATE TABLE test_faceting( 1233 | facet_name text, 1234 | distinct_values integer, 1235 | cardinality_sum integer 1236 | ); 1237 | 1238 | DO $$ 1239 | BEGIN 1240 | IF current_setting('server_version_num')::int >= 140000 THEN 1241 | CREATE EXTENSION IF NOT EXISTS pgfaceting; -- needs roaringbitmap 1242 | 1243 | CREATE TYPE mimetype AS ENUM ( 1244 | 'application/pdf', 1245 | 'text/html', 1246 | 'image/jpeg', 1247 | 'image/png', 1248 | 'application/msword', 1249 | 'text/csv', 1250 | 'application/zip', 1251 | 'application/vnd.ms-powerpoint' 1252 | ); 1253 | 1254 | CREATE TABLE documents ( 1255 | id int4 primary key, 1256 | created timestamptz not null, 1257 | finished timestamptz, 1258 | category_id 
int4, 1259 | tags text[], 1260 | type mimetype, 1261 | size int8, 1262 | title text 1263 | ); 1264 | 1265 | INSERT INTO documents (id, created, finished, category_id, tags, type, size, title) VALUES 1266 | (1, '2010-01-01 00:00:42+02', '2010-01-01 09:45:29+02', 8, '{blue,burlywood,antiquewhite,olive}', 'application/pdf', 71205, 'Interracial marriage Science Research'), 1267 | (2, '2010-01-01 00:00:37+02', '2010-01-01 03:55:08+02', 12, '{lightcoral,bisque,blue,"aqua blue","red purple",aqua}', 'text/html', 682069, 'Odour and trials helped to improve the country''s history through the public'), 1268 | (3, '2010-01-01 00:00:33+02', '2010-01-02 18:29:15+02', 9, '{"mustard brown","very light pink"}', 'application/pdf', 143708, 'Have technical scale, ordinary, commonsense notions of absolute time and length independent of the'), 1269 | (4, '2010-01-01 00:00:35+02', '2010-01-02 01:12:08+02', 24, '{orange,green,blue}', 'text/html', 280663, 'Database of (/ˈdɛnmɑːrk/; Danish: Danmark [ˈd̥ænmɑɡ̊]) is a spiral'), 1270 | (5, '2010-01-01 00:01:06+02', '2010-01-01 23:18:56+02', 24, '{orange,chocolate}', 'image/jpeg', 111770, 'Passage to now resumed'), 1271 | (6, '2010-01-01 00:01:05+02', '2010-01-01 10:25:29+02', 8, '{blue,aquamarine}', 'application/pdf', 110809, 'East. Mesopotamia, BCE – 480 BCE), when determining a value that'), 1272 | (7, '2010-01-01 00:00:57+02', '2010-01-02 00:41:01+02', NULL, '{}', 'application/pdf', 230803, 'Bahía de It has also conquered 13 South American finds and another'), 1273 | (8, '2010-01-01 00:01:11+02', '2010-01-01 14:22:11+02', 24, '{blue,burlywood,"dirt brown",orange,ivory,brown,green,olive,lightpink}', 'image/jpeg', 1304196, '15-fold: from the mid- to late-20th'), 1274 | (9, '2010-01-01 00:01:47+02', '2010-01-01 09:59:57+02', 9, '{green,blue,orange}', 'application/pdf', 142410, 'Popular Western localized function model. 
Psychiatric interventions such as local businesses, but also'), 1275 | (10, '2010-01-01 00:01:31+02', '2010-01-01 05:49:47+02', 24, '{green,lavender,blue,orange,red,darkslateblue}', 'text/html', 199703, 'Rapidly expanding Large Interior Form, 1953-54, Man Enters the Cosmos and Nuclear Energy.'); 1276 | 1277 | PERFORM faceting.add_faceting_to_table( 1278 | 'documents', 1279 | key => 'id', 1280 | facets => array[ 1281 | faceting.datetrunc_facet('created', 'month'), 1282 | faceting.datetrunc_facet('finished', 'month'), 1283 | faceting.plain_facet('category_id'), 1284 | faceting.plain_facet('type'), 1285 | faceting.bucket_facet('size', buckets => array[0,1000,5000,10000,50000,100000,500000]) 1286 | ] 1287 | ); 1288 | 1289 | INSERT INTO test_faceting 1290 | SELECT facet_name, count(distinct facet_value), sum(cardinality) 1291 | FROM faceting.count_results( 1292 | 'documents'::regclass, 1293 | filters => array[row('category_id', 24)]::faceting.facet_filter[] 1294 | ) 1295 | GROUP BY 1; 1296 | END IF; 1297 | END $$; 1298 | 1299 | SELECT * FROM test_faceting; 1300 | DROP TABLE test_faceting; 1301 | 1302 | 1303 | -- https://github.com/postgrespro/rum 1304 | CREATE EXTENSION IF NOT EXISTS rum; 1305 | 1306 | BEGIN; 1307 | 1308 | CREATE TABLE test_rum(t text, a tsvector); 1309 | 1310 | CREATE TRIGGER tsvectorupdate 1311 | BEFORE UPDATE OR INSERT ON test_rum 1312 | FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger('a', 'pg_catalog.english', 't'); 1313 | 1314 | INSERT INTO test_rum(t) VALUES ('The situation is most beautiful'); 1315 | INSERT INTO test_rum(t) VALUES ('It is a beautiful'); 1316 | INSERT INTO test_rum(t) VALUES ('It looks like a beautiful place'); 1317 | 1318 | CREATE INDEX rumidx ON test_rum USING rum (a rum_tsvector_ops); 1319 | 1320 | SELECT t, a <=> to_tsquery('english', 'beautiful | place') AS rank 1321 | FROM test_rum 1322 | WHERE a @@ to_tsquery('english', 'beautiful | place') 1323 | ORDER BY a <=> to_tsquery('english', 'beautiful | place'); 1324 | 1325 | ROLLBACK; 1326 | 1327 | 1328 | -- https://github.com/theory/pg-semver 1329 | CREATE EXTENSION IF NOT EXISTS semver; 1330 | 1331 | SELECT v::semver 1332 | FROM unnest(ARRAY[ 1333 | '1.2.2', 1334 | '0.2.2', 1335 | '0.0.0', 1336 | '0.1.999', 1337 | '9999.9999999.823823', 1338 | '1.0.0-beta1', 1339 | '1.0.0-beta2', 1340 | '1.0.0', 1341 | '1.0.0-1', 1342 | '1.0.0-alpha+d34dm34t', 1343 | '1.0.0+d34dm34t', 1344 | '20110204.0.0', 1345 | '1.0.0-alpha.0a', 1346 | '1.0.0+010', 1347 | '1.0.0+alpha.010', 1348 | '1.0.0-0AEF' 1349 | ]) AS v; 1350 | 1351 | 1352 | -- https://github.com/pgaudit/set_user 1353 | CREATE EXTENSION IF NOT EXISTS set_user; 1354 | 1355 | SELECT set_user('pg_monitor'); 1356 | SELECT CURRENT_USER, SESSION_USER; 1357 | 1358 | SELECT reset_user(); 1359 | SELECT CURRENT_USER, SESSION_USER; 1360 | 1361 | 1362 | -- https://github.com/pgspider/sqlite_fdw 1363 | CREATE EXTENSION IF NOT EXISTS sqlite_fdw; 1364 | 1365 | BEGIN; 1366 | 1367 | CREATE SERVER sqlite_server 1368 | FOREIGN DATA WRAPPER sqlite_fdw 1369 | OPTIONS (database '/tmp/test.db'); 1370 | 1371 | CREATE FOREIGN TABLE sqlite_table( 1372 | id integer OPTIONS (key 'true'), 1373 | title text OPTIONS(column_name 'nm_title'), 1374 | modified timestamp OPTIONS (column_type 'INT') 1375 | ) SERVER sqlite_server OPTIONS (table 't1_sqlite'); 1376 | 1377 | ROLLBACK; 1378 | 1379 | 1380 | -- https://github.com/credativ/table_log 1381 | CREATE EXTENSION IF NOT EXISTS table_log; 1382 | 1383 | BEGIN; 1384 | 1385 | CREATE TABLE drop_test ( 1386 | id integer PRIMARY KEY GENERATED 
ALWAYS AS IDENTITY, 1387 | col1 text NOT NULL DEFAULT '', 1388 | col2 text NOT NULL DEFAULT '', 1389 | col3 text NOT NULL DEFAULT '' 1390 | ); 1391 | 1392 | SELECT table_log_init(5, 'public', 'drop_test', 'public', 'drop_test_log'); 1393 | 1394 | INSERT INTO drop_test (col1, col2, col3) VALUES ('a1', 'b1', 'c1'); 1395 | SELECT * FROM drop_test; 1396 | SELECT * FROM drop_test_log; 1397 | 1398 | ALTER TABLE drop_test DROP COLUMN col2; 1399 | ALTER TABLE drop_test_log DROP COLUMN col2; 1400 | 1401 | INSERT INTO drop_test (col1, col3) VALUES ('a2', 'c2'); 1402 | SELECT * FROM drop_test; 1403 | SELECT * FROM drop_test_log; 1404 | 1405 | ROLLBACK; 1406 | 1407 | 1408 | -- https://github.com/tvondra/tdigest 1409 | CREATE EXTENSION IF NOT EXISTS tdigest; 1410 | 1411 | BEGIN; 1412 | 1413 | CREATE TEMP TABLE t (a int, b int, c double precision); -- table with some random source data 1414 | 1415 | INSERT INTO t 1416 | SELECT 10 * random(), 10 * random(), random() 1417 | FROM generate_series(1, 100000); 1418 | 1419 | CREATE TEMP TABLE p AS -- table with pre-aggregated digests into table "p" 1420 | SELECT a, b, tdigest(c, 100) AS d FROM t GROUP BY a, b; 1421 | 1422 | -- summarize the data from "p" (compute the 95-th percentile) 1423 | SELECT a, tdigest_percentile(d, 0.95) FROM p GROUP BY a ORDER BY a; 1424 | 1425 | ROLLBACK; 1426 | 1427 | 1428 | -- https://github.com/tds-fdw/tds_fdw 1429 | CREATE EXTENSION IF NOT EXISTS tds_fdw; 1430 | 1431 | BEGIN; 1432 | 1433 | CREATE SERVER mssql_svr 1434 | FOREIGN DATA WRAPPER tds_fdw 1435 | OPTIONS (servername '127.0.0.1', port '1433', database 'tds_fdw_test', tds_version '7.1'); 1436 | 1437 | CREATE FOREIGN TABLE mssql_table ( 1438 | id integer, 1439 | title text OPTIONS (column_name 'nm_title') 1440 | ) SERVER mssql_svr OPTIONS (schema_name 'dbo', table_name 'mytable', row_estimate_method 'showplan_all'); 1441 | 1442 | ROLLBACK; 1443 | 1444 | 1445 | -- https://github.com/arkhipov/temporal_tables 1446 | CREATE EXTENSION IF NOT EXISTS temporal_tables; 1447 | 1448 | CREATE TABLE employees 1449 | ( 1450 | name text NOT NULL PRIMARY KEY, 1451 | department text, 1452 | salary numeric(20, 2), 1453 | sys_period tstzrange NOT NULL 1454 | ); 1455 | CREATE TABLE employees_history (LIKE employees); 1456 | 1457 | CREATE TRIGGER versioning_trigger 1458 | BEFORE INSERT OR UPDATE OR DELETE ON employees 1459 | FOR EACH ROW EXECUTE PROCEDURE versioning('sys_period', 'employees_history', true); 1460 | 1461 | INSERT INTO employees (name, department, salary) 1462 | VALUES 1463 | ('Bernard Marx', 'Hatchery and Conditioning Centre', 10000), 1464 | ('Lenina Crowne', 'Hatchery and Conditioning Centre', 7000), 1465 | ('Helmholtz Watson', 'College of Emotional Engineering', 18500); 1466 | 1467 | SELECT pg_sleep(0.1); 1468 | 1469 | UPDATE employees SET salary = 11200 WHERE name = 'Bernard Marx'; 1470 | DELETE FROM employees WHERE name = 'Helmholtz Watson'; 1471 | 1472 | SELECT * FROM employees; 1473 | SELECT * FROM employees_history; 1474 | 1475 | DROP TABLE employees; 1476 | DROP TABLE employees_history; 1477 | 1478 | 1479 | -- https://github.com/timescale/timescaledb 1480 | CREATE EXTENSION IF NOT EXISTS timescaledb; 1481 | 1482 | BEGIN; 1483 | 1484 | CREATE TABLE conditions ( 1485 | time timestamptz NOT NULL, 1486 | location text NOT NULL, 1487 | temperature double precision, 1488 | humidity double precision 1489 | ); 1490 | 1491 | SELECT create_hypertable('conditions', 'time'); 1492 | 1493 | INSERT INTO conditions(time, location, temperature, humidity) 1494 | VALUES (NOW(), 'office', 
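-- temperature and humidity for a single sample row; time_bucket() below then
-- aggregates readings into 15-minute buckets over the last 3 hours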
70.0, 50.0); 1495 | 1496 | SELECT 1497 | time_bucket('15 minutes', time) AS fifteen_min, 1498 | location, COUNT(*), 1499 | MAX(temperature) AS max_temp, 1500 | MAX(humidity) AS max_hum 1501 | FROM conditions 1502 | WHERE time > NOW() - interval '3 hours' 1503 | GROUP BY fifteen_min, location 1504 | ORDER BY fifteen_min DESC, max_temp DESC; 1505 | 1506 | ROLLBACK; 1507 | 1508 | 1509 | -- https://github.com/credativ/toastinfo 1510 | CREATE EXTENSION IF NOT EXISTS toastinfo; 1511 | 1512 | BEGIN; 1513 | 1514 | CREATE TABLE t ( 1515 | a text, 1516 | b text 1517 | ); 1518 | 1519 | INSERT INTO t VALUES ('null', NULL); 1520 | INSERT INTO t VALUES ('default', 'default'); 1521 | 1522 | ALTER TABLE t ALTER COLUMN b SET STORAGE EXTERNAL; 1523 | INSERT INTO t VALUES ('external-10', 'external'); -- short inline varlena 1524 | INSERT INTO t VALUES ('external-200', repeat('x', 200)); -- long inline varlena, uncompressed 1525 | INSERT INTO t VALUES ('external-10000', repeat('x', 10000)); -- toasted varlena, uncompressed 1526 | INSERT INTO t VALUES ('external-1000000', repeat('x', 1000000)); -- toasted varlena, uncompressed 1527 | 1528 | ALTER TABLE t ALTER COLUMN b SET STORAGE EXTENDED; 1529 | INSERT INTO t VALUES ('extended-10', 'extended'); -- short inline varlena 1530 | INSERT INTO t VALUES ('extended-200', repeat('x', 200)); -- long inline varlena, uncompressed 1531 | INSERT INTO t VALUES ('extended-10000', repeat('x', 10000)); -- long inline varlena, compressed (pglz) 1532 | INSERT INTO t VALUES ('extended-1000000', repeat('x', 1000000)); -- toasted varlena, compressed (pglz) 1533 | 1534 | -- -- Postgres 14+ only 1535 | -- ALTER TABLE t ALTER COLUMN b SET COMPRESSION lz4; 1536 | -- INSERT INTO t VALUES ('extended-10000', repeat('x', 10000)); -- long inline varlena, compressed (lz4) 1537 | -- INSERT INTO t VALUES ('extended-1000000', repeat('x', 1000000)); -- toasted varlena, compressed (lz4) 1538 | 1539 | SELECT a, length(b), pg_column_size(b), pg_toastinfo(b), pg_toastpointer(b) FROM t; 1540 | 1541 | ROLLBACK; 1542 | 1543 | 1544 | -- https://github.com/petere/pguint 1545 | CREATE EXTENSION IF NOT EXISTS uint; 1546 | 1547 | BEGIN; 1548 | 1549 | CREATE TEMP TABLE uint_test ( 1550 | i1 int1, -- signed 8-bit integer 1551 | u1 uint1, -- unsigned 8-bit integer 1552 | u2 uint2, -- unsigned 16-bit integer 1553 | u4 uint4, -- unsigned 32-bit integer 1554 | u8 uint8 -- unsigned 64-bit integer 1555 | ); 1556 | INSERT INTO uint_test VALUES (-128, 0, 0, 0, 0), (127, 255, 65535, 4294967295, 18446744073709551615); 1557 | 1558 | ROLLBACK; 1559 | 1560 | 1561 | -- https://github.com/df7cb/postgresql-unit 1562 | CREATE EXTENSION IF NOT EXISTS unit; 1563 | SELECT '9.81 N'::unit / 'kg' AS gravity; 1564 | 1565 | 1566 | -- https://github.com/pgvector/pgvector 1567 | CREATE EXTENSION IF NOT EXISTS vector; 1568 | 1569 | BEGIN; 1570 | 1571 | CREATE TEMP TABLE items (id bigserial PRIMARY KEY, embedding vector(3)); 1572 | INSERT INTO items (embedding) VALUES ('[1,2,3]'), ('[4,5,6]'); 1573 | SELECT * FROM items ORDER BY embedding <-> '[3,1,2]' LIMIT 5; 1574 | 1575 | ROLLBACK; 1576 | 1577 | 1578 | -- https://github.com/eulerto/wal2json 1579 | CREATE TABLE table2_with_pk (a SERIAL, b VARCHAR(30), c TIMESTAMP NOT NULL, PRIMARY KEY(a, c)); 1580 | CREATE TABLE table2_without_pk (a SERIAL, b NUMERIC(5,2), c TEXT); 1581 | 1582 | SELECT 'init' FROM pg_create_logical_replication_slot('test_slot', 'wal2json'); 1583 | 1584 | BEGIN; 1585 | INSERT INTO table2_with_pk (b, c) VALUES('Backup and Restore', now()); 1586 | INSERT INTO 
table2_with_pk (b, c) VALUES('Tuning', now()); 1587 | INSERT INTO table2_with_pk (b, c) VALUES('Replication', now()); 1588 | SELECT pg_logical_emit_message(true, 'wal2json', 'this message will be delivered'); 1589 | SELECT pg_logical_emit_message(true, 'pgoutput', 'this message will be filtered'); 1590 | DELETE FROM table2_with_pk WHERE a < 3; 1591 | SELECT pg_logical_emit_message(false, 'wal2json', 'this non-transactional message will be delivered even if you rollback the transaction'); 1592 | 1593 | INSERT INTO table2_without_pk (b, c) VALUES(2.34, 'Tapir'); 1594 | -- it is not added to stream because there isn't a pk or a replica identity 1595 | UPDATE table2_without_pk SET c = 'Anta' WHERE c = 'Tapir'; 1596 | COMMIT; 1597 | 1598 | SELECT data FROM pg_logical_slot_get_changes('test_slot', NULL, NULL, 'pretty-print', '1', 'add-msg-prefixes', 'wal2json'); 1599 | SELECT 'stop' FROM pg_drop_replication_slot('test_slot'); 1600 | 1601 | DROP TABLE table2_with_pk; 1602 | DROP TABLE table2_without_pk; 1603 | 1604 | 1605 | -- https://github.com/hatarist/pg_xxhash 1606 | CREATE EXTENSION IF NOT EXISTS xxhash; 1607 | 1608 | SELECT 1609 | url, 1610 | xxh32(url), 1611 | xxh64(url), 1612 | xxh3_64(url), 1613 | xxh128(url), 1614 | xxh32b(url), 1615 | xxh64b(url), 1616 | xxh3_64b(url), 1617 | xxh128b(url) 1618 | FROM (SELECT 'https://example.com' AS url) x; 1619 | 1620 | 1621 | SELECT * FROM pg_available_extensions ORDER BY name; 1622 | 1623 | 1624 | \c postgres 1625 | 1626 | SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = 'test' AND pid <> pg_backend_pid(); 1627 | 1628 | DROP DATABASE test; 1629 | -------------------------------------------------------------------------------- /tests/wait-for-postgres.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Adapted from https://docs.docker.com/compose/startup-order/ 3 | 4 | set -eu 5 | 6 | uri="$2" 7 | cmd="$@" 8 | 9 | >&2 echo "Sleeping 20 seconds to skip initial server restarts" 10 | sleep 20 11 | 12 | until psql "$uri" -c '\q'; do 13 | >&2 echo "Postgres is unavailable - sleeping" 14 | sleep 1 15 | done 16 | 17 | >&2 echo "Postgres is up - executing command" 18 | exec $cmd 19 | --------------------------------------------------------------------------------