├── .cargo └── config ├── .dockerignore ├── .github ├── CODEOWNERS ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── README.md │ ├── ci.yaml │ ├── docker.yaml │ ├── lint.yaml │ └── release.yaml ├── .gitignore ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── Cargo.lock ├── Cargo.toml ├── DEVELOPMENT.md ├── INSTALL.md ├── LICENSE ├── Makefile ├── NOTICE ├── README.md ├── RELEASING.md ├── alpine.Dockerfile ├── build.rs ├── create-upgrade-symlinks.sh ├── dev.Dockerfile ├── devenv.sh ├── dist ├── deb.dockerfile ├── deb.test.dockerfile ├── debian │ ├── copyright │ └── lintian-ignore ├── rpm.dockerfile ├── rpm.test.dockerfile └── tester │ └── entrypoint ├── docs ├── sql-api.md └── sql-tracing-tables.md ├── e2e ├── Cargo.toml ├── README.md ├── scripts │ ├── after-create.sql │ ├── load-data.sql │ ├── post-restore.sql │ ├── post-snapshot.sql │ ├── pre-dump.sql │ ├── pre-restore.sql │ └── snapshot.sql └── tests │ ├── config │ └── mod.rs │ ├── dump-restore-test.rs │ ├── incremental-freeze-test.rs │ ├── ps_trace_delete_all_traces.rs │ ├── test.rs │ ├── upgrade-test.rs │ └── util │ └── mod.rs ├── extract-extension-version.sh ├── gendoc ├── Cargo.toml └── src │ └── main.rs ├── ha.Dockerfile ├── install-cargo-pgx.sh ├── migration ├── README.md ├── bootstrap │ ├── 000-migration-table.sql │ ├── 001-create-ext-schema.sql │ └── 002-stop-bgw.sql ├── idempotent │ ├── 001-base.sql │ ├── 002-tag-operators.sql │ ├── 003-matcher-functions.sql │ ├── 004-ha.sql │ ├── 005-metric-metadata.sql │ ├── 006-exemplar.sql │ ├── 007-tracing-tags.sql │ ├── 008-tracing-functions.sql │ ├── 009-tracing-views.sql │ ├── 010-telemetry.sql │ ├── 011-maintenance.sql │ ├── 012-trace-id-functions.sql │ ├── 013-tracing-maintenance.sql │ ├── 014-extension-type-functions.sql │ ├── 015-extension-function-permissions.sql │ ├── 016-vacuum-engine.sql │ └── 999-last.sql └── incremental │ ├── 001-extension.sql │ ├── 002-utils.sql │ ├── 003-users.sql │ ├── 004-schemas.sql │ ├── 005-tag-operators.sql │ ├── 006-tables.sql │ ├── 007-matcher-operators.sql │ ├── 008-install-uda.sql │ ├── 009-tables-ha.sql │ ├── 010-tables-metadata.sql │ ├── 011-tables-exemplar.sql │ ├── 012-tracing.sql │ ├── 013-tracing-well-known-tags.sql │ ├── 014-telemetry.sql │ ├── 015-tracing-redesign.sql │ ├── 016-remove-ee-schemas.sql │ ├── 017-set-search-path.sql │ ├── 018-grant-prom-roles.sql │ ├── 019-prom-installation-info.sql │ ├── 020-series-partitions.sql │ ├── 021-initial-default.sql │ ├── 022-jit-off.sql │ ├── 023-privileges.sql │ ├── 024-adjust_autovacuum.sql │ ├── 025-tag-map-storage-type.sql │ ├── 026-remove-name-param.sql │ ├── 027-tag-map-storage-views.sql │ ├── 028-refactor-mark-unused-series.sql │ ├── 029-remove-unused-ingest-func.sql │ ├── 030-remove-get-confirmed-unused-series.sql │ ├── 031-remove-superfluous-tag_v-ops.sql │ ├── 032-remove-non-empty-span-name-constraint.sql │ ├── 033-metric-view.sql │ ├── 034-maintenance-job-stats.sql │ └── 035-remove-func.sql ├── misc ├── requirements.txt └── tracegen.py ├── pgtap-parse ├── Cargo.toml └── src │ └── lib.rs ├── quick.Dockerfile ├── sql-tests ├── Cargo.toml ├── README.md ├── build.rs ├── testdata │ ├── create_ingest_temp_table.sql │ ├── defaults.sql │ ├── drop_metric.sql │ ├── get_label_key_column_name_for_view.sql │ ├── info_view.sql │ ├── large_tracing_tags_support.sql │ ├── maintenance_jobs_separation.sql │ ├── metric-chunk-interval.sql │ ├── metric-retention.sql │ ├── metric_compression.sql │ ├── ps_trace.delete_all_traces.sql │ ├── scripts │ │ └── pgtap-1.2.0.sql │ ├── support.sql │ ├── tag_map.sql │ ├── 
trace_compression.sql │ ├── vacuum_engine.sql │ └── views.sql └── tests │ ├── snapshots │ ├── tests__testdata__defaults.sql.snap │ ├── tests__testdata__info_view.sql.snap │ ├── tests__testdata__large_tracing_tags_support.sql.snap │ ├── tests__testdata__support.sql.snap │ ├── tests__testdata__trace_compression.sql.snap │ └── tests__testdata__views.sql.snap │ └── tests.rs ├── sql ├── promscale--0.0.0--0.5.0.sql ├── promscale--0.0.0--0.5.1.sql ├── promscale--0.0.0--0.5.2.sql ├── promscale--0.0.0--0.5.4.sql ├── promscale--0.0.0--0.6.0.sql ├── promscale--0.0.0--0.7.0.sql ├── promscale--0.0.0--0.8.0.sql ├── promscale--0.1--0.1.1.sql ├── promscale--0.1.1--0.1.2.sql ├── promscale--0.1.1.sql ├── promscale--0.1.2--0.1.3-beta.sql ├── promscale--0.1.2.sql ├── promscale--0.1.3-beta--0.2.0.sql ├── promscale--0.1.3-beta.sql ├── promscale--0.1.sql ├── promscale--0.2.0--0.3.0.sql ├── promscale--0.2.0.sql ├── promscale--0.3.0--0.3.1.sql ├── promscale--0.3.0.sql ├── promscale--0.3.1--0.3.2.sql ├── promscale--0.3.1.sql ├── promscale--0.3.2.sql ├── promscale--0.5.0--0.5.1.sql ├── promscale--0.5.0--0.5.2.sql ├── promscale--0.5.0--0.5.4.sql ├── promscale--0.5.0--0.6.0.sql ├── promscale--0.5.0--0.7.0.sql ├── promscale--0.5.0--0.8.0.sql ├── promscale--0.5.0.sql ├── promscale--0.5.1--0.5.2.sql ├── promscale--0.5.1--0.5.4.sql ├── promscale--0.5.1--0.6.0.sql ├── promscale--0.5.1--0.7.0.sql ├── promscale--0.5.1--0.8.0.sql ├── promscale--0.5.1.sql ├── promscale--0.5.2--0.5.4.sql ├── promscale--0.5.2--0.6.0.sql ├── promscale--0.5.2--0.7.0.sql ├── promscale--0.5.2--0.8.0.sql ├── promscale--0.5.2.sql ├── promscale--0.5.3--0.5.4.sql ├── promscale--0.5.3--0.6.0.sql ├── promscale--0.5.3--0.7.0.sql ├── promscale--0.5.3--0.8.0.sql ├── promscale--0.5.4--0.6.0.sql ├── promscale--0.5.4--0.7.0.sql ├── promscale--0.5.4--0.8.0.sql ├── promscale--0.5.4.sql ├── promscale--0.6.0--0.7.0.sql ├── promscale--0.6.0--0.8.0.sql ├── promscale--0.6.0.sql ├── promscale--0.7.0--0.8.0.sql ├── promscale--0.7.0.sql └── promscale--0.8.0.sql ├── src ├── aggregate_utils.rs ├── aggregates │ ├── gapfill_delta.rs │ ├── mod.rs │ ├── prom_delta.rs │ ├── prom_increase.rs │ ├── prom_rate.rs │ └── vector_selector.rs ├── iterable_jsonb.rs ├── jsonb_digest.rs ├── lib.rs ├── palloc.rs ├── pg_imports.rs ├── raw.rs ├── regex.rs ├── schema.rs ├── support.rs ├── type_builder.rs └── util.rs ├── templates ├── idempotent-wrapper.sql ├── incremental-wrapper.sql ├── promscale--0.0.0.sql └── promscale.control ├── test-common ├── Cargo.toml └── src │ ├── lib.rs │ ├── local_postgres_instance.rs │ ├── postgres_container │ ├── blueprint.rs │ └── mod.rs │ ├── postgres_test_connection.rs │ └── test_container_instance.rs ├── tools ├── changelog ├── package └── smoke-test └── update-version.sh /.cargo/config: -------------------------------------------------------------------------------- 1 | [build] 2 | # Postgres symbols won't be available until runtime 3 | rustflags = ["-C", "link-args=-Wl,-undefined,dynamic_lookup"] -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .git/ 2 | .github/ 3 | dist/*.tar 4 | target/ 5 | */target/ 6 | *.md 7 | !CHANGELOG.md 8 | LICENSE 9 | NOTICE 10 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @timescale/o11y-data-platform 2 | 
-------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 3 | 4 | 5 | ## Merge requirements 6 | 7 | Please take into account the following non-code changes that you may need to make with your PR: 8 | 9 | - [ ] CHANGELOG entry for user-facing changes 10 | - [ ] Updated the relevant documentation -------------------------------------------------------------------------------- /.github/workflows/README.md: -------------------------------------------------------------------------------- 1 | # Continuous integration setup 2 | 3 | ### File: `lint.yaml` 4 | A workflow that runs `cargo fmt` and `clippy` on the Rust codebase and `pgspot` on the SQL 5 | migrations. Each linter runs as a separate job with a corresponding name. 6 | 7 | ### File: `ci.yaml` 8 | Contains just one job, `test`, which executes the Rust/PGX unit tests on a matrix of 9 | supported PostgreSQL versions. 10 | 11 | ### File: `docker.yaml` 12 | 13 | Quite a lot is going on in this one. The `docker` job goes first. It builds docker 14 | images for the `(pgversion x tsversion x base)` matrix. The base is either `ha.Dockerfile`, 15 | which builds on top of `timescale/timescaledb-ha`, or `alpine.Dockerfile`, which 16 | is maintained for legacy reasons and because it's quicker to build in local 17 | development. The resulting images are pushed to `ghcr.io/timescale/dev_promscale_extension`. 18 | Then each image is validated by running the [`e2e` suite](../e2e/README.md) of this repository. 19 | The last step of the `docker` job validates that there are no undocumented API changes. 20 | 21 | 22 | The `pick-connector-branch` job picks the branch name if there is a branch with the 23 | same name in `timescale/promscale` (if not, it defaults to `master`). 24 | The `call-connector-e2e` job embeds and executes the `go-e2e.yml` workflow from the 25 | `timescale/promscale` repo. That workflow checks out the `promscale` repo on the picked branch, 26 | then executes the `promscale` `e2e` suite against the HA image built by the `docker` job. 27 | 28 | Finally, the `docker-result` job aggregates the results of all other jobs and is used to 29 | signal the overall status of this workflow to GitHub checks. 30 | 31 | 32 | ### File: `release.yaml` 33 | 34 | This workflow builds packages for a pretty hairy matrix of `(arch x os x postgres)`. 35 | At the moment only the x86_64 arch is supported. The OS, in the end, is merely a synonym 36 | for a Linux distribution and the only actual distinction is made between `.deb` and `.rpm`. 37 | 38 | Only pushes to main and new tags trigger this workflow. 39 | 40 | The workflow contains two jobs: 41 | - `package` builds packages using `dist/*.dockerfile` and extracts them from within a builder container. Then it tests the resulting packages using `dist/*.test.dockerfile` and `tools/smoke-test`. Finally, the tested artifacts are uploaded to GitHub and PackageCloud. 42 | - `release` collects the artifacts and release notes and creates a GitHub release. 
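The `e2e` suite used by the `docker` job can also be run locally against any of the pushed images by setting `TS_DOCKER_IMAGE` (this is the same invocation documented in [`e2e/README.md`](../e2e/README.md)); the image tag below is only an example:

```
TS_DOCKER_IMAGE=ghcr.io/timescale/dev_promscale_extension:master-ts2-pg13 cargo test -p e2e
```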
-------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: ci 2 | on: 3 | pull_request: 4 | 5 | jobs: 6 | test: 7 | runs-on: ubuntu-latest 8 | strategy: 9 | matrix: 10 | postgres: 11 | - version: "15" 12 | - version: "14" 13 | - version: "13" 14 | - version: "12" 15 | env: 16 | RUSTC_WRAPPER: sccache 17 | SCCACHE_BUCKET: promscale-extension-sccache 18 | AWS_ACCESS_KEY_ID: ${{ secrets.PROMSCALE_EXTENSION_SCCACHE_AWS_ACCESS_KEY_ID }} 19 | AWS_SECRET_ACCESS_KEY: ${{ secrets.PROMSCALE_EXTENSION_SCCACHE_AWS_SECRET_ACCESS_KEY }} 20 | steps: 21 | - uses: actions/checkout@v3 22 | - name: Install rust 23 | uses: dtolnay/rust-toolchain@1.64.0 24 | 25 | - name: Setup sccache 26 | run: | 27 | curl -L "https://github.com/mozilla/sccache/releases/download/v0.2.15/sccache-v0.2.15-x86_64-unknown-linux-musl.tar.gz" | tar zxf - 28 | chmod +x sccache-*/sccache 29 | sudo mv sccache-*/sccache /usr/local/bin/sccache 30 | sccache --show-stats 31 | 32 | - uses: Swatinem/rust-cache@v2 33 | with: 34 | key: ${{ matrix.postgres.version }} 35 | 36 | - name: Install cargo-pgx 37 | run: | 38 | ./install-cargo-pgx.sh 39 | 40 | - name: Cache pgx 41 | id: cache-pgx 42 | uses: actions/cache@v3 43 | with: 44 | path: ~/.pgx 45 | key: dot-pgx-${{ matrix.postgres.version }}-cargo-${{ hashFiles('**/Cargo.*') }} 46 | 47 | - name: Initialize pgx 48 | if: ${{ steps.cache-pgx.outputs.cache-hit != 'true' }} 49 | run: cargo pgx init --pg${{ matrix.postgres.version }} download 50 | 51 | - name: Run cargo test 52 | run: cargo pgx test pg${{ matrix.postgres.version }} 53 | 54 | # Note: pgx puts the postgres test configuration data in target/pgx-test-data- 55 | # Swatinem/rust-cache "cleans" and then caches this directory. This 56 | # "cleaning" breaks pgx when the cache is restored. By removing the 57 | # directory, we prevent it from being incorrectly cached. 
58 | - name: Remove pgx-test-data directory 59 | run: | 60 | rm -rf target/pgx-test-data* 61 | 62 | - name: Show sccache stats 63 | run: sccache --show-stats 64 | -------------------------------------------------------------------------------- /.github/workflows/lint.yaml: -------------------------------------------------------------------------------- 1 | name: lint 2 | on: 3 | pull_request: 4 | paths-ignore: 5 | 6 | jobs: 7 | fmt: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v3 11 | - name: Install rust 12 | uses: dtolnay/rust-toolchain@1.64.0 13 | with: 14 | components: rustfmt, clippy 15 | - run: cargo fmt --all -- --check 16 | 17 | clippy: 18 | runs-on: ubuntu-latest 19 | env: 20 | RUSTC_WRAPPER: sccache 21 | SCCACHE_BUCKET: promscale-extension-sccache 22 | AWS_ACCESS_KEY_ID: ${{ secrets.PROMSCALE_EXTENSION_SCCACHE_AWS_ACCESS_KEY_ID }} 23 | AWS_SECRET_ACCESS_KEY: ${{ secrets.PROMSCALE_EXTENSION_SCCACHE_AWS_SECRET_ACCESS_KEY }} 24 | steps: 25 | - uses: actions/checkout@v3 26 | - name: Install rust 27 | uses: dtolnay/rust-toolchain@1.64.0 28 | with: 29 | components: rustfmt, clippy 30 | 31 | - name: Setup sccache 32 | run: | 33 | curl -L "https://github.com/mozilla/sccache/releases/download/v0.2.15/sccache-v0.2.15-x86_64-unknown-linux-musl.tar.gz" | tar zxf - 34 | chmod +x sccache-*/sccache 35 | sudo mv sccache-*/sccache /usr/local/bin/sccache 36 | sccache --show-stats 37 | 38 | - uses: Swatinem/rust-cache@v2 39 | 40 | - name: Install cargo-pgx 41 | run: | 42 | ./install-cargo-pgx.sh 43 | 44 | - name: Initialize pgx 45 | run: cargo pgx init --pg15 download 46 | 47 | - run: cargo clippy --no-default-features --features pg15 -- -D warnings 48 | 49 | pgspot: 50 | runs-on: ubuntu-latest 51 | env: 52 | RUSTC_WRAPPER: sccache 53 | SCCACHE_BUCKET: promscale-extension-sccache 54 | AWS_ACCESS_KEY_ID: ${{ secrets.PROMSCALE_EXTENSION_SCCACHE_AWS_ACCESS_KEY_ID }} 55 | AWS_SECRET_ACCESS_KEY: ${{ secrets.PROMSCALE_EXTENSION_SCCACHE_AWS_SECRET_ACCESS_KEY }} 56 | steps: 57 | - name: Install rust 58 | uses: dtolnay/rust-toolchain@1.64.0 59 | with: 60 | components: rustfmt, clippy 61 | 62 | - name: Checkout extension code 63 | uses: actions/checkout@v3 64 | 65 | - name: Setup python 3.10 66 | uses: actions/setup-python@v4 67 | with: 68 | python-version: '3.10' 69 | 70 | - name: Install pgspot 71 | run: pip install pgspot==0.3.3 72 | 73 | - name: Setup sccache 74 | run: | 75 | curl -L "https://github.com/mozilla/sccache/releases/download/v0.2.15/sccache-v0.2.15-x86_64-unknown-linux-musl.tar.gz" | tar zxf - 76 | chmod +x sccache-*/sccache 77 | sudo mv sccache-*/sccache /usr/local/bin/sccache 78 | sccache --show-stats 79 | 80 | - uses: Swatinem/rust-cache@v2 81 | 82 | - name: Install cargo-pgx 83 | run: | 84 | ./install-cargo-pgx.sh 85 | 86 | - name: Initialize pgx 87 | run: cargo pgx init --pg15 download 88 | 89 | - name: Prepare control file 90 | run: make promscale.control 91 | 92 | - name: Generate schema 93 | run: cargo pgx schema pg15 --out /tmp/schema.sql 94 | 95 | - name: Run pgspot 96 | run: pgspot --sql-accepting=execute_everywhere --sql-accepting=distributed_exec --ignore PS005 /tmp/schema.sql ./sql/promscale--0.0.0.sql 97 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | dist/*.rpm 3 | dist/*.deb 4 | dist/*.tar 5 | target 6 | /hand-written-migration.sql 7 | /promscale.control 8 | /bootstrap.sql 9 | /sql 10 | venv 11 | 
-------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | The Timescale Code of Conduct can be found at . 2 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["test-common", "e2e", "sql-tests", "gendoc", "pgtap-parse"] 3 | 4 | [package] 5 | name = "promscale" 6 | version = "0.8.1-dev" 7 | edition = "2018" 8 | 9 | [lib] 10 | crate-type = ["cdylib"] 11 | 12 | [profile.release] 13 | panic = "unwind" # Ensures that we don't abort a Postgres process 14 | opt-level = 3 15 | lto = "fat" 16 | debug = true 17 | codegen-units = 1 18 | 19 | [profile.dev] 20 | panic = "unwind" 21 | # It's currently broken on Apple Silicon. Nightly seems to include a fix. 22 | # If your tests fail with SIGSEGV try and use 1.64 or nightly. 23 | # Finally, we can just comment it out because it no longer serves as 24 | # a workaround for another issue https://github.com/tcdi/pgx/pull/208 25 | # lto = "thin" 26 | 27 | [features] 28 | default = ["pg15", "serde_json", "proptest"] # used by rust-analyzer in VSCode 29 | pg12 = ["pgx/pg12", "pgx-tests/pg12"] 30 | pg13 = ["pgx/pg13", "pgx-tests/pg13"] 31 | pg14 = ["pgx/pg14", "pgx-tests/pg14"] 32 | pg15 = ["pgx/pg15", "pgx-tests/pg15"] 33 | pg_test = ["serde_json", "proptest"] 34 | 35 | [dependencies] 36 | bincode = "1.3.3" 37 | num_cpus = "1.13.1" 38 | pgx = "0.6.1" 39 | pgx-macros = "0.6.1" 40 | proptest = { version = "1.0.0", optional = true } 41 | regex = "1.5.6" 42 | sha2 = "0.10.6" 43 | serde = { version = "1.0", features = ["derive"] } 44 | serde_json = { version = "1.0.89", optional = true } 45 | uluru = "3.0.0" 46 | 47 | [build-dependencies] 48 | askama = "0.11.1" 49 | 50 | [dev-dependencies] 51 | pgx-tests = "0.6.1" 52 | 53 | # from e2e workspace 54 | test-generator = { git = "https://github.com/JamesGuthrie/test-generator" } 55 | test-common = { path = "./test-common" } 56 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Source code in this repository, and any binaries built from this source code, 2 | in whole or in part, are licensed under the Timescale License (the "License"). 3 | You may not use these files except in compliance with the License. 4 | 5 | You may obtain a copy of the License at 6 | 7 | https://github.com/timescale/timescaledb/blob/master/tsl/LICENSE-TIMESCALE 8 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Promscale Extension by Timescale (TM) 2 | 3 | Copyright (c) 2020-2021 Timescale, Inc. All Rights Reserved. 4 | 5 | Source code in this repository, and any binaries built from this source code, 6 | in whole or in part, are licensed under the Timescale License (the "License"). 7 | You may not use these files except in compliance with the License. 
8 | 9 | You may obtain a copy of the License at 10 | 11 | https://github.com/timescale/timescaledb/blob/master/tsl/LICENSE-TIMESCALE 12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | > **Warning** 2 | > 3 | > Promscale has been discontinued and is deprecated. 4 | > 5 | > The code in this repository is no longer maintained. 6 | > 7 | > [Learn more](https://github.com/timescale/promscale/issues/1836). 8 | 9 | # Promscale Extension 10 | 11 | From Promscale version 0.11.0, this [Postgres extension](https://www.postgresql.org/docs/12/extend-extensions.html) 12 | is an integral part of Promscale. It is required to be installed. 13 | Check the [release notes](https://github.com/timescale/promscale/releases/tag/0.11.0) 14 | for more details. 15 | 16 | The extension plays two important roles: 17 | 1. It manages the SQL data schema and [migrations](migration/README.md) that manipulate it. 18 | 2. It encompasses code that runs within a database instance, both PL/pgSQL and native. 19 | 20 | ## Motivation 21 | 22 | It's fairly common for backend applications to manage their database schema via a migration 23 | system. Altering a table and adding an index are typical operations that come to mind. 24 | As Promscale grew in scope and complexity we found ourselves defining custom data types, 25 | aggregates and background jobs. Having the extension manage both the migration logic and 26 | various extensions helps to deal with situations when one depends on the other. 27 | 28 | Yet, developer convenience is not the main reason this extension exists. It enables complex 29 | optimizations for both PromQL and SQL users. Let's have a look at two examples. 30 | 31 | Custom aggregates like `prom_rate`, `prom_delta` and a few others are implemented in Rust 32 | and enable Promscale to push corresponding PromQL down to native code that is executed 33 | within PostgreSQL. The alternatives are either transferring all the data to the Promscale 34 | application and doing aggregation there, or a PL/pgSQL stored procedure. Both are substantially slower. 35 | 36 | [Support functions](https://www.postgresql.org/docs/current/xfunc-optimization.html) that 37 | transparently rewrite some queries to reduce the amount of computation required or take 38 | advantage of indices and tables specific to Promscale. For instance, the following query: 39 | 40 | ```SQL 41 | SELECT trace_id 42 | FROM ps_trace.span 43 | WHERE 44 | span_tags -> 'pwlen' = '25'::jsonb 45 | AND resource_tags -> 'service.name' = '"generator"'; 46 | ``` 47 | 48 | will have an additional `InitPlan` stage that precomputes a set of matching tags, 49 | then uses a GIN index on a private `_ps_trace.span` table. While the naive version 50 | can only evaluate matching tags per row. 51 | 52 | ## Requirements 53 | 54 | To run the extension: 55 | 56 | - PostgreSQL version 12 or newer. 57 | 58 | To compile the extension (see instructions below): 59 | 60 | - Rust compiler 61 | - PGX framework 62 | 63 | ## Installation 64 | 65 | - [Precompiled OS Packages](./INSTALL.md#precompiled-os-packages) 66 | - [Docker images](./INSTALL.md#docker-images) 67 | - [Compile From Source](./INSTALL.md#compile-from-source) 68 | 69 | ## Development 70 | 71 | To quickly setup a development environment, see [DEVELOPMENT.md](DEVELOPMENT.md) 72 | To understand more about how to write SQL migration files for this extension, consult [this](migration/README.md) guide. 
73 | To get a better understanding of our CI pipeline see [this document](.github/workflows/README.md). 74 | 75 | ## Releasing 76 | 77 | A full checklist of the steps necessary to release a new version of the extension is available in [RELEASING.md](RELEASING.md). -------------------------------------------------------------------------------- /RELEASING.md: -------------------------------------------------------------------------------- 1 | # Releasing the Promscale extension 2 | 3 | The following are step-by-step instructions for releasing the Promscale extension. 4 | 5 | ## Create an issue to track the release 6 | 7 | Create a new issue titled "Release ``". Copy everything below this line into that issue, and use it to keep track of the release tasks. 8 | 9 | --- 10 | 11 | ## Pre-release 12 | - [ ] Create a git branch named `pre-release-x.x.x` 13 | - [ ] Ensure `upgradeable_from` in `templates/promscale.control` contains the previously released version. 14 | - [ ] Update `CHANGELOG.md` with release version and date, ensuring that all relevant changes are reflected. 15 | - [ ] Update the version in all places with `./update-version.sh ` 16 | - [ ] Freeze any new incremental sql scripts released in this version in `incremental_freeze_test` 17 | - [ ] Create a PR from the `pre-release-x.x.x` branch. Get it approved. Merge the PR to master. 18 | 19 | ## Release 20 | - [ ] Create a tag on the master branch `git tag ` 21 | - [ ] Push the tag `git push origin ` 22 | - [ ] CI will trigger and create a draft release with assets attached. 23 | - [ ] Prepare extension release notes, share with team asking for feedback. 24 | - [ ] Attach release notes to draft release created above. 25 | - [ ] Wait for CI to generate the packages files 26 | - [ ] Create a PR to update the HA image in [timescaledb-docker-ha](https://github.com/timescale/timescaledb-docker-ha). [EXAMPLE](https://github.com/timescale/timescaledb-docker-ha/pull/285/files) 27 | - [ ] In the timescaledb-docker-ha repo, update `TIMESCALE_PROMSCALE_EXTENSIONS` in the `Makefile` to include the version just released 28 | - [ ] Update the CHANGELOG entry in the timescaledb-docker-ha repo and wait for the CI to complete and request review from the Cloud team and merge it when approved. 29 | - [ ] Create a new PR in the timescaledb-docker-ha repo. Stamp the version in the CHANGELOG. Merge it with master and push the correct tag to trigger CI ([see instructions in the repo](https://github.com/timescale/timescaledb-docker-ha#release-process)) [EXAMPLE](https://github.com/timescale/timescaledb-docker-ha/pull/286/files) 30 | - [ ] Publish the GitHub release on the promscale_extension repo. 31 | 32 | 33 | ## Post-Release 34 | - [ ] Create a new git branch named `post-release-x.x.x` 35 | - [ ] Run `make post-release` which will generate the `sql/promscale--x.x.x.sql` file of the version just released and create all the upgrade path sql files. 36 | - [ ] Add and commit the newly created sql files to git. They are ignored by default. e.g. 
`git add sql/*--0.5.5.sql --force` 37 | - [ ] Determine the development version (determined by bumping the patch version and appending `-dev`) 38 | - [ ] Set the version in all places necessary with `./update-version.sh ` 39 | - [ ] Update `upgradeable_from` in templates/promscale.control to add the previously released version 40 | - [ ] Update `e2e/tests/config/mod.rs` to refer to the new docker images 41 | - [ ] Create a PR and get it merged 42 | - [ ] Bump the version in the promscale repo's `EXTENSION_VERSION` file to the version just released (Renovate should automatically create a PR for this). 43 | -------------------------------------------------------------------------------- /create-upgrade-symlinks.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | prev_versions=$(cat promscale.control | sed -n 's/# upgradeable_from = \(.*\)/\1/p' | sed "s/[[:space:]']//g" | tr ',' '\n') 4 | cur_version=$(./extract-extension-version.sh | tr -d '\n') 5 | 6 | for prev_version in $prev_versions; do 7 | if [ -n "${prev_version}" ] && [ -n "${cur_version}" ]; then 8 | ln -s -f "promscale--${cur_version}.sql" "sql/promscale--${prev_version}--${cur_version}.sql" 9 | fi 10 | done 11 | -------------------------------------------------------------------------------- /dev.Dockerfile: -------------------------------------------------------------------------------- 1 | # Note: in future we should use the timescaledb-ha image. Unfortunately it 2 | # doesn't have arm64 builds, so we're doing things from scratch. 3 | FROM ubuntu:22.04 4 | 5 | SHELL ["/bin/bash", "-eE", "-o", "pipefail", "-c"] 6 | 7 | RUN apt update && apt install -y sudo wget curl gnupg2 lsb-release 8 | 9 | # Setup a non-root user that we'll use 10 | RUN adduser --disabled-password --gecos "" ubuntu && \ 11 | usermod -aG sudo ubuntu && \ 12 | echo "ubuntu ALL=(ALL:ALL) NOPASSWD: ALL" > /etc/sudoers.d/ubuntu 13 | 14 | ENV DEBIAN_FRONTEND=noninteractive 15 | 16 | # Install timescaledb 17 | RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -c -s)-pgdg main" > /etc/apt/sources.list.d/pgdg.list 18 | RUN wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - 19 | RUN echo "deb [signed-by=/usr/share/keyrings/timescale.keyring] https://packagecloud.io/timescale/timescaledb/ubuntu/ $(lsb_release -c -s) main" > /etc/apt/sources.list.d/timescaledb.list 20 | RUN wget --quiet -O - https://packagecloud.io/timescale/timescaledb/gpgkey | gpg --dearmor -o /usr/share/keyrings/timescale.keyring 21 | RUN apt-get update && apt-get install -y timescaledb-2{,-loader}-postgresql-{12,13,14,15}='2.9.1*' 22 | RUN apt-get install -y build-essential clang libssl-dev pkg-config libreadline-dev zlib1g-dev postgresql-server-dev-{12,13,14,15} 23 | 24 | # These directories need to be writeable for pgx to install the extension into 25 | RUN chmod a+w /usr/share/postgresql/*/extension /usr/lib/postgresql/*/lib 26 | 27 | USER ubuntu 28 | 29 | # Install rust 30 | ENV RUST_VERSION=1.64.0 31 | RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --profile minimal --component rustfmt --default-toolchain ${RUST_VERSION} 32 | ENV PATH=/home/ubuntu/.cargo/bin:$PATH 33 | 34 | COPY install-cargo-pgx.sh /usr/local/bin 35 | RUN install-cargo-pgx.sh 36 | 37 | RUN cargo pgx init --pg15 /usr/lib/postgresql/15/bin/pg_config --pg14 /usr/lib/postgresql/14/bin/pg_config --pg13 /usr/lib/postgresql/13/bin/pg_config --pg12 
/usr/lib/postgresql/12/bin/pg_config 38 | 39 | RUN timescaledb-tune --profile=promscale --quiet --yes -conf-path ~/.pgx/data-12/postgresql.conf 40 | RUN timescaledb-tune --profile=promscale --quiet --yes -conf-path ~/.pgx/data-13/postgresql.conf 41 | RUN timescaledb-tune --profile=promscale --quiet --yes -conf-path ~/.pgx/data-14/postgresql.conf 42 | RUN timescaledb-tune --profile=promscale --quiet --yes -conf-path ~/.pgx/data-15/postgresql.conf 43 | 44 | # Make Postgres accessible from host 45 | RUN sed -i "s/#listen_addresses = 'localhost'/listen_addresses = '*'/" ~/.pgx/data-{12,13,14,15}/postgresql.conf 46 | RUN sed -i "s#127.0.0.1/32#0.0.0.0/0#" ~/.pgx/data-{12,13,14,15}/pg_hba.conf 47 | # Disable telemetry 48 | RUN echo "timescaledb.telemetry_level=off" | tee -a ~/.pgx/data-{12,13,14,15}/postgresql.conf 49 | 50 | RUN sudo apt-get install -y --fix-missing vim lld 51 | 52 | RUN mkdir -p ~/.cargo 53 | # Make cargo put compile artifacts in non-bind-mounted directory 54 | # To re-use compiled artifacts, mount a docker volume to /tmp/target 55 | # We have seen issues with the docker container running out of memory 56 | # Limiting cargo to 2 jobs ought to reduce/limit memory usage 57 | # 1 job led to very slow build times. 2 is hopefully a good balance 58 | RUN echo -e '[build]\ntarget-dir="/tmp/target"\njobs=2' > ~/.cargo/config.toml 59 | # Tell rustc to use a fast linker 60 | RUN echo 'rustflags=["-C", "link-arg=-fuse-ld=lld"]' >> ~/.cargo/config.toml 61 | 62 | # Sources should be bind-mounted to /code/ 63 | WORKDIR /code/ 64 | 65 | RUN sudo apt-get install -y entr 66 | COPY devenv.sh /usr/local/bin/ 67 | CMD ["devenv.sh"] 68 | -------------------------------------------------------------------------------- /devenv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | trap 'trap "" SIGINT SIGTERM; kill 0' SIGINT SIGTERM EXIT 4 | 5 | # Ensure that we have the correct postgres tools on path 6 | export PATH="/usr/lib/postgresql/${DEVENV_PG_VERSION}/bin:${PATH}" 7 | # Ensure that the correct postgres tools are available for `docker exec` 8 | echo "PATH=${PATH}" >> ~/.bashrc 9 | 10 | # Set sensible postgres env vars 11 | export PGPORT="288${DEVENV_PG_VERSION}" 12 | export PGHOST=localhost 13 | # Set sensible postgres env vars for `docker exec` 14 | echo "export PGPORT=${PGPORT}" >> ~/.bashrc 15 | echo "export PGHOST=${PGHOST}" >> ~/.bashrc 16 | 17 | wait_for_db() { 18 | echo "waiting for DB" 19 | 20 | for _ in $(seq 10) ; do 21 | if pg_isready -d postgres -U postgres 1>/dev/null 2>&1; then 22 | echo "DB up" 23 | return 0 24 | fi 25 | echo -n "." 26 | sleep 1 27 | done 28 | echo 29 | echo "FAIL waiting for DB" 30 | exit 1 31 | } 32 | 33 | cargo pgx start "pg${DEVENV_PG_VERSION}" 34 | wait_for_db 35 | for db in template1 postgres; do 36 | psql -h localhost -U "$(whoami)" -p "${PGPORT}" -d $db -c 'CREATE EXTENSION IF NOT EXISTS timescaledb'; 37 | done 38 | createdb -h localhost -p "${PGPORT}" "$(whoami)" 39 | 40 | if [ "$DEVENV_ENTR" -eq 1 ]; then 41 | echo "entr enabled. 
rebuilds will be triggered on source file changes" 42 | # This allows entr to work correctly on docker for mac 43 | export ENTR_INOTIFY_WORKAROUND=true 44 | 45 | # Note: this is not a comprehensive list of source files, if you think one is missing, add it 46 | SOURCE_FILES="src migration" 47 | find ${SOURCE_FILES} | entr make devenv-internal-build-install > "${HOME}/compile.log" & 48 | 49 | tail -f "${HOME}/.pgx/${DEVENV_PG_VERSION}.log" "${HOME}/compile.log" & 50 | else 51 | echo "entr disabled. you must trigger rebuilds manually with 'make dev-build'" 52 | make devenv-internal-build-install 53 | tail -f "${HOME}/.pgx/${DEVENV_PG_VERSION}.log" 54 | fi 55 | 56 | wait 57 | -------------------------------------------------------------------------------- /dist/deb.dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1.3-labs 2 | 3 | ## Build base system 4 | ARG DOCKER_DISTRO_NAME=debian 5 | ARG OS_NAME=debian 6 | ARG OS_VERSION=11 7 | ARG PG_VERSION 8 | FROM ${DOCKER_DISTRO_NAME}:${OS_VERSION} as base 9 | 10 | SHELL ["/bin/bash", "-eu", "-o", "pipefail", "-c"] 11 | 12 | ENV DEBIAN_FRONTEND=noninteractive 13 | 14 | # Setup base system 15 | RUN <> /etc/apt/sources.list.d/pgdg.list 66 | wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - 67 | apt-get update -y 68 | 69 | apt-get install -y \ 70 | postgresql-server-dev-${PG_VERSION} \ 71 | postgresql-${PG_VERSION} 72 | 73 | # User with which package builds will run 74 | useradd --uid 1000 -m -d /home/builder -s /bin/bash builder 75 | 76 | # Create directory in which output artifacts can be dropped 77 | mkdir -p /dist 78 | chmod a+rw /dist 79 | EOF 80 | 81 | ## Build extension 82 | FROM base-postgres AS builder 83 | ARG PG_VERSION 84 | ARG RUST_VERSION 85 | 86 | USER builder 87 | WORKDIR /home/builder 88 | ENV HOME=/home/builder \ 89 | PATH=/home/builder/.cargo/bin:${PATH} 90 | 91 | # Install Rust 92 | RUN < /etc/apt/sources.list.d/timescaledb.list" 20 | RUN wget --quiet -O - https://packagecloud.io/timescale/timescaledb/gpgkey | apt-key add - 21 | RUN apt-get update && apt-get install -y "timescaledb-2-postgresql-${PG_VERSION}" 22 | 23 | RUN sed -ri "s!^#?(listen_addresses)\s*=\s*\S+.*!\1 = '*'!" /usr/share/postgresql/${PG_VERSION}/postgresql.conf.sample 24 | RUN echo "shared_preload_libraries = 'timescaledb'" >> /usr/share/postgresql/${PG_VERSION}/postgresql.conf.sample 25 | 26 | # Install the Promscale extension 27 | COPY ${RELEASE_FILE_NAME} /var/lib/postgresql/ 28 | RUN dpkg -i "/var/lib/postgresql/$(basename ${RELEASE_FILE_NAME})" 29 | 30 | COPY --chown=postgres dist/tester/entrypoint /usr/local/bin/entrypoint 31 | 32 | USER postgres 33 | 34 | WORKDIR /var/lib/postgresql/${PG_VERSION}/ 35 | ENV PGDATA=/var/lib/postgresql/${PG_VERSION}/data \ 36 | PATH=/usr/lib/postgresql/${PG_VERSION}/bin:$PATH 37 | 38 | # Initialize Postgres data directory 39 | RUN <> /usr/pgsql-${PG_VERSION}/share/postgresql.conf.sample 77 | 78 | COPY --chown=postgres dist/tester/entrypoint /usr/local/bin/entrypoint 79 | 80 | USER postgres 81 | 82 | WORKDIR /var/lib/pgsql-${PG_VERSION} 83 | ENV PGDATA=/var/lib/pgsql/${PG_VERSION}/data \ 84 | PATH=/usr/pgsql-${PG_VERSION}/bin:$PATH 85 | 86 | # Initialize Postgres data directory 87 | RUN <> "$PGDATA/pg_hba.conf" 39 | } 40 | 41 | # If first argument is option-like, assume user wants to run the postgres server 42 | if [ "${1:0:1}" = '-' ]; then 43 | set -- postgres "$@" 44 | fi 45 | 46 | if [ "$1" = 'postgres' ] && ! 
_pg_want_help "$@"; then 47 | # If run as root, re-run script as postgres user 48 | if [ "$(id -u)" = '0' ]; then 49 | exec sudo -u postgres "$BASH_SOURCE" "$@" 50 | fi 51 | 52 | pg_setup_hba_conf "$@" 53 | fi 54 | 55 | exec "$@" 56 | -------------------------------------------------------------------------------- /docs/sql-tracing-tables.md: -------------------------------------------------------------------------------- 1 | # Tracing tables internals 2 | 3 | We expect end-users to interact with the views defined inside `ps_trace` schema. 4 | These views closely match [OpenTelemetry data model](https://opentelemetry.io/docs/concepts/signals/traces/) 5 | and are denormalized for convenience. This document aims to describe the normalized 6 | layout of tables, powering these views. 7 | (That being said, [view definitions](../migration/idempotent/009-tracing-views.sql) themselves are a good reference.) 8 | 9 | ## Tag maps 10 | 11 | Before we can look closer at the `span`, `event` and `link` tables, we need to 12 | introduce the `tag_map` type and the corresponding `tag` and `tag_key` tables. 13 | 14 | It would be unwise to store raw JSON tags with every entry in span or event tables, 15 | so we needed a compact, normalized representation. Take the following set of 16 | tags for example: `{"service.name": "foo", "telemetry.sdk.name": "opentelemetry"}`, 17 | just two standard tags yet they occupy 62 bytes. Enter `tag_key` and `tag` tables. 18 | 19 | For each tag key (e.g. `"service.name"` and `"telemetry.sdk.name"` from the 20 | example above) we add a tuple to the `tag_key` table. In our example, `"service.name"` 21 | is a pre-defined tag key with the `key_id=1`. Then, for each tag, a tuple is added 22 | to the `tag` table, referencing the corresponding `tag_key`. In this example the 23 | tuple would contain: `key_id=1, key='service.name', value='foo'` and it will be 24 | assigned an `id`, say `114`. Finally, we can replace our raw JSON tag 25 | `{"service.name": "foo"}` with the `{"1": 114}`. And this form is what we store 26 | in `span` and other tables. To denormalize and enable intuitive querying the views 27 | rely on `_ps_trace.tag_map_denormalize` which performs lookups and returns a raw 28 | JSON tag map. 29 | 30 | The `tag_map` type itself is merely a wrapper around built-in `jsonb`, but some 31 | operators have [`tag_map_rewrite` support function](../src/support.rs) attached 32 | to them. The support function has a detailed comment, but the gist of it is 33 | replacing triplets of `tag_map -> key OP value` in WHERE clauses with 34 | `tag_map @> (SELECT ...)` enabling the use of GIN indexes and precomputing 35 | a set of matching tags in an InitPlan. 36 | 37 | Additionally, we maintain `tag_type` bit mask in both `tag_key` and `tag` tables. 38 | It tracks in what context or contexts a given tag is used: `resource`, `span`, 39 | `event` or `link`. 40 | 41 | ## Span table 42 | 43 | The `span` table tracks the OpenTelemetry data model closely, and most fields 44 | match their counterparts in the public `ps_trace.span` view. The attributes of 45 | note are: 46 | - `resource_tags` is a tag map ([see above](#tag-maps)) 47 | - `span_tags` is another tag map 48 | - `instrumentation_lib_id` is a foreign key into `instrumentation_lib` table 49 | - similarily `operation_id` references the `operation` table. 
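Conceptually, denormalizing a span therefore boils down to a couple of joins plus a tag-map lookup. The query below is only a rough sketch for illustration; it is not the real view definition (that lives in [009-tracing-views.sql](../migration/idempotent/009-tracing-views.sql) and relies on `_ps_trace.tag_map_denormalize`), and it assumes a `tag_map` value can be cast to `jsonb`:

```SQL
-- Rough sketch only; the actual ps_trace.span view is defined elsewhere.
SELECT
    s.trace_id,
    s.span_id,
    o.span_name,
    il.name AS instrumentation_lib_name,
    tags.span_tags
FROM _ps_trace.span s
JOIN _ps_trace.operation o ON o.id = s.operation_id
LEFT JOIN _ps_trace.instrumentation_lib il ON il.id = s.instrumentation_lib_id
LEFT JOIN LATERAL (
    -- rebuild the raw JSON tag map from the normalized {"key_id": tag_id} form
    SELECT jsonb_object_agg(t.key, t.value) AS span_tags
    FROM jsonb_each(s.span_tags::jsonb) AS m(key_id, tag_id)
    JOIN _ps_trace.tag t ON t.id = (m.tag_id)::bigint
) tags ON true;
```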
50 | 51 | The reasoning behind the normalization of `instrumentation_lib` and `operation` 52 | is similar to that of tag maps: we expect the same values to reoccur often 53 | across multiple spans. 54 | 55 | ```mermaid 56 | classDiagram 57 | direction BT 58 | class instrumentation_lib { 59 | text name 60 | text version 61 | bigint schema_url_id 62 | bigint id 63 | } 64 | class operation { 65 | bigint service_name_id 66 | span_kind span_kind 67 | text span_name 68 | bigint id 69 | } 70 | class schema_url { 71 | text url 72 | bigint id 73 | } 74 | class span { 75 | bigint parent_span_id 76 | bigint operation_id 77 | timestamp with time zone end_time 78 | double precision duration_ms 79 | bigint instrumentation_lib_id 80 | bigint resource_schema_url_id 81 | tstzrange event_time 82 | integer dropped_tags_count 83 | integer dropped_events_count 84 | integer dropped_link_count 85 | integer resource_dropped_tags_count 86 | status_code status_code 87 | text trace_state 88 | tag_map span_tags 89 | text status_message 90 | tag_map resource_tags 91 | trace_id trace_id 92 | bigint span_id 93 | timestamp with time zone start_time 94 | } 95 | 96 | instrumentation_lib --> schema_url : schema_url_id 97 | span --> instrumentation_lib : instrumentation_lib_id 98 | span --> operation : operation_id 99 | ``` 100 | 101 | ## Event and Link tables 102 | 103 | The `event` table is joined with the `span` table via `span_id` 104 | and `trace_id` and carries an additional `tags` tag map. It contains 105 | all events for each span. 106 | 107 | The `link` table is very similar to the `event`. It also has an additional 108 | `tags` tag map attribute, but joins with the `span` table twice: 109 | - on `span_id` and `trace_id` 110 | - and on `linked_span_id`, `linked_trace_id`. 111 | -------------------------------------------------------------------------------- /e2e/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "e2e" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dev-dependencies] 7 | duct = "0.13.5" 8 | hex = "0.4.3" 9 | log = "0.4.17" 10 | md-5 = "0.10.1" 11 | postgres = "0.19.2" 12 | pretty_env_logger = "0.4" 13 | regex = "1.5.5" 14 | semver = "1.0.12" 15 | similar = "2.1.0" 16 | tempdir = "0.3.7" 17 | test-common = { path = "../test-common" } 18 | 19 | [build-dependencies] 20 | build-deps = "^0.1" 21 | -------------------------------------------------------------------------------- /e2e/README.md: -------------------------------------------------------------------------------- 1 | # End-to-end testing 2 | 3 | This directory contains end-to-end tests for the promscale extension. Tests should be written in Rust. 4 | As the tests run in a Docker container, the `docker` command must be present on the local system. 5 | 6 | ## Running tests 7 | 8 | Run the tests with `cargo test -p e2e`. The tests are run against a docker image. Set the value of 9 | the `TS_DOCKER_IMAGE` env var to override the default docker image, e.g.: 10 | 11 | ``` 12 | TS_DOCKER_IMAGE=ghcr.io/timescale/dev_promscale_extension:master-ts2-pg13 cargo test -p e2e 13 | ``` 14 | 15 | ## Rust tests 16 | 17 | Tests of arbitrary complexity can be written in Rust. There is no default setup or teardown, but 18 | tests can use Docker to start and stop containers. There is not much infrastructure or convention 19 | here yet. 
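As a starting point, a minimal test modelled on the existing ones in `e2e/tests` (see `test.rs`) looks roughly like this sketch:

```rust
use test_common::*;

#[test]
fn create_promscale_extension_works() {
    // Start a throwaway Postgres container from TS_DOCKER_IMAGE and connect to it.
    let _ = pretty_env_logger::try_init();
    let pg_blueprint = PostgresContainerBlueprint::new();
    let test_pg_instance = new_test_container_instance(&pg_blueprint);
    let mut conn = test_pg_instance.connect();

    // Drive the extension with plain SQL and assert on the result.
    let result = conn.simple_query("CREATE EXTENSION promscale;").unwrap();
    assert_eq!(result.len(), 1);
}
```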
-------------------------------------------------------------------------------- /e2e/scripts/after-create.sql: -------------------------------------------------------------------------------- 1 | \set ECHO all 2 | \set ON_ERROR_STOP 1 3 | 4 | -- create a user named tsdbadmin 5 | -- we will try to give tsdbadmin as few privileges as possible 6 | -- and use tsdbadmin to do as much of the dump/restore process 7 | -- as possible 8 | do $block$ 9 | declare 10 | _version int; 11 | begin 12 | select setting::int / 10000 13 | into strict _version 14 | from pg_settings where name = 'server_version_num'; 15 | 16 | -- trusted extensions were introduced in postgres v13 17 | -- prior to that you must be a superuser to create extension 18 | if _version < 13 then 19 | create user tsdbadmin superuser; 20 | else 21 | create user tsdbadmin; 22 | end if; 23 | end; 24 | $block$; 25 | alter database db owner to tsdbadmin; 26 | grant all on database db to tsdbadmin; 27 | 28 | create extension if not exists timescaledb with schema public; 29 | 30 | -- the following code emulates what is done for privileges on cloud instances 31 | -- https://github.com/timescale/timescaledb-docker-ha/blob/master/scripts/timescaledb/after-create.sql 32 | 33 | -- The pre_restore and post_restore function can only be successfully executed by a very highly privileged 34 | -- user. To ensure the database owner can also execute these functions, we have to alter them 35 | -- from SECURITY INVOKER to SECURITY DEFINER functions. Setting the search_path explicitly is good practice 36 | -- for SECURITY DEFINER functions. 37 | -- As this function does have high impact, we do not want anyone to be able to execute the function, 38 | -- but only the database owner. 39 | ALTER FUNCTION public.timescaledb_pre_restore() SET search_path = pg_catalog,pg_temp SECURITY DEFINER; 40 | ALTER FUNCTION public.timescaledb_post_restore() SET search_path = pg_catalog,pg_temp SECURITY DEFINER; 41 | REVOKE EXECUTE ON FUNCTION public.timescaledb_pre_restore() FROM public; 42 | REVOKE EXECUTE ON FUNCTION public.timescaledb_post_restore() FROM public; 43 | GRANT EXECUTE ON FUNCTION public.timescaledb_pre_restore() TO tsdbadmin; 44 | GRANT EXECUTE ON FUNCTION public.timescaledb_post_restore() TO tsdbadmin; 45 | 46 | -- To reduce the errors seen on pg_restore we grant access to timescaledb internal tables 47 | DO $$DECLARE r record; 48 | BEGIN 49 | FOR r IN SELECT tsch from unnest(ARRAY['_timescaledb_internal', '_timescaledb_config', '_timescaledb_catalog', '_timescaledb_cache']) tsch 50 | LOOP 51 | EXECUTE 'ALTER DEFAULT PRIVILEGES IN SCHEMA ' || quote_ident(r.tsch) || ' GRANT ALL PRIVILEGES ON TABLES TO tsdbadmin'; 52 | EXECUTE 'ALTER DEFAULT PRIVILEGES IN SCHEMA ' || quote_ident(r.tsch) || ' GRANT ALL PRIVILEGES ON SEQUENCES TO tsdbadmin'; 53 | EXECUTE 'GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA ' || quote_ident(r.tsch) || ' TO tsdbadmin'; 54 | EXECUTE 'GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA ' || quote_ident(r.tsch) || ' TO tsdbadmin'; 55 | EXECUTE 'GRANT USAGE, CREATE ON SCHEMA ' || quote_ident(r.tsch) || ' TO tsdbadmin'; 56 | END LOOP; 57 | END$$; 58 | -------------------------------------------------------------------------------- /e2e/scripts/post-restore.sql: -------------------------------------------------------------------------------- 1 | select prom_api.promscale_post_restore(); 2 | select public.timescaledb_post_restore(); 3 | -------------------------------------------------------------------------------- /e2e/scripts/pre-restore.sql: 
-------------------------------------------------------------------------------- 1 | select public.timescaledb_pre_restore(); 2 | create extension promscale; -- this MUST happen AFTER timescaledb_pre_restore! 3 | -------------------------------------------------------------------------------- /e2e/tests/config/mod.rs: -------------------------------------------------------------------------------- 1 | //! This file contains fixed docker container versions for the upgrade tests which 2 | //! must be bumped every time that we release a new version of the extension. 3 | pub(crate) const ALPINE_WITH_EXTENSION_LAST_RELEASED_PREFIX: &str = 4 | "timescaledev/promscale-extension:0.8.0-ts2.9.1-pg"; 5 | // TODO as soon as PG15 image is ready upstream 6 | //pub(crate) const HA_WITH_LAST_RELEASED_EXTENSION_PG15: &str = 7 | // "TODO"; 8 | pub(crate) const HA_WITH_LAST_RELEASED_EXTENSION_PG14: &str = 9 | "timescale/timescaledb-ha:pg14.6-ts2.9.1-p1"; 10 | pub(crate) const HA_WITH_LAST_RELEASED_EXTENSION_PG13: &str = 11 | "timescale/timescaledb-ha:pg13.9-ts2.9.1-p1"; 12 | pub(crate) const HA_WITH_LAST_RELEASED_EXTENSION_PG12: &str = 13 | "timescale/timescaledb-ha:pg12.13-ts2.9.1-p1"; 14 | -------------------------------------------------------------------------------- /e2e/tests/ps_trace_delete_all_traces.rs: -------------------------------------------------------------------------------- 1 | use test_common::PostgresContainerBlueprint; 2 | use test_common::{new_test_container_instance, PostgresTestInstance}; 3 | 4 | #[test] 5 | fn delete_all_traces_blocks_if_advisory_lock_already_taken() { 6 | let _ = pretty_env_logger::try_init(); 7 | let pg_blueprint = PostgresContainerBlueprint::new(); 8 | let test_pg_instance = new_test_container_instance(&pg_blueprint); 9 | let mut conn_one = test_pg_instance.connect(); 10 | let mut conn_two = test_pg_instance.connect(); 11 | 12 | let result = conn_one 13 | .simple_query("CREATE EXTENSION promscale;") 14 | .unwrap(); 15 | assert_eq!(result.len(), 1); 16 | let result = conn_one 17 | .query("SELECT pg_advisory_lock(5585198506344173278);", &[]) 18 | .unwrap(); 19 | assert_eq!(result.len(), 1); 20 | 21 | // Set statement timeout low, because we expect the following query to block on the advisory lock 22 | let result = conn_two 23 | .simple_query("SET statement_timeout=1000;") 24 | .unwrap(); 25 | assert_eq!(result.len(), 1); 26 | 27 | let result = conn_two.query("SELECT ps_trace.delete_all_traces();", &[]); 28 | 29 | assert!(result.is_err()); 30 | let error = result.expect_err("expected error"); 31 | assert_eq!( 32 | error.as_db_error().unwrap().message(), 33 | "canceling statement due to statement timeout" 34 | ); 35 | } 36 | -------------------------------------------------------------------------------- /e2e/tests/test.rs: -------------------------------------------------------------------------------- 1 | use log::info; 2 | use test_common::*; 3 | 4 | #[test] 5 | fn create_drop_promscale_extension() { 6 | let _ = pretty_env_logger::try_init(); 7 | let pg_blueprint = PostgresContainerBlueprint::new(); 8 | let test_pg_instance = new_test_container_instance(&pg_blueprint); 9 | let mut test_conn = test_pg_instance.connect(); 10 | 11 | let result = test_conn 12 | .simple_query("CREATE EXTENSION promscale;") 13 | .unwrap(); 14 | assert_eq!(result.len(), 1); 15 | 16 | let result = test_conn 17 | .simple_query("DROP EXTENSION promscale CASCADE;") 18 | .unwrap(); 19 | 20 | assert_eq!(result.len(), 1); 21 | } 22 | 23 | #[test] 24 | fn 
upgrade_promscale_extension_all_versions() { 25 | let _ = pretty_env_logger::try_init(); 26 | let pg_blueprint = PostgresContainerBlueprint::new(); 27 | let test_pg_instance = new_test_container_instance(&pg_blueprint); 28 | let mut test_conn = test_pg_instance.connect(); 29 | // This query gets all possible upgrade paths for the extension. An upgrade 30 | // path looks like: "0.1.0--0.2.0--0.2.1--0.3.0". 31 | let path_rows = test_conn 32 | .query(r#" 33 | SELECT path 34 | FROM pg_extension_update_paths('promscale') 35 | WHERE 36 | path IS NOT NULL 37 | -- We want to skip all versions before 0.5.0 because they can't be installed directly 38 | AND NOT ( 39 | split_part(source, '.', 1)::INT = 0 40 | AND 41 | split_part(source, '.', 2)::INT < 5 42 | ) 43 | -- When running on PG15 skip all versions before 0.8.0 because they don't exist for PG15 44 | AND NOT ( 45 | current_setting('server_version_num')::integer >= 150000 46 | AND 47 | split_part(source, '.', 1)::INT = 0 48 | AND 49 | split_part(source, '.', 2)::INT < 8 50 | ) 51 | AND source IN (SELECT version FROM pg_available_extension_versions WHERE name = 'promscale') 52 | "#, &[]) 53 | .unwrap(); 54 | 55 | let version_paths: Vec> = path_rows 56 | .iter() 57 | .map(|r| { 58 | let path = r.get::<&str, &str>("path"); 59 | // Split string "0.1.0--0.2.0" into vec ["0.1.0", "0.2.0"]. 60 | path.split("--") 61 | .map(str::to_string) 62 | .collect::>() 63 | }) 64 | .collect(); 65 | 66 | for version_path in version_paths { 67 | let mut prev_version: Option = None; 68 | for version in version_path { 69 | match prev_version { 70 | None => { 71 | info!("Creating extension at version {}", version); 72 | let res = test_conn.query( 73 | &format!("CREATE EXTENSION promscale VERSION '{}'", version), 74 | &[], 75 | ); 76 | assert!( 77 | res.is_ok(), 78 | "cannot create extension at version {}: {}", 79 | version, 80 | res.unwrap_err() 81 | ); 82 | } 83 | Some(prev_version) => { 84 | info!( 85 | "Upgrading extension from version {} to {}", 86 | prev_version, version 87 | ); 88 | let res = test_conn.query( 89 | &format!("ALTER EXTENSION promscale UPDATE TO '{}'", version), 90 | &[], 91 | ); 92 | assert!( 93 | res.is_ok(), 94 | "cannot upgrade extension from version {} to {}: {}", 95 | prev_version, 96 | version, 97 | res.unwrap_err(), 98 | ); 99 | } 100 | } 101 | prev_version = Some(version); 102 | } 103 | let res = test_conn.query("DROP EXTENSION promscale CASCADE;", &[]); 104 | assert!(res.is_ok(), "cannot drop extension: {}", res.unwrap_err()); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /e2e/tests/util/mod.rs: -------------------------------------------------------------------------------- 1 | use log::debug; 2 | 3 | pub fn debug_lines(stdout: Vec) { 4 | String::from_utf8(stdout).unwrap().lines().for_each(|line| { 5 | debug!("{}", line); 6 | }) 7 | } 8 | -------------------------------------------------------------------------------- /extract-extension-version.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Note: we cut on both ':' and '@' here to support pre-1.62.0 and post 1.62.0 `cargo pkgid` output 4 | command -v cargo >/dev/null && cargo pkgid | cut -d'#' -f2 | cut -d':' -f2 | cut -d'@' -f2 5 | -------------------------------------------------------------------------------- /gendoc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "gendoc" 3 | version = 
"0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | markdown-gen = "1.2.1" 8 | postgres = "0.19.2" 9 | test-common = { path = "../test-common" } 10 | rand = "0.8.5" 11 | -------------------------------------------------------------------------------- /ha.Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1.3-labs 2 | ARG PG_VERSION=14 3 | ARG TIMESCALEDB_VERSION_MAJMIN=2.9 4 | FROM ubuntu:22.04 as builder 5 | ARG PG_VERSION 6 | 7 | ENV DEBIAN_FRONTEND=noninteractive 8 | RUN apt-get update 9 | RUN apt-get install -y clang pkg-config wget lsb-release libssl-dev curl gnupg2 binutils devscripts equivs git libkrb5-dev libperl-dev make 10 | 11 | RUN wget -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor --output /usr/share/keyrings/postgresql.keyring 12 | RUN for t in deb deb-src; do \ 13 | echo "$t [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/postgresql.keyring] http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -s -c)-pgdg main" >> /etc/apt/sources.list.d/pgdg.list; \ 14 | done 15 | 16 | RUN apt-get update 17 | 18 | RUN apt-get install -y postgresql-${PG_VERSION} postgresql-server-dev-${PG_VERSION} 19 | 20 | RUN < 0 17 | INTO STRICT _is_timescaledb_installed 18 | FROM pg_extension 19 | WHERE extname='timescaledb'; 20 | 21 | IF _is_timescaledb_installed THEN 22 | PERFORM _timescaledb_internal.restart_background_workers(); 23 | END IF; 24 | END; 25 | $stop_bgw$; 26 | -------------------------------------------------------------------------------- /migration/idempotent/005-metric-metadata.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION _prom_catalog.insert_metric_metadatas(t TIMESTAMPTZ[], metric_family_name TEXT[], metric_type TEXT[], metric_unit TEXT[], metric_help TEXT[]) 2 | RETURNS BIGINT 3 | SET search_path = pg_catalog, pg_temp 4 | AS 5 | $$ 6 | DECLARE 7 | num_rows BIGINT; 8 | BEGIN 9 | INSERT INTO _prom_catalog.metadata (last_seen, metric_family, type, unit, help) 10 | SELECT * FROM UNNEST($1, $2, $3, $4, $5) res(last_seen, metric_family, type, unit, help) 11 | ORDER BY res.metric_family, res.type, res.unit, res.help 12 | ON CONFLICT (metric_family, type, unit, help) DO 13 | UPDATE SET last_seen = EXCLUDED.last_seen; 14 | GET DIAGNOSTICS num_rows = ROW_COUNT; 15 | RETURN num_rows; 16 | END; 17 | $$ LANGUAGE plpgsql; 18 | GRANT EXECUTE ON FUNCTION _prom_catalog.insert_metric_metadatas(TIMESTAMPTZ[], TEXT[], TEXT[], TEXT[], TEXT[]) TO prom_writer; 19 | 20 | CREATE OR REPLACE FUNCTION prom_api.get_metric_metadata(metric_family_name TEXT) 21 | RETURNS TABLE (metric_family TEXT, type TEXT, unit TEXT, help TEXT) 22 | SET search_path = pg_catalog, pg_temp 23 | AS 24 | $$ 25 | SELECT metric_family, type, unit, help FROM _prom_catalog.metadata WHERE metric_family = metric_family_name ORDER BY last_seen DESC 26 | $$ LANGUAGE SQL; 27 | GRANT EXECUTE ON FUNCTION prom_api.get_metric_metadata(TEXT) TO prom_reader; 28 | 29 | -- metric_families should have unique elements, otherwise there will be duplicate rows in the returned table. 
30 | CREATE OR REPLACE FUNCTION prom_api.get_multiple_metric_metadata(metric_families TEXT[]) 31 | RETURNS TABLE (metric_family TEXT, type TEXT, unit TEXT, help TEXT) 32 | SET search_path = pg_catalog, pg_temp 33 | AS 34 | $$ 35 | SELECT info.* 36 | FROM unnest(metric_families) AS family(name) 37 | INNER JOIN LATERAL ( 38 | SELECT metric_family, type, unit, help FROM _prom_catalog.metadata WHERE metric_family = family.name ORDER BY last_seen DESC LIMIT 1 39 | ) AS info ON (true) 40 | $$ LANGUAGE SQL; 41 | GRANT EXECUTE ON FUNCTION prom_api.get_multiple_metric_metadata(TEXT[]) TO prom_reader; 42 | -------------------------------------------------------------------------------- /migration/idempotent/013-tracing-maintenance.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE _ps_trace.execute_tracing_compression(hypertable_name text, log_verbose BOOLEAN = false) 2 | AS $$ 3 | DECLARE 4 | startT TIMESTAMPTZ; 5 | BEGIN 6 | -- Note: We cannot use SET in the procedure declaration because we do transaction control 7 | -- and we can _only_ use SET LOCAL in a procedure which _does_ transaction control 8 | SET LOCAL search_path = pg_catalog, pg_temp; 9 | 10 | startT := clock_timestamp(); 11 | 12 | PERFORM _prom_catalog.set_app_name(format('promscale tracing compression: %s', hypertable_name)); 13 | IF log_verbose THEN 14 | RAISE LOG 'promscale tracing compression: % starting', hypertable_name; 15 | END IF; 16 | 17 | CALL _prom_catalog.compress_old_chunks('_ps_trace', hypertable_name, now() - INTERVAL '1 hour'); 18 | 19 | IF log_verbose THEN 20 | RAISE LOG 'promscale tracing compression: % finished in %', hypertable_name, clock_timestamp()-startT; 21 | END IF; 22 | END; 23 | $$ LANGUAGE PLPGSQL; 24 | COMMENT ON PROCEDURE _ps_trace.execute_tracing_compression(text, boolean) 25 | IS 'Execute tracing compression compresses tracing tables'; 26 | GRANT EXECUTE ON PROCEDURE _ps_trace.execute_tracing_compression(text, boolean) TO prom_maintenance; 27 | 28 | --job boilerplate 29 | CREATE OR REPLACE PROCEDURE _ps_trace.execute_tracing_compression_job(job_id int, config jsonb) 30 | AS $$ 31 | DECLARE 32 | log_verbose boolean; 33 | ae_key text; 34 | ae_value text; 35 | ae_load boolean := FALSE; 36 | hypertable_name name; 37 | BEGIN 38 | -- Note: We cannot use SET in the procedure declaration because we do transaction control 39 | -- and we can _only_ use SET LOCAL in a procedure which _does_ transaction control 40 | SET LOCAL search_path = pg_catalog, pg_temp; 41 | log_verbose := coalesce(config->>'log_verbose', 'false')::boolean; 42 | hypertable_name := config->>'hypertable_name'; 43 | 44 | --if auto_explain enabled in config, turn it on in a best-effort way 45 | --i.e. if it fails (most likely due to lack of superuser priviliges) move on anyway. 
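    -- An illustrative config payload (the values here are hypothetical):
    --   {"hypertable_name": "span", "log_verbose": true, "auto_explain": {"log_min_duration": "0"}}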
46 | BEGIN 47 | FOR ae_key, ae_value IN 48 | SELECT * FROM jsonb_each_text(config->'auto_explain') 49 | LOOP 50 | IF NOT ae_load THEN 51 | ae_load := true; 52 | LOAD 'auto_explain'; 53 | END IF; 54 | 55 | PERFORM set_config('auto_explain.'|| ae_key, ae_value, FALSE); 56 | END LOOP; 57 | EXCEPTION WHEN OTHERS THEN 58 | RAISE WARNING 'could not set auto_explain options'; 59 | END; 60 | 61 | 62 | CALL _ps_trace.execute_tracing_compression(hypertable_name, log_verbose=>log_verbose); 63 | END 64 | $$ LANGUAGE PLPGSQL; 65 | GRANT EXECUTE ON PROCEDURE _ps_trace.execute_tracing_compression_job(int, jsonb) TO prom_maintenance; -------------------------------------------------------------------------------- /migration/idempotent/014-extension-type-functions.sql: -------------------------------------------------------------------------------- 1 | -- These type-related extension functions are not emitted by PGX, so must be 2 | -- added to our idempotent scripts in order to point the functions at the most 3 | -- recent version of our versioned extension binary. 4 | 5 | -- src/aggregates/gapfill_delta.rs:29 6 | -- promscale::aggregates::gapfill_delta::gapfilldeltatransition_in 7 | CREATE OR REPLACE FUNCTION _prom_ext."gapfilldeltatransition_in"( 8 | "input" cstring /* &cstr_core::CStr */ 9 | ) RETURNS _prom_ext.GapfillDeltaTransition /* promscale::aggregates::gapfill_delta::GapfillDeltaTransition */ 10 | IMMUTABLE PARALLEL SAFE STRICT 11 | LANGUAGE c /* Rust */ 12 | AS '$libdir/promscale-{{extension_version}}', 'gapfilldeltatransition_in_wrapper'; 13 | 14 | -- src/aggregates/gapfill_delta.rs:29 15 | -- promscale::aggregates::gapfill_delta::gapfilldeltatransition_out 16 | CREATE OR REPLACE FUNCTION _prom_ext."gapfilldeltatransition_out"( 17 | "input" _prom_ext.GapfillDeltaTransition /* promscale::aggregates::gapfill_delta::GapfillDeltaTransition */ 18 | ) RETURNS cstring /* &cstr_core::CStr */ 19 | IMMUTABLE PARALLEL SAFE STRICT 20 | LANGUAGE c /* Rust */ 21 | AS '$libdir/promscale-{{extension_version}}', 'gapfilldeltatransition_out_wrapper'; 22 | -------------------------------------------------------------------------------- /migration/idempotent/015-extension-function-permissions.sql: -------------------------------------------------------------------------------- 1 | GRANT EXECUTE ON FUNCTION _prom_ext.re2_match(TEXT, TEXT) TO prom_reader; -------------------------------------------------------------------------------- /migration/idempotent/016-vacuum-engine.sql: -------------------------------------------------------------------------------- 1 | DO $block$ 2 | BEGIN 3 | IF NOT _prom_catalog.is_timescaledb_installed() THEN 4 | RETURN; 5 | END IF; 6 | 7 | -- https://www.postgresql.org/docs/current/storage-vm.html 8 | -- we cannot tell if a rel is frozen or not without an extension (pg_visible), but a page must be 9 | -- all visible in order to be all frozen, so if there are fewer all visible pages than there are total 10 | -- pages, then we know that the table must not be frozen yet and a vacuum may determine that it can be 11 | CREATE OR REPLACE VIEW _ps_catalog.compressed_chunks_to_freeze AS 12 | SELECT 13 | cc.id, 14 | cc.schema_name, 15 | cc.table_name, 16 | greatest 17 | ( 18 | pg_catalog.pg_stat_get_last_vacuum_time(k.oid), 19 | pg_catalog.pg_stat_get_last_autovacuum_time(k.oid) 20 | ) as last_vacuum, 21 | k.relfrozenxid 22 | FROM _timescaledb_catalog.chunk c 23 | INNER JOIN _timescaledb_catalog.chunk cc 24 | ON (c.dropped OPERATOR(pg_catalog.=) false AND c.compressed_chunk_id 
OPERATOR(pg_catalog.=) cc.id) 25 | INNER JOIN pg_catalog.pg_class k 26 | ON (k.relname OPERATOR(pg_catalog.=) cc.table_name) 27 | INNER JOIN pg_catalog.pg_namespace n 28 | ON (k.relnamespace OPERATOR(pg_catalog.=) n.oid AND n.nspname OPERATOR(pg_catalog.=) cc.schema_name) 29 | WHERE k.relkind OPERATOR(pg_catalog.=) 'r' 30 | AND k.relallvisible OPERATOR(pg_catalog.<) k.relpages 31 | ; 32 | GRANT SELECT ON _ps_catalog.compressed_chunks_to_freeze TO prom_reader; 33 | COMMENT ON VIEW _ps_catalog.compressed_chunks_to_freeze IS 'Lists compressed chunks that need to be frozen'; 34 | 35 | -- if a compressed chunk is missing statistics and is never later modified after initial compression 36 | -- then the autovacuum will completely ignore it until it passes vacuum_freeze_max_age 37 | -- this is not ideal. if many end up in this state, we might have great performance until a bunch of 38 | -- chunks hit the threshold and the autovacuum engine finally sees them and starts working them 39 | CREATE OR REPLACE VIEW _ps_catalog.compressed_chunks_missing_stats AS 40 | SELECT 41 | cc.id, 42 | cc.schema_name, 43 | cc.table_name, 44 | k.relfrozenxid 45 | FROM _timescaledb_catalog.chunk c 46 | INNER JOIN _timescaledb_catalog.chunk cc 47 | ON (c.dropped OPERATOR(pg_catalog.=) false AND c.compressed_chunk_id OPERATOR(pg_catalog.=) cc.id) 48 | INNER JOIN pg_catalog.pg_class k 49 | ON (k.relname OPERATOR(pg_catalog.=) cc.table_name) 50 | INNER JOIN pg_catalog.pg_namespace n 51 | ON (k.relnamespace OPERATOR(pg_catalog.=) n.oid AND n.nspname OPERATOR(pg_catalog.=) cc.schema_name) 52 | WHERE k.relkind OPERATOR(pg_catalog.=) 'r' 53 | AND k.relallvisible OPERATOR(pg_catalog.<) k.relpages 54 | AND pg_catalog.pg_stat_get_last_autovacuum_time(k.oid) IS NULL -- never autovacuumed 55 | AND pg_catalog.pg_stat_get_last_vacuum_time(k.oid) IS NULL -- never vacuumed 56 | AND k.reltuples > 0 -- there are tuples, but... 
57 | AND pg_catalog.pg_stat_get_live_tuples(k.oid) = 0 -- stats appear to be missing 58 | ; 59 | GRANT SELECT ON _ps_catalog.compressed_chunks_missing_stats TO prom_reader; 60 | COMMENT ON VIEW _ps_catalog.compressed_chunks_missing_stats IS 'Lists compressed chunks that need to be vacuum'; 61 | END; 62 | $block$; 63 | -------------------------------------------------------------------------------- /migration/incremental/001-extension.sql: -------------------------------------------------------------------------------- 1 | -- The contents of this file was auto-generated by the pgx extension, but was 2 | -- placed here manually 3 | 4 | -- src/aggregates/gapfill_delta.rs:29 5 | -- promscale::aggregates::gapfill_delta::GapfillDeltaTransition 6 | CREATE TYPE _prom_ext.GapfillDeltaTransition; 7 | 8 | -- src/aggregates/gapfill_delta.rs:29 9 | -- promscale::aggregates::gapfill_delta::gapfilldeltatransition_in 10 | CREATE OR REPLACE FUNCTION _prom_ext."gapfilldeltatransition_in"( 11 | "input" cstring /* &cstr_core::CStr */ 12 | ) RETURNS _prom_ext.GapfillDeltaTransition /* promscale::aggregates::gapfill_delta::GapfillDeltaTransition */ 13 | IMMUTABLE PARALLEL SAFE STRICT 14 | LANGUAGE c /* Rust */ 15 | AS '$libdir/promscale-{{extension_version}}', 'gapfilldeltatransition_in_wrapper'; 16 | 17 | -- src/aggregates/gapfill_delta.rs:29 18 | -- promscale::aggregates::gapfill_delta::gapfilldeltatransition_out 19 | CREATE OR REPLACE FUNCTION _prom_ext."gapfilldeltatransition_out"( 20 | "input" _prom_ext.GapfillDeltaTransition /* promscale::aggregates::gapfill_delta::GapfillDeltaTransition */ 21 | ) RETURNS cstring /* &cstr_core::CStr */ 22 | IMMUTABLE PARALLEL SAFE STRICT 23 | LANGUAGE c /* Rust */ 24 | AS '$libdir/promscale-{{extension_version}}', 'gapfilldeltatransition_out_wrapper'; 25 | 26 | -- src/aggregates/gapfill_delta.rs:29 27 | -- promscale::aggregates::gapfill_delta::GapfillDeltaTransition 28 | CREATE TYPE _prom_ext.GapfillDeltaTransition ( 29 | INTERNALLENGTH = variable, 30 | INPUT = _prom_ext.gapfilldeltatransition_in, /* promscale::aggregates::gapfill_delta::gapfilldeltatransition_in */ 31 | OUTPUT = _prom_ext.gapfilldeltatransition_out, /* promscale::aggregates::gapfill_delta::gapfilldeltatransition_out */ 32 | STORAGE = extended 33 | ); 34 | -------------------------------------------------------------------------------- /migration/incremental/002-utils.sql: -------------------------------------------------------------------------------- 1 | --perms for schema will be addressed later; 2 | CREATE SCHEMA _prom_catalog; 3 | 4 | --table to save commands so they can be run when adding new nodes 5 | CREATE TABLE _prom_catalog.remote_commands( 6 | key TEXT PRIMARY KEY, 7 | seq SERIAL, 8 | transactional BOOLEAN, 9 | command TEXT 10 | ); 11 | GRANT ALL ON SEQUENCE _prom_catalog.remote_commands_seq_seq TO current_user; 12 | 13 | CREATE OR REPLACE PROCEDURE _prom_catalog.execute_everywhere(command_key text, command TEXT, transactional BOOLEAN = true) 14 | SET search_path = pg_catalog, pg_temp 15 | AS $func$ 16 | DECLARE 17 | _is_restore_in_progress boolean = false; 18 | BEGIN 19 | IF command_key IS NOT NULL THEN 20 | INSERT INTO _prom_catalog.remote_commands(key, command, transactional) VALUES(command_key, command, transactional) 21 | ON CONFLICT (key) DO UPDATE SET command = excluded.command, transactional = excluded.transactional; 22 | END IF; 23 | 24 | EXECUTE command; 25 | 26 | -- do not call distributed_exec if we are in the middle of restoring from backup 27 | _is_restore_in_progress = 
coalesce((SELECT setting::boolean from pg_catalog.pg_settings where name = 'timescaledb.restoring'), false); 28 | IF _is_restore_in_progress THEN 29 | RAISE NOTICE 'restore in progress. skipping %', coalesce(command_key, 'anonymous command'); 30 | RETURN; 31 | END IF; 32 | BEGIN 33 | CALL public.distributed_exec(command); 34 | EXCEPTION 35 | WHEN undefined_function THEN 36 | -- we're not on Timescale 2, just return 37 | RETURN; 38 | WHEN SQLSTATE '0A000' THEN 39 | -- we're not the access node, just return 40 | RETURN; 41 | END; 42 | END 43 | $func$ LANGUAGE PLPGSQL; 44 | REVOKE ALL ON PROCEDURE _prom_catalog.execute_everywhere(text, text, boolean) FROM PUBLIC; 45 | 46 | CREATE OR REPLACE PROCEDURE _prom_catalog.update_execute_everywhere_entry(command_key text, command TEXT, transactional BOOLEAN = true) 47 | SET search_path = pg_catalog, pg_temp 48 | AS $func$ 49 | BEGIN 50 | UPDATE _prom_catalog.remote_commands 51 | SET 52 | command=update_execute_everywhere_entry.command, 53 | transactional=update_execute_everywhere_entry.transactional 54 | WHERE key = command_key; 55 | END 56 | $func$ LANGUAGE PLPGSQL; 57 | REVOKE ALL ON PROCEDURE _prom_catalog.update_execute_everywhere_entry(text, text, boolean) FROM PUBLIC; 58 | -------------------------------------------------------------------------------- /migration/incremental/003-users.sql: -------------------------------------------------------------------------------- 1 | CALL _prom_catalog.execute_everywhere('create_prom_reader', $ee$ 2 | DO $$ 3 | BEGIN 4 | CREATE ROLE prom_reader; 5 | EXCEPTION WHEN duplicate_object THEN 6 | RAISE NOTICE 'role prom_reader already exists, skipping create'; 7 | RETURN; 8 | END 9 | $$; 10 | $ee$); 11 | 12 | CALL _prom_catalog.execute_everywhere('create_prom_writer', $ee$ 13 | DO $$ 14 | BEGIN 15 | CREATE ROLE prom_writer; 16 | EXCEPTION WHEN duplicate_object THEN 17 | RAISE NOTICE 'role prom_writer already exists, skipping create'; 18 | RETURN; 19 | END 20 | $$; 21 | $ee$); 22 | 23 | CALL _prom_catalog.execute_everywhere('create_prom_modifier', $ee$ 24 | DO $$ 25 | BEGIN 26 | CREATE ROLE prom_modifier; 27 | EXCEPTION WHEN duplicate_object THEN 28 | RAISE NOTICE 'role prom_modifier already exists, skipping create'; 29 | RETURN; 30 | END 31 | $$; 32 | $ee$); 33 | 34 | CALL _prom_catalog.execute_everywhere('create_prom_admin', $ee$ 35 | DO $$ 36 | BEGIN 37 | CREATE ROLE prom_admin; 38 | EXCEPTION WHEN duplicate_object THEN 39 | RAISE NOTICE 'role prom_admin already exists, skipping create'; 40 | RETURN; 41 | END 42 | $$; 43 | $ee$); 44 | 45 | CALL _prom_catalog.execute_everywhere('create_prom_maintenance', $ee$ 46 | DO $$ 47 | BEGIN 48 | CREATE ROLE prom_maintenance; 49 | EXCEPTION WHEN duplicate_object THEN 50 | RAISE NOTICE 'role prom_maintenance already exists, skipping create'; 51 | RETURN; 52 | END 53 | $$; 54 | $ee$); 55 | 56 | CALL _prom_catalog.execute_everywhere('grant_prom_reader_prom_writer',$ee$ 57 | DO $$ 58 | BEGIN 59 | GRANT prom_reader TO prom_writer; 60 | GRANT prom_reader TO prom_maintenance; 61 | GRANT prom_writer TO prom_modifier; 62 | GRANT prom_modifier TO prom_admin; 63 | GRANT prom_maintenance TO prom_admin; 64 | END 65 | $$; 66 | $ee$); 67 | 68 | CALL _prom_catalog.execute_everywhere('grant_all_roles_to_extowner', 69 | format( 70 | $ee$ 71 | DO $$ 72 | BEGIN 73 | GRANT prom_reader TO %1$I WITH ADMIN OPTION; 74 | GRANT prom_writer TO %1$I WITH ADMIN OPTION; 75 | GRANT prom_maintenance TO %1$I WITH ADMIN OPTION; 76 | GRANT prom_modifier TO %1$I WITH ADMIN OPTION; 77 | GRANT prom_admin TO 
%1$I WITH ADMIN OPTION; 78 | END 79 | $$; 80 | $ee$, session_user) 81 | ); -------------------------------------------------------------------------------- /migration/incremental/004-schemas.sql: -------------------------------------------------------------------------------- 1 | -- Note: This whole block of schema creation was previously run in an 2 | -- execute_everywhere block, which would have broken either single-node or 3 | -- multi-node installs. By removing the execute_everywhere we have definitely 4 | -- broken multi-node, but single-node is intact. The tracking issue for this 5 | -- is: https://github.com/timescale/promscale_extension/issues/187 6 | 7 | -- _prom_catalog is created before 8 | GRANT USAGE ON SCHEMA _prom_catalog TO prom_reader; 9 | 10 | CREATE SCHEMA prom_api; -- public functions 11 | GRANT USAGE ON SCHEMA prom_api TO prom_reader; 12 | 13 | -- _prom_ext is created by postgres on extension creation 14 | GRANT USAGE ON SCHEMA _prom_ext TO prom_reader; 15 | 16 | CREATE SCHEMA prom_series; -- series views 17 | GRANT USAGE ON SCHEMA prom_series TO prom_reader; 18 | 19 | CREATE SCHEMA prom_metric; -- metric views 20 | GRANT USAGE ON SCHEMA prom_metric TO prom_reader; 21 | 22 | CREATE SCHEMA prom_data; 23 | GRANT USAGE ON SCHEMA prom_data TO prom_reader; 24 | 25 | CREATE SCHEMA prom_data_series; 26 | GRANT USAGE ON SCHEMA prom_data_series TO prom_reader; 27 | 28 | CREATE SCHEMA prom_info; 29 | GRANT USAGE ON SCHEMA prom_info TO prom_reader; 30 | 31 | CREATE SCHEMA prom_data_exemplar; 32 | GRANT USAGE ON SCHEMA prom_data_exemplar TO prom_reader; 33 | GRANT ALL ON SCHEMA prom_data_exemplar TO prom_writer; 34 | 35 | CREATE SCHEMA ps_tag; 36 | GRANT USAGE ON SCHEMA ps_tag TO prom_reader; 37 | 38 | CREATE SCHEMA _ps_trace; 39 | GRANT USAGE ON SCHEMA _ps_trace TO prom_reader; 40 | 41 | CREATE SCHEMA ps_trace; 42 | GRANT USAGE ON SCHEMA ps_trace TO prom_reader; 43 | 44 | -- _ps_catalog is created before 45 | GRANT USAGE ON SCHEMA _ps_catalog TO prom_reader; 46 | -------------------------------------------------------------------------------- /migration/incremental/005-tag-operators.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE ps_tag.tag_op_jsonb_path_exists AS (tag_key text, value jsonpath); 2 | CREATE TYPE ps_tag.tag_op_regexp_matches AS (tag_key text, value text); 3 | CREATE TYPE ps_tag.tag_op_regexp_not_matches AS (tag_key text, value text); 4 | CREATE TYPE ps_tag.tag_op_equals AS (tag_key text, value jsonb); 5 | CREATE TYPE ps_tag.tag_op_not_equals AS (tag_key text, value jsonb); 6 | CREATE TYPE ps_tag.tag_op_less_than AS (tag_key text, value jsonb); 7 | CREATE TYPE ps_tag.tag_op_less_than_or_equal AS (tag_key text, value jsonb); 8 | CREATE TYPE ps_tag.tag_op_greater_than AS (tag_key text, value jsonb); 9 | CREATE TYPE ps_tag.tag_op_greater_than_or_equal AS (tag_key text, value jsonb); 10 | -------------------------------------------------------------------------------- /migration/incremental/008-install-uda.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION _prom_catalog.get_timescale_major_version() 2 | RETURNS INT 3 | SET search_path = pg_catalog, pg_temp 4 | AS $func$ 5 | SELECT split_part(extversion, '.', 1)::INT FROM pg_catalog.pg_extension WHERE extname='timescaledb' LIMIT 1; 6 | $func$ 7 | LANGUAGE SQL STABLE PARALLEL SAFE; 8 | 9 | CREATE OR REPLACE FUNCTION _prom_catalog.get_timescale_minor_version() 10 | RETURNS INT 11 | SET search_path = 
pg_catalog, pg_temp 12 | AS $func$ 13 | SELECT split_part(extversion, '.', 2)::INT FROM pg_catalog.pg_extension WHERE extname='timescaledb' LIMIT 1; 14 | $func$ 15 | LANGUAGE SQL STABLE PARALLEL SAFE; 16 | GRANT EXECUTE ON FUNCTION _prom_catalog.get_timescale_minor_version() TO prom_reader; 17 | 18 | --just a stub will be replaced in the idempotent scripts 19 | CREATE OR REPLACE PROCEDURE _prom_catalog.execute_maintenance_job(job_id int, config jsonb) 20 | AS $$ 21 | BEGIN 22 | RAISE 'calling execute_maintenance_job stub, should have been replaced'; 23 | END 24 | $$ LANGUAGE PLPGSQL; 25 | 26 | CREATE OR REPLACE FUNCTION _prom_catalog.is_timescaledb_installed() 27 | RETURNS BOOLEAN 28 | SET search_path = pg_catalog, pg_temp 29 | AS $func$ 30 | SELECT count(*) > 0 FROM pg_extension WHERE extname='timescaledb'; 31 | $func$ 32 | LANGUAGE SQL STABLE; 33 | GRANT EXECUTE ON FUNCTION _prom_catalog.is_timescaledb_installed() TO prom_reader; 34 | 35 | CREATE OR REPLACE FUNCTION _prom_catalog.is_timescaledb_oss() 36 | RETURNS BOOLEAN 37 | SET search_path = pg_catalog, pg_temp 38 | AS 39 | $$ 40 | BEGIN 41 | IF _prom_catalog.is_timescaledb_installed() THEN 42 | RETURN (SELECT current_setting('timescaledb.license') = 'apache'); 43 | END IF; 44 | RETURN false; 45 | END; 46 | $$ 47 | LANGUAGE plpgsql; 48 | GRANT EXECUTE ON FUNCTION _prom_catalog.is_timescaledb_oss() TO prom_reader; 49 | 50 | CREATE OR REPLACE FUNCTION _prom_catalog.is_multinode() 51 | RETURNS BOOLEAN 52 | SET search_path = pg_catalog, pg_temp 53 | AS $func$ 54 | SELECT count(*) > 0 FROM timescaledb_information.data_nodes 55 | $func$ 56 | LANGUAGE sql STABLE; 57 | GRANT EXECUTE ON FUNCTION _prom_catalog.is_multinode() TO prom_reader; 58 | 59 | --add 2 jobs executing every 30 min by default for timescaledb 2.0 60 | DO $$ 61 | DECLARE 62 | _is_restore_in_progress boolean = false; 63 | BEGIN 64 | _is_restore_in_progress = coalesce((SELECT setting::boolean from pg_catalog.pg_settings where name = 'timescaledb.restoring'), false); 65 | IF NOT _prom_catalog.is_timescaledb_oss() 66 | AND _prom_catalog.get_timescale_major_version() >= 2 67 | AND NOT _is_restore_in_progress 68 | THEN 69 | PERFORM public.add_job('_prom_catalog.execute_maintenance_job', '30 min'); 70 | PERFORM public.add_job('_prom_catalog.execute_maintenance_job', '30 min'); 71 | END IF; 72 | END 73 | $$; 74 | -------------------------------------------------------------------------------- /migration/incremental/009-tables-ha.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE _prom_catalog.ha_leases 2 | ( 3 | cluster_name TEXT PRIMARY KEY, 4 | leader_name TEXT, 5 | lease_start TIMESTAMPTZ, 6 | lease_until TIMESTAMPTZ 7 | ); 8 | GRANT SELECT ON TABLE _prom_catalog.ha_leases TO prom_reader; 9 | GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _prom_catalog.ha_leases TO prom_writer; 10 | 11 | CREATE TABLE _prom_catalog.ha_leases_logs 12 | ( 13 | cluster_name TEXT NOT NULL, 14 | leader_name TEXT NOT NULL, 15 | lease_start TIMESTAMPTZ NOT NULL, -- inclusive 16 | lease_until TIMESTAMPTZ, -- exclusive 17 | PRIMARY KEY (cluster_name, leader_name, lease_start) 18 | ); 19 | GRANT SELECT ON TABLE _prom_catalog.ha_leases_logs TO prom_reader; 20 | GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _prom_catalog.ha_leases_logs TO prom_writer; 21 | 22 | 23 | -- STUB for function that trigger to automatically keep the log calls - real implementation in ha.sql 24 | CREATE OR REPLACE FUNCTION _prom_catalog.ha_leases_audit_fn() 25 | RETURNS TRIGGER 26 | AS 
27 | $func$ 28 | BEGIN 29 | RAISE 'Just a stub, should be overwritten'; 30 | RETURN NEW; 31 | END; 32 | $func$ LANGUAGE plpgsql VOLATILE; 33 | 34 | -- trigger to automatically keep the log 35 | CREATE TRIGGER ha_leases_audit 36 | AFTER INSERT OR UPDATE 37 | ON _prom_catalog.ha_leases 38 | FOR EACH ROW 39 | EXECUTE PROCEDURE _prom_catalog.ha_leases_audit_fn(); 40 | 41 | -- default values for lease 42 | INSERT INTO _prom_catalog.default(key, value) 43 | VALUES ('ha_lease_timeout', '1m'), 44 | ('ha_lease_refresh', '10s') 45 | ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value; -------------------------------------------------------------------------------- /migration/incremental/010-tables-metadata.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS _prom_catalog.metadata 2 | ( 3 | last_seen TIMESTAMPTZ NOT NULL, 4 | metric_family TEXT NOT NULL, 5 | type TEXT DEFAULT NULL, 6 | unit TEXT DEFAULT NULL, 7 | help TEXT DEFAULT NULL, 8 | PRIMARY KEY (metric_family, type, unit, help) 9 | ); 10 | GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _prom_catalog.metadata TO prom_writer; 11 | GRANT SELECT ON TABLE _prom_catalog.metadata TO prom_reader; 12 | 13 | CREATE INDEX IF NOT EXISTS metadata_index ON _prom_catalog.metadata 14 | ( 15 | metric_family, last_seen 16 | ); 17 | -------------------------------------------------------------------------------- /migration/incremental/011-tables-exemplar.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS _prom_catalog.exemplar_label_key_position ( 2 | metric_name TEXT NOT NULL, 3 | key TEXT NOT NULL, 4 | pos INTEGER NOT NULL, 5 | PRIMARY KEY (metric_name, key) INCLUDE (pos) 6 | ); 7 | GRANT SELECT ON TABLE _prom_catalog.exemplar_label_key_position TO prom_reader; 8 | GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _prom_catalog.exemplar_label_key_position TO prom_writer; 9 | 10 | CREATE TABLE IF NOT EXISTS _prom_catalog.exemplar ( 11 | id SERIAL PRIMARY KEY, 12 | metric_name TEXT NOT NULL, 13 | table_name TEXT NOT NULL, 14 | UNIQUE (metric_name) INCLUDE (table_name, id) 15 | ); 16 | GRANT SELECT ON TABLE _prom_catalog.exemplar TO prom_reader; 17 | GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _prom_catalog.exemplar TO prom_writer; 18 | 19 | GRANT USAGE, SELECT ON SEQUENCE _prom_catalog.exemplar_id_seq TO prom_writer; 20 | -------------------------------------------------------------------------------- /migration/incremental/014-telemetry.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS _ps_catalog.promscale_instance_information ( 2 | uuid UUID NOT NULL PRIMARY KEY, 3 | last_updated TIMESTAMPTZ NOT NULL, 4 | promscale_ingested_samples_total BIGINT DEFAULT 0 NOT NULL, 5 | promscale_metrics_queries_success_total BIGINT DEFAULT 0 NOT NULL, 6 | promscale_metrics_queries_timedout_total BIGINT DEFAULT 0 NOT NULL, 7 | promscale_metrics_queries_failed_total BIGINT DEFAULT 0 NOT NULL, 8 | promscale_trace_query_requests_executed_total BIGINT DEFAULT 0 NOT NULL, 9 | promscale_trace_dependency_requests_executed_total BIGINT DEFAULT 0 NOT NULL, 10 | is_counter_reset_row BOOLEAN DEFAULT FALSE NOT NULL, -- counter reset row has '00000000-0000-0000-0000-000000000000' uuid 11 | promscale_ingested_spans_total BIGINT DEFAULT 0 NOT NULL 12 | CHECK((uuid = '00000000-0000-0000-0000-000000000000' OR NOT is_counter_reset_row) AND (uuid != '00000000-0000-0000-0000-000000000000' OR 
is_counter_reset_row)) 13 | ); 14 | GRANT SELECT ON TABLE _ps_catalog.promscale_instance_information TO prom_reader; 15 | GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _ps_catalog.promscale_instance_information TO prom_writer; 16 | 17 | -- Write a counter reset row, i.e., the first row in the table. Purpose: 18 | -- The above promscale_.* rows logically behave as counter. They get deleted by 19 | -- telemetry-housekeeper promscale when last_updated is too old to be stale. Since 20 | -- counters are always increasing, if these rows get deleted, it will result in data-loss. 21 | -- To avoid this loss of data, we treat the first row as immutable, and use it for incrementing 22 | -- the attributes of this row, with the values of the stale rows before they are deleted. 23 | INSERT INTO _ps_catalog.promscale_instance_information (uuid, last_updated, is_counter_reset_row) 24 | VALUES ('00000000-0000-0000-0000-000000000000', '2021-12-09 00:00:00'::TIMESTAMPTZ, TRUE); -------------------------------------------------------------------------------- /migration/incremental/016-remove-ee-schemas.sql: -------------------------------------------------------------------------------- 1 | DELETE FROM _prom_catalog.remote_commands WHERE key='create_schemas'; -------------------------------------------------------------------------------- /migration/incremental/017-set-search-path.sql: -------------------------------------------------------------------------------- 1 | -- We had these operators in the private _prom_catalog schema instead of the 2 | -- public prom_api schema. With the new (restricted) search path, we need to 3 | -- move these. 4 | ALTER OPERATOR _prom_catalog.? (prom_api.label_array, ps_tag.tag_op_equals) SET SCHEMA prom_api; 5 | ALTER OPERATOR _prom_catalog.? (prom_api.label_array, ps_tag.tag_op_not_equals) SET SCHEMA prom_api; 6 | ALTER OPERATOR _prom_catalog.? (prom_api.label_array, ps_tag.tag_op_regexp_matches) SET SCHEMA prom_api; 7 | ALTER OPERATOR _prom_catalog.? (prom_api.label_array, ps_tag.tag_op_regexp_not_matches) SET SCHEMA prom_api; 8 | 9 | -- Setup the database-wide search path, so that users who want to interact with 10 | -- the promscale extension's SQL objects are able to do so without much fuss. 
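-- Rough illustration of the intended effect (database name and reset_val are hypothetical): if the
-- reset_val of search_path is '"$user", public', the block below ends up executing something like
--   ALTER DATABASE mydb SET search_path = "$user", public, ps_tag, prom_api, prom_metric, ps_trace;
-- Schemas already present in the reset_val are not appended a second time.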
11 | DO $$ 12 | DECLARE 13 | base_components TEXT[]; 14 | new_components TEXT[]; 15 | final_components TEXT[]; 16 | new_path TEXT; 17 | BEGIN 18 | -- we use the `reset_val` for `search_path` as the "clean" base of our search path 19 | SELECT regexp_split_to_array(reset_val, ',\s*') FROM pg_settings WHERE name = 'search_path' INTO base_components; 20 | -- we only want to add our components to the search path if they're not already in there 21 | WITH only_new_components AS ( 22 | SELECT UNNEST(ARRAY ['ps_tag', 'prom_api', 'prom_metric', 'ps_trace']) as v 23 | EXCEPT 24 | SELECT UNNEST(base_components) as v 25 | ) 26 | SELECT array_agg(v) FROM only_new_components INTO new_components; 27 | 28 | final_components := base_components || new_components; 29 | 30 | SELECT array_to_string(final_components, ', ') INTO new_path; 31 | EXECUTE format('ALTER DATABASE %I SET search_path = %s', current_database(), new_path); 32 | EXECUTE format('SET search_path = %s', new_path); 33 | END 34 | $$; 35 | -------------------------------------------------------------------------------- /migration/incremental/018-grant-prom-roles.sql: -------------------------------------------------------------------------------- 1 | -- Grant roles to the session user (the one that is installing the extension) 2 | GRANT prom_reader TO SESSION_USER WITH ADMIN OPTION; 3 | GRANT prom_writer TO SESSION_USER WITH ADMIN OPTION; 4 | GRANT prom_maintenance TO SESSION_USER WITH ADMIN OPTION; 5 | GRANT prom_modifier TO SESSION_USER WITH ADMIN OPTION; 6 | GRANT prom_admin TO SESSION_USER WITH ADMIN OPTION; 7 | -------------------------------------------------------------------------------- /migration/incremental/019-prom-installation-info.sql: -------------------------------------------------------------------------------- 1 | 2 | DROP TABLE IF EXISTS public.prom_installation_info; 3 | -------------------------------------------------------------------------------- /migration/incremental/021-initial-default.sql: -------------------------------------------------------------------------------- 1 | 2 | DELETE FROM _prom_catalog.default x 3 | WHERE x.key IN 4 | ( 5 | SELECT d.key 6 | FROM _prom_catalog.default d 7 | LEFT OUTER JOIN 8 | ( 9 | -- the view won't exists yet since it's in the idempotent 10 | -- so use a values clause 11 | VALUES 12 | ('chunk_interval' , (INTERVAL '8 hours')::text), 13 | ('retention_period' , (90 * INTERVAL '1 day')::text), 14 | ('metric_compression' , (exists(select 1 from pg_catalog.pg_proc where proname = 'compress_chunk')::text)), 15 | ('trace_retention_period' , (30 * INTERVAL '1 days')::text), 16 | ('ha_lease_timeout' , '1m'), 17 | ('ha_lease_refresh' , '10s') 18 | ) dd(key, value) ON (d.key = dd.key) 19 | WHERE d.value is not distinct from dd.value 20 | ); 21 | -------------------------------------------------------------------------------- /migration/incremental/022-jit-off.sql: -------------------------------------------------------------------------------- 1 | -- PG JIT compilation doesn't play nicely with TimescaleDB planner and can cause a huge 2 | -- slowdown for specific queries so we are turning it off. 
3 | -- We might revisit this once JIT issues are fixed in TimescaleDB */ 4 | DO $$ 5 | BEGIN 6 | EXECUTE format('ALTER DATABASE %I SET jit = off', current_database()); 7 | END 8 | $$; 9 | -------------------------------------------------------------------------------- /migration/incremental/023-privileges.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE _prom_catalog.series OWNER TO prom_admin; 2 | 3 | GRANT SELECT ON TABLE _prom_catalog.remote_commands TO prom_reader; 4 | GRANT SELECT, INSERT, UPDATE, DELETE ON TABLE _prom_catalog.remote_commands TO prom_admin; 5 | GRANT SELECT ON TABLE _ps_catalog.migration TO prom_reader; 6 | 7 | GRANT EXECUTE ON FUNCTION _prom_ext.num_cpus() TO prom_reader; 8 | GRANT EXECUTE ON FUNCTION _prom_ext.jsonb_digest(JSONB) TO prom_reader; 9 | GRANT EXECUTE ON FUNCTION _prom_ext.prom_delta(TIMESTAMPTZ, TIMESTAMPTZ, BIGINT, BIGINT, TIMESTAMPTZ, DOUBLE PRECISION) TO prom_reader; 10 | GRANT EXECUTE ON FUNCTION _prom_ext.prom_increase(TIMESTAMPTZ, TIMESTAMPTZ, BIGINT, BIGINT, TIMESTAMPTZ, DOUBLE PRECISION) TO prom_reader; 11 | GRANT EXECUTE ON FUNCTION _prom_ext.prom_rate(TIMESTAMPTZ, TIMESTAMPTZ, BIGINT, BIGINT, TIMESTAMPTZ, DOUBLE PRECISION) TO prom_reader; 12 | GRANT EXECUTE ON FUNCTION _prom_ext.vector_selector(TIMESTAMPTZ, TIMESTAMPTZ, BIGINT, BIGINT, TIMESTAMPTZ, DOUBLE PRECISION) TO prom_reader; 13 | GRANT EXECUTE ON FUNCTION _prom_ext.rewrite_fn_call_to_subquery(internal) TO prom_reader; 14 | 15 | GRANT EXECUTE ON PROCEDURE _prom_catalog.execute_everywhere(text, text, boolean) TO prom_admin; 16 | GRANT EXECUTE ON PROCEDURE _prom_catalog.update_execute_everywhere_entry(text, text, boolean) TO prom_admin; 17 | -------------------------------------------------------------------------------- /migration/incremental/024-adjust_autovacuum.sql: -------------------------------------------------------------------------------- 1 | DO $doit$ 2 | DECLARE 3 | r RECORD; 4 | _compressed_schema TEXT; 5 | _compressed_hypertable TEXT; 6 | BEGIN 7 | FOR r IN 8 | SELECT * 9 | FROM _prom_catalog.metric 10 | WHERE table_schema = 'prom_data' 11 | LOOP 12 | IF current_setting('server_version_num')::integer >= 130000 THEN 13 | EXECUTE FORMAT($$ 14 | ALTER TABLE prom_data.%I SET 15 | ( 16 | autovacuum_vacuum_insert_threshold=50000, 17 | autovacuum_vacuum_insert_scale_factor=2.0, 18 | autovacuum_analyze_threshold = 50000, 19 | autovacuum_analyze_scale_factor = 0.5 20 | ) 21 | $$, r.table_name); 22 | ELSE 23 | EXECUTE FORMAT($$ 24 | ALTER TABLE prom_data.%I SET 25 | ( 26 | autovacuum_analyze_threshold = 50000, 27 | autovacuum_analyze_scale_factor = 0.5 28 | ) 29 | $$, r.table_name); 30 | END IF; 31 | EXECUTE FORMAT($$ ALTER TABLE prom_data.%I RESET (autovacuum_vacuum_threshold) $$, r.table_name); 32 | 33 | SELECT c.schema_name, c.table_name 34 | INTO _compressed_schema, _compressed_hypertable 35 | FROM _timescaledb_catalog.hypertable h 36 | INNER JOIN _timescaledb_catalog.hypertable c ON (h.compressed_hypertable_id= c.id) 37 | WHERE h.schema_name = 'prom_data' AND h.table_name = r.table_name; 38 | 39 | CONTINUE WHEN NOT FOUND; 40 | 41 | IF current_setting('server_version_num')::integer >= 130000 THEN 42 | EXECUTE FORMAT($$ 43 | ALTER TABLE %I.%I SET 44 | ( 45 | autovacuum_freeze_min_age=0, 46 | autovacuum_freeze_table_age=0, 47 | autovacuum_vacuum_insert_threshold=1, 48 | autovacuum_vacuum_insert_scale_factor=0.0 49 | ) 50 | $$, _compressed_schema, _compressed_hypertable); 51 | ELSE 52 | EXECUTE FORMAT($$ 53 | ALTER TABLE %I.%I SET 
54 | ( 55 | autovacuum_freeze_min_age=0, 56 | autovacuum_freeze_table_age=0 57 | ) 58 | $$, _compressed_schema, _compressed_hypertable); 59 | END IF; 60 | END LOOP; 61 | END 62 | $doit$; -------------------------------------------------------------------------------- /migration/incremental/025-tag-map-storage-type.sql: -------------------------------------------------------------------------------- 1 | -- We have to mutate the catalog for two reasons: 2 | -- 1. ALTER TYPE tag_map SET (STORAGE = EXTENDED); is not supported on PG12 3 | -- 2. We can not ALTER hypertables that have compression enabled. 4 | 5 | -- tag_v 6 | -- This statement can become ALTER TYPE once we drop pg12 support 7 | UPDATE pg_catalog.pg_type AS t 8 | SET typstorage = 'x' 9 | WHERE t.oid = '_ps_trace.tag_v'::regtype::oid; 10 | 11 | -- This statement can become ALTER TABLE once Timescale allows altering compressed tables. 12 | UPDATE pg_catalog.pg_attribute AS a 13 | SET attstorage = 'x' 14 | WHERE a.attrelid = '_ps_trace.tag'::regclass::oid 15 | AND a.atttypid = '_ps_trace.tag_v'::regtype::oid; 16 | 17 | -- tag_map 18 | -- This statement can become ALTER TYPE once we drop pg12 support 19 | UPDATE pg_catalog.pg_type AS t 20 | SET typstorage = 'x' 21 | WHERE t.oid = 'ps_trace.tag_map'::regtype::oid; 22 | 23 | -- This statement can become ALTER TABLE once Timescale allows altering compressed tables. 24 | UPDATE pg_catalog.pg_attribute AS a 25 | SET attstorage = 'x' 26 | WHERE a.attrelid IN ('_ps_trace.span'::regclass::oid, '_ps_trace.event'::regclass::oid, '_ps_trace.link'::regclass::oid) 27 | AND a.atttypid = 'ps_trace.tag_map'::regtype::oid; -------------------------------------------------------------------------------- /migration/incremental/026-remove-name-param.sql: -------------------------------------------------------------------------------- 1 | 2 | -- we changed the data types (name → text) used in these function signatures 3 | -- need to drop the old versions 4 | DROP FUNCTION IF EXISTS _prom_catalog.hypertable_local_size(name) CASCADE; 5 | DROP FUNCTION IF EXISTS _prom_catalog.hypertable_node_up(name) CASCADE; 6 | DROP FUNCTION IF EXISTS _prom_catalog.hypertable_compression_stats_for_schema(name) CASCADE; 7 | DROP FUNCTION IF EXISTS _prom_catalog.hypertable_remote_size(name) CASCADE; 8 | DROP FUNCTION IF EXISTS _prom_catalog.metric_view() CASCADE; 9 | DROP FUNCTION IF EXISTS _prom_catalog.get_new_pos_for_key(text, name, text[], boolean) CASCADE; 10 | DROP FUNCTION IF EXISTS _prom_catalog.delete_series_catalog_row(name, bigint[]) CASCADE; 11 | DROP FUNCTION IF EXISTS _prom_catalog.get_or_create_label_ids(TEXT, NAME, text[], text[]) CASCADE; 12 | DROP FUNCTION IF EXISTS _prom_catalog.get_or_create_label_ids(text, name, text[], text[]) CASCADE; 13 | DROP FUNCTION IF EXISTS _prom_catalog.create_series(int, NAME, prom_api.label_array, OUT BIGINT) CASCADE; 14 | DROP FUNCTION IF EXISTS _prom_catalog.resurrect_series_ids(name, bigint) CASCADE; 15 | DROP FUNCTION IF EXISTS _prom_catalog.get_or_create_series_id_for_label_array(INT, NAME, prom_api.label_array, OUT BIGINT) CASCADE; 16 | DROP FUNCTION IF EXISTS _prom_catalog.get_confirmed_unused_series(NAME, NAME, NAME, BIGINT[], TIMESTAMPTZ) CASCADE; 17 | DROP FUNCTION IF EXISTS prom_api.register_metric_view(name, name, BOOLEAN) CASCADE; 18 | DROP FUNCTION IF EXISTS prom_api.unregister_metric_view(name, name, BOOLEAN) CASCADE; 19 | DROP FUNCTION IF EXISTS _prom_catalog.delay_compression_job(name, timestamptz) CASCADE; 20 | DROP FUNCTION IF EXISTS 
_prom_catalog.decompress_chunk_for_metric(TEXT, name, name) CASCADE; 21 | DROP PROCEDURE IF EXISTS _prom_catalog.do_decompress_chunks_after(NAME, TIMESTAMPTZ, BOOLEAN) CASCADE; 22 | DROP PROCEDURE IF EXISTS _prom_catalog.decompress_chunks_after(NAME, TIMESTAMPTZ, BOOLEAN) CASCADE; 23 | DROP FUNCTION IF EXISTS _prom_catalog.compress_chunk_for_hypertable(name, name, name, name) CASCADE; 24 | DROP PROCEDURE IF EXISTS _prom_catalog.compress_old_chunks(NAME, NAME, TIMESTAMPTZ) CASCADE; 25 | DROP FUNCTION IF EXISTS _prom_catalog.insert_metric_row(name, timestamptz[], DOUBLE PRECISION[], bigint[]) CASCADE; 26 | DROP FUNCTION IF EXISTS _prom_catalog.insert_exemplar_row(NAME, TIMESTAMPTZ[], BIGINT[], prom_api.label_value_array[], DOUBLE PRECISION[]) CASCADE; 27 | DROP PROCEDURE IF EXISTS _ps_trace.execute_tracing_compression(name, BOOLEAN) CASCADE; 28 | -------------------------------------------------------------------------------- /migration/incremental/027-tag-map-storage-views.sql: -------------------------------------------------------------------------------- 1 | -- When we changed the storage type of `ps_trace.tag_map` and 2 | -- `_ps_trace.tag_v` from `PLAIN` to `EXTENDED`, we didn't update all 3 | -- `pg_attribute` entries. This meant that the definitions for our views 4 | -- (`ps_trace.{event,link,span}`) were incorrect. 5 | -- 6 | -- While we noticed this specifically with our views, it could also cascade 7 | -- to relations which end users built on top of our types. 8 | UPDATE pg_catalog.pg_attribute AS a 9 | SET attstorage = 'x' 10 | WHERE a.atttypid = '_ps_trace.tag_v'::regtype::oid; 11 | 12 | UPDATE pg_catalog.pg_attribute AS a 13 | SET attstorage = 'x' 14 | WHERE a.atttypid = 'ps_trace.tag_map'::regtype::oid; 15 | -------------------------------------------------------------------------------- /migration/incremental/028-refactor-mark-unused-series.sql: -------------------------------------------------------------------------------- 1 | DROP FUNCTION IF EXISTS _prom_catalog.mark_unused_series(text, text, text, timestamptz, timestamptz); 2 | -------------------------------------------------------------------------------- /migration/incremental/029-remove-unused-ingest-func.sql: -------------------------------------------------------------------------------- 1 | -- We've changed the function signature by adding new argument so dropping old one 2 | DROP FUNCTION IF EXISTS _prom_catalog.create_ingest_temp_table(TEXT, TEXT) CASCADE; 3 | -------------------------------------------------------------------------------- /migration/incremental/030-remove-get-confirmed-unused-series.sql: -------------------------------------------------------------------------------- 1 | -- renaming an argument, so we have to drop the prev version first 2 | DROP FUNCTION IF EXISTS _prom_catalog.get_confirmed_unused_series(TEXT, TEXT, TEXT, BIGINT[], TIMESTAMPTZ); 3 | -------------------------------------------------------------------------------- /migration/incremental/031-remove-superfluous-tag_v-ops.sql: -------------------------------------------------------------------------------- 1 | /* Initially a set of operators were defined for the btree-opclass. 2 | * Those caused type coercion conflicts with operators intended to be 3 | * used with tag_v. 
4 | * Here we drop those operators and other objects created by pg implicitly 5 | */ 6 | 7 | DROP OPERATOR IF EXISTS ps_trace.= (_ps_trace.tag_v, _ps_trace.tag_v) CASCADE; 8 | DROP OPERATOR IF EXISTS ps_trace.<> (_ps_trace.tag_v, _ps_trace.tag_v) CASCADE; 9 | DROP OPERATOR IF EXISTS ps_trace.> (_ps_trace.tag_v, _ps_trace.tag_v) CASCADE; 10 | DROP OPERATOR IF EXISTS ps_trace.>= (_ps_trace.tag_v, _ps_trace.tag_v) CASCADE; 11 | DROP OPERATOR IF EXISTS ps_trace.< (_ps_trace.tag_v, _ps_trace.tag_v) CASCADE; 12 | DROP OPERATOR IF EXISTS ps_trace.<= (_ps_trace.tag_v, _ps_trace.tag_v) CASCADE; 13 | 14 | DROP OPERATOR CLASS IF EXISTS public.btree_tag_v_ops USING btree; 15 | DROP OPERATOR FAMILY IF EXISTS public.btree_tag_v_ops USING btree; 16 | -------------------------------------------------------------------------------- /migration/incremental/032-remove-non-empty-span-name-constraint.sql: -------------------------------------------------------------------------------- 1 | -- Our tracing implementation is based on OTLP spec, though OTLP spec 2 | -- doesn't mandate span name to be non empty[1], we have a constraint 3 | -- in the _ps_trace.operation table which raises error on empty span name. 4 | -- On the other hand, Jaeger span name can be empty and their storage 5 | -- integration tests validates the same[2]. Jaeger author even suggested to 6 | -- remove[3] the constraint as it shouldn't trouble any part of the system. 7 | -- [1] https://github.com/open-telemetry/opentelemetry-proto/blob/724e427879e3d2bae2edc0218fff06e37b9eb46e/opentelemetry/proto/trace/v1/trace.proto#L110-L121 8 | -- [2] https://github.com/jaegertracing/jaeger/blob/7872d1b07439c3f2d316065b1fd53e885b26a66f/plugin/storage/integration/fixtures/traces/default.json#L6 9 | -- [3] https://github.com/jaegertracing/jaeger/pull/3922#issuecomment-1256505785 10 | ALTER TABLE _ps_trace.operation DROP CONSTRAINT operation_span_name_check; 11 | -------------------------------------------------------------------------------- /migration/incremental/033-metric-view.sql: -------------------------------------------------------------------------------- 1 | DO $block$ 2 | BEGIN 3 | DROP VIEW IF EXISTS prom_info.metric; 4 | DROP FUNCTION IF EXISTS _prom_catalog.metric_view(); 5 | EXCEPTION WHEN dependent_objects_still_exist THEN 6 | RAISE EXCEPTION dependent_objects_still_exist USING 7 | DETAIL = 'The signature of prom_info.metric is changing. 
' || 8 | 'Dependent objects need to be dropped before the upgrade, and recreated afterwards.', 9 | HINT = 'Drop any objects that depend on prom_info.metric' 10 | ; 11 | END; 12 | $block$; 13 | -------------------------------------------------------------------------------- /migration/incremental/034-maintenance-job-stats.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE _ps_catalog.signal_type 2 | AS ENUM ('metrics', 'traces'); 3 | GRANT USAGE ON TYPE _ps_catalog.signal_type TO prom_maintenance; 4 | 5 | CREATE TYPE _ps_catalog.job_type 6 | AS ENUM ('retention', 'compression'); 7 | GRANT USAGE ON TYPE _ps_catalog.job_type TO prom_maintenance; 8 | 9 | -- Copy-pasted from idempotent 10 | CREATE FUNCTION _prom_catalog.add_job(proc regproc, schedule_interval interval, config jsonb = NULL) 11 | RETURNS INTEGER 12 | --security definer to add jobs as the logged-in user 13 | SECURITY DEFINER 14 | VOLATILE 15 | SET search_path = pg_catalog, pg_temp 16 | AS $func$ 17 | BEGIN 18 | IF NOT _prom_catalog.is_timescaledb_oss() 19 | AND _prom_catalog.get_timescale_major_version() >= 2 20 | AND _prom_catalog.get_timescale_minor_version() >= 9 21 | THEN 22 | RETURN 23 | public.add_job( 24 | proc, 25 | schedule_interval, 26 | config=>config, 27 | -- shift the inital start time to avoid a thundering herd 28 | -- now + random[schedule_interval / 2; schedule_interval] 29 | initial_start=>now() + (random() / 2.0 + 0.5) * schedule_interval, 30 | fixed_schedule=>false 31 | ); 32 | ELSE 33 | RETURN 34 | public.add_job( 35 | proc, 36 | schedule_interval, 37 | config=>config, 38 | -- shift the inital start time to avoid a thundering herd 39 | -- now + random[schedule_interval / 2; schedule_interval] 40 | initial_start=>now() + (random() / 2.0 + 0.5) * schedule_interval 41 | -- fixed schedule didn't exist prior to TS 2.9 42 | ); 43 | END IF; 44 | END 45 | $func$ 46 | LANGUAGE PLPGSQL; 47 | REVOKE ALL ON FUNCTION _prom_catalog.add_job(regproc, interval, jsonb) FROM public; 48 | 49 | --add jobs for each workload executing every 30 min by default 50 | DO $$ 51 | DECLARE 52 | _is_restore_in_progress boolean = false; 53 | BEGIN 54 | _is_restore_in_progress = coalesce((SELECT setting::boolean from pg_catalog.pg_settings where name = 'timescaledb.restoring'), false); 55 | IF NOT _prom_catalog.is_timescaledb_oss() 56 | AND _prom_catalog.get_timescale_major_version() >= 2 57 | AND NOT _is_restore_in_progress 58 | THEN 59 | -- migrate the execute_maintenance_job 60 | -- delete jobs with the old config style 61 | PERFORM public.delete_job(job_id) 62 | FROM timescaledb_information.jobs 63 | WHERE proc_schema = '_prom_catalog' 64 | AND proc_name = 'execute_maintenance_job' 65 | AND NOT coalesce(config, '{}'::jsonb) ?& ARRAY['signal', 'type']; 66 | -- 2 metric retention jobs 67 | PERFORM _prom_catalog.add_job('_prom_catalog.execute_maintenance_job', '30 min', config=>'{"signal": "metrics", "type": "retention"}'); 68 | PERFORM _prom_catalog.add_job('_prom_catalog.execute_maintenance_job', '31 min', config=>'{"signal": "metrics", "type": "retention"}'); 69 | -- 3 metric compression jobs 70 | PERFORM _prom_catalog.add_job('_prom_catalog.execute_maintenance_job', '29 min', config=>'{"signal": "metrics", "type": "compression"}'); 71 | PERFORM _prom_catalog.add_job('_prom_catalog.execute_maintenance_job', '30 min', config=>'{"signal": "metrics", "type": "compression"}'); 72 | PERFORM _prom_catalog.add_job('_prom_catalog.execute_maintenance_job', '31 min', config=>'{"signal": "metrics", 
"type": "compression"}'); 73 | -- 1 traces retention job 74 | PERFORM _prom_catalog.add_job('_prom_catalog.execute_maintenance_job', '30 min', config=>'{"signal": "traces", "type": "retention"}'); 75 | 76 | -- migrate the execute_tracing_compression_job, their config didn't change, but the add_job itself did. 77 | -- delete jobs with the old config style 78 | PERFORM public.delete_job(job_id) 79 | FROM timescaledb_information.jobs 80 | WHERE proc_schema = '_ps_trace' 81 | AND proc_name = 'execute_tracing_compression_job'; 82 | -- re-introduce the jobs 83 | PERFORM _prom_catalog.add_job('_ps_trace.execute_tracing_compression_job', '1 hour', config=>'{"log_verbose":false,"hypertable_name":"span"}'); 84 | PERFORM _prom_catalog.add_job('_ps_trace.execute_tracing_compression_job', '1 hour', config=>'{"log_verbose":false,"hypertable_name":"event"}'); 85 | PERFORM _prom_catalog.add_job('_ps_trace.execute_tracing_compression_job', '1 hour', config=>'{"log_verbose":false,"hypertable_name":"link"}'); 86 | END IF; 87 | END 88 | $$; -------------------------------------------------------------------------------- /migration/incremental/035-remove-func.sql: -------------------------------------------------------------------------------- 1 | 2 | DROP FUNCTION IF EXISTS _prom_catalog.get_metrics_that_need_compression(); 3 | DROP VIEW IF EXISTS _ps_catalog.chunks_to_freeze; 4 | -------------------------------------------------------------------------------- /misc/requirements.txt: -------------------------------------------------------------------------------- 1 | psycopg2-binary==2.9.5 2 | -------------------------------------------------------------------------------- /pgtap-parse/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pgtap-parse" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | log = "0.4.1" 8 | regex = "1.7.0" 9 | 10 | -------------------------------------------------------------------------------- /quick.Dockerfile: -------------------------------------------------------------------------------- 1 | # This dockerfile is a helper for quick iteration on the extension SQL 2 | # To use, first run: `make docker-image-14`, then edit SQL and run `make docker-quick-14` 3 | ARG PG_VERSION 4 | ARG TIMESCALEDB_VERSION_MAJOR 5 | ARG EXTENSION_VERSION 6 | FROM local/dev_promscale_extension:head-ts${TIMESCALEDB_VERSION_MAJOR}-pg${PG_VERSION} 7 | 8 | ARG EXTENSION_VERSION 9 | COPY sql/promscale-${EXTENSION_VERSION}.sql /usr/local/share/postgresql/extension/promscale--${EXTENSION_VERSION}.sql 10 | # TODO (james): This probably needs to be extended to be created for all `upgradeable_from` in promscale.control 11 | COPY sql/promscale-${EXTENSION_VERSION}.sql /usr/local/share/postgresql/extension/promscale--0.0.0--${EXTENSION_VERSION}.sql 12 | # TODO (john): for now, we need the "takeover" script copied too since it's still not final 13 | COPY sql/promscale--0.0.0.sql /usr/local/share/postgresql/extension/promscale--0.0.0.sql 14 | -------------------------------------------------------------------------------- /sql-tests/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sql-tests" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dev-dependencies] 7 | insta = "1.14.0" 8 | test-generator = "0.3.0" 9 | test-common = { path = "../test-common" } 10 | pgtap-parse = { path = "../pgtap-parse" } 11 | 12 | [build-dependencies] 13 | build-deps = "^0.1" 14 | 
-------------------------------------------------------------------------------- /sql-tests/README.md: -------------------------------------------------------------------------------- 1 | # SQL tests 2 | 3 | This directory contains SQL tests for the promscale extension. Tests must be written in SQL and 4 | the usage of `pgtap.sql` is highly encouraged. 5 | 6 | ## Running tests 7 | 8 | Run the tests with `cargo test -p sql-tests`. By default, the tests are run 9 | against `localhost:5432`. You can override it either by specifying 10 | `POSTGRES_URL` or a combination of `POSTGRES_USER`, `POSTGRES_HOST`, `POSTGRES_PORT` 11 | and `POSTGRES_DB` environment variables. 12 | 13 | ``` 14 | POSTGRES_URL=postgres://ubuntu@localhost:54321/ cargo test -p sql-tests 15 | ``` 16 | 17 | or 18 | 19 | ``` 20 | POSTGRES_USER=postgres POSTGRES_HOST=localhost POSTGRES_PORT=5432 cargo test -p sql-tests 21 | ``` 22 | 23 | To run the tests against a docker image, set the `TS_DOCKER_IMAGE` environment variable to the desired docker image, e.g.: 24 | 25 | ``` 26 | TS_DOCKER_IMAGE=ghcr.io/timescale/dev_promscale_extension:master-ts2-pg13 cargo test -p sql-tests 27 | ``` 28 | 29 | ## Testing methods 30 | 31 | Each `.sql` file in the `testdata` directory is executed as its own test, against a fresh database. 32 | We provide two ways of testing: pgTAP tests, and snapshot tests. Snapshot tests are like golden tests: 33 | when they break, it's often unclear why they broke, because the assertion is run over the whole test output. 34 | Please write new tests as pgTAP tests, as these should be more robust. 35 | 36 | ### pgTAP tests 37 | 38 | If pgTAP is used in a test, then the output of the test run is parsed with our 39 | pgTAP parser. If any tests fail, the whole test run will fail. 40 | 41 | To add a new pgTAP test: 42 | 43 | 1. create a new `.sql` file in the `testdata` directory 44 | 2. use pgTAP in your test file, see the existing pgTAP examples 45 | 3. run the tests with `cargo test -p sql-tests` (see [Running tests](#running-tests)) 46 | 47 | ### Snapshot tests 48 | 49 | The output of the script is recorded as a snapshot, and compared on the next test run. 50 | 51 | To add a new snapshot test: 52 | 0. (prerequisite) run `cargo install cargo-insta` 53 | 1. create a new `.sql` file in the `testdata` directory 54 | 2. run the tests with `cargo test -p sql-tests` 55 | 3. the tests will fail, because there is no existing snapshot to compare against yet 56 | 4. validate that the new snapshot output is as you expect it to be 57 | 5. run `cargo insta review` to interactively review the snapshot outputs (or `cargo insta accept` to accept them all) -------------------------------------------------------------------------------- /sql-tests/build.rs: -------------------------------------------------------------------------------- 1 | extern crate build_deps; 2 | 3 | /// This build.rs ensures that the tests are recompiled when new test cases are 4 | /// added to the `testdata` directory.
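// Assumption (based on the build-deps crate's documented behaviour, not on code in this repository):
// rerun_if_changed_paths() expands the glob and emits a cargo:rerun-if-changed directive for every
// match, and watching the testdata directory itself additionally picks up newly added files.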
5 | fn main() { 6 | build_deps::rerun_if_changed_paths("testdata/*.sql").unwrap(); 7 | build_deps::rerun_if_changed_paths("testdata").unwrap(); 8 | } 9 | -------------------------------------------------------------------------------- /sql-tests/testdata/create_ingest_temp_table.sql: -------------------------------------------------------------------------------- 1 | \unset ECHO 2 | \set QUIET 1 3 | \i 'testdata/scripts/pgtap-1.2.0.sql' 4 | 5 | CREATE TEMP TABLE test_data(metric_name TEXT, prefix TEXT); 6 | 7 | INSERT INTO test_data VALUES 8 | ('cpu_usage', 'exe_'), 9 | ('aMultiCase"fun"Metric!', 'eXe_'), 10 | ('aVeryLongMultiCaseMetricWhich"might"BeTruncatedIfIt''sTOOOOOOOOOOOOOOOLONGZOMG!','ThisIsLong_'); 11 | 12 | -- Note: Metric creation must be outside of the test function below, to avoid 13 | -- 'invalid transaction termination' 14 | SELECT _prom_catalog.get_or_create_metric_table_name(metric_name) FROM test_data; 15 | CALL _prom_catalog.finalize_metric_creation(); 16 | 17 | CREATE OR REPLACE FUNCTION test_create_ingest_temp_table() 18 | RETURNS SETOF TEXT LANGUAGE plpgsql AS $$ 19 | DECLARE 20 | metric_table_name TEXT; 21 | temp_table_name TEXT; 22 | temp_schema TEXT; 23 | test RECORD; 24 | BEGIN 25 | FOR test IN SELECT t.metric_name, t.prefix FROM test_data t 26 | LOOP 27 | SELECT table_name INTO metric_table_name FROM _prom_catalog.get_or_create_metric_table_name(test.metric_name); 28 | 29 | RETURN NEXT has_table('prom_data', metric_table_name, format('table %I.%I exists', 'prom_data', metric_table_name)); 30 | 31 | SELECT _prom_catalog.create_ingest_temp_table(metric_table_name, 'prom_data', test.prefix) INTO temp_table_name; 32 | RETURN NEXT is(temp_table_name, left(test.prefix || metric_table_name, 62) , 'temp table name is well-formed'); 33 | SELECT nspname INTO temp_schema FROM pg_namespace WHERE oid = pg_my_temp_schema(); 34 | RETURN NEXT has_table(temp_schema, temp_table_name, format('temp table %I exists', temp_table_name)); 35 | END LOOP; 36 | END; 37 | $$; 38 | 39 | SELECT * FROM runtests('test_create_ingest_temp_table'); 40 | 41 | -------------------------------------------------------------------------------- /sql-tests/testdata/defaults.sql: -------------------------------------------------------------------------------- 1 | \set ECHO all 2 | \set ON_ERROR_STOP 1 3 | 4 | \echo make sure the view returns what we expect it to 5 | select key, value 6 | from _prom_catalog.initial_default dd 7 | except 8 | select key, value 9 | from 10 | ( 11 | values 12 | ('chunk_interval' , (INTERVAL '8 hours')::text), 13 | ('retention_period' , (90 * INTERVAL '1 day')::text), 14 | ('metric_compression' , (exists(select 1 from pg_catalog.pg_proc where proname = 'compress_chunk')::text)), 15 | ('trace_retention_period' , (30 * INTERVAL '1 days')::text), 16 | ('ha_lease_timeout' , '1m'), 17 | ('ha_lease_refresh' , '10s'), 18 | ('epoch_duration' , (INTERVAL '12 hours')::text) 19 | ) x(key, value) 20 | ; 21 | 22 | \echo make sure the getter returns the expected value 23 | select _prom_catalog.get_default_value('chunk_interval') = (INTERVAL '8 hours')::text; 24 | select count(*) = 0 from _prom_catalog.default d where d.key = 'chunk_interval'; 25 | 26 | \echo setting the default value to the same as the initial default 27 | select _prom_catalog.set_default_value('chunk_interval', (INTERVAL '8 hours')::text); 28 | select count(*) = 1 from _prom_catalog.default d where d.key = 'chunk_interval'; 29 | select _prom_catalog.get_default_value('chunk_interval') = (INTERVAL '8 hours')::text; 30 | 31 | 
\echo overriding the initial default 32 | select _prom_catalog.set_default_value('chunk_interval', (INTERVAL '99 hours')::text); 33 | select count(*) = 1 from _prom_catalog.default d where d.key = 'chunk_interval'; 34 | select _prom_catalog.get_default_value('chunk_interval') = (INTERVAL '99 hours')::text; 35 | 36 | \echo setting the default value BACK to the same as the initial default 37 | select _prom_catalog.set_default_value('chunk_interval', (INTERVAL '8 hours')::text); 38 | select count(*) = 1 from _prom_catalog.default d where d.key = 'chunk_interval'; 39 | select _prom_catalog.get_default_value('chunk_interval') = (INTERVAL '8 hours')::text; 40 | -------------------------------------------------------------------------------- /sql-tests/testdata/get_label_key_column_name_for_view.sql: -------------------------------------------------------------------------------- 1 | \unset ECHO 2 | \set QUIET 1 3 | \i 'testdata/scripts/pgtap-1.2.0.sql' 4 | 5 | SELECT * FROM plan(20); 6 | 7 | SELECT 8 | is( 9 | _prom_catalog.get_label_key_column_name_for_view(key_, true), 10 | format('label_%s_id', key_)::NAME, 11 | format('%s is a restricted keyword and sanitized. When id=true the column name is returned suffixed with `_id`', key_) 12 | ) 13 | FROM ( 14 | VALUES 15 | ('time'), 16 | ('value'), 17 | ('series_id'), 18 | ('labels'), 19 | ('series') 20 | ) as reserved_keys (key_); 21 | 22 | SELECT is( 23 | _prom_catalog.get_label_key_column_name_for_view('my-key', true), 24 | 'my-key_id'::NAME, 25 | 'when id=true the column name is returned suffixed with `_id`' 26 | ); 27 | 28 | -- Entries in _prom_catalog.label_key are created 29 | SELECT is( 30 | k.*, 31 | ROW(id, test.key_, test.key_, format('%s_id', test.key_))::_prom_catalog.label_key, 32 | format('_prom_catalog.label_key entry for %s exists', test.key_) 33 | ) 34 | FROM ( 35 | VALUES 36 | ('label_time'), 37 | ('label_value'), 38 | ('label_series_id'), 39 | ('label_labels'), 40 | ('label_series'), 41 | ('my-key') 42 | ) as test (key_) 43 | LEFT JOIN _prom_catalog.label_key k on (k.key = test.key_); 44 | 45 | SELECT is( 46 | count(*), 47 | 6::BIGINT, 48 | '6 entries were created for _prom_catalog.label_key' 49 | ) FROM _prom_catalog.label_key; 50 | 51 | SELECT is( 52 | _prom_catalog.get_label_key_column_name_for_view(key_, false), 53 | format('label_%s', key_)::NAME, 54 | format('%s is a restricted keyword and sanitized', key_) 55 | ) 56 | FROM ( 57 | VALUES 58 | ('time'), 59 | ('value'), 60 | ('series_id'), 61 | ('labels'), 62 | ('series') 63 | ) as reserved_keys (key_); 64 | 65 | SELECT is( 66 | _prom_catalog.get_label_key_column_name_for_view('my-key', false), 67 | 'my-key'::NAME, 68 | 'column name is the same as key' 69 | ); 70 | 71 | SELECT is( 72 | count(*), 73 | 6::BIGINT, 74 | 'no additional _prom_catalog.label_key are created on subsequent calls for the same keys' 75 | ) FROM _prom_catalog.label_key; 76 | 77 | SELECT * FROM finish(); 78 | -------------------------------------------------------------------------------- /sql-tests/testdata/info_view.sql: -------------------------------------------------------------------------------- 1 | \set ECHO all 2 | \set ON_ERROR_STOP 1 3 | 4 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_usage'); 5 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_total'); 6 | CALL _prom_catalog.finalize_metric_creation(); 7 | INSERT INTO prom_data.cpu_usage 8 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, _prom_catalog.get_or_create_series_id('{"__name__": 
"cpu_usage", "namespace":"dev", "node": "brain"}') 9 | FROM generate_series(1,10) g; 10 | INSERT INTO prom_data.cpu_usage 11 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_usage", "namespace":"production", "node": "pinky", "new_tag":"foo"}') 12 | FROM generate_series(1,10) g; 13 | INSERT INTO prom_data.cpu_total 14 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_total", "namespace":"dev", "node": "brain"}') 15 | FROM generate_series(1,10) g; 16 | INSERT INTO prom_data.cpu_total 17 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_total", "namespace":"production", "node": "pinky", "new_tag_2":"bar"}') 18 | FROM generate_series(1,10) g; 19 | 20 | SELECT 21 | id, 22 | metric_name, 23 | table_name, 24 | label_keys, 25 | retention_period, 26 | chunk_interval > interval '7 hours', 27 | compressed_interval > interval '7 hours', 28 | total_chunks, 29 | compressed_chunks 30 | FROM prom_info.metric 31 | ORDER BY id; 32 | 33 | SELECT 34 | id, 35 | metric_name, 36 | table_name, 37 | retention_period, 38 | chunk_interval > interval '7 hour', 39 | compressed_interval > interval '7 hour', 40 | before_compression_bytes, 41 | after_compression_bytes, 42 | label_keys, 43 | total_size, 44 | total_size_bytes, 45 | compression_ratio, 46 | total_chunks, 47 | compressed_chunks 48 | FROM prom_info.metric_detail 49 | ORDER BY id; 50 | -- compress chunks 51 | SELECT public.compress_chunk(public.show_chunks('prom_data.cpu_usage')); 52 | SELECT public.compress_chunk(public.show_chunks('prom_data.cpu_total')); 53 | -- fetch stats with compressed chunks 54 | 55 | SET ROLE prom_reader; 56 | 57 | SELECT 58 | id, 59 | metric_name, 60 | table_name, 61 | label_keys, 62 | retention_period, 63 | chunk_interval > interval '7 hours', 64 | compressed_interval > interval '7 hours', 65 | total_chunks, 66 | compressed_chunks 67 | FROM prom_info.metric 68 | ORDER BY id; 69 | 70 | SELECT 71 | id, 72 | metric_name, 73 | table_name, 74 | retention_period, 75 | chunk_interval > interval '7 hour', 76 | compressed_interval > interval '7 hour', 77 | before_compression_bytes, 78 | after_compression_bytes, 79 | label_keys, 80 | total_size, 81 | total_size_bytes, 82 | compression_ratio, 83 | total_chunks, 84 | compressed_chunks 85 | FROM prom_info.metric_detail 86 | ORDER BY id; 87 | 88 | SELECT * FROM prom_info.label ORDER BY key; 89 | SELECT * FROM prom_info.metric_stats ORDER BY num_series_approx; 90 | SELECT * FROM prom_info.system_stats; 91 | SELECT prom_api.label_cardinality(1); 92 | SELECT prom_api.label_cardinality(2); 93 | SELECT prom_api.label_cardinality(1) + prom_api.label_cardinality(2); 94 | -------------------------------------------------------------------------------- /sql-tests/testdata/large_tracing_tags_support.sql: -------------------------------------------------------------------------------- 1 | \set ECHO all 2 | \set ON_ERROR_STOP 1 3 | 4 | -- We don't want retention to mess with the test data 5 | SELECT ps_trace.set_trace_retention_period('100 years'::INTERVAL); 6 | 7 | CREATE FUNCTION assert(assertion BOOLEAN, msg TEXT) 8 | RETURNS BOOLEAN 9 | LANGUAGE plpgsql VOLATILE AS 10 | $fnc$ 11 | BEGIN 12 | ASSERT assertion, msg; 13 | RETURN assertion; 14 | END; 15 | $fnc$; 16 | 17 | /** 18 | * Tag table 19 | **/ 20 | 21 | SELECT put_tag('service.namespace', 
gen.kilobytes_of_garbage::jsonb, resource_tag_type()) 22 | FROM ( 23 | SELECT string_agg(n::text, '') AS kilobytes_of_garbage 24 | FROM generate_series(1, 15272) AS gs (n) 25 | ) AS gen; 26 | 27 | SELECT 28 | assert(pg_column_size(value) > 2704, 29 | 'tag value is indeed larger than btree''s version 4 maximum row size for an index' 30 | ) 31 | FROM _ps_trace.tag t WHERE t.key = 'service.namespace'; 32 | 33 | SELECT 34 | assert(pg_column_size(value) > 8160, 35 | 'tag value is indeed larger than maximum non-toasted row size' 36 | ) 37 | FROM _ps_trace.tag t WHERE t.key = 'service.namespace'; 38 | 39 | SELECT put_tag('service.namespace', '"testvalue"', resource_tag_type()) AS t1; 40 | \gset 41 | SELECT put_tag('faas.name', '"testvalue"', resource_tag_type()) AS t2; 42 | \gset 43 | SELECT assert(:t1 != :t2, 'tag ids must be distinct when tag keys are'); 44 | 45 | SELECT put_tag('service.namespace', '{"testvalue": 1}'::jsonb, resource_tag_type()) AS t1; 46 | \gset 47 | SELECT put_tag('service.namespace', '{"testvalue": 2}'::jsonb, resource_tag_type()) AS t2; 48 | \gset 49 | SELECT assert(:t1 != :t2, 'tag ids must be distinct when tag values are'); 50 | 51 | SELECT put_operation('myservice', 'test', 'unspecified') AS op_tag_id; 52 | \gset 53 | SELECT put_tag('service.name', '"myservice"'::jsonb, resource_tag_type()) AS srvc_tag_id; 54 | \gset 55 | 56 | SELECT id AS op_tag_id_stored 57 | FROM _ps_trace.operation 58 | WHERE span_kind = 'unspecified' 59 | AND span_name = 'test' 60 | AND service_name_id = :srvc_tag_id; 61 | \gset 62 | 63 | SELECT assert(:op_tag_id_stored = :op_tag_id, 'operation lookup by tag id must return the same tag'); 64 | 65 | SELECT put_tag('host.name', '"foobar"'::jsonb, resource_tag_type()) AS host_tag_id; 66 | \gset 67 | 68 | SELECT assert( 69 | get_tag_map(('{"host.name": "foobar", "service.name": "myservice"}')::jsonb)::jsonb 70 | = 71 | jsonb_build_object('1', :srvc_tag_id, '33', :host_tag_id), 72 | 'get tag map must produce the expected result' 73 | ); 74 | 75 | SELECT _ps_trace.tag_v_eq_matching_tags('service.name', '"myservice"'::jsonb); 76 | 77 | SELECT jsonb_object_agg(n::text, n) AS gigantic_tagmap 78 | FROM generate_series(1, 15272) AS gs (n) 79 | \gset 80 | 81 | /** 82 | * Span table 83 | **/ 84 | 85 | INSERT INTO _ps_trace.span(trace_id,span_id,parent_span_id,operation_id,start_time,end_time,duration_ms,trace_state,span_tags,dropped_tags_count,event_time,dropped_events_count,dropped_link_count,status_code,status_message,instrumentation_lib_id,resource_tags,resource_dropped_tags_count,resource_schema_url_id) 86 | VALUES 87 | (E'05a8be0f-bb79-c052-223e-48608580efcf',2625299614982951051,NULL,19,E'2022-04-26 11:44:55.185962+00',E'2022-04-26 11:44:55.288812+00',102.85,NULL,:'gigantic_tagmap',0,E'["2022-04-26 11:44:55.185999+00","2022-04-26 11:44:55.288781+00")',0,0,E'error',E'Exception: FAILED to fetch a lower char',5,:'gigantic_tagmap',0,NULL); 88 | 89 | SELECT 90 | assert(pg_column_size(r) > 8160, 91 | 'row with tag_map values is indeed larger than maximum non-toasted row size' 92 | ) 93 | FROM _ps_trace.span r 94 | WHERE trace_id = E'05a8be0f-bb79-c052-223e-48608580efcf'; 95 | 96 | /** 97 | * Events table 98 | **/ 99 | 100 | INSERT INTO _ps_trace.event(time,trace_id,span_id,name,tags) 101 | VALUES 102 | (E'2022-04-26 11:44:55.185962+00',E'05a8be0f-bb79-c052-223e-48608580efcf',2625299614982951051,'fooabar',:'gigantic_tagmap'); 103 | 104 | SELECT 105 | assert(pg_column_size(r) > 8160, 106 | 'row with tag_map values is indeed larger than maximum non-toasted row 
size' 107 | ) 108 | FROM _ps_trace.event r 109 | WHERE trace_id = E'05a8be0f-bb79-c052-223e-48608580efcf'; 110 | 111 | /** 112 | * Link table 113 | **/ 114 | 115 | INSERT INTO _ps_trace.link(trace_id,span_id,span_start_time,linked_trace_id,linked_span_id,trace_state,tags) 116 | VALUES 117 | (E'05a8be0f-bb79-c052-223e-48608580efcf',2625299614982951051,E'2022-04-26 11:44:55.185962+00',E'05a8be0f-bb79-c052-223e-48608580efcf',2625299614982951051,'zzz',:'gigantic_tagmap'); 118 | 119 | SELECT 120 | assert(pg_column_size(r) > 8160, 121 | 'row with tag_map values is indeed larger than maximum non-toasted row size' 122 | ) 123 | FROM _ps_trace.link r 124 | WHERE trace_id = E'05a8be0f-bb79-c052-223e-48608580efcf'; -------------------------------------------------------------------------------- /sql-tests/testdata/maintenance_jobs_separation.sql: -------------------------------------------------------------------------------- 1 | \unset ECHO 2 | \set QUIET 1 3 | \set ON_ERROR_STOP 1 4 | \i 'testdata/scripts/pgtap-1.2.0.sql' 5 | 6 | -- Make sure backwards compatible call works 7 | CALL prom_api.execute_maintenance(); 8 | 9 | -- None of the new maintenance job configurations fail to parse or start 10 | CALL _prom_catalog.execute_maintenance_job(0, '{"signal": "metrics", "type": "retention"}'::jsonb); 11 | CALL _prom_catalog.execute_maintenance_job(0, '{"signal": "traces", "type": "retention"}'::jsonb); 12 | CALL _prom_catalog.execute_maintenance_job(0, '{"signal": "metrics", "type": "compression"}'::jsonb); 13 | 14 | 15 | SELECT * FROM plan(9); 16 | 17 | -- No old-style configurations are present at the beginning 18 | SELECT ok(COUNT(*) = 0, 'No old-style configurations are present at the beginning') 19 | FROM timescaledb_information.jobs 20 | WHERE proc_schema = '_prom_catalog' 21 | AND NOT coalesce(config, '{}'::jsonb) ?& ARRAY ['signal', 'type']; 22 | 23 | SELECT ok(COUNT(*) = 2, 'Two metrics retention jobs by default.') 24 | FROM timescaledb_information.jobs 25 | WHERE proc_schema = '_prom_catalog' 26 | AND config ->> 'signal' = 'metrics' 27 | AND config ->> 'type' = 'retention'; 28 | 29 | SELECT ok(COUNT(*) = 3, 'Three metrics compression jobs by default.') 30 | FROM timescaledb_information.jobs 31 | WHERE proc_schema = '_prom_catalog' 32 | AND config ->> 'signal' = 'metrics' 33 | AND config ->> 'type' = 'compression'; 34 | 35 | SELECT ok(COUNT(*) = 1, 'And one traces retention job by default.') 36 | FROM timescaledb_information.jobs 37 | WHERE proc_schema = '_prom_catalog' 38 | AND config ->> 'signal' = 'traces' 39 | AND config ->> 'type' = 'retention'; 40 | 41 | -- Add two old-style configurations 42 | SELECT _prom_catalog.add_job('_prom_catalog.execute_maintenance_job', '30 min'); 43 | SELECT _prom_catalog.add_job('_prom_catalog.execute_maintenance_job', '30 min'); 44 | 45 | -- Run config maintenance reconfiguration 46 | SELECT prom_api.config_maintenance_jobs(1, '10 min'); 47 | 48 | SELECT ok(COUNT(*) = 0, 'Old-style config jobs should be deleted by config_maintenance_jobs') 49 | FROM timescaledb_information.jobs 50 | WHERE proc_schema = '_prom_catalog' 51 | AND NOT coalesce(config, '{}'::jsonb) ?& ARRAY ['signal', 'type']; 52 | 53 | SELECT ok(COUNT(*) = 3, 'Only the new-style configurations should be present') 54 | FROM timescaledb_information.jobs 55 | WHERE proc_schema = '_prom_catalog' 56 | AND (schedule_interval >= '10 min' OR schedule_interval < '15 min'); 57 | 58 | -- Increase the number of jobs 59 | SELECT prom_api.config_maintenance_jobs(2, '15 min', '{"log_verbose": true}'); 60 | 61 
| SELECT ok(COUNT(*) = 6) FROM timescaledb_information.jobs 62 | WHERE proc_schema = '_prom_catalog' 63 | AND config ?& ARRAY ['signal', 'type'] 64 | AND (schedule_interval >= '15 min' OR schedule_interval < '17 min') 65 | AND coalesce(config ->> 'log_verbose', 'false')::boolean = true; 66 | 67 | -- Decrease the number of jobs 68 | SELECT prom_api.config_maintenance_jobs(1, '16 min', '{"log_verbose": false}'); 69 | 70 | SELECT ok(COUNT(*) = 3) FROM timescaledb_information.jobs 71 | WHERE proc_schema = '_prom_catalog' 72 | AND config ?& ARRAY ['signal', 'type'] 73 | AND (schedule_interval >= '16 min' OR schedule_interval < '18 min') 74 | AND coalesce(config ->> 'log_verbose', 'true')::boolean = false; 75 | 76 | SELECT throws_like( 77 | $$SELECT prom_api.config_maintenance_jobs(1, '100 min', '{"log_verbose": "err"}')$$, 78 | 'invalid input syntax for type boolean: "err"' 79 | ); 80 | 81 | SELECT * FROM finish(true); -------------------------------------------------------------------------------- /sql-tests/testdata/metric-chunk-interval.sql: -------------------------------------------------------------------------------- 1 | \unset ECHO 2 | \set QUIET 1 3 | \i 'testdata/scripts/pgtap-1.2.0.sql' 4 | 5 | CREATE OR REPLACE FUNCTION approx_is(range1 INTERVAL, range2 INTERVAL, bound NUMERIC, description TEXT) 6 | RETURNS TEXT AS $$ 7 | DECLARE 8 | result BOOLEAN; 9 | BEGIN 10 | result := range1 > range2 * (1-bound) AND range1 < range2 * (1+bound); 11 | RETURN ok( result, description); 12 | END 13 | $$ LANGUAGE plpgsql; 14 | 15 | 16 | SELECT * FROM plan(4); 17 | 18 | SELECT is(prom_api.get_default_chunk_interval(), '8 hours'::INTERVAL, 'default metric chunk interval is 8 hours'); 19 | 20 | SELECT prom_api.set_default_chunk_interval('1 hour'::INTERVAL); 21 | 22 | SELECT is(prom_api.get_default_chunk_interval(), '1 hour'::INTERVAL, 'default metric chunk interval is 1 hour'); 23 | 24 | 25 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_usage'); 26 | CALL _prom_catalog.finalize_metric_creation(); 27 | 28 | SELECT is(prom_api.get_metric_chunk_interval('cpu_usage'), '1 hour'::INTERVAL, 'get_metric_chunk_interval returns default chunk interval'); 29 | 30 | SELECT prom_api.set_metric_chunk_interval('cpu_usage', '15 minutes'::INTERVAL); 31 | 32 | SELECT approx_is(prom_api.get_metric_chunk_interval('cpu_usage'), '15 minutes'::INTERVAL, 0.01, 'get_metric_chunk_interval returns chunk interval'); 33 | 34 | SELECT * FROM finish(true); 35 | -------------------------------------------------------------------------------- /sql-tests/testdata/metric-retention.sql: -------------------------------------------------------------------------------- 1 | \unset ECHO 2 | \set QUIET 1 3 | \i 'testdata/scripts/pgtap-1.2.0.sql' 4 | 5 | SELECT * FROM plan(4); 6 | 7 | SELECT is(prom_api.get_default_metric_retention_period(), '90 days'::INTERVAL, 'default metric retention period is 90 days'); 8 | 9 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_usage'); 10 | CALL _prom_catalog.finalize_metric_creation(); 11 | SELECT prom_api.set_metric_retention_period('cpu_usage', '1 day'::INTERVAL); 12 | 13 | SELECT is(prom_api.get_metric_retention_period('cpu_usage'), '1 day'::INTERVAL, 'get_metric_retention_period returns retention period'); 14 | SELECT is(prom_api.get_metric_retention_period('prom_data', 'cpu_usage'), '1 day'::INTERVAL, 'get_metric_retention_period returns retention period'); 15 | 16 | SELECT prom_api.set_default_retention_period('55 days'::INTERVAL); 17 | 18 | SELECT 
is(prom_api.get_default_metric_retention_period(), '55 days'::INTERVAL, 'get_default_metric_retention_period returns retention period'); 19 | 20 | SELECT * FROM finish(true); -------------------------------------------------------------------------------- /sql-tests/testdata/metric_compression.sql: -------------------------------------------------------------------------------- 1 | \unset ECHO 2 | \set QUIET 1 3 | \i 'testdata/scripts/pgtap-1.2.0.sql' 4 | 5 | \set older_than '1 hour' 6 | 7 | select * from plan(11); 8 | 9 | -- we don't want the maintenance job run automatically during this test 10 | select delete_job(job_id) 11 | from timescaledb_information.jobs 12 | where proc_name = 'execute_maintenance_job' 13 | ; 14 | 15 | -- create a metric named cpu_usage 16 | select _prom_catalog.get_or_create_metric_table_name('cpu_usage'); 17 | call _prom_catalog.finalize_metric_creation(); 18 | select prom_api.set_metric_retention_period('cpu_usage', '100 years'::interval); 19 | select prom_api.set_metric_chunk_interval('cpu_usage', '1 day'::interval); 20 | 21 | -- create a series for the cpu_usage metric. put series_id into :series_id variable 22 | select x.series_id 23 | from _prom_catalog.get_or_create_series_id_for_kv_array( 24 | 'cpu_usage', 25 | array['__name__', 'test'], 26 | array['cpu_usage', 'value1'] 27 | ) x 28 | \gset 29 | 30 | -- create 1 old chunk 31 | insert into prom_data.cpu_usage(time, value, series_id) 32 | values ('1982-01-01 00:00:00+00', 0.1, :series_id) 33 | ; 34 | 35 | -- 1 36 | select ok(count(*) = 1, 'expect cpu_usage to have 1 chunk') 37 | from public.show_chunks('prom_data.cpu_usage'::regclass) 38 | ; 39 | 40 | -- 2 41 | select ok(count(*) = 0, 'expect no chunks to compress b/c we do not compress the most recent chunk') 42 | from _prom_catalog.metric_chunks_that_need_to_be_compressed(interval :'older_than') x 43 | where metric_name = 'cpu_usage' 44 | ; 45 | 46 | -- this should do nothing 47 | call _prom_catalog.execute_compression_policy(log_verbose=>true); 48 | 49 | -- 3 50 | select ok(count(*) = 0, 'expect no chunks to compress b/c we do not compress the most recent chunk') 51 | from _prom_catalog.metric_chunks_that_need_to_be_compressed(interval :'older_than') x 52 | where metric_name = 'cpu_usage' 53 | ; 54 | 55 | -- create a second old chunk 56 | insert into prom_data.cpu_usage(time, value, series_id) 57 | values ('1982-02-01 00:00:00+00', 0.1, :series_id) 58 | ; 59 | 60 | -- 4 61 | select ok(count(*) = 2, 'expect cpu_usage to have 2 chunks') 62 | from public.show_chunks('prom_data.cpu_usage'::regclass) 63 | ; 64 | 65 | -- 5 66 | select ok(jsonb_array_length(x.chunks_to_compress) = 1, 'expect 1 chunk to compress') 67 | from _prom_catalog.metric_chunks_that_need_to_be_compressed(interval :'older_than') x 68 | where metric_name = 'cpu_usage' 69 | ; 70 | 71 | -- this should compress 1 chunk 72 | call _prom_catalog.execute_compression_policy(log_verbose=>true); 73 | 74 | -- 6 75 | select ok(count(*) = 2, 'expect cpu_usage to have 2 chunks') 76 | from public.show_chunks('prom_data.cpu_usage'::regclass) 77 | ; 78 | 79 | -- 7 80 | select ok(count(*) = 0, 'expect cpu_usage metric to have NO chunks to compress') 81 | from _prom_catalog.metric_chunks_that_need_to_be_compressed(interval :'older_than') x 82 | where metric_name = 'cpu_usage' 83 | ; 84 | 85 | -- create a two more old chunks 86 | insert into prom_data.cpu_usage(time, value, series_id) 87 | values 88 | ('1982-03-01 00:00:00+00', 0.1, :series_id), 89 | ('1982-04-01 00:00:00+00', 0.1, :series_id) 90 | ; 91 | 92 | 
-- 8 93 | select ok(count(*) = 4, 'expect cpu_usage to have 4 chunks') 94 | from public.show_chunks('prom_data.cpu_usage'::regclass) 95 | ; 96 | 97 | -- 9 98 | select ok(jsonb_array_length(x.chunks_to_compress) = 2, 'expect 2 chunks to compress') 99 | from _prom_catalog.metric_chunks_that_need_to_be_compressed(interval :'older_than') x 100 | where metric_name = 'cpu_usage' 101 | ; 102 | 103 | -- this should compress 2 chunks 104 | call _prom_catalog.execute_compression_policy(log_verbose=>true); 105 | 106 | -- 10 107 | select ok(count(*) = 4, 'expect cpu_usage to have 4 chunks') 108 | from public.show_chunks('prom_data.cpu_usage'::regclass) 109 | ; 110 | 111 | -- 11 112 | select ok(count(*) = 0, 'expect cpu_usage metric to have NO chunks to compress') 113 | from _prom_catalog.metric_chunks_that_need_to_be_compressed(interval :'older_than') x 114 | where metric_name = 'cpu_usage' 115 | ; 116 | 117 | select * from finish(true); 118 | -------------------------------------------------------------------------------- /sql-tests/testdata/ps_trace.delete_all_traces.sql: -------------------------------------------------------------------------------- 1 | \unset ECHO 2 | \set QUIET 1 3 | \i 'testdata/scripts/pgtap-1.2.0.sql' 4 | 5 | SELECT * FROM plan(16); 6 | 7 | INSERT INTO _ps_trace.schema_url (url) 8 | VALUES ('fake.url.com'); 9 | 10 | INSERT INTO _ps_trace.instrumentation_lib (name, version, schema_url_id) 11 | ( 12 | SELECT 'inst_lib_1', '1.0.0', id 13 | FROM _ps_trace.schema_url 14 | WHERE url = 'fake.url.com' 15 | LIMIT 1 16 | ); 17 | 18 | SELECT ps_trace.put_operation('my.service.name', 'my.span.name', 'unspecified'); 19 | 20 | SELECT ps_trace.put_tag_key('my.tag.key', 1::ps_trace.tag_type); 21 | 22 | SELECT ps_trace.put_tag('my.tag.key', 'true'::jsonb, 1::ps_trace.tag_type); 23 | 24 | INSERT INTO _ps_trace.span 25 | (trace_id, span_id, parent_span_id, operation_id, start_time, end_time, duration_ms, span_tags, status_code, 26 | resource_tags, resource_schema_url_id) 27 | VALUES ('3dadb2bf-0035-433e-b74b-9075cc9260e8', 28 | 1234, 29 | null, 30 | -1, 31 | now(), 32 | now(), 33 | 0, 34 | '{}'::jsonb::tag_map, 35 | 'ok', 36 | '{}'::jsonb::tag_map, 37 | -1); 38 | 39 | INSERT INTO _ps_trace.link 40 | (trace_id, span_id, span_start_time, linked_trace_id, linked_span_id, link_nbr, trace_state, 41 | tags, dropped_tags_count) 42 | SELECT s.trace_id, 43 | s.span_id, 44 | s.start_time, 45 | s.trace_id, 46 | s.span_id, 47 | 1, 48 | 'OK', 49 | '{}'::jsonb::tag_map, 50 | 0 51 | FROM _ps_trace.span s; 52 | 53 | INSERT INTO _ps_trace.event 54 | (time, trace_id, span_id, event_nbr, name, tags, dropped_tags_count) 55 | SELECT now(), 56 | s.trace_id, 57 | s.span_id, 58 | 1, 59 | 'my.event', 60 | '{}'::jsonb::tag_map, 61 | 0 62 | FROM _ps_trace.span s; 63 | 64 | SELECT is(count(*), 1::BIGINT, '_ps_trace.schema_url has 1 row') FROM _ps_trace.schema_url; 65 | SELECT is(count(*), 1::BIGINT, '_ps_trace.instrumentation_lib has 1 row') FROM _ps_trace.instrumentation_lib; 66 | SELECT is(count(*), 1::BIGINT, '_ps_trace.operation has 1 row') FROM _ps_trace.operation; 67 | SELECT is(count(*), 1::BIGINT, '_ps_trace.tag_key has 1 row') FROM _ps_trace.tag_key WHERE id >= 1000; 68 | SELECT is(count(*), 2::BIGINT, '_ps_trace.tag has 2 rows') FROM _ps_trace.tag; 69 | SELECT is(count(*), 1::BIGINT, '_ps_trace.span has 1 row') FROM _ps_trace.span; 70 | SELECT is(count(*), 1::BIGINT, '_ps_trace.link has 1 row') FROM _ps_trace.link; 71 | SELECT is(count(*), 1::BIGINT, '_ps_trace.event has 1 row') FROM _ps_trace.event; 72 | 73 | 
SELECT ps_trace.delete_all_traces(); 74 | 75 | SELECT is(count(*), 0::BIGINT, '_ps_trace.schema_url has 0 rows') FROM _ps_trace.schema_url; 76 | SELECT is(count(*), 0::BIGINT, '_ps_trace.instrumentation_lib has 0 rows') FROM _ps_trace.instrumentation_lib; 77 | SELECT is(count(*), 0::BIGINT, '_ps_trace.operation has 0 rows') FROM _ps_trace.operation; 78 | SELECT is(count(*), 0::BIGINT, '_ps_trace.tag_key has 0 rows') FROM _ps_trace.tag_key WHERE id >= 1000; 79 | SELECT is(count(*), 0::BIGINT, '_ps_trace.tag has 0 rows') FROM _ps_trace.tag; 80 | SELECT is(count(*), 0::BIGINT, '_ps_trace.span has 0 rows') FROM _ps_trace.span; 81 | SELECT is(count(*), 0::BIGINT, '_ps_trace.link has 0 rows') FROM _ps_trace.link; 82 | SELECT is(count(*), 0::BIGINT, '_ps_trace.event has 0 rows') FROM _ps_trace.event; 83 | 84 | SELECT * FROM finish(true); -------------------------------------------------------------------------------- /sql-tests/testdata/support.sql: -------------------------------------------------------------------------------- 1 | \set ECHO all 2 | \set ON_ERROR_STOP 1 3 | 4 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_usage'); 5 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_total'); 6 | CALL _prom_catalog.finalize_metric_creation(); 7 | INSERT INTO prom_data.cpu_usage 8 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_usage", "namespace":"dev", "node": "brain"}') 9 | FROM generate_series(1,10) g; 10 | INSERT INTO prom_data.cpu_usage 11 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_usage", "namespace":"production", "node": "pinky", "new_tag":"foo"}') 12 | FROM generate_series(1,10) g; 13 | INSERT INTO prom_data.cpu_total 14 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_total", "namespace":"dev", "node": "brain"}') 15 | FROM generate_series(1,10) g; 16 | INSERT INTO prom_data.cpu_total 17 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_total", "namespace":"production", "node": "pinky", "new_tag_2":"bar"}') 18 | FROM generate_series(1,10) g; 19 | 20 | --this should use a subquery with the Promscale extension but not without 21 | --this is thanks to the support function make_call_subquery_support 22 | ANALYZE; 23 | EXPLAIN (costs off) SELECT time, value, prom_api.jsonb(labels), prom_api.val(namespace_id) FROM prom_metric.cpu_usage WHERE labels OPERATOR(prom_api.?) 
('namespace' OPERATOR(ps_tag.!==) 'dev' ) ORDER BY time, series_id LIMIT 5; 24 | -------------------------------------------------------------------------------- /sql-tests/testdata/trace_compression.sql: -------------------------------------------------------------------------------- 1 | \set ECHO all 2 | \set ON_ERROR_STOP 1 3 | 4 | -- We don't want retention to mess with the test data 5 | SELECT ps_trace.set_trace_retention_period('100 years'::INTERVAL); 6 | 7 | INSERT INTO _ps_trace.span(trace_id,span_id,parent_span_id,operation_id,start_time,end_time,duration_ms,trace_state,span_tags,dropped_tags_count,event_time,dropped_events_count,dropped_link_count,status_code,status_message,instrumentation_lib_id,resource_tags,resource_dropped_tags_count,resource_schema_url_id) 8 | VALUES 9 | (E'18dd078e-8c69-e10a-d2fe-9e9f47de7728',-2771219554170079234,NULL,19,E'2022-04-26 11:44:55.185139+00',E'2022-04-26 11:44:55.38517+00',200.031,NULL,E'{"1003": 247}',0,E'["2022-04-26 11:44:55.185659+00","2022-04-26 11:44:55.385148+00")',0,0,E'error',E'Exception: FAILED to fetch a lower char',5,E'{"1": 114, "5": 94, "6": 93, "7": 95}',0,NULL), 10 | (E'15a8be0f-bb79-c052-223e-48608580efce',2625299614982951051,NULL,19,E'2022-04-26 11:44:55.185962+00',E'2022-04-26 11:44:55.288812+00',102.85,NULL,E'{"1003": 242}',0,E'["2022-04-26 11:44:55.185999+00","2022-04-26 11:44:55.288781+00")',0,0,E'error',E'Exception: FAILED to fetch a lower char',5,E'{"1": 114, "5": 94, "6": 93, "7": 95}',0,NULL); 11 | 12 | SELECT hypertable_schema, hypertable_name, chunk_schema, chunk_name, is_compressed FROM timescaledb_information.chunks order by range_end desc; 13 | CALL _ps_trace.execute_tracing_compression_job(0, jsonb '{"log_verbose":false,"hypertable_name":"span"}'); --noop 14 | 15 | SELECT hypertable_schema, hypertable_name, chunk_schema, chunk_name, is_compressed FROM timescaledb_information.chunks order by range_end desc; 16 | 17 | INSERT INTO _ps_trace.span(trace_id,span_id,parent_span_id,operation_id,start_time,end_time,duration_ms,trace_state,span_tags,dropped_tags_count,event_time,dropped_events_count,dropped_link_count,status_code,status_message,instrumentation_lib_id,resource_tags,resource_dropped_tags_count,resource_schema_url_id) 18 | VALUES 19 | (E'28dd078e-8c69-e10a-d2fe-9e9f47de7728',-2771219554170079234,NULL,19,E'2022-04-26 12:44:55.185139+00',E'2022-04-26 11:44:55.38517+00',200.031,NULL,E'{"1003": 247}',0,E'["2022-04-26 11:44:55.185659+00","2022-04-26 11:44:55.385148+00")',0,0,E'error',E'Exception: FAILED to fetch a lower char',5,E'{"1": 114, "5": 94, "6": 93, "7": 95}',0,NULL), 20 | (E'25a8be0f-bb79-c052-223e-48608580efce',2625299614982951051,NULL,19,E'2022-04-26 12:44:55.185962+00',E'2022-04-26 11:44:55.288812+00',102.85,NULL,E'{"1003": 242}',0,E'["2022-04-26 11:44:55.185999+00","2022-04-26 11:44:55.288781+00")',0,0,E'error',E'Exception: FAILED to fetch a lower char',5,E'{"1": 114, "5": 94, "6": 93, "7": 95}',0,NULL); 21 | 22 | CALL _ps_trace.execute_tracing_compression_job(0, jsonb '{"log_verbose":false,"hypertable_name":"span"}'); --compress one 23 | SELECT hypertable_schema, hypertable_name, chunk_schema, chunk_name, is_compressed FROM timescaledb_information.chunks order by range_end desc; 24 | CALL _ps_trace.execute_tracing_compression_job(0, jsonb '{"log_verbose":false,"hypertable_name":"span"}'); --noop 25 | SELECT hypertable_schema, hypertable_name, chunk_schema, chunk_name, is_compressed FROM timescaledb_information.chunks order by range_end desc; 26 | 27 | 28 | INSERT INTO 
_ps_trace.span(trace_id,span_id,parent_span_id,operation_id,start_time,end_time,duration_ms,trace_state,span_tags,dropped_tags_count,event_time,dropped_events_count,dropped_link_count,status_code,status_message,instrumentation_lib_id,resource_tags,resource_dropped_tags_count,resource_schema_url_id) 29 | VALUES 30 | (E'38dd078e-8c69-e10a-d2fe-9e9f47de7728',-2771219554170079234,NULL,19,E'2022-04-26 13:44:55.185139+00',E'2022-04-26 11:44:55.38517+00',200.031,NULL,E'{"1003": 247}',0,E'["2022-04-26 11:44:55.185659+00","2022-04-26 11:44:55.385148+00")',0,0,E'error',E'Exception: FAILED to fetch a lower char',5,E'{"1": 114, "5": 94, "6": 93, "7": 95}',0,NULL), 31 | (E'35a8be0f-bb79-c052-223e-48608580efce',2625299614982951051,NULL,19,E'2022-04-26 13:44:55.185962+00',E'2022-04-26 11:44:55.288812+00',102.85,NULL,E'{"1003": 242}',0,E'["2022-04-26 11:44:55.185999+00","2022-04-26 11:44:55.288781+00")',0,0,E'error',E'Exception: FAILED to fetch a lower char',5,E'{"1": 114, "5": 94, "6": 93, "7": 95}',0,NULL); 32 | 33 | SELECT hypertable_schema, hypertable_name, chunk_schema, chunk_name, is_compressed FROM timescaledb_information.chunks order by range_end desc; 34 | CALL _ps_trace.execute_tracing_compression_job(0, jsonb '{"log_verbose":false,"hypertable_name":"span"}'); --compress another one 35 | SELECT hypertable_schema, hypertable_name, chunk_schema, chunk_name, is_compressed FROM timescaledb_information.chunks order by range_end desc; 36 | CALL _ps_trace.execute_tracing_compression_job(0, jsonb '{"log_verbose":false,"hypertable_name":"span"}'); --noop 37 | SELECT hypertable_schema, hypertable_name, chunk_schema, chunk_name, is_compressed FROM timescaledb_information.chunks order by range_end desc; -------------------------------------------------------------------------------- /sql-tests/testdata/views.sql: -------------------------------------------------------------------------------- 1 | \set ECHO all 2 | \set ON_ERROR_STOP 1 3 | 4 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_usage'); 5 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_total'); 6 | CALL _prom_catalog.finalize_metric_creation(); 7 | INSERT INTO prom_data.cpu_usage 8 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_usage", "namespace":"dev", "node": "brain"}') 9 | FROM generate_series(1,10) g; 10 | INSERT INTO prom_data.cpu_usage 11 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_usage", "namespace":"production", "node": "pinky", "new_tag":"foo"}') 12 | FROM generate_series(1,10) g; 13 | INSERT INTO prom_data.cpu_total 14 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_total", "namespace":"dev", "node": "brain"}') 15 | FROM generate_series(1,10) g; 16 | INSERT INTO prom_data.cpu_total 17 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_total", "namespace":"production", "node": "pinky", "new_tag_2":"bar"}') 18 | FROM generate_series(1,10) g; 19 | 20 | SELECT * FROM prom_info.label ORDER BY key; 21 | 22 | \set ON_ERROR_STOP 0 23 | SELECT count(public.compress_chunk(i)) from public.show_chunks('prom_data.cpu_usage') i; 24 | \set ON_ERROR_STOP 0 25 | 26 | SET role prom_reader; 27 | SELECT * FROM prom_metric.cpu_usage ORDER BY time, series_id LIMIT 5; 28 | SELECT time, value, 
prom_api.jsonb(labels), prom_api.val(namespace_id) FROM prom_metric.cpu_usage ORDER BY time, series_id LIMIT 5; 29 | SELECT * FROM prom_series.cpu_usage ORDER BY series_id; 30 | -------------------------------------------------------------------------------- /sql-tests/tests/snapshots/tests__testdata__defaults.sql.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: sql-tests/tests/tests.rs 3 | expression: query_result 4 | --- 5 | \set ON_ERROR_STOP 1 6 | \echo make sure the view returns what we expect it to 7 | make sure the view returns what we expect it to 8 | select key, value 9 | from _prom_catalog.initial_default dd 10 | except 11 | select key, value 12 | from 13 | ( 14 | values 15 | ('chunk_interval' , (INTERVAL '8 hours')::text), 16 | ('retention_period' , (90 * INTERVAL '1 day')::text), 17 | ('metric_compression' , (exists(select 1 from pg_catalog.pg_proc where proname = 'compress_chunk')::text)), 18 | ('trace_retention_period' , (30 * INTERVAL '1 days')::text), 19 | ('ha_lease_timeout' , '1m'), 20 | ('ha_lease_refresh' , '10s'), 21 | ('epoch_duration' , (INTERVAL '12 hours')::text) 22 | ) x(key, value) 23 | ; 24 | key | value 25 | -----+------- 26 | (0 rows) 27 | 28 | \echo make sure the getter returns the expected value 29 | make sure the getter returns the expected value 30 | select _prom_catalog.get_default_value('chunk_interval') = (INTERVAL '8 hours')::text; 31 | ?column? 32 | ---------- 33 | t 34 | (1 row) 35 | 36 | select count(*) = 0 from _prom_catalog.default d where d.key = 'chunk_interval'; 37 | ?column? 38 | ---------- 39 | t 40 | (1 row) 41 | 42 | \echo setting the default value to the same as the initial default 43 | setting the default value to the same as the initial default 44 | select _prom_catalog.set_default_value('chunk_interval', (INTERVAL '8 hours')::text); 45 | set_default_value 46 | ------------------- 47 | 48 | (1 row) 49 | 50 | select count(*) = 1 from _prom_catalog.default d where d.key = 'chunk_interval'; 51 | ?column? 52 | ---------- 53 | t 54 | (1 row) 55 | 56 | select _prom_catalog.get_default_value('chunk_interval') = (INTERVAL '8 hours')::text; 57 | ?column? 58 | ---------- 59 | t 60 | (1 row) 61 | 62 | \echo overriding the initial default 63 | overriding the initial default 64 | select _prom_catalog.set_default_value('chunk_interval', (INTERVAL '99 hours')::text); 65 | set_default_value 66 | ------------------- 67 | 68 | (1 row) 69 | 70 | select count(*) = 1 from _prom_catalog.default d where d.key = 'chunk_interval'; 71 | ?column? 72 | ---------- 73 | t 74 | (1 row) 75 | 76 | select _prom_catalog.get_default_value('chunk_interval') = (INTERVAL '99 hours')::text; 77 | ?column? 78 | ---------- 79 | t 80 | (1 row) 81 | 82 | \echo setting the default value BACK to the same as the initial default 83 | setting the default value BACK to the same as the initial default 84 | select _prom_catalog.set_default_value('chunk_interval', (INTERVAL '8 hours')::text); 85 | set_default_value 86 | ------------------- 87 | 88 | (1 row) 89 | 90 | select count(*) = 1 from _prom_catalog.default d where d.key = 'chunk_interval'; 91 | ?column? 92 | ---------- 93 | t 94 | (1 row) 95 | 96 | select _prom_catalog.get_default_value('chunk_interval') = (INTERVAL '8 hours')::text; 97 | ?column? 
98 | ---------- 99 | t 100 | (1 row) 101 | 102 | 103 | -------------------------------------------------------------------------------- /sql-tests/tests/snapshots/tests__testdata__support.sql.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: sql-tests/tests/tests.rs 3 | expression: query_result 4 | --- 5 | \set ON_ERROR_STOP 1 6 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_usage'); 7 | get_or_create_metric_table_name 8 | --------------------------------- 9 | (1,cpu_usage,t) 10 | (1 row) 11 | 12 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_total'); 13 | get_or_create_metric_table_name 14 | --------------------------------- 15 | (2,cpu_total,t) 16 | (1 row) 17 | 18 | CALL _prom_catalog.finalize_metric_creation(); 19 | CALL 20 | INSERT INTO prom_data.cpu_usage 21 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_usage", "namespace":"dev", "node": "brain"}') 22 | FROM generate_series(1,10) g; 23 | INSERT 0 10 24 | INSERT INTO prom_data.cpu_usage 25 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_usage", "namespace":"production", "node": "pinky", "new_tag":"foo"}') 26 | FROM generate_series(1,10) g; 27 | INSERT 0 10 28 | INSERT INTO prom_data.cpu_total 29 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_total", "namespace":"dev", "node": "brain"}') 30 | FROM generate_series(1,10) g; 31 | INSERT 0 10 32 | INSERT INTO prom_data.cpu_total 33 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_total", "namespace":"production", "node": "pinky", "new_tag_2":"bar"}') 34 | FROM generate_series(1,10) g; 35 | INSERT 0 10 36 | --this should use a subquery with the Promscale extension but not without 37 | --this is thanks to the support function make_call_subquery_support 38 | ANALYZE; 39 | ANALYZE 40 | EXPLAIN (costs off) SELECT time, value, prom_api.jsonb(labels), prom_api.val(namespace_id) FROM prom_metric.cpu_usage WHERE labels OPERATOR(prom_api.?) 
('namespace' OPERATOR(ps_tag.!==) 'dev' ) ORDER BY time, series_id LIMIT 5; 41 | QUERY PLAN 42 | ---------------------------------------------------------------------------------------- 43 | Limit 44 | InitPlan 1 (returns $0) 45 | -> Result 46 | -> Result 47 | -> Sort 48 | Sort Key: data."time", data.series_id 49 | -> Hash Join 50 | Hash Cond: (data.series_id = series.id) 51 | -> Seq Scan on _hyper_13_1_chunk data 52 | -> Hash 53 | -> Seq Scan on cpu_usage series 54 | Filter: (NOT ((labels)::integer[] && ($0)::integer[])) 55 | (12 rows) 56 | 57 | 58 | -------------------------------------------------------------------------------- /sql-tests/tests/snapshots/tests__testdata__views.sql.snap: -------------------------------------------------------------------------------- 1 | --- 2 | source: sql-tests/tests/tests.rs 3 | expression: query_result 4 | --- 5 | \set ON_ERROR_STOP 1 6 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_usage'); 7 | get_or_create_metric_table_name 8 | --------------------------------- 9 | (1,cpu_usage,t) 10 | (1 row) 11 | 12 | SELECT _prom_catalog.get_or_create_metric_table_name('cpu_total'); 13 | get_or_create_metric_table_name 14 | --------------------------------- 15 | (2,cpu_total,t) 16 | (1 row) 17 | 18 | CALL _prom_catalog.finalize_metric_creation(); 19 | CALL 20 | INSERT INTO prom_data.cpu_usage 21 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_usage", "namespace":"dev", "node": "brain"}') 22 | FROM generate_series(1,10) g; 23 | INSERT 0 10 24 | INSERT INTO prom_data.cpu_usage 25 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_usage", "namespace":"production", "node": "pinky", "new_tag":"foo"}') 26 | FROM generate_series(1,10) g; 27 | INSERT 0 10 28 | INSERT INTO prom_data.cpu_total 29 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_total", "namespace":"dev", "node": "brain"}') 30 | FROM generate_series(1,10) g; 31 | INSERT 0 10 32 | INSERT INTO prom_data.cpu_total 33 | SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, _prom_catalog.get_or_create_series_id('{"__name__": "cpu_total", "namespace":"production", "node": "pinky", "new_tag_2":"bar"}') 34 | FROM generate_series(1,10) g; 35 | INSERT 0 10 36 | SELECT * FROM prom_info.label ORDER BY key; 37 | key | value_column_name | id_column_name | values | num_values 38 | -----------+-------------------+----------------+-----------------------+------------ 39 | __name__ | __name__ | __name___id | {cpu_total,cpu_usage} | 2 40 | namespace | namespace | namespace_id | {dev,production} | 2 41 | new_tag | new_tag | new_tag_id | {foo} | 1 42 | new_tag_2 | new_tag_2 | new_tag_2_id | {bar} | 1 43 | node | node | node_id | {brain,pinky} | 2 44 | (5 rows) 45 | 46 | \set ON_ERROR_STOP 0 47 | SELECT count(public.compress_chunk(i)) from public.show_chunks('prom_data.cpu_usage') i; 48 | count 49 | ------- 50 | 1 51 | (1 row) 52 | 53 | \set ON_ERROR_STOP 0 54 | SET role prom_reader; 55 | SET 56 | SELECT * FROM prom_metric.cpu_usage ORDER BY time, series_id LIMIT 5; 57 | time | value | series_id | labels | node_id | namespace_id | new_tag_id 58 | ------------------------+-------+-----------+-----------+---------+--------------+------------ 59 | 2000-01-01 02:03:05+00 | 101.1 | 1 | {1,3,4} | 3 | 4 | 60 | 2000-01-01 02:03:05+00 | 101.1 | 2 | {1,5,7,6} | 5 | 7 | 
6 61 | 2000-01-01 02:03:06+00 | 102.1 | 1 | {1,3,4} | 3 | 4 | 62 | 2000-01-01 02:03:06+00 | 102.1 | 2 | {1,5,7,6} | 5 | 7 | 6 63 | 2000-01-01 02:03:07+00 | 103.1 | 1 | {1,3,4} | 3 | 4 | 64 | (5 rows) 65 | 66 | SELECT time, value, prom_api.jsonb(labels), prom_api.val(namespace_id) FROM prom_metric.cpu_usage ORDER BY time, series_id LIMIT 5; 67 | time | value | jsonb | val 68 | ------------------------+-------+-----------------------------------------------------------------------------------------+------------ 69 | 2000-01-01 02:03:05+00 | 101.1 | {"node": "brain", "__name__": "cpu_usage", "namespace": "dev"} | dev 70 | 2000-01-01 02:03:05+00 | 101.1 | {"node": "pinky", "new_tag": "foo", "__name__": "cpu_usage", "namespace": "production"} | production 71 | 2000-01-01 02:03:06+00 | 102.1 | {"node": "brain", "__name__": "cpu_usage", "namespace": "dev"} | dev 72 | 2000-01-01 02:03:06+00 | 102.1 | {"node": "pinky", "new_tag": "foo", "__name__": "cpu_usage", "namespace": "production"} | production 73 | 2000-01-01 02:03:07+00 | 103.1 | {"node": "brain", "__name__": "cpu_usage", "namespace": "dev"} | dev 74 | (5 rows) 75 | 76 | SELECT * FROM prom_series.cpu_usage ORDER BY series_id; 77 | series_id | labels | node | namespace | new_tag 78 | -----------+-----------+-------+------------+--------- 79 | 1 | {1,3,4} | brain | dev | 80 | 2 | {1,5,7,6} | pinky | production | foo 81 | (2 rows) 82 | 83 | 84 | -------------------------------------------------------------------------------- /sql-tests/tests/tests.rs: -------------------------------------------------------------------------------- 1 | use insta::assert_snapshot; 2 | use pgtap_parse::parse_pgtap_result; 3 | use std::{ 4 | env, 5 | mem::MaybeUninit, 6 | sync::{Mutex, Once}, 7 | }; 8 | use test_common::*; 9 | use test_generator::test_resources; 10 | 11 | const TESTDATA: &'static str = concat!(env!("CARGO_MANIFEST_DIR"), "/testdata"); 12 | 13 | fn init_lock() -> &'static Mutex<()> { 14 | static mut SINGLETON: MaybeUninit<Mutex<()>> = MaybeUninit::uninit(); 15 | static ONCE: Once = Once::new(); 16 | 17 | unsafe { 18 | ONCE.call_once(|| { 19 | let singleton = Mutex::new(()); 20 | SINGLETON.write(singleton); 21 | }); 22 | 23 | SINGLETON.assume_init_ref() 24 | } 25 | } 26 | 27 | // TODO fix how test_resources works in nested workspaces 28 | #[test_resources("sql-tests/testdata/*.sql")] 29 | fn sql_tests(full_resource: &str) { 30 | let pg_blueprint = PostgresContainerBlueprint::new().with_testdata(TESTDATA); 31 | let test_pg_instance = new_test_instance_from_env(&pg_blueprint); 32 | let mut init_conn = test_pg_instance.connect(); 33 | // This lock prevents multiple callers from running 34 | // CREATE EXTENSION simultaneously. In the case of 35 | // promscale_extension, concurrent CREATE EXTENSION causes 36 | // issues when migration incremental/003-users.sql executes.
37 | { 38 | let _init_lock = init_lock().lock().unwrap(); 39 | init_conn 40 | .simple_query("CREATE EXTENSION promscale;") 41 | .expect("Unable to create extension promscale."); 42 | 43 | init_conn 44 | .simple_query("SELECT 1;") 45 | .expect("Health-check query failed"); 46 | } 47 | 48 | let resource = if let Some((_, rest)) = full_resource.split_once('/') { 49 | rest 50 | } else { 51 | full_resource 52 | }; 53 | let query_result = test_pg_instance.exec_sql_script(resource); 54 | 55 | match parse_pgtap_result(&query_result) { 56 | Some(result) => { 57 | // Result parsed as pgtap output 58 | if result.executed_test_count != result.planned_test_count 59 | || result.success_count != result.planned_test_count 60 | { 61 | assert!(false, "{}", query_result); 62 | } 63 | } 64 | None => { 65 | // Result is not pgtap output, fall back to snapshot compare 66 | assert_snapshot!(resource, query_result) 67 | } 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /sql/promscale--0.0.0--0.5.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.5.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.0.0--0.5.1.sql: -------------------------------------------------------------------------------- 1 | promscale--0.5.1.sql -------------------------------------------------------------------------------- /sql/promscale--0.0.0--0.5.2.sql: -------------------------------------------------------------------------------- 1 | promscale--0.5.2.sql -------------------------------------------------------------------------------- /sql/promscale--0.0.0--0.5.4.sql: -------------------------------------------------------------------------------- 1 | promscale--0.5.4.sql -------------------------------------------------------------------------------- /sql/promscale--0.0.0--0.6.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.6.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.0.0--0.7.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.7.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.0.0--0.8.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.8.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.1--0.1.1.sql: -------------------------------------------------------------------------------- 1 | SET LOCAL search_path = pg_catalog; 2 | 3 | CREATE OR REPLACE FUNCTION @extschema@.update_tsprom_metadata(meta_key text, meta_value text, send_telemetry BOOLEAN) 4 | RETURNS VOID 5 | SET search_path TO pg_catalog 6 | AS $func$ 7 | INSERT INTO _timescaledb_catalog.metadata(key, value, include_in_telemetry) 8 | VALUES ('promscale_' OPERATOR(pg_catalog.||) meta_key,meta_value, send_telemetry) 9 | ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value, include_in_telemetry = EXCLUDED.include_in_telemetry 10 | $func$ 11 | LANGUAGE SQL VOLATILE SECURITY DEFINER; 12 | 13 | -------------------------------------------------------------------------------- /sql/promscale--0.1.1--0.1.2.sql: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/timescale/promscale_extension/a60372f5961002a248c0d00e21ab2f5be0d50bd3/sql/promscale--0.1.1--0.1.2.sql -------------------------------------------------------------------------------- /sql/promscale--0.1.2--0.1.3-beta.sql: -------------------------------------------------------------------------------- 1 | SET LOCAL search_path = pg_catalog; 2 | 3 | CREATE OR REPLACE FUNCTION @extschema@.prom_delta_transition(state internal, lowest_time timestamptz, 4 | greatest_time timestamptz, step bigint, range bigint, 5 | sample_time timestamptz, sample_value double precision) 6 | RETURNS internal AS '$libdir/promscale', 'prom_delta_transition_wrapper' 7 | LANGUAGE C IMMUTABLE PARALLEL SAFE; 8 | 9 | CREATE OR REPLACE FUNCTION @extschema@.prom_rate_transition(state internal, lowest_time timestamptz, 10 | greatest_time timestamptz, step bigint, range bigint, 11 | sample_time timestamptz, sample_value double precision) 12 | RETURNS internal AS '$libdir/promscale', 'prom_rate_transition_wrapper' 13 | LANGUAGE C IMMUTABLE PARALLEL SAFE; 14 | 15 | CREATE OR REPLACE FUNCTION @extschema@.prom_increase_transition(state internal, lowest_time timestamptz, 16 | greatest_time timestamptz, step bigint, range bigint, 17 | sample_time timestamptz, sample_value double precision) 18 | RETURNS internal AS '$libdir/promscale', 'prom_increase_transition_wrapper' 19 | LANGUAGE C IMMUTABLE PARALLEL SAFE; 20 | 21 | CREATE OR REPLACE FUNCTION @extschema@.prom_extrapolate_final(state internal) 22 | RETURNS DOUBLE PRECISION[] 23 | AS '$libdir/promscale', 'prom_delta_final_wrapper' 24 | LANGUAGE C IMMUTABLE PARALLEL SAFE; 25 | 26 | CREATE FUNCTION @extschema@."vector_selector_transition"("state" internal, "start_time" TimestampTz, "end_time" TimestampTz, "bucket_width" bigint, "lookback" bigint, "time" TimestampTz, "val" double precision) RETURNS internal IMMUTABLE PARALLEL SAFE LANGUAGE c AS 'MODULE_PATHNAME', 'vector_selector_transition_wrapper'; 27 | -- ./src/lib.rs:337:0 28 | CREATE FUNCTION @extschema@."vector_selector_final"("state" internal) RETURNS double precision[] IMMUTABLE PARALLEL SAFE LANGUAGE c AS 'MODULE_PATHNAME', 'vector_selector_final_wrapper'; 29 | -- ./src/lib.rs:345:0 30 | CREATE FUNCTION @extschema@."vector_selector_serialize"("state" internal) RETURNS bytea IMMUTABLE STRICT PARALLEL SAFE LANGUAGE c AS 'MODULE_PATHNAME', 'vector_selector_serialize_wrapper'; 31 | -- ./src/lib.rs:350:0 32 | CREATE FUNCTION @extschema@."vector_selector_deserialize"("bytes" bytea, "_internal" internal) RETURNS internal IMMUTABLE PARALLEL SAFE LANGUAGE c AS 'MODULE_PATHNAME', 'vector_selector_deserialize_wrapper'; 33 | -- ./src/lib.rs:358:0 34 | CREATE FUNCTION @extschema@."vector_selector_combine"("state1" internal, "state2" internal) RETURNS internal IMMUTABLE PARALLEL SAFE LANGUAGE c AS 'MODULE_PATHNAME', 'vector_selector_combine_wrapper'; 35 | CREATE AGGREGATE @extschema@.vector_selector( 36 | start_time timestamptz, 37 | end_time timestamptz, 38 | bucket_width bigint, 39 | lookback bigint, 40 | sample_time timestamptz, 41 | sample_value DOUBLE PRECISION) 42 | ( 43 | sfunc = @extschema@.vector_selector_transition, 44 | stype = internal, 45 | finalfunc = @extschema@.vector_selector_final, 46 | combinefunc = @extschema@.vector_selector_combine, 47 | serialfunc = @extschema@.vector_selector_serialize, 48 | deserialfunc = @extschema@.vector_selector_deserialize, 49 | parallel = safe 50 | ); -------------------------------------------------------------------------------- 
/sql/promscale--0.1.2.sql: -------------------------------------------------------------------------------- 1 | promscale--0.1.1.sql -------------------------------------------------------------------------------- /sql/promscale--0.1.3-beta--0.2.0.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/timescale/promscale_extension/a60372f5961002a248c0d00e21ab2f5be0d50bd3/sql/promscale--0.1.3-beta--0.2.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.2.0--0.3.0.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/timescale/promscale_extension/a60372f5961002a248c0d00e21ab2f5be0d50bd3/sql/promscale--0.2.0--0.3.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.2.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.1.3-beta.sql -------------------------------------------------------------------------------- /sql/promscale--0.3.0--0.3.1.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/timescale/promscale_extension/a60372f5961002a248c0d00e21ab2f5be0d50bd3/sql/promscale--0.3.0--0.3.1.sql -------------------------------------------------------------------------------- /sql/promscale--0.3.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.2.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.3.1--0.3.2.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION @extschema@.label_find_key_equal(key_to_match prom_api.label_key, pat prom_api.pattern) 2 | RETURNS prom_api.matcher_positive 3 | AS $func$ 4 | SELECT COALESCE(pg_catalog.array_agg(l.id), array[]::int[])::prom_api.matcher_positive 5 | FROM _prom_catalog.label l 6 | WHERE l.key OPERATOR(pg_catalog.=) key_to_match and l.value OPERATOR(pg_catalog.=) pat 7 | $func$ 8 | LANGUAGE SQL STABLE PARALLEL SAFE 9 | SUPPORT @extschema@.make_call_subquery_support; 10 | ALTER FUNCTION @extschema@.label_find_key_equal(prom_api.label_key, prom_api.pattern) OWNER TO CURRENT_USER; 11 | 12 | CREATE OR REPLACE FUNCTION @extschema@.label_find_key_not_equal(key_to_match prom_api.label_key, pat prom_api.pattern) 13 | RETURNS prom_api.matcher_negative 14 | AS $func$ 15 | SELECT COALESCE(pg_catalog.array_agg(l.id), array[]::int[])::prom_api.matcher_negative 16 | FROM _prom_catalog.label l 17 | WHERE l.key OPERATOR(pg_catalog.=) key_to_match and l.value OPERATOR(pg_catalog.=) pat 18 | $func$ 19 | LANGUAGE SQL STABLE PARALLEL SAFE 20 | SUPPORT @extschema@.make_call_subquery_support; 21 | ALTER FUNCTION @extschema@.label_find_key_not_equal(prom_api.label_key, prom_api.pattern) OWNER TO CURRENT_USER; 22 | 23 | CREATE OR REPLACE FUNCTION @extschema@.label_find_key_regex(key_to_match prom_api.label_key, pat prom_api.pattern) 24 | RETURNS prom_api.matcher_positive 25 | AS $func$ 26 | SELECT COALESCE(pg_catalog.array_agg(l.id), array[]::int[])::prom_api.matcher_positive 27 | FROM _prom_catalog.label l 28 | WHERE l.key OPERATOR(pg_catalog.=) key_to_match and l.value OPERATOR(pg_catalog.~) pat 29 | $func$ 30 | LANGUAGE SQL STABLE PARALLEL SAFE 31 | SUPPORT @extschema@.make_call_subquery_support; 32 | ALTER FUNCTION 
@extschema@.label_find_key_regex(prom_api.label_key, prom_api.pattern) OWNER TO CURRENT_USER; 33 | 34 | CREATE OR REPLACE FUNCTION @extschema@.label_find_key_not_regex(key_to_match prom_api.label_key, pat prom_api.pattern) 35 | RETURNS prom_api.matcher_negative 36 | AS $func$ 37 | SELECT COALESCE(pg_catalog.array_agg(l.id), array[]::int[])::prom_api.matcher_negative 38 | FROM _prom_catalog.label l 39 | WHERE l.key OPERATOR(pg_catalog.=) key_to_match and l.value OPERATOR(pg_catalog.~) pat 40 | $func$ 41 | LANGUAGE SQL STABLE PARALLEL SAFE 42 | SUPPORT @extschema@.make_call_subquery_support; 43 | ALTER FUNCTION @extschema@.label_find_key_not_regex(prom_api.label_key, prom_api.pattern) OWNER TO CURRENT_USER; 44 | 45 | CREATE OR REPLACE FUNCTION @extschema@.update_tsprom_metadata(meta_key text, meta_value text, send_telemetry BOOLEAN) 46 | RETURNS VOID 47 | SET search_path TO pg_catalog 48 | AS $func$ 49 | INSERT INTO _timescaledb_catalog.metadata(key, value, include_in_telemetry) 50 | VALUES ('promscale_' OPERATOR(pg_catalog.||) meta_key,meta_value, send_telemetry) 51 | ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value, include_in_telemetry = EXCLUDED.include_in_telemetry 52 | $func$ 53 | LANGUAGE SQL VOLATILE SECURITY DEFINER; 54 | ALTER FUNCTION @extschema@.update_tsprom_metadata(text, text, BOOLEAN) OWNER TO CURRENT_USER; -------------------------------------------------------------------------------- /sql/promscale--0.3.1.sql: -------------------------------------------------------------------------------- 1 | promscale--0.3.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.3.2.sql: -------------------------------------------------------------------------------- 1 | promscale--0.3.1.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.0--0.5.1.sql: -------------------------------------------------------------------------------- 1 | promscale--0.5.1.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.0--0.5.2.sql: -------------------------------------------------------------------------------- 1 | promscale--0.5.2.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.0--0.5.4.sql: -------------------------------------------------------------------------------- 1 | promscale--0.5.4.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.0--0.6.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.6.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.0--0.7.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.7.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.0--0.8.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.8.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.1--0.5.2.sql: -------------------------------------------------------------------------------- 1 | promscale--0.5.2.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.1--0.5.4.sql: -------------------------------------------------------------------------------- 1 | promscale--0.5.4.sql 
-------------------------------------------------------------------------------- /sql/promscale--0.5.1--0.6.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.6.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.1--0.7.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.7.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.1--0.8.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.8.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.2--0.5.4.sql: -------------------------------------------------------------------------------- 1 | promscale--0.5.4.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.2--0.6.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.6.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.2--0.7.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.7.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.2--0.8.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.8.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.3--0.5.4.sql: -------------------------------------------------------------------------------- 1 | promscale--0.5.4.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.3--0.6.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.6.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.3--0.7.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.7.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.3--0.8.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.8.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.4--0.6.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.6.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.4--0.7.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.7.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.5.4--0.8.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.8.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.6.0--0.7.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.7.0.sql -------------------------------------------------------------------------------- /sql/promscale--0.6.0--0.8.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.8.0.sql 
-------------------------------------------------------------------------------- /sql/promscale--0.7.0--0.8.0.sql: -------------------------------------------------------------------------------- 1 | promscale--0.8.0.sql -------------------------------------------------------------------------------- /src/aggregate_utils.rs: -------------------------------------------------------------------------------- 1 | use std::ptr::null_mut; 2 | 3 | use pgx::pg_sys; 4 | 5 | // TODO move to func_utils once there are enough functions to warrant one 6 | #[allow(dead_code)] 7 | pub unsafe fn get_collation(fcinfo: pg_sys::FunctionCallInfo) -> Option<pg_sys::Oid> { 8 | if (*fcinfo).fncollation == 0 { 9 | None 10 | } else { 11 | Some((*fcinfo).fncollation) 12 | } 13 | } 14 | 15 | pub unsafe fn in_aggregate_context<T, F: FnOnce() -> T>( 16 | fcinfo: pg_sys::FunctionCallInfo, 17 | f: F, 18 | ) -> T { 19 | let mctx = 20 | aggregate_mctx(fcinfo).unwrap_or_else(|| pgx::error!("cannot call as non-aggregate")); 21 | crate::palloc::in_memory_context(mctx, f) 22 | } 23 | 24 | pub fn aggregate_mctx(fcinfo: pg_sys::FunctionCallInfo) -> Option<pg_sys::MemoryContext> { 25 | let mut mctx = null_mut(); 26 | let is_aggregate = unsafe { pg_sys::AggCheckCallContext(fcinfo, &mut mctx) }; 27 | if is_aggregate == 0 { 28 | None 29 | } else { 30 | debug_assert!(!mctx.is_null()); 31 | Some(mctx) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/aggregates/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::aggregates::gapfill_delta::_prom_ext::GapfillDeltaTransition; 2 | 3 | mod gapfill_delta; 4 | mod prom_delta; 5 | mod prom_increase; 6 | mod prom_rate; 7 | mod vector_selector; 8 | 9 | pub type Milliseconds = i64; 10 | pub type Microseconds = i64; 11 | 12 | pub const STALE_NAN: u64 = 0x7ff0000000000002; 13 | pub const USECS_PER_SEC: i64 = 1_000_000; 14 | pub const USECS_PER_MS: i64 = 1_000; 15 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | use pgx::pg_sys::AsPgCStr; 2 | use pgx::*; 3 | 4 | mod aggregate_utils; 5 | mod aggregates; 6 | mod iterable_jsonb; 7 | mod jsonb_digest; 8 | mod palloc; 9 | mod pg_imports; 10 | mod raw; 11 | mod regex; 12 | mod schema; 13 | mod support; 14 | mod type_builder; 15 | mod util; 16 | 17 | pg_module_magic!(); 18 | 19 | /// A helper function for building [`pgx::PgList`] out of 20 | /// an iterable collection of `str`. 21 | /// 22 | /// For safety reasons it p-allocates and copies its arguments 23 | /// every time. This is OK for our current once-per-query usage, 24 | /// but don't attempt it on a per-row basis.
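/// A minimal usage sketch (illustrative only, not compiled as a doctest):
///
/// ```ignore
/// // Build the qualified path `_prom_ext.num_cpus` as a list of string nodes.
/// let path = build_pg_list_of_cstrings(["_prom_ext", "num_cpus"]);
/// assert_eq!(path.len(), 2);
/// ```
///
/// This is the helper `pg_imports::func_get_detail` uses to assemble the
/// fully-qualified function name it passes to `pg_sys::func_get_detail`.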
25 | pub fn build_pg_list_of_cstrings<'a, I>(parts: I) -> PgList 26 | where 27 | I: IntoIterator, 28 | { 29 | let mut res = PgList::new(); 30 | for p in parts { 31 | res.push(unsafe { pg_sys::makeString(p.as_pg_cstr() as _) }); 32 | } 33 | res 34 | } 35 | 36 | #[cfg(test)] 37 | #[pg_schema] 38 | pub mod pg_test { 39 | pub fn setup(_options: Vec<&str>) { 40 | // perform one-off initialization when the pg_test framework starts 41 | } 42 | 43 | pub fn postgresql_conf_options() -> Vec<&'static str> { 44 | // return any postgresql.conf settings that are required for your tests 45 | vec!["search_path = 'public, _prom_ext, ps_trace, _ps_trace'"] 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/palloc.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | ffi::CStr, 3 | ops::{Deref, DerefMut}, 4 | os::raw::c_char, 5 | ptr::NonNull, 6 | }; 7 | 8 | use pgx::*; 9 | 10 | pub unsafe fn in_memory_context T>(mctx: pg_sys::MemoryContext, f: F) -> T { 11 | let prev_ctx = pg_sys::CurrentMemoryContext; 12 | pg_sys::CurrentMemoryContext = mctx; 13 | let t = f(); 14 | pg_sys::CurrentMemoryContext = prev_ctx; 15 | t 16 | } 17 | 18 | /// The type to take ownership of string values 19 | /// that a caller is supposed to pfree. 20 | pub struct PallocdString { 21 | pg_box: PgBox, 22 | } 23 | 24 | impl PallocdString { 25 | /// SAFETY: the pointer passed into this function must be a NULL-terminated string 26 | /// and conform to the requirements of [`std::ffi::CStr`] 27 | pub unsafe fn from_ptr(ptr: *mut c_char) -> Option { 28 | if ptr.is_null() { 29 | None 30 | } else { 31 | Some(PallocdString { 32 | pg_box: PgBox::<_, AllocatedByRust>::from_rust(ptr), 33 | }) 34 | } 35 | } 36 | 37 | pub fn as_c_str(&self) -> &CStr { 38 | unsafe { CStr::from_ptr(self.pg_box.as_ptr()) } 39 | } 40 | } 41 | 42 | use pgx::pg_sys::Datum; 43 | pub use pgx::Internal; 44 | 45 | #[allow(clippy::missing_safety_doc)] 46 | pub unsafe trait InternalAsValue { 47 | unsafe fn to_inner(self) -> Option>; 48 | } 49 | 50 | unsafe impl InternalAsValue for Internal { 51 | unsafe fn to_inner(self) -> Option> { 52 | self.unwrap() 53 | .map(|p| Inner(NonNull::new(p.cast_mut_ptr()).unwrap())) 54 | } 55 | } 56 | 57 | #[allow(clippy::missing_safety_doc)] 58 | pub unsafe trait ToInternal { 59 | fn internal(self) -> Internal; 60 | } 61 | 62 | pub struct Inner(pub NonNull); 63 | 64 | impl Deref for Inner { 65 | type Target = T; 66 | 67 | fn deref(&self) -> &Self::Target { 68 | unsafe { self.0.as_ref() } 69 | } 70 | } 71 | 72 | impl DerefMut for Inner { 73 | fn deref_mut(&mut self) -> &mut Self::Target { 74 | unsafe { self.0.as_mut() } 75 | } 76 | } 77 | 78 | unsafe impl ToInternal for Option> { 79 | fn internal(self) -> Internal { 80 | self.map(|p| Datum::from(p.0.as_ptr())).into() 81 | } 82 | } 83 | 84 | unsafe impl ToInternal for Inner { 85 | fn internal(self) -> Internal { 86 | Some(Datum::from(self.0.as_ptr())).into() 87 | } 88 | } 89 | 90 | impl From for Inner { 91 | fn from(t: T) -> Self { 92 | unsafe { Internal::new(t).to_inner().unwrap() } 93 | } 94 | } 95 | 96 | unsafe impl ToInternal for *mut T { 97 | fn internal(self) -> Internal { 98 | Internal::from(Some(Datum::from(self))) 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /src/pg_imports.rs: -------------------------------------------------------------------------------- 1 | //! This module contains functions that are either not imported 2 | //! 
by our version of pgx or diverge between versions of 3 | //! PostgreSQL and thus require conditional compilation. 4 | 5 | use pgx::*; 6 | 7 | #[cfg(any(feature = "pg15"))] 8 | pub type PgString = pg_sys::String; 9 | #[cfg(any(feature = "pg12", feature = "pg13", feature = "pg14"))] 10 | pub type PgString = pg_sys::Value; 11 | 12 | #[derive(Default, Debug)] 13 | pub struct FuncDetail { 14 | pub func_oid: pg_sys::Oid, 15 | pub ret_type_oid: pg_sys::Oid, 16 | pub retset: bool, 17 | pub nvargs: ::std::os::raw::c_int, 18 | pub vatype: pg_sys::Oid, 19 | pub code: pg_sys::FuncDetailCode, 20 | } 21 | 22 | #[cfg(not(any(feature = "pg12", feature = "pg13")))] 23 | #[inline] 24 | pub fn func_get_detail<'a, I>(func_path: I, types: &mut [pg_sys::Oid]) -> FuncDetail 25 | where 26 | I: IntoIterator<Item = &'a str>, 27 | { 28 | let arg_cnt = types.len() as i32; 29 | let fully_qualified_name = crate::build_pg_list_of_cstrings(func_path); 30 | let mut true_typeoids: *mut pg_sys::Oid = std::ptr::null_mut(); 31 | let mut fd_struct: FuncDetail = Default::default(); 32 | fd_struct.code = unsafe { 33 | pg_sys::func_get_detail( 34 | fully_qualified_name.as_ptr(), 35 | std::ptr::null_mut(), 36 | std::ptr::null_mut(), 37 | arg_cnt, 38 | types.as_mut_ptr(), 39 | false, 40 | false, 41 | false, 42 | &mut fd_struct.func_oid, 43 | &mut fd_struct.ret_type_oid, 44 | &mut fd_struct.retset, 45 | &mut fd_struct.nvargs, 46 | &mut fd_struct.vatype, 47 | &mut true_typeoids, 48 | std::ptr::null_mut(), 49 | ) 50 | }; 51 | fd_struct 52 | } 53 | 54 | // When compiling against PG12 and PG13 the underlying function takes fewer arguments 55 | #[cfg(any(feature = "pg12", feature = "pg13"))] 56 | #[inline] 57 | pub fn func_get_detail<'a, I>(func_path: I, types: &mut [pg_sys::Oid]) -> FuncDetail 58 | where 59 | I: IntoIterator<Item = &'a str>, 60 | { 61 | let arg_cnt = types.len() as i32; 62 | let fully_qualified_name = crate::build_pg_list_of_cstrings(func_path); 63 | let mut true_typeoids: *mut pg_sys::Oid = std::ptr::null_mut(); 64 | let mut fd_struct: FuncDetail = Default::default(); 65 | fd_struct.code = unsafe { 66 | pg_sys::func_get_detail( 67 | fully_qualified_name.as_ptr(), 68 | std::ptr::null_mut(), 69 | std::ptr::null_mut(), 70 | arg_cnt, 71 | types.as_mut_ptr(), 72 | false, 73 | false, 74 | &mut fd_struct.func_oid, 75 | &mut fd_struct.ret_type_oid, 76 | &mut fd_struct.retset, 77 | &mut fd_struct.nvargs, 78 | &mut fd_struct.vatype, 79 | &mut true_typeoids, 80 | std::ptr::null_mut(), 81 | ) 82 | }; 83 | fd_struct 84 | } 85 | 86 | #[cfg(not(any(feature = "pg12", feature = "pg13")))] 87 | #[inline] 88 | pub fn set_sa_hashfuncid( 89 | scalar_array_op: &mut pg_sys::ScalarArrayOpExpr, 90 | hash_func_oid: pg_sys::Oid, 91 | ) { 92 | scalar_array_op.hashfuncid = hash_func_oid; 93 | } 94 | 95 | #[cfg(any(feature = "pg12", feature = "pg13"))] 96 | #[inline] 97 | pub fn set_sa_hashfuncid( 98 | _scalar_array_op: &mut pg_sys::ScalarArrayOpExpr, 99 | _hash_func_oid: pg_sys::Oid, 100 | ) { 101 | // the field didn't exist prior to pg14 102 | } 103 | 104 | // pg_guard doesn't compile, so we have to do without it for now.
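// LookupOperName comes from PostgreSQL's parser: it resolves an operator by
// its (possibly schema-qualified) name and left/right operand type OIDs,
// returning the operator's OID, or InvalidOid when noError is true and no
// match is found.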
105 | // TODO maybe suggest adding "parser/parse_oper.h" to PGX's pg_sys 106 | // See https://github.com/tcdi/pgx/pull/549 107 | extern "C" { 108 | pub fn LookupOperName( 109 | pstate: *mut pg_sys::ParseState, 110 | opername: *mut pg_sys::List, 111 | oprleft: pg_sys::Oid, 112 | oprright: pg_sys::Oid, 113 | noError: bool, 114 | location: ::std::os::raw::c_int, 115 | ) -> pg_sys::Oid; 116 | } 117 | -------------------------------------------------------------------------------- /src/raw.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_camel_case_types)] 2 | 3 | use pgx::pg_sys::Datum; 4 | use pgx::utils::sql_entity_graph::metadata::{ 5 | ArgumentError, Returns, ReturnsError, SqlMapping, SqlTranslatable, 6 | }; 7 | use pgx::*; 8 | 9 | // TODO: Is this the right approach to declaring `bytea` and `TimestampTz`? 10 | extension_sql!( 11 | "", 12 | name = "pseudo_create_types", 13 | creates = [Type(bytea), Type(TimestampTz)], 14 | ); 15 | 16 | macro_rules! raw_type { 17 | ($name:ident, $tyid: path, $arrayid: path) => { 18 | impl FromDatum for $name { 19 | unsafe fn from_polymorphic_datum( 20 | datum: pg_sys::Datum, 21 | is_null: bool, 22 | _typoid: pg_sys::Oid, 23 | ) -> Option 24 | where 25 | Self: Sized, 26 | { 27 | if is_null { 28 | return None; 29 | } 30 | Some(Self(datum)) 31 | } 32 | } 33 | 34 | impl IntoDatum for $name { 35 | fn into_datum(self) -> Option { 36 | Some(self.0) 37 | } 38 | fn type_oid() -> pg_sys::Oid { 39 | $tyid 40 | } 41 | fn array_type_oid() -> pg_sys::Oid { 42 | $arrayid 43 | } 44 | } 45 | 46 | impl From for $name { 47 | fn from(d: pg_sys::Datum) -> Self { 48 | Self(d) 49 | } 50 | } 51 | 52 | #[allow(clippy::from_over_into)] 53 | impl Into for $name { 54 | fn into(self) -> pg_sys::Datum { 55 | self.0 56 | } 57 | } 58 | }; 59 | } 60 | 61 | #[derive(Clone, Copy)] 62 | pub struct bytea(pub Datum); 63 | 64 | #[allow(clippy::extra_unused_lifetimes)] // pgx sorcery caused this 65 | unsafe impl<'a> SqlTranslatable for bytea { 66 | fn argument_sql() -> Result { 67 | Ok(SqlMapping::literal("bytea")) 68 | } 69 | fn return_sql() -> Result { 70 | Ok(Returns::One(SqlMapping::literal("bytea"))) 71 | } 72 | } 73 | 74 | raw_type!(bytea, pg_sys::BYTEAOID, pg_sys::BYTEAARRAYOID); 75 | -------------------------------------------------------------------------------- /src/schema.rs: -------------------------------------------------------------------------------- 1 | use pgx_macros::extension_sql_file; 2 | 3 | extension_sql_file!("../bootstrap.sql", name = "bootstrap", bootstrap); 4 | 5 | extension_sql_file!( 6 | "../hand-written-migration.sql", 7 | name = "migration", 8 | finalize 9 | ); 10 | -------------------------------------------------------------------------------- /src/type_builder.rs: -------------------------------------------------------------------------------- 1 | #[repr(u8)] 2 | pub enum SerializationType { 3 | Default = 1, 4 | } 5 | 6 | #[macro_export] 7 | macro_rules! 
do_serialize { 8 | ($state: ident) => { 9 | { 10 | $crate::do_serialize!($state, version: 1) 11 | } 12 | }; 13 | ($state: ident, version: $version: expr) => { 14 | { 15 | use $crate::type_builder::SerializationType; 16 | use std::io::{Cursor, Write}; 17 | use std::convert::TryInto; 18 | 19 | let state = &*$state; 20 | let serialized_size = bincode::serialized_size(state) 21 | .unwrap_or_else(|e| pgx::error!("serialization error {}", e)); 22 | let our_size = serialized_size + 2; // size of serialized data + our version flags 23 | let allocated_size = our_size + 4; // size of our data + the varlena header 24 | let allocated_size = allocated_size.try_into() 25 | .unwrap_or_else(|e| pgx::error!("serialization error {}", e)); 26 | // valena tyes have a maximum size 27 | if allocated_size > 0x3FFFFFFF { 28 | pgx::error!("size {} bytes is to large", allocated_size) 29 | } 30 | 31 | let bytes: &mut [u8] = unsafe { 32 | let bytes = pgx::pg_sys::palloc0(allocated_size); 33 | std::slice::from_raw_parts_mut(bytes.cast(), allocated_size) 34 | }; 35 | let mut writer = Cursor::new(bytes); 36 | // varlena header space 37 | let varsize = [0; 4]; 38 | writer.write_all(&varsize) 39 | .unwrap_or_else(|e| pgx::error!("serialization error {}", e)); 40 | // type version 41 | writer.write_all(&[$version]) 42 | .unwrap_or_else(|e| pgx::error!("serialization error {}", e)); 43 | // serialization version; 1 for bincode is currently the only option 44 | writer.write_all(&[SerializationType::Default as u8]) 45 | .unwrap_or_else(|e| pgx::error!("serialization error {}", e)); 46 | bincode::serialize_into(&mut writer, state) 47 | .unwrap_or_else(|e| pgx::error!("serialization error {}", e)); 48 | unsafe { 49 | let len = writer.position().try_into().expect("serialized size too large"); 50 | ::pgx::set_varsize(writer.get_mut().as_mut_ptr() as *mut _, len); 51 | } 52 | bytea::from(pgx::pg_sys::Datum::from(writer.into_inner().as_mut_ptr())) 53 | } 54 | }; 55 | } 56 | #[macro_export] 57 | macro_rules! 
do_deserialize { 58 | ($bytes: ident, $t: ty) => {{ 59 | use $crate::type_builder::SerializationType; 60 | 61 | let state: $t = unsafe { 62 | let input: bytea = $bytes; 63 | let input: pgx::pg_sys::Datum = input.into(); 64 | let detoasted = pg_sys::pg_detoast_datum_packed(input.cast_mut_ptr()); 65 | let len = pgx::varsize_any_exhdr(detoasted); 66 | let data = pgx::vardata_any(detoasted); 67 | let bytes = std::slice::from_raw_parts(data as *mut u8, len); 68 | if bytes.len() < 1 { 69 | pgx::error!("deserialization error, no bytes") 70 | } 71 | if bytes[0] != 1 { 72 | pgx::error!( 73 | "deserialization error, invalid serialization version {}", 74 | bytes[0] 75 | ) 76 | } 77 | if bytes[1] != SerializationType::Default as u8 { 78 | pgx::error!( 79 | "deserialization error, invalid serialization type {}", 80 | bytes[1] 81 | ) 82 | } 83 | bincode::deserialize(&bytes[2..]) 84 | .unwrap_or_else(|e| pgx::error!("deserialization error {}", e)) 85 | }; 86 | state.into() 87 | }}; 88 | } 89 | -------------------------------------------------------------------------------- /src/util.rs: -------------------------------------------------------------------------------- 1 | use pgx::*; 2 | 3 | #[pg_schema] 4 | mod _prom_ext { 5 | use num_cpus::get; 6 | use pgx::*; 7 | 8 | #[pg_extern(immutable, parallel_safe, create_or_replace)] 9 | pub fn num_cpus() -> i32 { 10 | get() as i32 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /templates/idempotent-wrapper.sql: -------------------------------------------------------------------------------- 1 | 2 | -- {{filename}} 3 | DO $outer_idempotent_block$ 4 | BEGIN 5 | 6 | -- Note: this weird indentation is important. We compare SQL across upgrade paths, 7 | -- and the comparison is indentation-sensitive. 8 | {{body}} 9 | 10 | END; 11 | $outer_idempotent_block$; 12 | -------------------------------------------------------------------------------- /templates/incremental-wrapper.sql: -------------------------------------------------------------------------------- 1 | 2 | -- {{filename}} 3 | DO 4 | $outer_migration_block$ 5 | DECLARE 6 | _migration_name TEXT = NULL; 7 | _body_differs BOOL = false; 8 | _migration _ps_catalog.migration = row ('{{filename}}', '{{version}}'); 9 | _body TEXT = $migrationbody${{body}}$migrationbody$; 10 | BEGIN 11 | SELECT migration.name, migration.body <> _body 12 | INTO _migration_name, _body_differs 13 | FROM _ps_catalog.migration 14 | WHERE name = _migration.name; 15 | IF _migration_name IS NOT NULL THEN 16 | RAISE LOG 'Migration "{{filename}}" already applied, skipping'; 17 | -- 001-extension.sql changes are expected until this issue is resolved: https://github.com/timescale/promscale_extension/issues/350 18 | -- 024-adjust_autovacuum.sql had a bug in it and had to be changed 19 | IF _body_differs AND _migration_name != '001-extension.sql' and _migration_name != '024-adjust_autovacuum.sql' THEN 20 | RAISE WARNING 'The contents of migration "{{filename}}" have changed'; 21 | END IF; 22 | RETURN; 23 | END IF; 24 | 25 | -- Note: this weird indentation is important. We compare SQL across upgrade paths, 26 | -- and the comparison is indentation-sensitive. 
27 | DO $inner_migration_block$ 28 | BEGIN 29 | {{body}} 30 | END; 31 | $inner_migration_block$; 32 | 33 | INSERT INTO _ps_catalog.migration (name, applied_at_version, body) VALUES (_migration.name, _migration.applied_at_version, _body); 34 | RAISE LOG 'Applied migration {{filename}}'; 35 | END; 36 | $outer_migration_block$; 37 | -------------------------------------------------------------------------------- /templates/promscale.control: -------------------------------------------------------------------------------- 1 | # promscale extension 2 | comment = 'tables, types and functions supporting Promscale' 3 | default_version = '@CARGO_VERSION@' 4 | # we do not set module_pathname in order to activate pgx's "versioned .so" mode 5 | # module_pathname = '$libdir/promscale' 6 | relocatable = false 7 | schema = public 8 | superuser = true 9 | {%if requires_timescaledb -%} 10 | requires = 'timescaledb' 11 | {%-endif%} 12 | {%if !is_pg_12 -%} 13 | trusted = true 14 | {%-endif%} 15 | # comma-separated list of previous versions this version can be upgraded from 16 | # directly. This is used to generate upgrade scripts. 17 | # upgradeable_from = '0.0.0', '0.5.0', '0.5.1', '0.5.2', '0.5.3', '0.5.4', '0.6.0', '0.7.0', '0.8.0' 18 | -------------------------------------------------------------------------------- /test-common/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "test-common" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | log = "0.4.17" 8 | postgres = "0.19.2" 9 | pretty_env_logger = "0.4" 10 | testcontainers = "0.14.0" 11 | rand = "0.8.5" -------------------------------------------------------------------------------- /test-common/src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::{env, rc::Rc}; 2 | 3 | pub mod postgres_container; 4 | pub use postgres_container::{PostgresContainer, PostgresContainerBlueprint}; 5 | 6 | mod test_container_instance; 7 | pub use test_container_instance::TestContainerInstance; 8 | 9 | mod local_postgres_instance; 10 | pub use local_postgres_instance::LocalPostgresInstance; 11 | 12 | mod postgres_test_connection; 13 | pub use postgres_test_connection::PostgresTestConnection; 14 | 15 | /// This trait provides an interface sufficient for the 16 | /// majority of our tests and should be the first choice. 17 | /// 18 | /// That being said, if your test requires lower-level 19 | /// access to a test container, [`postgres_container`] 20 | /// is another option. 21 | pub trait PostgresTestInstance { 22 | fn connect<'pg_inst>(&'pg_inst self) -> PostgresTestConnection<'pg_inst>; 23 | fn exec_sql_script(&self, script_path: &str) -> String; 24 | } 25 | 26 | /// Creates an instance of [`PostgresTestInstance`] that 27 | /// can be backed either by a test container configured 28 | /// according to the provided [`PostgresContainerBlueprint`] 29 | /// or by a local PostgreSQL database connection. 30 | /// 31 | /// The behaviour is controlled by a number of environment 32 | /// variables: 33 | /// - `USE_DOCKER` determines which backend to use 34 | /// - `TS_DOCKER_IMAGE` determines the image to be used by the test container backend 35 | /// - `POSTGRES_URL` or a combination of 36 | /// `POSTGRES_USER`, `POSTGRES_HOST`, `POSTGRES_PORT`, `POSTGRES_DB` 37 | /// are used by the local database connection backend.
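///
/// A minimal usage sketch (illustrative only; the query is arbitrary and the
/// block is not compiled as a doctest):
///
/// ```ignore
/// let blueprint = PostgresContainerBlueprint::new();
/// let pg = new_test_instance_from_env(&blueprint);
/// // PostgresTestConnection derefs to postgres::Client.
/// let mut conn = pg.connect();
/// conn.simple_query("SELECT 1;").unwrap();
/// ```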
38 | pub fn new_test_instance_from_env<'h>( 39 | pg_blueprint: &'h PostgresContainerBlueprint, 40 | ) -> Rc { 41 | let use_docker = env::var("USE_DOCKER") 42 | .map(|val| val.to_ascii_lowercase() == "true") 43 | .unwrap_or(false); 44 | 45 | if use_docker { 46 | Rc::new(TestContainerInstance::fresh_instance(pg_blueprint)) 47 | } else { 48 | Rc::new(LocalPostgresInstance::temporary_local_db()) 49 | } 50 | } 51 | 52 | /// Creates an instance of [`PostgresTestInstance`] that 53 | /// is guaranteed to be backed by a test container configured 54 | /// according to the provided [`PostgresContainerBlueprint`]. 55 | /// 56 | /// - `TS_DOCKER_IMAGE` determines the image to be used. 57 | pub fn new_test_container_instance<'h>( 58 | pg_blueprint: &'h PostgresContainerBlueprint, 59 | ) -> TestContainerInstance<'h> { 60 | TestContainerInstance::fresh_instance(pg_blueprint) 61 | } 62 | -------------------------------------------------------------------------------- /test-common/src/local_postgres_instance.rs: -------------------------------------------------------------------------------- 1 | use postgres::config::Host; 2 | use postgres::{Client, Config}; 3 | use rand::Rng; 4 | use std::env; 5 | use std::process::{Command, Stdio}; 6 | use std::str::from_utf8; 7 | 8 | use super::postgres_test_connection::PostgresTestConnection; 9 | use super::PostgresTestInstance; 10 | 11 | pub struct LocalPostgresInstance { 12 | db_name: String, 13 | admin_conn: Client, 14 | } 15 | 16 | impl LocalPostgresInstance { 17 | fn generate_random_db_name() -> String { 18 | let mut rng = rand::thread_rng(); 19 | let n2: u16 = rng.gen(); 20 | format!("test_database_{}", n2) 21 | } 22 | 23 | fn local_config_from_env(database: Option<&str>) -> Config { 24 | env::var("POSTGRES_URL") 25 | .map(|url| { 26 | let mut config = url.parse::().unwrap(); 27 | database.map(|db| config.dbname(db)); 28 | config 29 | }) 30 | .unwrap_or_else(|_err| { 31 | let connection_string = &format!( 32 | "postgres://{}@{}:{}/{}", 33 | env::var("POSTGRES_USER").unwrap_or(String::from("postgres")), 34 | env::var("POSTGRES_HOST").unwrap_or(String::from("localhost")), 35 | env::var("POSTGRES_PORT").unwrap_or(String::from("5432")), 36 | database 37 | .map(|db_str| String::from(db_str)) 38 | .unwrap_or_else( 39 | || env::var("POSTGRES_DB").unwrap_or(String::from("postgres")) 40 | ) 41 | ); 42 | connection_string.parse::().unwrap() 43 | }) 44 | } 45 | 46 | fn connect_local(database: Option<&str>) -> Client { 47 | let config = Self::local_config_from_env(database); 48 | config.connect(postgres::NoTls).unwrap() 49 | } 50 | 51 | pub(crate) fn temporary_local_db() -> Self { 52 | let db_name = Self::generate_random_db_name(); 53 | 54 | let mut admin_conn = Self::connect_local(None); 55 | admin_conn 56 | .simple_query(format!("CREATE DATABASE {};", db_name).as_str()) 57 | .unwrap(); 58 | 59 | LocalPostgresInstance { 60 | db_name, 61 | admin_conn, 62 | } 63 | } 64 | } 65 | 66 | impl Drop for LocalPostgresInstance { 67 | fn drop(&mut self) { 68 | self.admin_conn 69 | .simple_query(format!("DROP DATABASE {};", self.db_name).as_str()) 70 | .unwrap(); 71 | } 72 | } 73 | 74 | impl PostgresTestInstance for LocalPostgresInstance { 75 | fn connect<'pg_inst>(&'pg_inst self) -> PostgresTestConnection<'pg_inst> { 76 | PostgresTestConnection { 77 | client: LocalPostgresInstance::connect_local(Some(&self.db_name)), 78 | _parent: self, 79 | } 80 | } 81 | 82 | fn exec_sql_script(&self, script_path: &str) -> String { 83 | let conf = Self::local_config_from_env(Some(&self.db_name)); 84 | 
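// Build a psql invocation from the connection config: user, host, port and
// database become command-line flags, and the password (if any) is passed
// via the PGPASSWORD environment variable.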
let mut cmd = Command::new("psql"); 85 | 86 | conf.get_user().map(|u| cmd.arg("-U").arg(u)); 87 | conf.get_hosts() 88 | .first() 89 | .and_then(|h| match h { 90 | Host::Tcp(hostname) => Some(hostname), 91 | _ => None, 92 | }) 93 | .map(|h| cmd.arg("-h").arg(h)); 94 | conf.get_ports() 95 | .first() 96 | .map(|p| cmd.arg("-p").arg(format!("{}", p))); 97 | conf.get_dbname().map(|db| cmd.arg("-d").arg(db)); 98 | conf.get_password() 99 | .map(|pwd| cmd.env("PGPASSWORD", from_utf8(pwd).unwrap())); 100 | 101 | // a workaround to forward stderr to stdout 102 | // in the same way Docker backend does. 103 | let str_cmd = format!("{:?} 2>&1", cmd.arg("-f").arg(script_path)); 104 | 105 | let output = Command::new("bash") 106 | .arg("-c") 107 | .arg(str_cmd) 108 | .stdout(Stdio::piped()) 109 | .spawn() 110 | .unwrap() 111 | .wait_with_output() 112 | .unwrap(); 113 | from_utf8(&output.stdout).unwrap().to_string() 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /test-common/src/postgres_container/blueprint.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::env; 3 | use std::rc::Rc; 4 | use testcontainers::clients; 5 | use testcontainers::clients::Cli; 6 | use testcontainers::core::WaitFor; 7 | use testcontainers::images; 8 | use testcontainers::images::generic::GenericImage; 9 | 10 | use super::*; 11 | 12 | #[derive(Debug, Clone)] 13 | pub struct PostgresContainerBlueprint { 14 | pub docker: Rc, 15 | image_uri: String, 16 | volumes: HashMap, 17 | env_vars: HashMap, 18 | } 19 | 20 | impl PostgresContainerBlueprint { 21 | const DB: &'static str = "postgres-db-test"; 22 | const USER: &'static str = "postgres-user-test"; 23 | const PASSWORD: &'static str = "postgres-password-test"; 24 | 25 | /// Returns the name of the docker image to use for Postgres containers. 26 | /// If the `TS_DOCKER_IMAGE` environment variable is set, it will return that value. 27 | /// Otherwise, it returns a default image. 
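///
/// For example, CI may set something like
/// `TS_DOCKER_IMAGE=ghcr.io/timescale/dev_promscale_extension:master-ts2-pg14`,
/// i.e. the kind of URI that `postgres_image_uri` produces.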
28 | pub fn default_image_uri() -> String { 29 | env::var("TS_DOCKER_IMAGE").unwrap_or_else(|_| { 30 | String::from(postgres_image_uri(ImageOrigin::Local, PgVersion::V14)) 31 | }) 32 | } 33 | 34 | fn init_docker() -> Cli { 35 | clients::Cli::default() 36 | } 37 | 38 | fn prepare_image(&self) -> GenericImage { 39 | let img_uri = self.image_uri(); 40 | let (img_name, tag) = img_uri 41 | .rsplit_once(":") 42 | .unwrap_or_else(|| (img_uri, "latest")); 43 | 44 | images::generic::GenericImage::new(img_name, tag).with_wait_for(WaitFor::message_on_stderr( 45 | "database system is ready to accept connections", 46 | )) 47 | } 48 | 49 | pub fn new() -> Self { 50 | Self { 51 | docker: Rc::new(Self::init_docker()), 52 | image_uri: PostgresContainerBlueprint::default_image_uri(), 53 | volumes: HashMap::default(), 54 | env_vars: HashMap::default(), 55 | } 56 | .with_db(Self::DB) 57 | .with_user(Self::USER) 58 | .with_password(Self::PASSWORD) 59 | .with_env_var("POSTGRES_HOST_AUTH_METHOD", "trust") 60 | } 61 | 62 | pub fn with_image_uri(mut self, image_uri: String) -> Self { 63 | self.image_uri = image_uri; 64 | self 65 | } 66 | 67 | pub fn image_uri(&self) -> &str { 68 | self.image_uri.as_str() 69 | } 70 | 71 | pub fn with_volume, D: Into>(mut self, from: F, dest: D) -> Self { 72 | self.volumes.insert(from.into(), dest.into()); 73 | self 74 | } 75 | 76 | pub fn with_env_var, V: Into>(mut self, k: K, v: V) -> Self { 77 | self.env_vars.insert(k.into(), v.into()); 78 | self 79 | } 80 | 81 | pub fn with_db>(self, v: T) -> Self { 82 | self.with_env_var("POSTGRES_DB", v) 83 | } 84 | 85 | pub fn db(&self) -> &str { 86 | self.env_vars.get("POSTGRES_DB").unwrap().as_str() 87 | } 88 | 89 | pub fn with_user>(self, v: T) -> Self { 90 | self.with_env_var("POSTGRES_USER", v) 91 | } 92 | 93 | pub fn user(&self) -> &str { 94 | self.env_vars.get("POSTGRES_USER").unwrap().as_str() 95 | } 96 | 97 | pub fn with_password>(self, v: T) -> Self { 98 | self.with_env_var("POSTGRES_PASSWORD", v) 99 | } 100 | 101 | pub fn password(&self) -> &str { 102 | self.env_vars.get("POSTGRES_PASSWORD").unwrap().as_str() 103 | } 104 | 105 | pub fn with_testdata(self, src: &str) -> Self { 106 | self.with_volume(src, "/testdata") 107 | } 108 | 109 | pub fn run(&self) -> PostgresContainer { 110 | let mut img = self.prepare_image(); 111 | 112 | for (from, to) in self.volumes.iter() { 113 | img = img.with_volume(from, to); 114 | } 115 | for (k, v) in self.env_vars.iter() { 116 | img = img.with_env_var(k, v); 117 | } 118 | 119 | self.docker.run(img) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /test-common/src/postgres_container/mod.rs: -------------------------------------------------------------------------------- 1 | use log::{error, info}; 2 | use postgres::Client; 3 | use std::fmt::{Display, Formatter}; 4 | use std::thread; 5 | use std::time::Duration; 6 | use testcontainers::images::generic::GenericImage; 7 | use testcontainers::Container; 8 | 9 | mod blueprint; 10 | 11 | /// A docker container running Postgres 12 | pub type PostgresContainer<'d> = Container<'d, GenericImage>; 13 | 14 | pub use blueprint::PostgresContainerBlueprint; 15 | 16 | #[allow(dead_code)] 17 | #[derive(Debug)] 18 | pub enum ImageOrigin { 19 | Local, 20 | Latest, 21 | Master, 22 | } 23 | 24 | #[allow(dead_code)] 25 | #[derive(Debug, PartialEq, Eq)] 26 | pub enum PgVersion { 27 | V15, 28 | V14, 29 | V13, 30 | V12, 31 | } 32 | 33 | impl TryFrom<&str> for PgVersion { 34 | type Error = String; 35 | 36 | fn 
try_from(value: &str) -> Result { 37 | match value { 38 | "15" => Ok(PgVersion::V15), 39 | "14" => Ok(PgVersion::V14), 40 | "13" => Ok(PgVersion::V13), 41 | "12" => Ok(PgVersion::V12), 42 | _ => Err(format!("Unknown postgres version {}", value)), 43 | } 44 | } 45 | } 46 | 47 | impl Display for PgVersion { 48 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 49 | match self { 50 | PgVersion::V15 => write!(f, "15"), 51 | PgVersion::V14 => write!(f, "14"), 52 | PgVersion::V13 => write!(f, "13"), 53 | PgVersion::V12 => write!(f, "12"), 54 | } 55 | } 56 | } 57 | 58 | pub fn postgres_image_uri(origin: ImageOrigin, version: PgVersion) -> String { 59 | let prefix = match origin { 60 | ImageOrigin::Local => "local/dev_promscale_extension:head-ts2-", 61 | ImageOrigin::Latest => "timescaledev/promscale-extension:latest-ts2.7.0-", 62 | ImageOrigin::Master => "ghcr.io/timescale/dev_promscale_extension:master-ts2-", 63 | }; 64 | let version = match version { 65 | PgVersion::V12 => "pg12", 66 | PgVersion::V13 => "pg13", 67 | PgVersion::V14 => "pg14", 68 | PgVersion::V15 => "pg15", 69 | }; 70 | format!("{}{}", prefix, version) 71 | } 72 | 73 | pub fn connect(pg_blueprint: &PostgresContainerBlueprint, container: &PostgresContainer) -> Client { 74 | let connection_string = format!( 75 | "postgres://{}:{}@localhost:{}/{}", 76 | pg_blueprint.user(), 77 | pg_blueprint.password(), 78 | container.get_host_port_ipv4(5432), 79 | pg_blueprint.db() 80 | ); 81 | retry(|| Client::connect(&connection_string, postgres::NoTls), 3).unwrap() 82 | } 83 | 84 | fn retry(operation: F, count: u32) -> Result 85 | where 86 | E: Display, 87 | F: Fn() -> Result, 88 | { 89 | assert!(count > 0); 90 | retry_inner(operation, 1, count) 91 | } 92 | 93 | fn retry_inner(operation: F, cur_count: u32, max_count: u32) -> Result 94 | where 95 | E: Display, 96 | F: Fn() -> Result, 97 | { 98 | let result: Result = operation(); 99 | match result { 100 | Ok(result) => Ok(result), 101 | Err(error) => { 102 | if cur_count == max_count { 103 | error!("encountered error '{}', no more retries", error); 104 | return Err(error); 105 | } 106 | let sleep_duration = Duration::from_secs(u64::pow(2, cur_count - 1)); 107 | info!( 108 | "encountered error '{}', will retry in '{}s'", 109 | error, 110 | sleep_duration.as_secs() 111 | ); 112 | thread::sleep(sleep_duration); 113 | return retry_inner(operation, cur_count + 1, max_count); 114 | } 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /test-common/src/postgres_test_connection.rs: -------------------------------------------------------------------------------- 1 | use postgres::Client; 2 | use std::ops::{Deref, DerefMut}; 3 | 4 | use super::PostgresTestInstance; 5 | 6 | /// A wrapper around [`postgres::Client`] 7 | /// that can be treated as a smart-pointer. 8 | pub struct PostgresTestConnection<'pg_inst> { 9 | pub client: Client, 10 | // a phantom to hold onto parent's lifetime 11 | // to prevent premature database shutdown. 
12 | pub(crate) _parent: &'pg_inst dyn PostgresTestInstance, 13 | } 14 | 15 | impl<'pg> Deref for PostgresTestConnection<'pg> { 16 | type Target = Client; 17 | 18 | // &self.client can't outlive &self and therefore can't outlive 'pg 19 | fn deref(&self) -> &Self::Target { 20 | &self.client 21 | } 22 | } 23 | 24 | impl<'pg> DerefMut for PostgresTestConnection<'pg> { 25 | fn deref_mut(&mut self) -> &mut Self::Target { 26 | &mut self.client 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /test-common/src/test_container_instance.rs: -------------------------------------------------------------------------------- 1 | use std::process::{Command, Stdio}; 2 | use std::str::from_utf8; 3 | 4 | use super::postgres_container::*; 5 | use super::*; 6 | 7 | pub struct TestContainerInstance<'h> { 8 | pg_blueprint: &'h PostgresContainerBlueprint, 9 | pub container: PostgresContainer<'h>, 10 | } 11 | 12 | impl<'pg_inst> TestContainerInstance<'pg_inst> { 13 | pub(crate) fn fresh_instance(pg_blueprint: &'pg_inst PostgresContainerBlueprint) -> Self { 14 | let container = pg_blueprint.run(); 15 | TestContainerInstance { 16 | pg_blueprint: pg_blueprint, 17 | container: container, 18 | } 19 | } 20 | } 21 | 22 | impl<'h> PostgresTestInstance for TestContainerInstance<'h> { 23 | fn connect<'pg_inst>(&'pg_inst self) -> PostgresTestConnection<'pg_inst> { 24 | PostgresTestConnection { 25 | client: postgres_container::connect(self.pg_blueprint, &self.container), 26 | _parent: self, 27 | } 28 | } 29 | 30 | fn exec_sql_script(&self, script_path: &str) -> String { 31 | let id = self.container.id(); 32 | let output = Command::new("docker") 33 | .arg("exec") 34 | .arg("-w") 35 | .arg("/") 36 | .arg(id) 37 | .arg("bash") 38 | .arg("-c") 39 | .arg(format!( 40 | "psql -U {} -d {} -f {} 2>&1", 41 | self.pg_blueprint.user(), 42 | self.pg_blueprint.db(), 43 | script_path 44 | )) 45 | .stdout(Stdio::piped()) 46 | .spawn() 47 | .unwrap() 48 | .wait_with_output() 49 | .unwrap(); 50 | from_utf8(&output.stdout).unwrap().to_string() 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /tools/changelog: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eo pipefail 4 | 5 | SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd -P)" 6 | ROOT_DIR="$(cd "${SCRIPT_DIR}/../" && pwd -P)" 7 | 8 | function usage(){ 9 | echo "usage: $(basename "$0") [OPTIONS]" 10 | echo "" 11 | echo "Prints the release notes for VERSION." 12 | echo "" 13 | echo "If VERSION has no changes recorded in CHANGELOG.md, then the " 14 | echo "changes from the unreleased section are used instead." 15 | echo "" 16 | echo "OPTIONS" 17 | echo " -h | --help Print this help message" 18 | echo "" 19 | } 20 | 21 | tag="$1" 22 | if [ -z "$tag" ]; then 23 | echo "Expected the desired version number as the first argument!" 
24 | echo "" 25 | usage 26 | exit 2 27 | fi 28 | 29 | if [ "$tag" = "-h" ] || [ "$tag" = "--help" ]; then 30 | usage 31 | exit 2 32 | fi 33 | 34 | awk -v tag="${tag}" -v header="^## \\\[${tag}\\\]" ' 35 | BEGIN { notes="";flag=0;unreleased=0 } 36 | $0~header { flag=1; notes=$0 "\n"; next} 37 | /^## \[Unreleased\]/ { unreleased=1; notes=$0 "\n"; next } 38 | /^## / { 39 | if (unreleased == 1) { unreleased=0 } 40 | if (flag == 0) { 41 | next 42 | } else { 43 | exit 44 | } 45 | } 46 | flag==1||unreleased==1 {notes=notes $0 "\n"; next} 47 | END { gsub(/[\n]+$/, "", notes); print notes } 48 | ' "${ROOT_DIR}/CHANGELOG.md" 49 | -------------------------------------------------------------------------------- /tools/smoke-test: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | # This script smoke tests the promscale extension docker image. 6 | # It starts the image in a container and verifies that CREATE EXTENSION promscale succeeds. 7 | 8 | # It takes two positional arguments: 9 | EXTENSION_DOCKER_IMAGE=$1 # e.g. ghcr.io/timescale/promscale_dev_extension:master-ts2-pg14 10 | DOCKER_PLATFORM=$2 # e.g. linux/amd64 11 | 12 | TESTER_NAME=$(echo "${EXTENSION_DOCKER_IMAGE}" | sed 's/[:]/-/') 13 | 14 | docker run --rm --name "${TESTER_NAME}" --platform="${DOCKER_PLATFORM}" -e POSTGRES_PASSWORD=postgres -d "${EXTENSION_DOCKER_IMAGE}"; 15 | 16 | for i in $(seq 10) ; do 17 | if docker exec "${TESTER_NAME}" pg_isready -h 0.0.0.0 -p 5432 1>/dev/null 2>&1; then 18 | break 19 | fi 20 | sleep 1 21 | done 22 | 23 | docker exec "${TESTER_NAME}" psql -c 'CREATE EXTENSION promscale CASCADE;' 24 | 25 | if ! docker exec "${TESTER_NAME}" psql -c '\dx promscale;' | grep 'promscale'; then 26 | echo "Encountered error while testing image ${EXTENSION_DOCKER_IMAGE}"; 27 | docker logs "${TESTER_NAME}" 28 | docker stop "${TESTER_NAME}" 29 | docker rm -f "${TESTER_NAME}" 30 | exit 1 31 | fi; 32 | 33 | docker rm -f "${TESTER_NAME}" 34 | -------------------------------------------------------------------------------- /update-version.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Updates the extension version in all places necessary 4 | 5 | set -euo pipefail 6 | 7 | SED_ESCAPE_DOTS='s/\./\\\./g' 8 | 9 | if [ -z "${1:-}" ]; then 10 | echo "No version provided" 11 | exit 1 12 | fi 13 | 14 | NEW_VERSION=$(echo "$1" | sed ${SED_ESCAPE_DOTS}) 15 | 16 | # extract current version from Cargo.toml 17 | OLD_VERSION=$(bash extract-extension-version.sh | tr -d '\n' | sed ${SED_ESCAPE_DOTS}) 18 | 19 | # replace current version with new version in Cargo.toml 20 | # Note: some care has been taken to make this command portable between Unix and 21 | # BSD sed, hence the "slightly weird" invocation here. 22 | sed -i.bak -e "s/^version.*=.*\"${OLD_VERSION}\"\$/version = \"${NEW_VERSION}\"/g" Cargo.toml && rm Cargo.toml.bak 23 | 24 | cargo update --workspace 25 | 26 | if [ -z "${NEW_VERSION##*-dev}" ]; then 27 | echo "Skipping INSTALL.md because it's a dev version." 28 | else 29 | # replace current version with new version in *.md 30 | # Note: some care has been taken to make this command portable between Unix and 31 | # BSD sed, hence the "slightly weird" invocation here. 32 | sed -i.bak -e "s/${OLD_VERSION}/${NEW_VERSION}/g" INSTALL.md && rm INSTALL.md.bak 33 | fi --------------------------------------------------------------------------------