├── tests
│   ├── __init__.py
│   ├── utils.py
│   ├── conftest.py
│   └── functional
│       └── adapter
│           ├── hypertable
│           │   ├── test_hypertable_index.py
│           │   ├── test_hypertable.py
│           │   ├── test_reorder_policy.py
│           │   ├── test_hypertable_integer_now_func.py
│           │   ├── test_hypertable_dimension.py
│           │   └── test_hypertable_compression.py
│           ├── continuous_aggregate
│           │   ├── test_continuous_aggregate_index.py
│           │   ├── test_continuous_aggregate_refresh_policy.py
│           │   ├── test_continuous_aggregate.py
│           │   ├── test_continuous_aggregate_retention_policy.py
│           │   └── test_continuous_aggregate_compression.py
│           ├── test_basic.py
│           ├── virtual_hypertable
│           │   ├── test_virtual_hypertable_chunk_time_interval.py
│           │   ├── test_virtual_hypertable_compression.py
│           │   ├── test_virtual_hypertable_integer_now_func.py
│           │   ├── test_virtual_hypertable_index_updates.py
│           │   ├── test_virtual_hypertable_retention_policy.py
│           │   ├── test_virtual_hypertable_reorder_policy.py
│           │   └── test_virtual_hypertable.py
│           └── test_retention_policy.py
├── .python-version
├── docs
│   ├── stylesheets
│   │   └── extra.css
│   ├── license.md
│   ├── usage
│   │   ├── index.md
│   │   ├── retention-policies.md
│   │   ├── indexes.md
│   │   ├── reorder-policies.md
│   │   ├── virtual-hypertables.md
│   │   ├── compression.md
│   │   ├── hypertables.md
│   │   ├── macros.md
│   │   └── continuous-aggregates.md
│   ├── index.md
│   └── installation.md
├── docs_build
│   ├── requirements.txt
│   ├── cloudflare-pages.sh
│   ├── chunk_time_interval.md
│   ├── integer_now_func.md
│   └── dimensions.md
├── dbt
│   ├── __init__.py
│   ├── include
│   │   └── timescaledb
│   │       ├── __init__.py
│   │       ├── dbt_project.yml
│   │       ├── profile_template.yml
│   │       └── macros
│   │           ├── relations
│   │           │   ├── integer_now_func.sql
│   │           │   ├── retention_policy.sql
│   │           │   ├── reorder_policy.sql
│   │           │   ├── continuous_aggregate.sql
│   │           │   ├── dimensions.sql
│   │           │   ├── compression.sql
│   │           │   └── hypertable.sql
│   │           ├── materializations
│   │           │   └── models
│   │           │       ├── virtual_hypertable.sql
│   │           │       ├── continuous_aggregate.sql
│   │           │       └── hypertable.sql
│   │           └── adapters.sql
│   └── adapters
│       └── timescaledb
│           ├── timescaledb_credentials.py
│           ├── __init__.py
│           ├── timescaledb_change_collection.py
│           ├── timescaledb_index_config.py
│           ├── timescaledb_adapter.py
│           ├── timescaledb_connection_manager.py
│           └── timescaledb_relation.py
├── assets
│   ├── dbt-signature_tm.png
│   ├── dbt-signature_tm_light.png
│   ├── Timescale-Logo-Black-PNG.png
│   └── Timescale-Logo-Primary-PNG.png
├── test.env
├── docker-compose.yml
├── test.env.sample
├── .vscode
│   ├── launch.json
│   └── settings.json
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── publish.yml
│       └── test.yml
├── overrides
│   └── partials
│       └── integrations
│           └── analytics
│               └── custom.html
├── .pre-commit-config.yaml
├── LICENSE
├── pyproject.toml
├── mkdocs.yml
├── README.md
└── .gitignore
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
1 | 3.12
2 |
--------------------------------------------------------------------------------
/docs/stylesheets/extra.css:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs_build/requirements.txt:
--------------------------------------------------------------------------------
1 | pdm
2 |
--------------------------------------------------------------------------------
/dbt/__init__.py:
--------------------------------------------------------------------------------
1 | from pkgutil import extend_path
2 |
3 | __path__ = extend_path(__path__, __name__)
4 |
--------------------------------------------------------------------------------
/dbt/include/timescaledb/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | PACKAGE_PATH = os.path.dirname(__file__)
4 |
--------------------------------------------------------------------------------
/dbt/include/timescaledb/dbt_project.yml:
--------------------------------------------------------------------------------
1 | name: dbt_timescaledb
2 | version: 1.8.0
3 | config-version: 2
4 |
--------------------------------------------------------------------------------
/assets/dbt-signature_tm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sdebruyn/dbt-timescaledb/HEAD/assets/dbt-signature_tm.png
--------------------------------------------------------------------------------
/assets/dbt-signature_tm_light.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sdebruyn/dbt-timescaledb/HEAD/assets/dbt-signature_tm_light.png
--------------------------------------------------------------------------------
/assets/Timescale-Logo-Black-PNG.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sdebruyn/dbt-timescaledb/HEAD/assets/Timescale-Logo-Black-PNG.png
--------------------------------------------------------------------------------
/assets/Timescale-Logo-Primary-PNG.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sdebruyn/dbt-timescaledb/HEAD/assets/Timescale-Logo-Primary-PNG.png
--------------------------------------------------------------------------------
/test.env:
--------------------------------------------------------------------------------
1 | TIMESCALEDB_TEST_HOST=localhost
2 | TIMESCALEDB_TEST_PORT=5432
3 | POSTGRES_USER=timescaledb
4 | POSTGRES_PASSWORD=timescaledb
5 | POSTGRES_DB=timescaledb
6 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | timescaledb:
3 | env_file:
4 | - test.env
5 | image: timescale/timescaledb:latest-pg16
6 | ports:
7 | - 5432:5432/tcp
8 |
--------------------------------------------------------------------------------
/test.env.sample:
--------------------------------------------------------------------------------
1 | TIMESCALEDB_TEST_HOST=localhost
2 | TIMESCALEDB_TEST_PORT=5432
3 | POSTGRES_USER=timescaledb
4 | POSTGRES_PASSWORD=timescaledb
5 | POSTGRES_DB=timescaledb
6 |
--------------------------------------------------------------------------------
/tests/utils.py:
--------------------------------------------------------------------------------
1 | def get_indexes_sql(unique_schema: str, table_name: str) -> str:
2 | return f"""
3 | select *
4 | from pg_indexes
5 | where schemaname = '{unique_schema}'
6 | and tablename = '{table_name}'"""
7 |
--------------------------------------------------------------------------------
/docs_build/cloudflare-pages.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -ex
4 |
5 | cd ..
6 | python -m pdm install -x --no-default --no-editable -G docs
7 | python -m pdm run mkdocs build -d ./docs_build/site
8 |
9 | curl -sLo ./docs_build/site/t.js "https://umami.debruyn.dev/script.js"
10 |
--------------------------------------------------------------------------------
/dbt/adapters/timescaledb/timescaledb_credentials.py:
--------------------------------------------------------------------------------
1 | from typing import Literal
2 |
3 | from dbt.adapters.postgres import PostgresCredentials
4 |
5 |
6 | class TimescaleDBCredentials(PostgresCredentials):
7 | @property
8 | def type(self) -> Literal["timescaledb"]:
9 | return "timescaledb"
10 |
--------------------------------------------------------------------------------
/docs/license.md:
--------------------------------------------------------------------------------
1 | # License
2 |
3 | You can find this adapter's license below. Note that dbt Core and Timescale have their own licenses:
4 |
5 | * [dbt Core](https://github.com/dbt-labs/dbt-core/blob/main/License.md)
6 | * [TimescaleDB](https://docs.timescale.com/about/latest/timescaledb-editions/)
7 |
8 | ---
9 |
10 | --8<-- "LICENSE"
11 |
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "0.2.0",
3 | "configurations": [
4 | {
5 | "name": "Python: Debug Tests",
6 | "type": "debugpy",
7 | "request": "launch",
8 | "program": "${file}",
9 | "purpose": ["debug-test"],
10 | "console": "integratedTerminal",
11 | "justMyCode": false
12 | }
13 | ]
14 | }
15 |
--------------------------------------------------------------------------------
/dbt/include/timescaledb/profile_template.yml:
--------------------------------------------------------------------------------
1 | fixed:
2 | type: timescaledb
3 | prompts:
4 | host:
5 | hint: "your host name"
6 | port:
7 | default: 5432
8 | type: "int"
9 | user:
10 | hint: "dev username"
11 | password:
12 | hint: "dev password"
13 | hide_input: true
14 | dbname:
15 | hint: "default database"
16 | threads:
17 | hint: "1 or more"
18 | type: "int"
19 | default: 1
20 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "pip"
4 | directory: "/"
5 | schedule:
6 | interval: "daily"
7 | target-branch: "main"
8 | - package-ecosystem: "docker"
9 | directory: "/"
10 | schedule:
11 | interval: "daily"
12 | target-branch: "main"
13 | - package-ecosystem: "github-actions"
14 | directory: "/"
15 | schedule:
16 | interval: "daily"
17 | target-branch: "main"
18 |
--------------------------------------------------------------------------------
/dbt/adapters/timescaledb/__init__.py:
--------------------------------------------------------------------------------
1 | from dbt.adapters.base import AdapterPlugin
2 | from dbt.adapters.timescaledb.timescaledb_adapter import TimescaleDBAdapter
3 | from dbt.adapters.timescaledb.timescaledb_credentials import TimescaleDBCredentials
4 | from dbt.include import timescaledb
5 |
6 | Plugin = AdapterPlugin(
7 | adapter=TimescaleDBAdapter,
8 | credentials=TimescaleDBCredentials,
9 | include_path=timescaledb.PACKAGE_PATH,
10 | dependencies=["postgres"],
11 | )
12 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish to PyPI
2 |
3 | on:
4 | push:
5 | tags:
6 | - 'v*'
7 |
8 | jobs:
9 | build-and-publish:
10 | runs-on: ubuntu-latest
11 | permissions:
12 | id-token: write
13 | steps:
14 | - uses: actions/checkout@v4
15 |
16 | - uses: pdm-project/setup-pdm@v4
17 | with:
18 | cache: true
19 | python-version: '3.11'
20 |
21 | - name: Publish package distributions to PyPI
22 | run: pdm publish
23 |
--------------------------------------------------------------------------------
/dbt/include/timescaledb/macros/relations/integer_now_func.sql:
--------------------------------------------------------------------------------
1 | {% macro set_integer_now_func(relation, integer_now_func, integer_now_func_sql = none) %}
2 | {% if integer_now_func_sql %}
3 | create or replace function {{ relation.database }}.{{ relation.schema }}.{{ integer_now_func }}() returns bigint language sql immutable as $$
4 | {{ integer_now_func_sql }}
5 | $$;
6 | {% endif %}
7 | select set_integer_now_func('{{ relation }}', '{{ relation.database }}.{{ relation.schema }}.{{ integer_now_func }}');
8 | {% endmacro %}
9 |
--------------------------------------------------------------------------------
/docs/usage/index.md:
--------------------------------------------------------------------------------
1 | # Usage
2 |
3 | This adapter adds 3 materializations alongside those available in `dbt-postgres`:
4 |
5 | * [Hypertables](hypertables.md)
6 | * [Virtual hypertables](virtual-hypertables.md)
7 | * [Continuous aggregates](continuous-aggregates.md)
8 |
9 | Other TimescaleDB features are available natively in dbt as well:
10 |
11 | * [Compression](compression.md)
12 | * [Transaction per chunk indexes](indexes.md)
13 | * [Reorder policies](reorder-policies.md)
14 | * [Retention policies](retention-policies.md)
15 |
16 | Most of the features are also available as [macros](macros.md).
17 |
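18 | For example, a minimal hypertable model needs nothing more than the materialization and its main time dimension:
19 |
20 | ```sql+jinja title="models/my_hypertable.sql"
21 | {{
22 |   config(
23 |     materialized='hypertable',
24 |     main_dimension='time_column'
25 |   )
26 | }}
27 | select current_timestamp as time_column
28 | ```
29 |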
--------------------------------------------------------------------------------
/docs_build/chunk_time_interval.md:
--------------------------------------------------------------------------------
1 | ### chunk_time_interval
2 |
3 | The `chunk_time_interval` config option allows you to set the interval at which TimescaleDB will chunk your (virtual) hypertable. This is useful for optimizing query performance and storage efficiency. The default value is `1 week`.
4 |
5 | Note that the type of the interval depends on the type of your time column and has to match.
6 |
7 | ```sql+jinja title="models/my_hypertable.sql"
8 | {{
9 | config(
10 | materialized='hypertable',
11 | main_dimension='time_column',
12 | chunk_time_interval="interval '1 day'"
13 | )
14 | }}
15 | select current_timestamp as time_column
16 | ```
17 |
--------------------------------------------------------------------------------
/dbt/adapters/timescaledb/timescaledb_change_collection.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 | from typing import Set
3 |
4 | from dbt.adapters.postgres.relation_configs.index import (
5 | PostgresIndexConfigChange,
6 | )
7 |
8 |
9 | @dataclass
10 | class TimescaleDBHypertableConfigChangeCollection:
11 | indexes: Set[PostgresIndexConfigChange] = field(default_factory=set)
12 |
13 | @property
14 | def requires_full_refresh(self) -> bool:
15 | return any(index.requires_full_refresh for index in self.indexes)
16 |
17 | @property
18 | def has_changes(self) -> bool:
19 | return self.indexes != set()
20 |
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # dbt-timescaledb
2 |
3 | 
4 | 
5 | 
6 | 
7 |
8 | --8<-- "README.md:12"
9 |
--------------------------------------------------------------------------------
/overrides/partials/integrations/analytics/custom.html:
--------------------------------------------------------------------------------
1 |
7 |
18 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "python.testing.pytestEnabled": true,
3 | "python.testing.pytestArgs": [
4 | "tests"
5 | ],
6 | "python.testing.unittestEnabled": false,
7 | "yaml.schemas": {
8 | "https://raw.githubusercontent.com/dbt-labs/dbt-jsonschema/main/schemas/dbt_project.json": [
9 | "dbt_project.yml"
10 | ],
11 | "https://squidfunk.github.io/mkdocs-material/schema.json": "mkdocs.yml"
12 | },
13 | "yaml.customTags": [
14 | "!ENV scalar",
15 | "!ENV sequence",
16 | "tag:yaml.org,2002:python/name:material.extensions.emoji.to_svg",
17 | "tag:yaml.org,2002:python/name:material.extensions.emoji.twemoji",
18 | "tag:yaml.org,2002:python/name:pymdownx.superfences.fence_code_format"
19 | ]
20 | }
21 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Any
3 |
4 | import pytest
5 |
6 | pytest_plugins: list[str] = ["dbt.tests.fixtures.project"]
7 |
8 |
9 | @pytest.fixture(scope="class")
10 | def dbt_profile_target() -> dict[str, Any]:
11 | return {
12 | "type": "timescaledb",
13 | "host": os.getenv("TIMESCALEDB_TEST_HOST", "localhost"),
14 | "port": int(os.getenv("TIMESCALEDB_TEST_PORT", "5432")),
15 | "user": os.getenv("POSTGRES_USER", "timescaledb"),
16 | "pass": os.getenv("POSTGRES_PASSWORD", "timescaledb"),
17 | "dbname": os.getenv("POSTGRES_DB", "timescaledb"),
18 | }
19 |
20 |
21 | @pytest.fixture(scope="class")
22 | def unique_schema(unique_schema: str) -> str:
23 | # The schema name must be less than 64 characters long
24 | return unique_schema[:63]
25 |
--------------------------------------------------------------------------------
/docs/installation.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | Install the package using pip:
4 |
5 | ```bash
6 | pip install dbt-timescaledb
7 | ```
8 |
9 | In your `profiles.yml`, use the same configuration [as you'd do for a regular PostgreSQL database](https://docs.getdbt.com/docs/core/connect-data-platform/postgres-setup#profile-configuration). The only difference is that you need to set the `type` to `timescaledb`.
10 |
11 | ```yaml hl_lines="5" title="profiles.yml"
12 | company-name:
13 | target: dev
14 | outputs:
15 | dev:
16 | type: timescaledb # only option different from regular dbt-postgres
17 | host: [hostname]
18 | user: [username]
19 | password: [password]
20 | port: [port]
21 | dbname: [database name]
22 | schema: [dbt schema]
23 | # see dbt-postgres docs linked above for more options
24 | ```
25 |
--------------------------------------------------------------------------------
/dbt/include/timescaledb/macros/relations/retention_policy.sql:
--------------------------------------------------------------------------------
1 | {% macro add_retention_policy(relation, retention_config) %}
2 | select add_retention_policy(
3 | '{{ relation }}',
4 | {{ retention_config.drop_after }},
5 |
6 | {%- if retention_config.schedule_interval %}
7 | schedule_interval => {{ retention_config.schedule_interval }},
8 | {% endif -%}
9 |
10 | {%- if retention_config.initial_start %}
11 | initial_start => {{ retention_config.initial_start }},
12 | {% endif -%}
13 |
14 | {%- if retention_config.timezone %}
15 | timezone => '{{ retention_config.timezone }}',
16 | {% endif -%}
17 |
18 | if_not_exists => true);
19 | {% endmacro %}
20 |
21 | {% macro clear_retention_policy(relation) %}
22 | select remove_retention_policy('{{ relation }}', if_exists => true);
23 | {% endmacro %}
24 |
--------------------------------------------------------------------------------
/dbt/adapters/timescaledb/timescaledb_index_config.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 |
3 | from dbt_common.utils.encoding import md5
4 |
5 | from dbt.adapters.postgres.impl import PostgresIndexConfig
6 |
7 |
8 | @dataclass
9 | class TimescaleDBIndexConfig(PostgresIndexConfig):
10 | transaction_per_chunk: bool = False
11 |
12 | def render(self, relation) -> str: # noqa: ANN001
13 | # Unlike dbt-postgres (which mixes the current timestamp into the hash, see
14 | # https://github.com/dbt-labs/dbt-core/issues/1945#issuecomment-576714925),
15 | # the name rendered here must stay deterministic so that reorder policies
16 | # can reference the index by name; transaction_per_chunk is hashed in too.
17 | inputs = self.columns + [
18 | relation.render(),
19 | str(self.unique),
20 | str(self.type),
21 | str(self.transaction_per_chunk),
22 | ]
23 | string = "_".join(inputs)
24 | return md5(string)
25 |
--------------------------------------------------------------------------------
/dbt/adapters/timescaledb/timescaledb_adapter.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Optional
2 |
3 | from dbt.adapters.base.meta import available
4 | from dbt.adapters.postgres import PostgresAdapter
5 | from dbt.adapters.timescaledb.timescaledb_connection_manager import (
6 | NO_TRANSACTION_MARKER,
7 | TimescaleDBConnectionManager,
8 | )
9 | from dbt.adapters.timescaledb.timescaledb_index_config import TimescaleDBIndexConfig
10 | from dbt.adapters.timescaledb.timescaledb_relation import TimescaleDBRelation
11 |
12 |
13 | class TimescaleDBAdapter(PostgresAdapter):
14 | ConnectionManager = TimescaleDBConnectionManager
15 | Relation = TimescaleDBRelation
16 |
17 | @available
18 | def parse_index(self, raw_index: Any) -> Optional[TimescaleDBIndexConfig]:
19 | return TimescaleDBIndexConfig.parse(raw_index)
20 |
21 | @available
22 | def marker_run_outside_transaction(self) -> str:
23 | return NO_TRANSACTION_MARKER
24 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.5.0
4 | hooks:
5 | - id: trailing-whitespace
6 | - id: end-of-file-fixer
7 | - id: check-yaml
8 | args: ['--unsafe']
9 | - id: check-added-large-files
10 | - id: check-json
11 | - id: check-ast
12 | - id: check-case-conflict
13 | - id: check-merge-conflict
14 | - id: check-shebang-scripts-are-executable
15 | - id: check-toml
16 | - id: fix-byte-order-marker
17 | - id: mixed-line-ending
18 | - repo: https://github.com/pdm-project/pdm
19 | rev: '2.13.0'
20 | hooks:
21 | - id: pdm-lock-check
22 | - repo: https://github.com/astral-sh/ruff-pre-commit
23 | rev: v0.3.4
24 | hooks:
25 | - id: ruff
26 | args: [ --fix ]
27 | - id: ruff-format
28 | - repo: https://github.com/IamTheFij/docker-pre-commit
29 | rev: v3.0.1
30 | hooks:
31 | - id: docker-compose-check
32 |
--------------------------------------------------------------------------------
/dbt/include/timescaledb/macros/relations/reorder_policy.sql:
--------------------------------------------------------------------------------
1 | {% macro add_reorder_policy(relation, reorder_config) %}
2 | {%- set index_dict = reorder_config.index -%}
3 | {%- set index_config = adapter.parse_index(index_dict) -%}
4 | {%- set index_name = index_config.render(relation) -%}
5 |
6 | {%- if reorder_config.create_index is none or reorder_config.create_index %}
7 | {{ get_create_index_sql(relation, index_dict) }}
8 | {% endif -%}
9 |
10 | select add_reorder_policy('{{ relation }}', '{{ index_name }}',
11 |
12 | {%- if reorder_config.initial_start %}
13 | initial_start => '{{ reorder_config.initial_start }}',
14 | {% endif -%}
15 |
16 | {%- if reorder_config.timezone %}
17 | timezone => '{{ reorder_config.timezone }}',
18 | {% endif -%}
19 |
20 | if_not_exists => true
21 | );
22 | {% endmacro %}
23 |
24 | {% macro clear_reorder_policy(relation) %}
25 | select remove_reorder_policy('{{ relation }}', if_exists => true);
26 | {% endmacro %}
27 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Sam Debruyn
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: tests
2 |
3 | on:
4 | push:
5 | branches: [ main ]
6 | pull_request:
7 | branches: [ main ]
8 |
9 | jobs:
10 | pytest:
11 | name: Run linting and tests
12 |
13 | runs-on: ubuntu-latest
14 | env:
15 | PDM_BUILD_SCM_VERSION: "1.0.0"
16 | services:
17 | timescaledb:
18 | image: timescale/timescaledb:latest-pg15
19 | env:
20 | POSTGRES_PASSWORD: timescaledb
21 | POSTGRES_USER: timescaledb
22 | POSTGRES_DB: timescaledb
23 | ports:
24 | - 5432:5432
25 |
26 | steps:
27 | - uses: actions/checkout@v4
28 |
29 | - uses: pdm-project/setup-pdm@v4
30 | with:
31 | cache: true
32 | python-version: '3.11'
33 |
34 | - uses: pre-commit/action@v3.0.1
35 | with:
36 | extra_args: --all-files --show-diff-on-failure
37 |
38 | - name: Install dependencies
39 | run: pdm install -x -dG test
40 |
41 | - name: Run tests
42 | run: pdm run pytest --cov=dbt
43 |
44 | - name: Upload coverage reports to Codecov
45 | uses: codecov/codecov-action@v5
46 | env:
47 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
48 | with:
49 | files: ./coverage.xml
50 |
--------------------------------------------------------------------------------
/tests/functional/adapter/hypertable/test_hypertable_index.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import run_dbt
7 | from tests.utils import get_indexes_sql
8 |
9 |
10 | class TestHypertableIndex:
11 | def _model_sql(self, create_default_indexes: bool) -> str:
12 | return f"""
13 | {{{{
14 | config(
15 | materialized = "hypertable",
16 | main_dimension = "time_column",
17 | create_default_indexes = {create_default_indexes},
18 | indexes=[
19 | {{'columns': ['col_1'], 'transaction_per_chunk': True}}
20 | ]
21 | )
22 | }}}}
23 |
24 | select
25 | current_timestamp as time_column,
26 | 1 as col_1
27 | """
28 |
29 | @pytest.fixture(scope="class")
30 | def models(self) -> dict[str, Any]:
31 | return {
32 | "with_default.sql": self._model_sql(True),
33 | "without_default.sql": self._model_sql(False),
34 | }
35 |
36 | def test_table(self, project: TestProjInfo, unique_schema: str) -> None:
37 | results = run_dbt(["run"])
38 | assert len(results) == 2
39 |
40 | with_default_results = project.run_sql(get_indexes_sql(unique_schema, "with_default"), fetch="all")
41 | without_default_results = project.run_sql(
42 | get_indexes_sql(unique_schema, "without_default"), fetch="all"
43 | )
44 |
45 | assert len(with_default_results) == 2
46 | assert len(without_default_results) == 1
47 |
--------------------------------------------------------------------------------
/docs/usage/retention-policies.md:
--------------------------------------------------------------------------------
1 | # Retention policies
2 |
3 | You can add a retention policy to automatically drop old chunks of data from (virtual) hypertables and continuous aggregates in the background.
4 |
5 | !!! info
6 | Consult the [Timescale docs](https://docs.timescale.com/use-timescale/latest/data-retention/about-data-retention/) to learn more about retention policies.
7 |
8 | !!! note
9 | You can only create 1 retention policy per hypertable or continuous aggregate.
10 |
11 | ## Usage
12 |
13 | === "SQL"
14 |
15 | ```sql+jinja title="models/my_hypertable.sql"
16 | {{
17 | config(
18 | materialized='hypertable',
19 | main_dimension='time_column',
20 | retention_policy={
21 | "drop_after": "interval '1 month'"
22 | }
23 | )
24 | }}
25 | select
26 | current_timestamp as time_column,
27 | 1 as column_a
28 | ```
29 |
30 | === "YAML"
31 |
32 | ```yaml title="dbt_project.yml"
33 | models:
34 | your_project_name:
35 | folder_containing_the_hypertables:
36 | +materialized: hypertable
37 | model_one:
38 | +main_dimension: time_column
39 | +retention_policy:
40 | drop_after: interval '1 month'
41 | # ...
42 | ```
43 |
44 | ## Configuration options
45 |
46 | The following configuration options are supported (as part of `retention_policy`); a combined sketch follows the list:
47 |
48 | * `drop_after` (required)
49 | * `schedule_interval`
50 | * `initial_start`
51 | * `timezone`
52 |
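53 | For illustration, a sketch combining all of these options (the values are placeholders; interval values are passed through as raw SQL, while `timezone` is a plain string):
54 |
55 | ```sql+jinja title="models/my_hypertable.sql"
56 | {# the policy values below are illustrative #}
57 | {{
58 |   config(
59 |     materialized='hypertable',
60 |     main_dimension='time_column',
61 |     retention_policy={
62 |       "drop_after": "interval '1 month'",
63 |       "schedule_interval": "interval '1 day'",
64 |       "initial_start": "'2024-01-01 00:00:00+00'::timestamptz",
65 |       "timezone": "Europe/Brussels"
66 |     }
67 |   )
68 | }}
69 | select
70 |   current_timestamp as time_column,
71 |   1 as column_a
72 | ```
73 |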
--------------------------------------------------------------------------------
/docs/usage/indexes.md:
--------------------------------------------------------------------------------
1 | # Transaction per chunk indexes
2 |
3 | In addition to all regular Postgres indexes, TimescaleDB also supports [transaction per chunk](https://docs.timescale.com/api/latest/hypertable/create_index/) indexes.
4 |
5 | !!! info
6 | Consult the [Timescale docs](https://docs.timescale.com/api/latest/hypertable/create_index/) for more information regarding the usage of these indexes.
7 |
8 | ## Usage
9 |
10 | To create a transaction per chunk index, simply set the `transaction_per_chunk` option to `true` in the [index configuration](https://docs.getdbt.com/reference/resource-configs/postgres-configs#indexes), similar to the `unique` option.
11 |
12 | === "SQL"
13 |
14 | ```sql+jinja hl_lines="5" title="models/my_hypertable.sql"
15 | {{ config(
16 | materialized='hypertable',
17 | main_dimension='time_column',
18 | indexes=[
19 | {'columns': ['column_a'], 'transaction_per_chunk': True}
20 | ]
21 | ) }}
22 |
23 | select ...
24 | ```
25 |
26 | === "YAML"
27 |
28 | ```yaml hl_lines="8" title="dbt_project.yml"
29 | models:
30 | your_project_name:
31 | model_name:
32 | +materialized: hypertable
33 | +main_dimension: time_column
34 | +indexes:
35 | - columns: ['column_a']
36 | transaction_per_chunk: true
37 | # ...
38 | ```
39 |
40 | !!! info
41 | Consult the [dbt Postgres docs](https://docs.getdbt.com/reference/resource-configs/postgres-configs#indexes) for more information regarding how indexes can be configured.
42 |
--------------------------------------------------------------------------------
/tests/functional/adapter/continuous_aggregate/test_continuous_aggregate_index.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import run_dbt
7 |
8 |
9 | class TestContinuousAggregateIndex:
10 | def _model_sql(self, create_group_indexes: bool) -> str:
11 | return f"""
12 | {{{{
13 | config(
14 | materialized = "continuous_aggregate",
15 | create_group_indexes = {create_group_indexes},
16 | indexes=[
17 | {{'columns': ['col_1']}}
18 | ]
19 | )
20 | }}}}
21 |
22 | select
23 | count(*) as col_1,
24 | time_bucket(interval '1 day', time_column) as bucket
25 | from {{{{ ref('base') }}}}
26 | group by 2
27 | """
28 |
29 | @pytest.fixture(scope="class")
30 | def project_config_update(self) -> dict[str, Any]:
31 | return {
32 | "name": "continuous_aggregate_index_tests",
33 | "models": {
34 | "continuous_aggregate_index_tests": {
35 | "base": {"+materialized": "hypertable", "+main_dimension": "time_column"},
36 | }
37 | },
38 | }
39 |
40 | @pytest.fixture(scope="class")
41 | def models(self) -> dict[str, Any]:
42 | return {
43 | "base.sql": "select current_timestamp as time_column",
44 | "with_default.sql": self._model_sql(True),
45 | "without_default.sql": self._model_sql(False),
46 | }
47 |
48 | def test_continuous_aggregate(self, project: TestProjInfo) -> None:
49 | results = run_dbt(["run"])
50 | assert len(results) == 3
51 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_basic.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.basic.test_adapter_methods import BaseAdapterMethod
2 | from dbt.tests.adapter.basic.test_base import BaseSimpleMaterializations
3 | from dbt.tests.adapter.basic.test_empty import BaseEmpty
4 | from dbt.tests.adapter.basic.test_ephemeral import BaseEphemeral
5 | from dbt.tests.adapter.basic.test_generic_tests import BaseGenericTests
6 | from dbt.tests.adapter.basic.test_incremental import BaseIncremental
7 | from dbt.tests.adapter.basic.test_singular_tests import BaseSingularTests
8 | from dbt.tests.adapter.basic.test_singular_tests_ephemeral import BaseSingularTestsEphemeral
9 | from dbt.tests.adapter.basic.test_snapshot_check_cols import BaseSnapshotCheckCols
10 | from dbt.tests.adapter.basic.test_snapshot_timestamp import BaseSnapshotTimestamp
11 |
12 |
13 | class TestSimpleMaterializationsTimescaleDB(BaseSimpleMaterializations):
14 | pass
15 |
16 |
17 | class TestSingularTestsTimescaleDB(BaseSingularTests):
18 | pass
19 |
20 |
21 | class TestSingularTestsEphemeralTimescaleDB(BaseSingularTestsEphemeral):
22 | pass
23 |
24 |
25 | class TestEmptyTimescaleDB(BaseEmpty):
26 | pass
27 |
28 |
29 | class TestEphemeralTimescaleDB(BaseEphemeral):
30 | pass
31 |
32 |
33 | class TestIncrementalTimescaleDB(BaseIncremental):
34 | pass
35 |
36 |
37 | class TestGenericTestsTimescaleDB(BaseGenericTests):
38 | pass
39 |
40 |
41 | class TestSnapshotCheckColsTimescaleDB(BaseSnapshotCheckCols):
42 | pass
43 |
44 |
45 | class TestSnapshotTimestampTimescaleDB(BaseSnapshotTimestamp):
46 | pass
47 |
48 |
49 | class TestBaseAdapterMethodTimescaleDB(BaseAdapterMethod):
50 | pass
51 |
--------------------------------------------------------------------------------
/docs/usage/reorder-policies.md:
--------------------------------------------------------------------------------
1 | # Reorder policies
2 |
3 | You can add a reorder policy to reorder chunks on a given (virtual) hypertable index in the background.
4 |
5 | !!! info
6 | Consult the [Timescale docs](https://docs.timescale.com/api/latest/hypertable/add_reorder_policy/) to learn more about reorder policies.
7 |
8 | !!! note
9 | You can only create 1 reorder policy per hypertable.
10 |
11 | ## Usage
12 |
13 | === "SQL"
14 |
15 | ```sql+jinja title="models/my_hypertable.sql"
16 | {{
17 | config(
18 | materialized='hypertable',
19 | main_dimension='time_column',
20 | reorder_policy={
21 |             'index': {
22 |                 'columns': ['column_a']
23 |             }
24 | }
25 | )
26 | }}
27 | select
28 | current_timestamp as time_column,
29 | 1 as column_a
30 | ```
31 |
32 | === "YAML"
33 |
34 | ```yaml title="dbt_project.yml"
35 | models:
36 | your_project_name:
37 | folder_containing_the_hypertables:
38 | +materialized: hypertable
39 | model_one:
40 | +main_dimension: time_column
41 | +reorder_policy:
42 | index:
43 | columns: ['column_a']
44 | # ...
45 | ```
46 |
47 | ## Configuration options
48 |
49 | * `index` (required): The configuration for the index to reorder on. See [dbt Postgres docs](https://docs.getdbt.com/reference/resource-configs/postgres-configs#indexes) for more information regarding how indexes can be configured.
50 | * `create_index`: `true` by default. A boolean indicating whether the index specified in `index` should be created; set it to `false` if the index already exists (see the sketch below).
51 |
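52 | For illustration, a sketch that reuses an index declared in `indexes` rather than letting the policy create its own (column names are placeholders):
53 |
54 | ```sql+jinja title="models/my_hypertable.sql"
55 | {# create_index: False makes the policy reuse the matching index from 'indexes' #}
56 | {{
57 |   config(
58 |     materialized='hypertable',
59 |     main_dimension='time_column',
60 |     indexes=[{'columns': ['time_column', 'column_a']}],
61 |     reorder_policy={
62 |       'create_index': False,
63 |       'index': {'columns': ['time_column', 'column_a']}
64 |     }
65 |   )
66 | }}
67 | select
68 |   current_timestamp as time_column,
69 |   1 as column_a
70 | ```
71 |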
--------------------------------------------------------------------------------
/docs_build/integer_now_func.md:
--------------------------------------------------------------------------------
1 | ### integer_now_func
2 |
3 | The following 2 options are available for (virtual) hypertables where the time column is not a timestamp:
4 |
5 | * `integer_now_func` (string): name of a function to be used to generate the current time as an integer.
6 | * `integer_now_func_sql` (string, optional): SQL code for the function mentioned above. If provided, the function with the name set in `integer_now_func` will be created. If not provided, an error will be thrown if the function does not exist already.
7 |
8 | !!! tip "Use a macro"
9 | You could also call a macro for your `integer_now_func_sql`.
10 |
11 | !!! tip "Idempotent"
12 | The `integer_now_func_sql` is idempotent and will replace an existing function if a function with the given name already exists. So while it may cause some overhead during the dbt run, it doesn't matter if you share this config across multiple models.
13 |
14 | !!! tip "The name is enough"
15 | You don't have to provide the SQL code for the function if you already have a function with the name set in `integer_now_func` in your database. You could create the function once in a single model or with `dbt run-operation` and then reuse it in all other models.
16 |
17 | !!! info
18 | Consult the [Timescale docs](https://docs.timescale.com/api/latest/hypertable/set_integer_now_func/) for more information regarding this functionality.
19 |
20 | ```sql+jinja title="models/my_hypertable.sql"
21 | {{
22 | config(
23 | materialized='hypertable',
24 | main_dimension='time_column',
25 | integer_now_func='my_hypertable_int_to_now',
26 | integer_now_func_sql='select extract(epoch from now())::bigint'
27 | )
28 | }}
29 | select 1::bigint as time_column
30 | ```
31 |
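32 | As an example of the macro tip above: with a hypothetical `epoch_now_sql()` macro in your project that returns the same SQL string, the config becomes:
33 |
34 | ```sql+jinja title="models/my_hypertable.sql"
35 | {# epoch_now_sql() is a hypothetical macro defined in your own project #}
36 | {{
37 |   config(
38 |     materialized='hypertable',
39 |     main_dimension='time_column',
40 |     integer_now_func='my_hypertable_int_to_now',
41 |     integer_now_func_sql=epoch_now_sql()
42 |   )
43 | }}
44 | select 1::bigint as time_column
45 | ```
46 |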
--------------------------------------------------------------------------------
/tests/functional/adapter/virtual_hypertable/test_virtual_hypertable_chunk_time_interval.py:
--------------------------------------------------------------------------------
1 | from datetime import timedelta
2 | from typing import Any
3 |
4 | import pytest
5 |
6 | from dbt.tests.fixtures.project import TestProjInfo
7 | from dbt.tests.util import run_dbt
8 |
9 |
10 | class TestVirtualHypertableChunkTimeInterval:
11 | @pytest.fixture(scope="class")
12 | def models(self) -> dict[str, Any]:
13 | return {"vht.sql": "--"}
14 |
15 | @pytest.fixture(scope="class")
16 | def project_config_update(self) -> dict[str, Any]:
17 | return {
18 | "name": "virtual_hypertable_tests",
19 | "models": {
20 | "virtual_hypertable_tests": {
21 | "vht": {"+materialized": "virtual_hypertable", "+chunk_time_interval": "interval '1 day'"}
22 | }
23 | },
24 | }
25 |
26 | def test_virtual_hypertable_chunk_time_interval(self, project: TestProjInfo, unique_schema: str) -> None:
27 | project.run_sql(f"""
28 | create table {unique_schema}.vht (time_column timestamp, col_1 int);
29 | select create_hypertable('{unique_schema}.vht', by_range('time_column'));""")
30 |
31 | check_interval_query = f"""
32 | select time_interval from timescaledb_information.dimensions
33 | where hypertable_schema = '{unique_schema}'
34 | and hypertable_name = 'vht'
35 | and column_name = 'time_column';
36 | """
37 | before: timedelta = project.run_sql(check_interval_query, fetch="all")[0][0]
38 |
39 | results = run_dbt(["run"])
40 | assert len(results) == 1
41 |
42 | after: timedelta = project.run_sql(check_interval_query, fetch="all")[0][0]
43 |
44 | assert before.days == 7
45 | assert after.days == 1
46 |
--------------------------------------------------------------------------------
/dbt/include/timescaledb/macros/relations/continuous_aggregate.sql:
--------------------------------------------------------------------------------
1 | {% macro do_refresh_continuous_aggregate(relation) %}
2 | {% call statement('refresh', fetch_result=False, auto_begin=False) %}
3 | {{ adapter.marker_run_outside_transaction() }}
4 | call refresh_continuous_aggregate('{{ relation }}', null, null);
5 | {% endcall %}
6 | {% endmacro %}
7 |
8 | {% macro get_create_continuous_aggregate_as_sql(relation, sql) %}
9 | create materialized view if not exists {{ relation }}
10 | with (
11 | timescaledb.continuous
12 |
13 | {%- if config.get('materialized_only') %}
14 | ,timescaledb.materialized_only = {{ config.get("materialized_only") }}
15 | {% endif -%}
16 |
17 | {%- if config.get('create_group_indexes') %}
18 | ,timescaledb.create_group_indexes = {{ config.get("create_group_indexes") }}
19 | {% endif -%}
20 |
21 | ) as {{ sql }}
22 | with no data;
23 | {% endmacro %}
24 |
25 | {% macro add_refresh_policy(relation, refresh_config) %}
26 | select add_continuous_aggregate_policy('{{ relation }}',
27 | start_offset => {{ refresh_config.start_offset }},
28 | end_offset => {{ refresh_config.end_offset }},
29 |
30 | {%- if refresh_config.schedule_interval %}
31 | schedule_interval => {{ refresh_config.schedule_interval }},
32 | {% endif -%}
33 |
34 | {%- if refresh_config.initial_start %}
35 | initial_start => {{ refresh_config.initial_start }},
36 | {% endif -%}
37 |
38 | {%- if refresh_config.timezone %}
39 | timezone => '{{ refresh_config.timezone }}',
40 | {% endif -%}
41 |
42 | if_not_exists => true);
43 | {% endmacro %}
44 |
45 | {% macro clear_refresh_policy(relation) %}
46 | select remove_continuous_aggregate_policy('{{ relation }}', if_exists => true);
47 | {% endmacro %}
48 |
--------------------------------------------------------------------------------
/dbt/adapters/timescaledb/timescaledb_connection_manager.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Optional, Tuple
2 |
3 | from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
4 |
5 | from dbt.adapters.contracts.connection import Connection
6 | from dbt.adapters.events.logging import AdapterLogger
7 | from dbt.adapters.postgres.connections import PostgresConnectionManager
8 |
9 | NO_TRANSACTION_MARKER = "/* MARKER SHOULD RUN OUTSIDE TRANSACTION */"
10 |
11 | logger = AdapterLogger("TimescaleDB")
12 |
13 |
14 | class TimescaleDBConnectionManager(PostgresConnectionManager):
15 | TYPE = "timescaledb"
16 |
17 | def add_query(
18 | self,
19 | sql: str,
20 | auto_begin: bool = True,
21 | bindings: Optional[Any] = None,
22 | abridge_sql_log: bool = False,
23 | ) -> Tuple[Connection, Any]:
24 | restore_isolation_level = ISOLATION_LEVEL_AUTOCOMMIT
25 | connection = None
26 |
27 | if NO_TRANSACTION_MARKER in sql:
28 | logger.debug("Found marker to run SQL outside transaction")
29 | auto_begin = False
30 | connection = self.get_thread_connection()
31 | restore_isolation_level = connection.handle.isolation_level
32 | logger.debug(f"Current isolation level: {restore_isolation_level}")
33 | connection.handle.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
34 | logger.debug(f"Set isolation level to {ISOLATION_LEVEL_AUTOCOMMIT} and autocommit to False")
35 |
36 | try:
37 | res1, res2 = super().add_query(sql, auto_begin, bindings, abridge_sql_log)
38 | finally:
39 | if restore_isolation_level != ISOLATION_LEVEL_AUTOCOMMIT:
40 | logger.debug(f"Restoring isolation level to {restore_isolation_level}")
41 | connection.handle.set_isolation_level(restore_isolation_level)
42 |
43 | return res1, res2
44 |
--------------------------------------------------------------------------------
/dbt/include/timescaledb/macros/relations/dimensions.sql:
--------------------------------------------------------------------------------
1 | {% macro create_dimensions(relation) %}
2 | {%- set _dimensions = config.get('dimensions', default=[]) -%}
3 | {% for _dimension in _dimensions %}
4 | {% set create_dimension_sql = add_dimension(relation, _dimension) %}
5 | {% do run_query(create_dimension_sql) %}
6 | {% endfor %}
7 | {% endmacro %}
8 |
9 | {% macro add_dimension(relation, dimension_config) %}
10 | select add_dimension('{{ relation }}', {{ parse_dimension_config(dimension_config) }});
11 | {% endmacro %}
12 |
13 | {% macro parse_dimension_config(config_object) %}
14 | {#
15 | Example config objects:
16 |
17 | some_by_range_dimension = {
18 | "column_name": "the name of the column",
19 | "type": "by_range",
20 | "partition_interval": "interval '1 day'",
21 | "partition_func": "the name of the function"
22 | }
23 |
24 | or the shorthand version with just the name of the column for a by_range
25 |
26 | some_by_hash_dimension = {
27 | "column_name": "the name of the column",
28 | "type": "by_hash",
29 | "number_partitions": 123,
30 | "partition_func": "the name of the function"
31 | }
32 |
33 | #}
34 |
35 | {% if config_object is string %}
36 | {% set dimension_config = {"column_name": config_object} %}
37 | {% else %}
38 | {% set dimension_config = config_object %}
39 | {% endif %}
40 |
41 | {{- dimension_config.type|default('by_range') }}('{{ dimension_config.column_name }}'
42 | {%- if dimension_config.number_partitions %}
43 | , number_partitions => {{ dimension_config.number_partitions }}
44 | {% endif -%}
45 | {%- if dimension_config.partition_interval %}
46 | , partition_interval => {{ dimension_config.partition_interval }}
47 | {% endif -%}
48 | {%- if dimension_config.partition_func %}
49 | , partition_func => '{{ dimension_config.partition_func }}'
50 | {% endif -%}
51 | )
52 |
53 | {% endmacro %}
54 |
--------------------------------------------------------------------------------
/dbt/include/timescaledb/macros/relations/compression.sql:
--------------------------------------------------------------------------------
1 | {% macro set_compression(relation, compression_config) %}
2 | {%- if relation.is_materialized_view -%}
3 | {%- set relation_type = "materialized view" -%}
4 | {%- elif relation.is_table -%}
5 | {%- set relation_type = "table" -%}
6 | {%- else -%}
7 | {{ exceptions.raise_compiler_error("Cannot enable compression on a " ~ relation.type) }}
8 | {%- endif -%}
9 |
10 | {% set bool_compression = compression_config is not none %}
11 |
12 | alter {{ relation_type }} {{ relation }} set (
13 | timescaledb.compress = {{ bool_compression }}
14 |
15 | {%- if compression_config and compression_config.orderby %}
16 | ,timescaledb.compress_orderby = '{{ compression_config.orderby }}'
17 | {% endif -%}
18 |
19 | {%- if compression_config and compression_config.segmentby %}
20 | ,timescaledb.compress_segmentby = '{{ compression_config.segmentby | join(",") }}'
21 | {% endif -%}
22 |
23 | {%- if compression_config and compression_config.chunk_time_interval %}
24 | ,timescaledb.compress_chunk_time_interval = '{{ compression_config.chunk_time_interval }}'
25 | {% endif -%}
26 | );
27 | {% endmacro %}
28 |
29 | {% macro add_compression_policy(relation, compression_config) %}
30 | select add_compression_policy(
31 | '{{ relation }}',
32 | {{ compression_config.after }},
33 |
34 | {%- if compression_config.schedule_interval %}
35 | schedule_interval => {{ compression_config.schedule_interval }},
36 | {% endif -%}
37 |
38 | {%- if compression_config.initial_start %}
39 | initial_start => {{ compression_config.initial_start }},
40 | {% endif -%}
41 |
42 | {%- if compression_config.timezone %}
43 | timezone => '{{ compression_config.timezone }}',
44 | {% endif -%}
45 |
46 | if_not_exists => true);
47 | {% endmacro %}
48 |
49 | {% macro clear_compression_policy(relation) %}
50 | select remove_compression_policy('{{ relation }}', if_exists => true);
51 | {% endmacro %}
52 |
--------------------------------------------------------------------------------
/tests/functional/adapter/hypertable/test_hypertable.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import (
7 | check_result_nodes_by_name,
8 | relation_from_name,
9 | run_dbt,
10 | )
11 |
12 | _base_model_config: dict[str, str] = {
13 | "+materialized": "hypertable",
14 | "+main_dimension": "time_column",
15 | }
16 |
17 | _models_with_configs: dict[str, Any] = {
18 | "default": _base_model_config,
19 | "empty": _base_model_config
20 | | {
21 | "+empty_hypertable": True,
22 | },
23 | }
24 |
25 | _model_sql: str = """
26 | select
27 | current_timestamp as time_column,
28 | 1 as col_1
29 | """
30 |
31 |
32 | class TestHypertable:
33 | @pytest.fixture(scope="class")
34 | def project_config_update(self) -> dict[str, Any]:
35 | return {
36 | "name": "hypertable_tests",
37 | "models": {"hypertable_tests": _models_with_configs},
38 | }
39 |
40 | @pytest.fixture(scope="class")
41 | def models(self) -> dict[str, Any]:
42 | return {f"{k}.sql": _model_sql for k in _models_with_configs.keys()}
43 |
44 | def test_hypertable(self, project: TestProjInfo, unique_schema: str) -> None:
45 | results = run_dbt(["run"])
46 | assert len(results) == len(_models_with_configs)
47 | check_result_nodes_by_name(results, _models_with_configs.keys())
48 | assert all(result.node.config.materialized == "hypertable" for result in results)
49 |
50 | hypertables = project.run_sql(
51 | f"""
52 | select *
53 | from timescaledb_information.hypertables
54 | where hypertable_schema = '{unique_schema}'""",
55 | fetch="all",
56 | )
57 | assert len(hypertables) == len(_models_with_configs)
58 |
59 | for model in _models_with_configs.keys():
60 | relation = relation_from_name(project.adapter, model)
61 | result = project.run_sql(f"select count(*) as num_rows from {relation}", fetch="one")
62 | assert result[0] == (0 if model == "empty" else 1)
63 |
--------------------------------------------------------------------------------
/tests/functional/adapter/hypertable/test_reorder_policy.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import run_dbt
7 | from tests.utils import get_indexes_sql
8 |
9 |
10 | class TestHypertableReorderPolicy:
11 | def _model_sql(self, create_index: bool) -> str:
12 | return f"""
13 | {{{{
14 | config(
15 | materialized = "hypertable",
16 | main_dimension = "time_column",
17 | create_default_indexes = False,
18 | indexes = [{{
19 | "columns": ["time_column", "col_1"]
20 | }}],
21 | reorder_policy = {{
22 | "create_index": {create_index},
23 | "index": {{ "columns": ["col_1"] if {create_index} else ["time_column", "col_1"] }}
24 | }},
25 | )
26 | }}}}
27 |
28 | select
29 | current_timestamp as time_column,
30 | 1 as col_1
31 | """
32 |
33 | @pytest.fixture(scope="class")
34 | def models(self) -> dict[str, Any]:
35 | return {
36 | "create_index.sql": self._model_sql(True),
37 | "sep_index.sql": self._model_sql(False),
38 | }
39 |
40 | def test_reorder_policy(self, project: TestProjInfo, unique_schema: str) -> None:
41 | results = run_dbt(["run"])
42 | assert len(results) == 2
43 |
44 | create_index_results = project.run_sql(get_indexes_sql(unique_schema, "create_index"), fetch="all")
45 | sep_index_results = project.run_sql(get_indexes_sql(unique_schema, "sep_index"), fetch="all")
46 |
47 | assert len(create_index_results) == 2, "Expected 2 indexes when index should be created"
48 | assert len(sep_index_results) == 1, "Expected 1 index on separate index creation"
49 |
50 | timescale_jobs = project.run_sql(
51 | f"""
52 | select *
53 | from timescaledb_information.jobs
54 | where application_name like 'Reorder Policy%'
55 | and hypertable_schema = '{unique_schema}'""",
56 | fetch="all",
57 | )
58 | assert len(timescale_jobs) == 2
59 | table_names = [job[15] for job in timescale_jobs]
60 | assert set(table_names) == {"create_index", "sep_index"}
61 |
--------------------------------------------------------------------------------
/tests/functional/adapter/virtual_hypertable/test_virtual_hypertable_compression.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import run_dbt
7 |
8 |
9 | class TestVirtualHypertableCompression:
10 | @pytest.fixture(scope="class")
11 | def models(self) -> dict[str, Any]:
12 | return {
13 | "vht.sql": """
14 | {% if var("enable_compression", false) %}
15 | {{ config(compression={"after": "interval '1 day'"}) }}
16 | {% endif %}
17 | --
18 | """
19 | }
20 |
21 | @pytest.fixture(scope="class")
22 | def project_config_update(self) -> dict[str, Any]:
23 | return {
24 | "name": "virtual_hypertable_tests",
25 | "models": {"virtual_hypertable_tests": {"vht": {"+materialized": "virtual_hypertable"}}},
26 | }
27 |
28 | def count_compression_settings(self, project: TestProjInfo, unique_schema: str) -> int:
29 | compression_settings = project.run_sql(
30 | f"""
31 | select *
32 | from timescaledb_information.compression_settings
33 | where hypertable_name = 'vht'
34 | and hypertable_schema = '{unique_schema}'""",
35 | fetch="all",
36 | )
37 | return len(compression_settings)
38 |
39 | def test_virtual_hypertable_compression(self, project: TestProjInfo, unique_schema: str) -> None:
40 | project.run_sql(f"""
41 | create table {unique_schema}.vht (time_column timestamp, col_1 int);
42 | select create_hypertable('{unique_schema}.vht', by_range('time_column'));""")
43 | results = run_dbt(["run"])
44 | assert len(results) == 1
45 |
46 | assert self.count_compression_settings(project, unique_schema) == 0
47 |
48 | run_enable_results = run_dbt(["run", "--vars", "enable_compression: true"])
49 | assert len(run_enable_results) == 1
50 |
51 | assert self.count_compression_settings(project, unique_schema) == 1
52 |
53 | run_disable_results = run_dbt(["run"])
54 | assert len(run_disable_results) == 1
55 |
56 | assert self.count_compression_settings(project, unique_schema) == 0
57 |
--------------------------------------------------------------------------------
/dbt/include/timescaledb/macros/relations/hypertable.sql:
--------------------------------------------------------------------------------
1 | {% macro get_create_hypertable_as_sql(relation) %}
2 | {% set main_dimension = config.get("main_dimension") %}
3 | {% if not main_dimension %}
4 | {{ exceptions.raise_compiler_error("The configuration option main_dimension is required for hypertables.") }}
5 | {% endif %}
6 |
7 | select create_hypertable(
8 | '{{ relation }}',
9 | {{ parse_dimension_config(main_dimension) }},
10 |
11 | {%- if config.get('create_default_indexes') is not none %}
12 | create_default_indexes => {{ config.get('create_default_indexes') }},
13 | {% endif -%}
14 |
15 | migrate_data => true {# Required since dbt models will always contain data #}
16 | );
17 | {% endmacro %}
18 |
19 | {%- macro timescaledb__update_indexes_on_virtual_hypertable(relation, index_changes) -%}
20 | {%- for _index_change in index_changes -%}
21 | {%- set _index = _index_change.context -%}
22 |
23 | {%- if _index_change.action == "drop" -%}
24 | {{ postgres__get_drop_index_sql(relation, _index.name) }};
25 |
26 | {%- elif _index_change.action == "create" -%}
27 | {{ postgres__get_create_index_sql(relation, _index.as_node_config) }}
28 |
29 | {%- endif -%}
30 |
31 | {%- endfor -%}
32 |
33 | {%- endmacro -%}
34 |
35 | {% macro describe_hypertable(relation) %}
36 | {% set _indexes = run_query(get_show_indexes_sql(relation)) %}
37 | {% do return({'indexes': _indexes}) %}
38 | {% endmacro %}
39 |
40 | {% macro get_virtual_hypertable_change_collection(existing_relation, new_config) %}
41 | {% set _existing_hypertable = describe_hypertable(existing_relation) %}
42 | {% set _change_collection = existing_relation.get_hypertable_config_change_collection(_existing_hypertable, new_config.model) %}
43 | {% do return(_change_collection) %}
44 | {% endmacro %}
45 |
46 | {% macro set_chunk_time_interval(relation, chunk_time_interval, dimension_name = none) %}
47 | select set_chunk_time_interval('{{ relation }}', {{ chunk_time_interval }}
48 | {%- if dimension_name %}
49 | , dimension_name => '{{ dimension_name }}'
50 | {%- endif %}
51 | );
52 | {% endmacro %}
53 |
--------------------------------------------------------------------------------
/dbt/adapters/timescaledb/timescaledb_relation.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from typing import Optional
3 |
4 | import agate
5 |
6 | from dbt.adapters.contracts.relation import RelationConfig
7 | from dbt.adapters.postgres.relation import PostgresRelation
8 | from dbt.adapters.postgres.relation_configs import (
9 | PostgresIndexConfig,
10 | )
11 | from dbt.adapters.relation_configs import (
12 | RelationResults,
13 | )
14 | from dbt.adapters.timescaledb.timescaledb_change_collection import TimescaleDBHypertableConfigChangeCollection
15 |
16 |
17 | @dataclass(frozen=True, eq=False, repr=False)
18 | class TimescaleDBRelation(PostgresRelation):
19 | def get_hypertable_config_change_collection(
20 | self, relation_results: RelationResults, relation_config: RelationConfig
21 | ) -> Optional[TimescaleDBHypertableConfigChangeCollection]:
22 | if not relation_results:
23 | return None
24 |
25 | index_rows: agate.Table = relation_results.get("indexes", agate.Table(rows={}))
26 | index_dicts = [PostgresIndexConfig.parse_relation_results(index) for index in index_rows.rows]
27 | index_list = [PostgresIndexConfig.from_dict(index) for index in index_dicts]
28 | filtered_list = [
29 | index
30 | for index in index_list
31 | if not (
32 | not index.unique
33 | and index.method == "btree"
34 | and len(index.column_names) == 1
35 | and index.name.endswith("_idx")
36 | )
37 | ]
38 | index_set = frozenset(filtered_list)
39 |
40 | indexes_from_config = relation_config.config.get("indexes", [])
41 | parsed_from_config = [PostgresIndexConfig.parse_model_node(index) for index in indexes_from_config]
42 | obj_from_config = [PostgresIndexConfig.from_dict(index) for index in parsed_from_config]
43 | set_from_config = frozenset(obj_from_config)
44 |
45 | changeset = TimescaleDBHypertableConfigChangeCollection(
46 | indexes=self._get_index_config_changes(index_set, set_from_config)
47 | )
48 | return changeset if changeset.has_changes else None
49 |
--------------------------------------------------------------------------------
/tests/functional/adapter/hypertable/test_hypertable_integer_now_func.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import run_dbt
7 |
8 |
9 | class BaseTestHypertableIntegerNowFunc:
10 | @pytest.fixture(scope="class")
11 | def extra_model_config(self) -> dict[str, Any]:
12 | return {}
13 |
14 | @pytest.fixture(scope="class")
15 | def project_config_update(self, extra_model_config: dict[str, Any]) -> dict[str, Any]:
16 | return {
17 | "name": "hypertable_tests",
18 | "models": {
19 | "hypertable_tests": {
20 | "test_model": {
21 | "+materialized": "hypertable",
22 | "+main_dimension": "id",
23 | "+integer_now_func": "test_model_now",
24 | }
25 | | extra_model_config,
26 | }
27 | },
28 | }
29 |
30 | @pytest.fixture(scope="class")
31 | def models(self) -> dict[str, Any]:
32 | return {
33 | "test_model.sql": "select 1::bigint as id",
34 | }
35 |
36 | def prepare_func(self, project: TestProjInfo, unique_schema: str) -> None:
37 | pass
38 |
39 | def test_integer_now_func(self, project: TestProjInfo, unique_schema: str) -> None:
40 | self.prepare_func(project, unique_schema)
41 | results = run_dbt(["run"])
42 | assert len(results) == 1
43 |
44 |
45 | class TestHypertableIntegerNowFuncWithoutSQL(BaseTestHypertableIntegerNowFunc):
46 | def prepare_func(self, project: TestProjInfo, unique_schema: str) -> None:
47 | project.run_sql(
48 | f"""
49 | create or replace function {project.database}.{unique_schema}.test_model_now()
50 | returns bigint language sql immutable as $$
51 | select extract(epoch from now())::bigint
52 | $$;
53 | """
54 | )
55 |
56 |
57 | class TestHypertableIntegerNowFuncWithSQL(BaseTestHypertableIntegerNowFunc):
58 | @pytest.fixture(scope="class")
59 | def extra_model_config(self) -> dict[str, Any]:
60 | return {"integer_now_func_sql": "select extract(epoch from now())::bigint"}
61 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "dbt-timescaledb"
3 | dynamic = ["version"]
4 | description = "The TimescaleDB adapter plugin for dbt"
5 | authors = [
6 | {name = "Sam Debruyn", email = "dbt.sam@debruyn.dev"},
7 | ]
8 | dependencies = [
9 | "dbt-postgres>=1.8.0b2",
10 | "dbt-adapters>=1.0.0",
11 | "dbt-common>=1.0.0",
12 | ]
13 | requires-python = ">=3.11"
14 | readme = "README.md"
15 | license = {text = "MIT"}
16 | keywords = ["dbt", "timescaledb"]
17 |
18 | classifiers = [
19 | "Development Status :: 4 - Beta",
20 | "License :: OSI Approved :: MIT License",
21 | "Operating System :: Microsoft :: Windows",
22 | "Operating System :: MacOS :: MacOS X",
23 | "Operating System :: POSIX :: Linux",
24 | "Programming Language :: Python :: 3.12"
25 | ]
26 |
27 | [project.urls]
28 | Homepage = "https://dbt-timescaledb.debruyn.dev"
29 | Repository = "https://github.com/sdebruyn/dbt-timescaledb"
30 | Documentation = "https://dbt-timescaledb.debruyn.dev/usage/"
31 |
32 | [build-system]
33 | requires = ["pdm-backend"]
34 | build-backend = "pdm.backend"
35 |
36 | [tool.pdm]
37 | plugins = [
38 | "sync-pre-commit-lock"
39 | ]
40 |
41 | [tool.pdm.build]
42 | includes = ["dbt/"]
43 |
44 | [tool.pdm.version]
45 | source = "scm"
46 | write_to = "dbt/adapters/timescaledb/__version__.py"
47 | write_template = "version = '{}'"
48 |
49 | [tool.pdm.dev-dependencies]
50 | dev = [
51 | "pre-commit>=3.5.0",
52 | "ruff>=0.1.4",
53 | ]
54 | test = [
55 | "pytest>=7.4.3",
56 | "dbt-tests-adapter>=1.8.0b1",
57 | "pytest-xdist[psutil]>=3.4.0",
58 | "pytest-cov>=4.1.0",
59 | "pytest-dotenv>=0.5.2",
60 | ]
61 | docs = [
62 | "mkdocs-material[imaging]>=9.4.8",
63 | "Pygments>=2.16.1",
64 | "mkdocs-git-revision-date-localized-plugin>=1.2.1",
65 | "mkdocs-autorefs>=0.5.0",
66 | "pymdown-extensions>=10.4",
67 | "mkdocs-open-in-new-tab>=1.0.3",
68 | ]
69 |
70 | [tool.ruff]
71 | line-length = 110
72 | target-version = "py311"
73 |
74 | [tool.ruff.lint]
75 | select = ["E", "F", "B", "W", "I", "PL", "ANN"]
76 | ignore = ["PLR2004", "ANN101", "ANN401"]
77 |
78 | [tool.pytest.ini_options]
79 | env_files = ["test.env"]
80 | testpaths = ["tests"]
81 | addopts = "-W ignore::pytest.PytestCollectionWarning -v -n auto --cov-report=xml"
82 |
--------------------------------------------------------------------------------
/tests/functional/adapter/virtual_hypertable/test_virtual_hypertable_integer_now_func.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import run_dbt
7 |
8 |
9 | class BaseTestVirtualHypertableIntegerNowFunc:
10 | @pytest.fixture(scope="class")
11 | def extra_model_config(self) -> dict[str, Any]:
12 | return {}
13 |
14 | @pytest.fixture(scope="class")
15 | def project_config_update(self, extra_model_config: dict[str, Any]) -> dict[str, Any]:
16 | return {
17 | "name": "virtual_hypertable_tests",
18 | "models": {
19 | "virtual_hypertable_tests": {
20 | "vht": {
21 | "+materialized": "virtual_hypertable",
22 | "+integer_now_func": "test_model_now",
23 | }
24 | | extra_model_config,
25 | }
26 | },
27 | }
28 |
29 | @pytest.fixture(scope="class")
30 | def models(self) -> dict[str, Any]:
31 | return {
32 | "vht.sql": "--",
33 | }
34 |
35 | def prepare_func(self, project: TestProjInfo, unique_schema: str) -> None:
36 | project.run_sql(f"""
37 | create table {unique_schema}.vht (id bigint);
38 | select create_hypertable('{unique_schema}.vht', by_range('id'));""")
39 |
40 | def test_integer_now_func(self, project: TestProjInfo, unique_schema: str) -> None:
41 | self.prepare_func(project, unique_schema)
42 | results = run_dbt(["run"])
43 | assert len(results) == 1
44 |
45 |
46 | class TestVirtualHypertableIntegerNowFuncWithoutSQL(BaseTestVirtualHypertableIntegerNowFunc):
47 | def prepare_func(self, project: TestProjInfo, unique_schema: str) -> None:
48 | super().prepare_func(project, unique_schema)
49 | project.run_sql(
50 | f"""
51 | create or replace function {unique_schema}.test_model_now() returns bigint language sql immutable as $$
52 | select extract(epoch from now())::bigint
53 | $$;
54 | """
55 | )
56 |
57 |
58 | class TestVirtualHypertableIntegerNowFuncWithSQL(BaseTestVirtualHypertableIntegerNowFunc):
59 | @pytest.fixture(scope="class")
60 | def extra_model_config(self) -> dict[str, Any]:
61 | return {"integer_now_func_sql": "select extract(epoch from now())::bigint"}
62 |
--------------------------------------------------------------------------------
/tests/functional/adapter/virtual_hypertable/test_virtual_hypertable_index_updates.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import run_dbt
7 |
8 |
9 | class TestVirtualHypertableIndexUpdates:
10 | @pytest.fixture(scope="class")
11 | def models(self) -> dict[str, Any]:
12 | return {
13 | "vht.sql": """
14 | {% if var("add_index", false) %}
15 | {{ config(indexes=[{'columns': ['col_1']}]) }}
16 | {% endif %}
17 | --
18 | """
19 | }
20 |
21 | @pytest.fixture(scope="class")
22 | def project_config_update(self) -> dict[str, Any]:
23 | return {
24 | "name": "virtual_hypertable_tests",
25 | "models": {"virtual_hypertable_tests": {"vht": {"+materialized": "virtual_hypertable"}}},
26 | }
27 |
28 | def find_indexes(self, project: TestProjInfo, unique_schema: str) -> list[str]:
29 | indexes = project.run_sql(
30 | f"""
31 | select *
32 | from pg_indexes
33 | where tablename = 'vht'
34 | and schemaname = '{unique_schema}'""",
35 | fetch="all",
36 | )
37 |         index_names = [row[2] for row in indexes]  # indexname is the third column of pg_indexes
38 | return index_names
39 |
40 | def test_virtual_hypertable_index_updates(self, project: TestProjInfo, unique_schema: str) -> None:
41 | project.run_sql(f"""
42 | create table {unique_schema}.vht (time_column timestamp, col_1 int);
43 | select create_hypertable('{unique_schema}.vht', by_range('time_column'));""")
44 | results = run_dbt(["run"])
45 | assert len(results) == 1
46 | assert len(self.find_indexes(project, unique_schema)) == 1
47 |
48 | run_enable_results = run_dbt(["run", "--vars", "add_index: true"])
49 | assert len(run_enable_results) == 1
50 | assert len(self.find_indexes(project, unique_schema)) == 2
51 |
52 | run_disable_results = run_dbt(["run"])
53 | assert len(run_disable_results) == 1
54 | assert len(self.find_indexes(project, unique_schema)) == 1
55 |
56 | run_enable_results = run_dbt(["run", "--vars", "add_index: true"])
57 | assert len(run_enable_results) == 1
58 | assert len(self.find_indexes(project, unique_schema)) == 2
59 |
60 | run_disable_results = run_dbt(["run"])
61 | assert len(run_disable_results) == 1
62 | assert len(self.find_indexes(project, unique_schema)) == 1
63 |
--------------------------------------------------------------------------------
/tests/functional/adapter/continuous_aggregate/test_continuous_aggregate_refresh_policy.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import (
7 | check_result_nodes_by_name,
8 | run_dbt,
9 | )
10 |
11 |
12 | class TestContinuousAggregateRefreshPolicy:
13 | @pytest.fixture(scope="class")
14 | def project_config_update(self) -> dict[str, Any]:
15 | return {
16 | "name": "continuous_aggregate_tests",
17 | "models": {
18 | "continuous_aggregate_tests": {
19 | "base": {"+materialized": "hypertable", "+main_dimension": "time_column"},
20 | "test_model": {
21 | "+materialized": "continuous_aggregate",
22 | "+refresh_policy": {
23 | "start_offset": "interval '1 month'",
24 | "end_offset": "interval '1 day'",
25 | "schedule_interval": "interval '3 day'",
26 | },
27 | },
28 | }
29 | },
30 | }
31 |
32 | @pytest.fixture(scope="class")
33 | def models(self) -> dict[str, Any]:
34 | return {
35 | "base.sql": "select current_timestamp as time_column",
36 | "test_model.sql": """
37 | select
38 | count(*),
39 | time_bucket(interval '1 day', time_column) as bucket
40 | from {{ ref('base') }}
41 | group by 2
42 | """,
43 | }
44 |
45 | def test_continuous_aggregate(self, project: TestProjInfo, unique_schema: str) -> None:
46 | results = run_dbt(["run"])
47 | assert len(results) == 2 # noqa
48 | check_result_nodes_by_name(results, ["base", "test_model"])
49 |
50 | continuous_aggregate_results = project.run_sql(
51 | f"""
52 | select *
53 | from timescaledb_information.continuous_aggregates
54 | where view_schema = '{unique_schema}'
55 | and view_name = 'test_model'""",
56 | fetch="all",
57 | )
58 | assert len(continuous_aggregate_results) == 1
59 |
60 | job_results = project.run_sql(
61 | """
62 | select *
63 | from timescaledb_information.jobs
64 | where application_name like 'Refresh Continuous Aggregate Policy%'
65 | and schedule_interval = interval '3 day'
66 | """,
67 | fetch="all",
68 | )
69 | assert len(job_results) == 1
70 |
--------------------------------------------------------------------------------
/docs/usage/virtual-hypertables.md:
--------------------------------------------------------------------------------
1 | # Virtual hypertables
2 |
3 | [Hypertables](https://docs.timescale.com/use-timescale/latest/hypertables/about-hypertables/) are usually used to ingest time-series data. They are a high-performance version of regular Postgres tables focused on time-based bucketing, chunking, and partitioning.
4 |
5 | Hypertables by themselves don't make a lot of sense in dbt, as you'd typically create them outside of dbt and then ingest data into them. With virtual hypertables, you can leverage pre-existing hypertables and use dbt to manage their configuration.
6 |
7 | !!! info
8 | Consult the [Timescale docs](https://docs.timescale.com/use-timescale/latest/hypertables/about-hypertables/) for more information regarding hypertables.
9 |
10 | !!! warning "Existing configurations"
11 |     As soon as you start to manage a hypertable as a virtual hypertable with dbt-timescaledb, dbt will replace its existing configuration on every run. This includes the retention policy, compression, and other settings. If you already have configurations in place, make sure to mirror them in your dbt configuration.
12 |
13 | ## Usage
14 |
15 | The hypertable must already exist in your database; if it doesn't, dbt will throw an error. Optionally, you can use dbt's built-in `schema` parameter to reference a hypertable in a different schema.
16 |
17 | The SQL in the dbt model is ignored entirely. However, dbt skips empty models, so put a `--` comment in the model body to make it non-empty.
18 |
19 | === "SQL"
20 |
21 | ```sql+jinja hl_lines="3" title="models/existing_hypertable.sql"
22 | {{
23 | config(
24 | materialized='virtual_hypertable'
25 | )
26 | }}
27 | --
28 | ```
29 |
30 | === "YAML"
31 |
32 | ```yaml hl_lines="4" title="dbt_project.yml"
33 | models:
34 | your_project_name:
35 | folder_containing_the_hypertables:
36 | +materialized: virtual_hypertable
37 | # ...
38 | ```
39 |
40 | ## Configuration options
41 |
42 | Dimensions are not supported for virtual hypertables, since they can only be set when the hypertable is created.
43 |
44 | You can use virtual hypertables to manage [compression](compression.md) and indexes, set a [reorder policy](reorder-policies.md), define [retention policies](retention-policies.md), or configure any of the options below. A combined example follows at the end of this page.
45 |
46 | --8<-- "docs_build/integer_now_func.md"
47 |
48 | --8<-- "docs_build/chunk_time_interval.md"
49 |
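50 | ## Example
51 |
52 | A minimal sketch combining a few of the options above; the intervals are illustrative, not defaults:
53 |
54 | ```sql+jinja title="models/existing_hypertable.sql"
55 | {{
56 |     config(
57 |         materialized='virtual_hypertable',
58 |         compression={"after": "interval '1 day'"},
59 |         retention_policy={"drop_after": "interval '1 month'"}
60 |     )
61 | }}
62 | --
63 | ```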
--------------------------------------------------------------------------------
/docs_build/dimensions.md:
--------------------------------------------------------------------------------
1 | ### Dimensions
2 |
3 | Hypertables have one or more dimensions, defined upon creation of the hypertable. The main dimension of a hypertable is provided using the `main_dimension` configuration option. Additional dimensions can be added to the hypertable using the `dimensions` configuration option.
4 |
5 | In this adapter, dimensions can be provided as a dictionary with the following options:
6 |
7 | * `column_name`
8 | * `type`: `by_hash` or `by_range` (default is `by_range`)
9 | * `partition_interval` (only for `by_range`)
10 | * `number_partitions` (only for `by_hash`)
11 | * `partitioning_func`
12 |
13 | Since most dimensions will probably be `by_range` dimensions with a column name, you can also provide just the name of the column as a shorthand instead of a dictionary; a short example follows at the end of this section.
14 |
15 | !!! warning "Empty hypertable required"
16 | You can only add dimensions to an empty hypertable.
17 |
18 | !!! info
19 | Consult the [Timescale docs](https://docs.timescale.com/api/latest/hypertable/add_dimension/) for more information regarding adding dimensions or the [documentation on dimension builders](https://docs.timescale.com/api/latest/hypertable/dimension_info/).
20 |
21 | === "SQL"
22 |
23 | ```sql+jinja hl_lines="3-8" title="models/my_hypertable.sql"
24 | {{ config(
25 | materialized = 'hypertable',
26 | main_dimension = 'time_column',
27 | dimensions=[
28 | {"column_name": "id", "type": "by_hash", "number_partitions": 5},
29 | {"column_name": "col_1", "type": "by_range", "partition_interval": "interval '1 day'"},
30 | {"column_name": "another_column", "type": "by_range"}
31 | ]
32 | }}
33 |
34 | select
35 | current_timestamp as time_column,
36 | 1 as id,
37 | 2 as col_1,
38 | 3 as another_column
39 | ```
40 |
41 | === "YAML"
42 |
43 | ```yaml hl_lines="5-15" title="dbt_project.yml"
44 | models:
45 | your_project_name:
46 | model_name:
47 | +materialized: hypertable
48 | +main_dimension:
49 | column_name: time_column
50 | type: by_range
51 | # the above would be equivalent to +main_dimension: time_column
52 | +dimensions:
53 | - column_name: id
54 | type: by_hash
55 | number_partitions: 5
56 | - column_name: another_time_column
57 | type: by_range
58 | partition_interval: interval '1 day'
59 | # ...
60 | ```
61 |
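62 | As a sketch of the shorthand mentioned above (column names are illustrative; `empty_hypertable` is set because dimensions can only be added to an empty hypertable):
63 |
64 | ```sql+jinja title="models/my_hypertable.sql"
65 | {{ config(
66 |     materialized = 'hypertable',
67 |     main_dimension = 'time_column',
68 |     empty_hypertable = true,
69 |     dimensions = ['another_column']
70 | ) }}
71 |
72 | select current_timestamp as time_column, current_timestamp as another_column
73 | ```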
--------------------------------------------------------------------------------
/tests/functional/adapter/virtual_hypertable/test_virtual_hypertable_retention_policy.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import run_dbt
7 |
8 |
9 | class TestVirtualHypertableRetentionPolicy:
10 | @pytest.fixture(scope="class")
11 | def models(self) -> dict[str, Any]:
12 | return {
13 | "vht.sql": """
14 | {% if var("create_retention_policy", false) %}
15 | {{ config(retention_policy = {"drop_after": "interval '1 day'"}) }}
16 | {% endif %}
17 | --
18 | """
19 | }
20 |
21 | @pytest.fixture(scope="class")
22 | def project_config_update(self) -> dict[str, Any]:
23 | return {
24 | "name": "virtual_hypertable_tests",
25 | "models": {"virtual_hypertable_tests": {"vht": {"+materialized": "virtual_hypertable"}}},
26 | }
27 |
28 | def find_retention_policy_tables(self, project: TestProjInfo, unique_schema: str) -> list[str]:
29 | timescale_jobs = project.run_sql(
30 | f"""
31 | select *
32 | from timescaledb_information.jobs
33 | where proc_name = 'policy_retention'
34 | and hypertable_schema = '{unique_schema}'""",
35 | fetch="all",
36 | )
37 | table_names = [job[15] for job in timescale_jobs]
38 | return table_names
39 |
40 | def test_virtual_hypertable_retention_policy(self, project: TestProjInfo, unique_schema: str) -> None:
41 | project.run_sql(f"""
42 | create table {unique_schema}.vht (time_column timestamp, col_1 int);
43 | select create_hypertable('{unique_schema}.vht', by_range('time_column'));""")
44 | results = run_dbt(["run"])
45 | assert len(results) == 1
46 | assert all(result.node.config.materialized == "virtual_hypertable" for result in results)
47 |
48 | assert self.find_retention_policy_tables(project, unique_schema) == []
49 |
50 | run_enable_results = run_dbt(["run", "--vars", "create_retention_policy: true"])
51 | assert len(run_enable_results) == 1
52 | assert all(result.node.config.materialized == "virtual_hypertable" for result in run_enable_results)
53 |
54 | assert self.find_retention_policy_tables(project, unique_schema) == ["vht"]
55 |
56 | run_disable_results = run_dbt(["run"])
57 | assert len(run_disable_results) == 1
58 | assert all(result.node.config.materialized == "virtual_hypertable" for result in run_disable_results)
59 |
60 | assert self.find_retention_policy_tables(project, unique_schema) == []
61 |
--------------------------------------------------------------------------------
/dbt/include/timescaledb/macros/materializations/models/virtual_hypertable.sql:
--------------------------------------------------------------------------------
1 | {% materialization virtual_hypertable, adapter="timescaledb" %}
2 |
3 | {%- set target_relation = this.incorporate(type=this.Table) -%}
4 | {%- set existing_relation = load_cached_relation(target_relation) -%}
5 | {%- set change_collection = get_virtual_hypertable_change_collection(existing_relation, config) -%}
6 | {%- set grant_config = config.get('grants') -%}
7 | {{ run_hooks(pre_hooks, inside_transaction=False) }}
8 |
9 | -- `BEGIN` happens here:
10 | {{ run_hooks(pre_hooks, inside_transaction=True) }}
11 |
12 | -- build model
13 | {% call statement('main') -%}
14 |     select 1 as dummy; {# the 'main' statement must run something, even though no table is rebuilt #}
15 |
16 | {{ set_compression(target_relation, config.get("compression")) }}
17 | {{ clear_compression_policy(target_relation) }}
18 | {%- if config.get('compression') %}
19 | {{ add_compression_policy(target_relation, config.get("compression")) }}
20 | {% endif -%}
21 |
22 | {%- if config.get("integer_now_func") %}
23 | {{ set_integer_now_func(target_relation, config.get("integer_now_func"), config.get("integer_now_func_sql")) }}
24 | {% endif -%}
25 |
26 | {%- if config.get("chunk_time_interval") %}
27 | {{ set_chunk_time_interval(target_relation, config.get("chunk_time_interval")) }}
28 | {% endif -%}
29 |
30 | {%- if change_collection %}
31 | {{ timescaledb__update_indexes_on_virtual_hypertable(target_relation, change_collection.indexes) }}
32 | {%- endif %}
33 |
34 | {{ clear_reorder_policy(target_relation) }}
35 | {%- if config.get("reorder_policy") %}
36 | {{ add_reorder_policy(target_relation, config.get("reorder_policy")) }}
37 | {% endif -%}
38 |
39 | {{ clear_retention_policy(target_relation) }}
40 | {%- if config.get("retention_policy") %}
41 | {{ add_retention_policy(target_relation, config.get("retention_policy")) }}
42 | {% endif -%}
43 |
44 | {%- endcall %}
45 |
46 | {{ run_hooks(post_hooks, inside_transaction=True) }}
47 |
48 | {% set should_revoke = should_revoke(target_relation, full_refresh_mode=True) %}
49 | {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}
50 |
51 | {% do persist_docs(target_relation, model) %}
52 |
53 | -- `COMMIT` happens here
54 | {{ adapter.commit() }}
55 |
56 | {{ run_hooks(post_hooks, inside_transaction=False) }}
57 |
58 | {{ return({'relations': [target_relation]}) }}
59 |
60 | {% endmaterialization %}
61 |
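62 | {# Documentation aid: this materialization never creates or rebuilds the table itself; it only
63 |    (re)applies configuration to the pre-existing hypertable: compression and its policy,
64 |    integer_now_func, chunk_time_interval, index changes, the reorder policy, and the retention policy. #}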
--------------------------------------------------------------------------------
/tests/functional/adapter/virtual_hypertable/test_virtual_hypertable_reorder_policy.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import run_dbt
7 |
8 |
9 | class TestVirtualHypertableReorderPolicy:
10 | @pytest.fixture(scope="class")
11 | def models(self) -> dict[str, Any]:
12 | return {
13 | "vht.sql": """
14 | {% if var("create_reorder_policy", false) %}
15 | {{
16 | config(
17 | reorder_policy = {
18 | "create_index": true,
19 | "index": { "columns": ["col_1"] }
20 | }
21 | )
22 | }}
23 | {% endif %}
24 | --
25 | """
26 | }
27 |
28 | @pytest.fixture(scope="class")
29 | def project_config_update(self) -> dict[str, Any]:
30 | return {
31 | "name": "virtual_hypertable_tests",
32 | "models": {"virtual_hypertable_tests": {"vht": {"+materialized": "virtual_hypertable"}}},
33 | }
34 |
35 | def find_reorder_policy_tables(self, project: TestProjInfo, unique_schema: str) -> list[str]:
36 | timescale_jobs = project.run_sql(
37 | f"""
38 | select *
39 | from timescaledb_information.jobs
40 | where application_name like 'Reorder Policy%'
41 | and hypertable_schema = '{unique_schema}'""",
42 | fetch="all",
43 | )
44 | table_names = [job[15] for job in timescale_jobs]
45 | return table_names
46 |
47 | def test_virtual_hypertable_reorder_policy(self, project: TestProjInfo, unique_schema: str) -> None:
48 | project.run_sql(f"""
49 | create table {unique_schema}.vht (time_column timestamp, col_1 int);
50 | select create_hypertable('{unique_schema}.vht', by_range('time_column'));""")
51 | results = run_dbt(["run"])
52 | assert len(results) == 1
53 | assert all(result.node.config.materialized == "virtual_hypertable" for result in results)
54 |
55 | assert self.find_reorder_policy_tables(project, unique_schema) == []
56 |
57 | run_enable_results = run_dbt(["run", "--vars", "create_reorder_policy: true"])
58 | assert len(run_enable_results) == 1
59 | assert all(result.node.config.materialized == "virtual_hypertable" for result in run_enable_results)
60 |
61 | assert self.find_reorder_policy_tables(project, unique_schema) == ["vht"]
62 |
63 | run_disable_results = run_dbt(["run"])
64 | assert len(run_disable_results) == 1
65 | assert all(result.node.config.materialized == "virtual_hypertable" for result in run_disable_results)
66 |
67 | assert self.find_reorder_policy_tables(project, unique_schema) == []
68 |
--------------------------------------------------------------------------------
/tests/functional/adapter/continuous_aggregate/test_continuous_aggregate.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import (
7 | check_result_nodes_by_name,
8 | run_dbt,
9 | )
10 |
11 |
12 | class TestContinuousAggregate:
13 | @pytest.fixture(
14 | scope="class",
15 | params=[
16 | pytest.param({"+refresh_now": False}, id="refresh_now_false"),
17 | pytest.param({"+refresh_now": True}, id="refresh_now_true"),
18 | pytest.param({"materialized_only": False}, id="materialized_only_false"),
19 | pytest.param({"materialized_only": True}, id="materialized_only_true"),
20 | ],
21 | )
22 | def project_config_update(self, request) -> dict[str, Any]: # noqa: ANN001
23 | return {
24 | "name": "continuous_aggregate_tests",
25 | "models": {
26 | "continuous_aggregate_tests": {
27 | "vht": {"+materialized": "virtual_hypertable"},
28 | "test_model": {"+materialized": "continuous_aggregate"} | request.param,
29 | }
30 | },
31 | }
32 |
33 | @pytest.fixture(scope="class")
34 | def models(self) -> dict[str, Any]:
35 | return {
36 | "vht.sql": "--",
37 | "test_model.sql": """
38 | select
39 | count(*),
40 | time_bucket(interval '1 day', time_column) as bucket
41 | from {{ ref('vht') }}
42 | group by 2
43 | """,
44 | }
45 |
46 | def test_continuous_aggregate(self, project: TestProjInfo, unique_schema: str) -> None:
47 | project.run_sql(f"""
48 | create table if not exists {unique_schema}.vht (time_column timestamp);
49 | select create_hypertable('{unique_schema}.vht', by_range('time_column'), if_not_exists => true);""")
50 |
51 | results = run_dbt(["run"])
52 | assert len(results) == 2 # noqa
53 | check_result_nodes_by_name(results, ["vht", "test_model"])
54 | nodes = [r.node for r in results]
55 | test_model = next(n for n in nodes if n.name == "test_model")
56 | assert test_model.node_info["materialized"] == "continuous_aggregate"
57 |
58 | continuous_aggregate_results = project.run_sql(
59 | f"""
60 | select *
61 | from timescaledb_information.continuous_aggregates
62 | where view_schema = '{unique_schema}'
63 | and view_name = 'test_model'""",
64 | fetch="all",
65 | )
66 | assert len(continuous_aggregate_results) == 1
67 |
68 | def test_continuous_aggregate_multiple_runs(self, project: TestProjInfo, unique_schema: str) -> None:
69 | for _ in range(5):
70 | self.test_continuous_aggregate(project, unique_schema)
71 |
--------------------------------------------------------------------------------
/tests/functional/adapter/continuous_aggregate/test_continuous_aggregate_retention_policy.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import run_dbt
7 |
8 |
9 | class TestContinuousAggregateRetentionPolicy:
10 | @pytest.fixture(scope="class")
11 | def models(self) -> dict[str, Any]:
12 | return {
13 | "vht.sql": "--",
14 | "cagg.sql": """
15 | {% if var("create_retention_policy", false) %}
16 | {{ config(retention_policy = {"drop_after": "interval '1 day'"}) }}
17 | {% endif %}
18 | select
19 | count(*) as col_1,
20 | time_bucket(interval '1 day', time_column) as bucket
21 | from {{ ref('vht') }}
22 | group by 2""",
23 | }
24 |
25 | @pytest.fixture(scope="class")
26 | def project_config_update(self) -> dict[str, Any]:
27 | return {
28 | "name": "retention_policy_tests",
29 | "models": {
30 | "retention_policy_tests": {
31 | "cagg": {"+materialized": "continuous_aggregate"},
32 | "vht": {"+materialized": "virtual_hypertable"},
33 | }
34 | },
35 | }
36 |
37 | def find_retention_policy_continuous_aggregates(self, project: TestProjInfo, unique_schema: str) -> int:
38 | timescale_jobs = project.run_sql(
39 | f"""
40 | select *
41 | from timescaledb_information.jobs j
42 | join timescaledb_information.continuous_aggregates c
43 | on j.hypertable_schema = c.materialization_hypertable_schema
44 | and j.hypertable_name = c.materialization_hypertable_name
45 | where j.proc_name = 'policy_retention'
46 | and c.view_schema = '{unique_schema}'
47 | and c.view_name = 'cagg'""",
48 | fetch="all",
49 | )
50 | return len(timescale_jobs)
51 |
52 | def test_continuous_aggregate_retention_policy(self, project: TestProjInfo, unique_schema: str) -> None:
53 | project.run_sql(f"""
54 | create table {unique_schema}.vht (time_column timestamp);
55 | select create_hypertable('{unique_schema}.vht', by_range('time_column'));""")
56 |
57 | results = run_dbt(["run"])
58 | assert len(results) == 2
59 |
60 | assert self.find_retention_policy_continuous_aggregates(project, unique_schema) == 0
61 |
62 | run_enable_results = run_dbt(["run", "--vars", "create_retention_policy: true"])
63 | assert len(run_enable_results) == 2
64 |
65 | assert self.find_retention_policy_continuous_aggregates(project, unique_schema) == 1
66 |
67 | run_disable_results = run_dbt(["run"])
68 | assert len(run_disable_results) == 2
69 |
70 | assert self.find_retention_policy_continuous_aggregates(project, unique_schema) == 0
71 |
--------------------------------------------------------------------------------
/docs/usage/compression.md:
--------------------------------------------------------------------------------
1 | # Compression
2 |
3 | Compression is one of the key features of TimescaleDB and can speed up queries while drastically reducing storage requirements.
4 |
5 | !!! info
6 | Consult the [Timescale docs](https://docs.timescale.com/use-timescale/latest/compression/about-compression/) to learn more about compression.
7 |
8 | ## Usage
9 |
10 | Compression is a configuration option for **(virtual) hypertables and continuous aggregates**. The only required argument is `after`, referring to the time interval after which compression should be applied. This can be an interval or an integer depending on the data type of your time column.
11 |
12 | === "SQL"
13 |
14 | ```sql+jinja title="models/my_hypertable.sql"
15 | {{
16 | config(
17 | materialized='hypertable',
18 | main_dimension={"column_name": "time_column"},
19 | compression={
20 | "after": "interval '1 day'",
21 | }
22 | )
23 | }}
24 | select current_timestamp as time_column
25 | ```
26 |
27 | === "YAML"
28 |
29 | ```yaml title="dbt_project.yml"
30 | models:
31 | your_project_name:
32 | folder_containing_the_hypertables:
33 | +materialized: hypertable
34 | +compression: false # (1)!
35 | model_one:
36 | +main_dimension:
37 | column_name: time_column
38 | +compression:
39 | after: interval '1 day'
40 | model_two:
41 | +main_dimension:
42 | column_name: time_column_in_model_two
43 | +compression:
44 | after: interval '1 hour'
45 | chunk_time_interval: 1 day
46 | orderby: 'another_column'
47 | segmentby: ['column_one', 'column_two']
48 | # ...
49 | ```
50 |
51 | 1. This is the default value and the same as leaving out the `compression` key entirely.
52 |
53 | ## Configuration options
54 |
55 | The `after` option from the compression policy settings is the only required option. A combined example follows at the end of this page.
56 |
57 | ### Compression settings
58 |
59 | * `orderby` (string)
60 | * `segmentby` (list of strings)
61 | * `chunk_time_interval` (the actual interval, not prefixed with "interval")
62 |
63 | !!! info
64 | Consult the [Timescale docs](https://docs.timescale.com/api/latest/compression/alter_table_compression/) for more information regarding these settings.
65 |
66 | ### Compression policy settings
67 |
68 | * `after` (interval or integer depending on your time column)
69 | * `schedule_interval` (interval)
70 | * `initial_start`
71 | * `timezone`
72 |
73 | !!! info
74 | Consult the [Timescale docs](https://docs.timescale.com/api/latest/compression/add_compression_policy/#add_compression_policy) for more information regarding these settings.
75 |
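76 | ## Example
77 |
78 | A sketch combining compression settings with compression policy settings; all column names and intervals are illustrative:
79 |
80 | ```sql+jinja title="models/my_hypertable.sql"
81 | {{
82 |     config(
83 |         materialized='hypertable',
84 |         main_dimension='time_column',
85 |         compression={
86 |             "after": "interval '7 day'",
87 |             "schedule_interval": "interval '1 day'",
88 |             "orderby": "time_column",
89 |             "segmentby": ["device_id"]
90 |         }
91 |     )
92 | }}
93 | select current_timestamp as time_column, 1 as device_id
94 | ```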
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: dbt-timescaledb
2 | site_description: Documentation for `dbt-timescaledb`
3 | site_url: https://dbt-timescaledb.debruyn.dev
4 | site_author: Sam Debruyn
5 | repo_url: https://github.com/sdebruyn/dbt-timescaledb
6 | edit_uri: edit/main/docs/
7 | repo_name: sdebruyn/dbt-timescaledb
8 | strict: true
9 | copyright: |
10 | dbt and dbt Core are trademarks of dbt Labs, Inc.
11 | Timescale and TimescaleDB are trademarks of Timescale, Inc.
12 |
13 | extra:
14 | social:
15 | - icon: fontawesome/brands/github
16 | link: https://github.com/sdebruyn/dbt-timescaledb
17 | - icon: fontawesome/brands/linkedin
18 | link: https://www.linkedin.com/in/samueldebruyn/
19 | - icon: fontawesome/brands/x-twitter
20 | link: https://x.com/s_debruyn
21 | - icon: fontawesome/solid/globe
22 | link: https://debruyn.dev
23 | analytics:
24 | provider: custom
25 |
26 | markdown_extensions:
27 | - admonition
28 | - pymdownx.highlight
29 | - pymdownx.superfences
30 | - pymdownx.inlinehilite
31 | - pymdownx.snippets:
32 | check_paths: true
33 | - pymdownx.emoji:
34 | emoji_index: !!python/name:material.extensions.emoji.twemoji
35 | emoji_generator: !!python/name:material.extensions.emoji.to_svg
36 | - pymdownx.tabbed:
37 | alternate_style: true
38 |
39 | plugins:
40 | - search
41 | # - social
42 | - git-revision-date-localized
43 | - autorefs
44 | - open-in-new-tab
45 |
46 | theme:
47 | name: material
48 | icon:
49 | logo: material/database-eye
50 | repo: fontawesome/brands/github
51 | custom_dir: overrides
52 | palette:
53 | - scheme: default
54 | toggle:
55 | icon: material/brightness-7
56 | name: Switch to dark mode
57 | primary: deep orange
58 | accent: yellow
59 | media: "(prefers-color-scheme: light)"
60 | - scheme: slate
61 | toggle:
62 | icon: material/brightness-4
63 | name: Switch to light mode
64 | primary: yellow
65 | accent: deep orange
66 | media: "(prefers-color-scheme: dark)"
67 | features:
68 | - search.suggest
69 | - content.action.edit
70 | - navigation.instant
71 | - navigation.instant.progress
72 | - navigation.tracking
73 | - navigation.top
74 | - navigation.tabs
75 | - navigation.tabs.sticky
76 | - content.code.copy
77 | - content.code.annotate
78 |
79 | extra_css:
80 | - stylesheets/extra.css
81 |
82 | nav:
83 | - Home:
84 | - Overview: index.md
85 | - Installation: installation.md
86 | - License: license.md
87 | - Usage:
88 | - Overview: usage/index.md
89 | - Hypertables: usage/hypertables.md
90 | - Virtual hypertables: usage/virtual-hypertables.md
91 | - Continuous aggregates: usage/continuous-aggregates.md
92 | - Compression: usage/compression.md
93 | - Reorder policies: usage/reorder-policies.md
94 | - Retention policies: usage/retention-policies.md
95 | - Indexes: usage/indexes.md
96 | - Macros: usage/macros.md
97 |
--------------------------------------------------------------------------------
/docs/usage/hypertables.md:
--------------------------------------------------------------------------------
1 | # Hypertables
2 |
3 | [Hypertables](https://docs.timescale.com/use-timescale/latest/hypertables/about-hypertables/) are usually used to ingest time-series data. They are a high-performance version of regular Postgres tables focused on time-based bucketing, chunking, and partitioning.
4 |
5 | Hypertables make less sense as dbt models for storing transformed data, though you can still use them as such. A more useful approach right now is the `empty_hypertable` option, which creates empty hypertables.
6 |
7 | !!! tip "Look into virtual hypertables"
8 |     If you're looking to use dbt to configure pre-existing hypertables, check out the [virtual hypertables](../usage/virtual-hypertables.md) guide.
9 |
10 | !!! danger "Only run hypertable models once"
11 |
12 | dbt will always recreate your entire model. This means that all existing data in your hypertables will be lost when you run them again. If you're using hypertables for ingesting time-series data, you probably don't want this.
13 |
14 | !!! info
15 | Consult the [Timescale docs](https://docs.timescale.com/use-timescale/latest/hypertables/about-hypertables/) for more information regarding hypertables.
16 |
17 | ## Usage
18 |
19 | To materialize a model as a hypertable, set its `materialized` config to `hypertable`. Every hypertable also requires `main_dimension`: the name of its time column.
20 |
21 | === "SQL"
22 |
23 | ```sql+jinja hl_lines="3 4" title="models/my_hypertable.sql"
24 | {{
25 | config(
26 | materialized='hypertable',
27 | main_dimension='time_column'
28 | )
29 | }}
30 | select current_timestamp as time_column
31 | ```
32 |
33 | === "YAML"
34 |
35 | ```yaml title="dbt_project.yml"
36 | models:
37 | your_project_name:
38 | folder_containing_the_hypertables:
39 | +materialized: hypertable
40 | model_one:
41 | +main_dimension: time_column # (1)!
42 | model_two:
43 | +main_dimension: time_column_in_model_two
44 | # ...
45 | ```
46 |
47 | 1. While you can set the `hypertable` materialization for multiple models, you'll still have to configure the `main_dimension` for each model individually.
48 |
49 | ## Configuration options
50 |
51 | ### dbt-specific options
52 |
53 | The following options are not taken from the TimescaleDB APIs, but are specific to this adapter.
54 |
55 | * `empty_hypertable`: If set to `true`, the hypertable will be truncated right after creation (as a regular table) and right before converting it into a hypertable. Defaults to `false`. See the example at the end of this page.
56 |
57 | ### TimescaleDB hypertable options
58 |
59 | The TimescaleDB option `create_default_indexes` can be set to `true` or `false`. It defaults to `true`.
60 |
61 | --8<-- "docs_build/dimensions.md"
62 |
63 | --8<-- "docs_build/integer_now_func.md"
64 |
65 | --8<-- "docs_build/chunk_time_interval.md"
66 |
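67 | ## Example: empty hypertable
68 |
69 | A sketch of the `empty_hypertable` option described above; the model's query only defines the schema, since the table is truncated before it is converted into a hypertable:
70 |
71 | ```sql+jinja title="models/my_empty_hypertable.sql"
72 | {{
73 |     config(
74 |         materialized='hypertable',
75 |         main_dimension='time_column',
76 |         empty_hypertable=true
77 |     )
78 | }}
79 | select current_timestamp as time_column
80 | ```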
--------------------------------------------------------------------------------
/dbt/include/timescaledb/macros/adapters.sql:
--------------------------------------------------------------------------------
1 | {% macro timescaledb__get_create_index_sql(relation, index_dict) -%}
2 | {%- set index_config = adapter.parse_index(index_dict) -%}
3 | {%- set comma_separated_columns = ", ".join(index_config.columns) -%}
4 | {%- set index_name = index_config.render(relation) -%}
5 |
6 | create {% if index_config.unique -%}
7 | unique
8 | {%- endif %} index if not exists
9 | "{{ index_name }}"
10 | on {{ relation }} {% if index_config.type -%}
11 | using {{ index_config.type }}
12 | {%- endif %}
13 | ({{ comma_separated_columns }})
14 | {%- if index_config.transaction_per_type %}
15 | with (timescaledb.transaction_per_chunk)
16 | {% endif -%};
17 | {%- endmacro %}
18 |
19 | {# https://github.com/dbt-labs/dbt-core/issues/9124 #}
20 | {% macro timescaledb__rename_relation(from_relation, to_relation) -%}
21 | {% set target_name = adapter.quote_as_configured(to_relation.identifier, 'identifier') %}
22 | {% call statement('rename_relation') -%}
23 | {{ get_rename_sql(from_relation, target_name) }};
24 | {%- endcall %}
25 | {% endmacro %}
26 |
27 | {# Continuous aggregates are seen as views instead of materialized views, fixing this below #}
28 | {% macro timescaledb__list_relations_without_caching(schema_relation) %}
29 | {% call statement('list_relations_without_caching', fetch_result=True) -%}
30 | with
31 | continuous_aggregates as (
32 | select
33 | view_name as name,
34 | view_schema as schema
35 | from timescaledb_information.continuous_aggregates
36 | where view_schema ilike '{{ schema_relation.schema }}'
37 | ),
38 | views_without_continuous_aggregates as (
39 | select
40 | viewname as name,
41 | schemaname as schema
42 | from pg_views
43 | where schemaname ilike '{{ schema_relation.schema }}'
44 | except all
45 | select * from continuous_aggregates
46 | )
47 | select
48 | '{{ schema_relation.database }}' as database,
49 | *,
50 | 'materialized_view' as type
51 | from continuous_aggregates
52 | union all
53 | select
54 | '{{ schema_relation.database }}' as database,
55 | tablename as name,
56 | schemaname as schema,
57 | 'table' as type
58 | from pg_tables
59 | where schemaname ilike '{{ schema_relation.schema }}'
60 | union all
61 | select
62 | '{{ schema_relation.database }}' as database,
63 | name,
64 | schema,
65 | 'view' as type
66 | from views_without_continuous_aggregates
67 | union all
68 | select
69 | '{{ schema_relation.database }}' as database,
70 | matviewname as name,
71 | schemaname as schema,
72 | 'materialized_view' as type
73 | from pg_matviews
74 | where schemaname ilike '{{ schema_relation.schema }}'
75 | {% endcall %}
76 | {{ return(load_result('list_relations_without_caching').table) }}
77 | {% endmacro %}
78 |
--------------------------------------------------------------------------------
/tests/functional/adapter/continuous_aggregate/test_continuous_aggregate_compression.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import check_result_nodes_by_name, run_dbt
7 |
8 |
9 | class TestContinuousAggregateCompression:
10 | @pytest.fixture(scope="class")
11 | def project_config_update(self) -> dict[str, Any]:
12 | return {
13 | "name": "continuous_aggregate_tests",
14 | "models": {
15 | "continuous_aggregate_tests": {
16 | "base": {
17 | "+materialized": "hypertable",
18 | "+main_dimension": "time_column",
19 | },
20 | "test_model": {
21 | "+materialized": "continuous_aggregate",
22 | "+refresh_policy": {
23 | "start_offset": "interval '1 month'",
24 | "end_offset": "interval '1 day'",
25 | "schedule_interval": "interval '1 day'",
26 | },
27 | "+compression": {
28 | "after": "interval '40 day'",
29 | "schedule_interval": "interval '5 day'",
30 | },
31 | },
32 | }
33 | },
34 | }
35 |
36 | @pytest.fixture(scope="class")
37 | def models(self) -> dict[str, Any]:
38 | return {
39 | "base.sql": "select current_timestamp as time_column",
40 | "test_model.sql": """
41 | select
42 | count(*),
43 | time_bucket(interval '1 day', time_column) as bucket
44 | from {{ ref('base') }}
45 | group by 2
46 | """,
47 | }
48 |
49 | def test_continuous_aggregate(self, project: TestProjInfo, unique_schema: str) -> None:
50 | results = run_dbt(["run"])
51 | assert len(results) == 2
52 | check_result_nodes_by_name(results, ["base", "test_model"])
53 |
54 | continuous_aggregate_results = project.run_sql(
55 | f"""
56 | select *
57 | from timescaledb_information.continuous_aggregates
58 | where view_schema = '{unique_schema}'
59 | and view_name = 'test_model'""",
60 | fetch="all",
61 | )
62 | assert len(continuous_aggregate_results) == 1
63 | continuous_aggregate = continuous_aggregate_results[0]
64 |
65 | assert continuous_aggregate[2] == unique_schema
66 | assert continuous_aggregate[3] == "test_model"
67 | assert continuous_aggregate[6] # compression_enabled
68 |
69 | job_results = project.run_sql(
70 | """
71 | select *
72 | from timescaledb_information.jobs
73 | where application_name like 'Compression Policy%'
74 | and schedule_interval = interval '5 day'
75 | """,
76 | fetch="all",
77 | )
78 | assert len(job_results) == 1
79 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # dbt-timescaledb
2 |
12 | [](https://pdm-project.org)
13 | [](https://pypi.org/project/dbt-timescaledb/)
14 | [](https://github.com/sdebruyn/dbt-timescaledb/blob/main/LICENSE)
15 | [](https://github.com/sdebruyn/dbt-timescaledb/actions/workflows/test.yml)
16 | [](https://codecov.io/github/sdebruyn/dbt-timescaledb)
17 |
18 | **[dbt](https://www.getdbt.com/)** enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.
19 |
20 | dbt is the T in ELT. Organize, cleanse, denormalize, filter, rename, and pre-aggregate the raw data in your warehouse so that it's ready for analysis.
21 |
22 | ## TimescaleDB
23 |
24 | **[Timescale](https://www.timescale.com/)** extends PostgreSQL for all of your resource-intensive production workloads, so you can build faster, scale further, and stay under budget.
25 |
26 | ## Supported versions
27 |
28 | | Adapter versions | Supported dbt versions | Supported TimescaleDB versions |
29 | | ----------------- | ---------------------- | ------------------------------ |
30 | | 1.7.0a1 - 1.7.0a7 | 1.7.x | 2.12 - latest |
31 | | 1.8.0a1 - 1.8.0a2 | 1.8.x - latest | 2.12 - latest |
32 | | 1.8.0a3 - latest | 1.8.x - latest | 2.13 - latest |
33 |
34 | The recommended versions of Timescale are TimescaleDB Community Edition or Timescale Cloud. It is not recommended to use this adapter with TimescaleDB Apache 2 Edition. See the [TimescaleDB editions](https://docs.timescale.com/about/latest/timescaledb-editions/) page for more information.
35 |
36 | ## Features & documentation
37 |
38 | [Read the documentation](https://dbt-timescaledb.debruyn.dev/) ([installation](https://dbt-timescaledb.debruyn.dev/installation/) | [usage](https://dbt-timescaledb.debruyn.dev/usage/)) for more information.
39 |
40 | ## Code of Conduct
41 |
42 | Both dbt Labs and Timescale have published a code of conduct. Everyone interacting in this project's codebases, issues, discussions, and related Slack channels is expected to follow the [dbt Code of Conduct](https://docs.getdbt.com/community/resources/code-of-conduct) and the [Timescale Code of Conduct](https://www.timescale.com/code-of-conduct).
43 |
--------------------------------------------------------------------------------
/tests/functional/adapter/hypertable/test_hypertable_dimension.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import check_result_nodes_by_name, run_dbt
7 |
8 |
9 | class TestHypertableDimension:
10 | @pytest.fixture(scope="class")
11 | def project_config_update(self) -> dict[str, Any]:
12 | return {
13 | "name": "hypertable_tests",
14 | "models": {
15 | "hypertable_tests": {
16 | "test_model": {
17 | "+materialized": "hypertable",
18 | "+main_dimension": "time_column",
19 | "+empty_hypertable": True,
20 | "+dimensions": [
21 | {"column_name": "id", "type": "by_hash", "number_partitions": 5},
22 | {"column_name": "col_1", "partition_interval": 10000},
23 | ],
24 | }
25 | }
26 | },
27 | }
28 |
29 | @pytest.fixture(scope="class")
30 | def models(self) -> dict[str, Any]:
31 | return {
32 | "test_model.sql": "select current_timestamp as time_column, 1 as id, 2 as col_1",
33 | }
34 |
35 | def test_hypertable_dimension(self, project: TestProjInfo, unique_schema: str) -> None:
36 | results = run_dbt(["run"])
37 | assert len(results) == 1
38 | check_result_nodes_by_name(results, ["test_model"])
39 |
40 | dimensions_results = project.run_sql(
41 | f"""
42 | select *
43 | from timescaledb_information.dimensions
44 | where hypertable_schema = '{unique_schema}'
45 | and hypertable_name = 'test_model'
46 | """,
47 | fetch="all",
48 | )
49 | assert len(dimensions_results) == 3
50 |
51 | dimension_time_column = [x for x in dimensions_results if x[3] == "time_column"][0]
52 | dimension_id_column = [x for x in dimensions_results if x[3] == "id"][0]
53 | dimension_col_1_column = [x for x in dimensions_results if x[3] == "col_1"][0]
54 |
55 | assert dimension_time_column[5] == "Time"
56 | assert dimension_id_column[5] == "Space"
57 | assert dimension_col_1_column[5] == "Time"
58 |
59 | assert dimension_id_column[9] == 5
60 | assert dimension_col_1_column[7] == 10000
61 |
62 |
63 | class TestHypertableDimensionWithoutTruncateShouldRaiseException:
64 | @pytest.fixture(scope="class")
65 | def models(self) -> dict[str, Any]:
66 | return {
67 | "test_model.sql": "select current_timestamp as time_column, 1 as id",
68 | }
69 |
70 | @pytest.fixture(scope="class")
71 | def project_config_update(self) -> dict[str, Any]:
72 | return {
73 | "name": "hypertable_tests",
74 | "models": {
75 | "hypertable_tests": {
76 | "test_model": {
77 | "+materialized": "hypertable",
78 | "+main_dimension": "time_column",
79 | "+dimensions": [{"column_name": "id"}],
80 | }
81 | }
82 | },
83 | }
84 |
85 | def test_hypertable_dimension_throw_exception(self, project: TestProjInfo) -> None:
86 | results = run_dbt(["run"], expect_pass=False)
87 | assert len(results) == 1
88 | assert str(results[0].status) == "error"
89 |
--------------------------------------------------------------------------------
/dbt/include/timescaledb/macros/materializations/models/continuous_aggregate.sql:
--------------------------------------------------------------------------------
1 | {%- materialization continuous_aggregate, adapter="timescaledb" -%}
2 |
3 | {%- set existing_relation = load_cached_relation(this) -%}
4 | {%- set target_relation = this.incorporate(type=this.MaterializedView) -%}
5 |
6 | {{ run_hooks(pre_hooks, inside_transaction=False) }}
7 |
8 | -- `BEGIN` happens here:
9 | {{ run_hooks(pre_hooks, inside_transaction=True) }}
10 |
11 | {%- set grant_config = config.get('grants') -%}
12 |
13 | {%- set full_refresh_mode = should_full_refresh() -%}
14 |
15 | {%- set should_drop = full_refresh_mode or not existing_relation.is_materialized_view -%}
16 | {%- set create_mode = not existing_relation or should_drop -%}
17 | {%- set alter_mode = not create_mode -%}
18 | {%- set configuration_changes = none -%}
19 |
20 | {%- if alter_mode -%}
21 | {%- set configuration_changes = get_materialized_view_configuration_changes(existing_relation, config) -%}
22 | {%- endif -%}
23 |
24 | {%- if configuration_changes.requires_full_refresh -%}
25 | {%- set alter_mode = false -%}
26 | {%- set should_drop = true -%}
27 | {%- set create_mode = true -%}
28 | {%- endif -%}
29 |
30 | {%- if should_drop -%}
31 | {{- drop_relation_if_exists(existing_relation) -}}
32 | {%- endif -%}
33 |
34 | {% call statement('main') -%}
35 | {%- if create_mode %}
36 | {{ get_create_continuous_aggregate_as_sql(target_relation, sql) }}
37 | {% endif -%}
38 |
39 | {%- if alter_mode and configuration_changes.indexes %}
40 | {{ postgres__update_indexes_on_materialized_view(target_relation, configuration_changes.indexes) }}
41 | {% endif -%}
42 |
43 | {%- if alter_mode %}
44 | {{ clear_refresh_policy(target_relation) }}
45 | {% endif -%}
46 | {%- if config.get('refresh_policy') %}
47 | {{ add_refresh_policy(target_relation, config.get('refresh_policy')) }}
48 | {%- endif -%}
49 |
50 | {%- if alter_mode or config.get('compression') %}
51 | {{ set_compression(target_relation, config.get("compression")) }}
52 | {% endif -%}
53 | {%- if alter_mode %}
54 | {{ clear_compression_policy(target_relation) }}
55 | {% endif -%}
56 | {%- if config.get('compression') %}
57 | {{ add_compression_policy(target_relation, config.get("compression")) }}
58 | {%- endif -%}
59 | {%- endcall %}
60 |
61 | {%- if create_mode %}
62 | {% do create_indexes(target_relation) %}
63 | {% endif -%}
64 |
65 | {%- if alter_mode %}
66 | {%- call statement("clear_retention_policy") %}
67 | {{ clear_retention_policy(target_relation) }}
68 | {% endcall -%}
69 | {% endif -%}
70 | {%- if config.get("retention_policy") %}
71 | {% call statement("retention_policy") %}
72 | {{ add_retention_policy(target_relation, config.get("retention_policy")) }}
73 | {% endcall %}
74 | {% endif -%}
75 |
76 | {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}
77 | {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}
78 |
79 | {% do persist_docs(target_relation, model) %}
80 |
81 | {{ run_hooks(post_hooks, inside_transaction=True) }}
82 |
83 | {{ adapter.commit() }}
84 |
85 | {{ run_hooks(post_hooks, inside_transaction=False) }}
86 |
87 | {#- Load the data into the continuous aggregate -#}
88 | {% if config.get("refresh_now", True) %}
89 | {% do do_refresh_continuous_aggregate(target_relation) %}
90 | {% endif %}
91 |
92 | {{ return({'relations': [target_relation]}) }}
93 |
94 | {%- endmaterialization -%}
95 |
--------------------------------------------------------------------------------
/docs/usage/macros.md:
--------------------------------------------------------------------------------
1 | # Macros
2 |
3 | The macros below are available to use in your dbt project. They are also used internally by the adapter to implement the configuration options.
4 |
5 | !!! tip
6 |     Usually you won't call the macros below directly; instead they're invoked via the configuration options on hypertables and continuous aggregates. They are documented here for completeness, and the end of this page shows how to call one directly.
7 |
8 | ## `set_compression`
9 |
10 | Enable or disable compression on a (virtual) hypertable or continuous aggregate.
11 |
12 | [Configuration options](compression.md#compression-settings)
13 |
14 | ```sql+jinja
15 | {{ set_compression('table_name', {"orderby": "column_name"}) }}
16 | ```
17 |
18 | ## `add_compression_policy`
19 |
20 | Add a compression policy to a (virtual) hypertable or continuous aggregate.
21 |
22 | [Configuration options](compression.md#compression-policy-settings)
23 |
24 | ```sql+jinja
25 | {{ add_compression_policy('table_name', {"after": "interval '60d'"}) }}
26 | ```
27 |
28 | ## `clear_compression_policy`
29 |
30 | Remove any existing compression policy from a (virtual) hypertable or continuous aggregate.
31 |
32 | ```sql+jinja
33 | {{ clear_compression_policy('table_name') }}
34 | ```
35 |
36 | ## `add_reorder_policy`
37 |
38 | Add a reorder policy to a (virtual) hypertable.
39 |
40 | [Configuration options](reorder-policies.md#configuration-options)
41 |
42 | ```sql+jinja
43 | {{ add_reorder_policy('table_name', {"index": {"columns": "column_name"}}) }}
44 | ```
45 |
46 | ## `clear_reorder_policy`
47 |
48 | Remove any existing reorder policy from a (virtual) hypertable.
49 |
50 | ```sql+jinja
51 | {{ clear_reorder_policy('table_name') }}
52 | ```
53 |
54 | ## `add_refresh_policy`
55 |
56 | Add a refresh policy to a continuous aggregate.
57 |
58 | [Configuration options](continuous-aggregates.md#timescaledb-refresh-policy-options)
59 |
60 | ```sql+jinja
61 | {{ add_refresh_policy('continuous_aggregate_name', {
62 | "start_offset": "interval '3 day'",
63 | "end_offset": "interval '2 day'"}) }}
64 | ```
65 |
66 | ## `clear_refresh_policy`
67 |
68 | Remove any existing refresh policy from a continuous aggregate.
69 |
70 | ```sql+jinja
71 | {{ clear_refresh_policy('continuous_aggregate_name') }}
72 | ```
73 |
74 | ## `set_integer_now_func`
75 |
76 | Set the function used to generate the current time for integer time columns in hypertables.
77 |
78 | ```sql+jinja
79 | {{ set_integer_now_func('table_name', 'function_name') }}
80 | ```
81 |
82 | ## `set_chunk_time_interval`
83 |
84 | Set the chunk time interval for a (virtual) hypertable. This macro has an optional argument `dimension_name`. If provided, the chunk time interval is set for the specified dimension only.
85 |
86 | ```sql+jinja
87 | {{ set_chunk_time_interval('table_name', 'interval') }}
88 | ```
89 |
90 | ## `add_dimension`
91 |
92 | Add a dimension to a (virtual) hypertable.
93 |
94 | ```sql+jinja
95 | {{ add_dimension('table_name', dimension_config) }}
96 | ```
97 |
98 | --8<-- "docs_build/dimensions.md:5"
99 |
100 | ## `add_retention_policy`
101 |
102 | Add a retention policy to a (virtual) hypertable or continuous aggregate.
103 |
104 | [Configuration options](retention-policies.md#configuration-options)
105 |
106 | ```sql+jinja
107 | {{ add_retention_policy('table_name', {
108 | "drop_after": "interval '1 month'"
109 | }) }}
110 | ```
111 |
112 | ## `clear_retention_policy`
113 |
114 | Remove any existing retention policy from a (virtual) hypertable or a continuous aggregate.
115 |
116 | ```sql+jinja
117 | {{ clear_retention_policy('table_name') }}
118 | ```
119 |
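120 | ## Calling macros directly
121 |
122 | Each macro renders a SQL statement, so you can also invoke one yourself, for example from a post-hook, which dbt renders and executes after building the model. A sketch (the model and values are illustrative):
123 |
124 | ```sql+jinja title="models/my_hypertable.sql"
125 | {{
126 |     config(
127 |         materialized='hypertable',
128 |         main_dimension='time_column',
129 |         post_hook=["{{ clear_retention_policy(this) }}"]
130 |     )
131 | }}
132 | select current_timestamp as time_column
133 | ```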
--------------------------------------------------------------------------------
/tests/functional/adapter/virtual_hypertable/test_virtual_hypertable.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import (
7 | run_dbt,
8 | )
9 |
10 |
11 | class BaseTestVirtualHypertable:
12 | @pytest.fixture(scope="class")
13 | def extra_model_config(self) -> dict[str, Any]:
14 | return {}
15 |
16 | @pytest.fixture(scope="class")
17 | def project_config_update(self, extra_model_config: dict[str, Any]) -> dict[str, Any]:
18 | return {
19 | "name": "virtual_hypertable_tests",
20 | "models": {
21 | "virtual_hypertable_tests": {
22 | "vht": {"+materialized": "virtual_hypertable"} | extra_model_config
23 | }
24 | },
25 | }
26 |
27 | @pytest.fixture(scope="class")
28 | def models(self) -> dict[str, Any]:
29 |         return {"vht.sql": "--"}  # placeholder body; the hypertable itself is created via run_sql in the test
30 |
31 | def run_assertions(self, project: TestProjInfo, unique_schema: str, hypertable: Any) -> None:
32 | pass
33 |
34 | def test_virtual_hypertable(self, project: TestProjInfo, unique_schema: str) -> None:
35 | project.run_sql(f"""
36 | create table {unique_schema}.vht (time_column timestamp, col_1 int);
37 | select create_hypertable('{unique_schema}.vht', by_range('time_column'));""")
38 | results = run_dbt(["run"])
39 | assert len(results) == 1
40 | assert all(result.node.config.materialized == "virtual_hypertable" for result in results)
41 |
42 | hypertables = project.run_sql(
43 | f"""
44 | select *
45 | from timescaledb_information.hypertables
46 | where hypertable_name = 'vht'
47 | and hypertable_schema = '{unique_schema}'""",
48 | fetch="all",
49 | )
50 | assert len(hypertables) == 1
51 | hypertable = hypertables[0]
52 |
53 | timescale_jobs = project.run_sql(
54 | f"""
55 | select *
56 | from timescaledb_information.jobs
57 | where hypertable_name = 'vht'
58 | and hypertable_schema = '{unique_schema}'
59 | and application_name like 'Compression Policy%'
60 | and schedule_interval = interval '6 day'""",
61 | fetch="all",
62 | )
63 | self.validate_jobs(timescale_jobs)
64 |
65 | self.run_assertions(project, unique_schema, hypertable)
66 |
67 | def validate_jobs(self, jobs: Any) -> None:
68 | assert len(jobs) == 0
69 |
70 |
71 | class TestVirtualHypertable(BaseTestVirtualHypertable):
72 | pass
73 |
74 |
75 | class TestVirtualHypertableCompression(BaseTestVirtualHypertable):
76 | @pytest.fixture(scope="class")
77 | def extra_model_config(self) -> dict[str, Any]:
78 | return {"+compression": {"after": "interval '1 day'", "schedule_interval": "interval '6 day'"}}
79 |
80 | def run_assertions(self, project: TestProjInfo, unique_schema: str, hypertable: Any) -> None:
81 | assert hypertable[5] # compression_enabled
82 |
83 | compression_settings = project.run_sql(
84 | f"""
85 | select *
86 | from timescaledb_information.compression_settings
87 | where hypertable_name = 'vht'
88 | and hypertable_schema = '{unique_schema}'""",
89 | fetch="all",
90 | )
91 |
92 | assert len(compression_settings) == 1
93 | time_column = [x for x in compression_settings if x[2] == "time_column"][0]
94 |
95 |         assert time_column[3] is None  # segmentby_column_index: not a segmentby column
96 |         assert not time_column[5]  # orderby_asc: default compression order is descending
97 |         assert time_column[6]  # orderby_nullsfirst
98 |
99 | def validate_jobs(self, jobs: Any) -> None:
100 | assert len(jobs) == 1
101 | job = jobs[0]
102 |         assert job[9]  # "scheduled" flag on the compression policy job
103 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_retention_policy.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import run_dbt
7 |
8 | RETENTION_CONFIGS: list = [
9 | pytest.param(
10 | {
11 | "drop_after": "interval '1 day'",
12 | },
13 | id="drop_after",
14 | ),
15 | pytest.param(
16 | {"drop_after": "interval '1 day'", "schedule_interval": "interval '3 day'"},
17 | id="drop_after_schedule_interval",
18 | ),
19 | ]
20 |
21 |
22 | class TestRetentionPolicy:
23 | @pytest.fixture(scope="class", params=RETENTION_CONFIGS)
24 | def retention_config(self, request: Any) -> dict[str, Any]:
25 | return request.param
26 |
27 | def base_models(self) -> dict[str, Any]:
28 | return {
29 | "base.sql": "select current_timestamp as time_column",
30 | }
31 |
32 | @pytest.fixture(scope="class")
33 | def models(self) -> dict[str, Any]:
34 | return self.base_models()
35 |
36 | @pytest.fixture(scope="class")
37 | def model_count(self, models: dict[str, Any]) -> int:
38 | return len(models)
39 |
40 | def base_model_config(self, retention_config: dict[str, Any]) -> dict[str, Any]:
41 | return {
42 | "base": {
43 | "+materialized": "hypertable",
44 | "+main_dimension": "time_column",
45 | "+retention_policy": retention_config,
46 | }
47 | }
48 |
49 | @pytest.fixture(scope="class")
50 | def model_configs(self, retention_config: dict[str, Any]) -> dict[str, Any]:
51 | return self.base_model_config(retention_config)
52 |
53 | @pytest.fixture(scope="class")
54 | def project_config_update(self, model_configs: dict[str, Any]) -> dict[str, Any]:
55 | return {
56 | "name": "retention_policy_tests",
57 | "models": {"retention_policy_tests": model_configs},
58 | }
59 |
60 | def test_retention_policy(self, project: TestProjInfo, model_count: int, unique_schema: str) -> None:
61 | results = run_dbt(["run"])
62 | assert len(results) == model_count
63 |
64 | hypertable_jobs = project.run_sql(
65 | f"""
66 | select *
67 | from timescaledb_information.jobs
68 | where proc_name = 'policy_retention'
69 | and hypertable_schema = '{unique_schema}'
70 | and hypertable_name = 'base'
71 | """,
72 | fetch="all",
73 | )
74 | assert len(hypertable_jobs) == 1
75 |
76 |
77 | class TestRetentionPolicyOnContinuousAggregate(TestRetentionPolicy):
78 | @pytest.fixture(scope="class")
79 | def models(self) -> dict[str, Any]:
80 | return self.base_models() | {
81 | "cagg.sql": """
82 | select
83 | count(*) as col_1,
84 | time_bucket(interval '1 day', time_column) as bucket
85 | from {{ ref('base') }}
86 | group by 2
87 | """,
88 | }
89 |
90 | @pytest.fixture(scope="class")
91 | def model_configs(self, retention_config: dict[str, Any]) -> dict[str, Any]:
92 | return super().base_model_config(retention_config) | {
93 | "cagg": {
94 | "+materialized": "continuous_aggregate",
95 | "+retention_policy": retention_config,
96 | }
97 | }
98 |
99 | def test_retention_policy(self, project: TestProjInfo, model_count: int, unique_schema: str) -> None:
100 | super().test_retention_policy(project, model_count, unique_schema)
101 |
102 | cagg_jobs = project.run_sql(
103 | f"""
104 | select *
105 | from timescaledb_information.jobs j
106 | join timescaledb_information.continuous_aggregates c
107 | on j.hypertable_schema = c.materialization_hypertable_schema
108 | and j.hypertable_name = c.materialization_hypertable_name
109 | where j.proc_name = 'policy_retention'
110 | and c.view_schema = '{unique_schema}'
111 | and c.view_name = 'cagg'
112 | """,
113 | fetch="all",
114 | )
115 | assert len(cagg_jobs) == 1
116 |
--------------------------------------------------------------------------------
/docs/usage/continuous-aggregates.md:
--------------------------------------------------------------------------------
1 | # Continuous aggregates
2 |
3 | Continuous aggregates are the reason that this adapter exists. With this adapter, you can use dbt to manage your continuous aggregates and their configuration.
4 |
5 | !!! info
6 |     Consult the [Timescale docs](https://docs.timescale.com/use-timescale/latest/continuous-aggregates/about-continuous-aggregates/) for more information regarding continuous aggregates.
7 |
8 | !!! tip "Materialized views"
9 |     dbt-postgres 1.6 added support for [materialized views](https://docs.getdbt.com/docs/build/materializations#materialized-view). That feature is **still available** in this adapter. The main difference is that continuous aggregates are refreshed automatically by TimescaleDB (based on a policy), while materialized views are refreshed manually or when you run `dbt run`.
10 |
11 | !!! tip "Full refresh"
12 |     Once a continuous aggregate has been created, only its metadata is updated on subsequent runs. If you need to fully refresh the continuous aggregate - maybe because you have changed its implementation - use dbt's `--full-refresh` flag to drop and recreate it.
13 |
14 | !!! failure "CTEs and subqueries not supported"
15 | TimescaleDB [does not support](https://github.com/timescale/timescaledb/issues/1931) [CTE](https://docs.getdbt.com/terms/cte)s, subqueries and set-returning functions in continuous aggregates.
16 |
17 | ## Usage
18 |
19 | To use continuous aggregates, you need to set the `materialized` config to `continuous_aggregate`.
20 |
21 | === "SQL"
22 |
23 | ```sql+jinja hl_lines="2" title="models/my_aggregate.sql"
24 | {{
25 | config(materialized='continuous_aggregate')
26 | }}
27 | select
28 | count(*),
29 | time_bucket(interval '1 day', time_column) as bucket
30 | from {{ source('a_hypertable') }}
31 | group by 2
32 | ```
33 |
34 | === "YAML"
35 |
36 | ```yaml hl_lines="4" title="dbt_project.yaml"
37 | models:
38 | your_project_name:
39 | model_name:
40 | +materialized: continuous_aggregate
41 | # ...
42 | ```
43 |
44 | ## Configuration options
45 |
46 | ### dbt-specific options: refreshing upon creation
47 |
48 | Continuous aggregates are refreshed automatically by TimescaleDB. This is configured using a [refresh policy](#timescaledb-refresh-policy-options).
49 |
50 | They are also refreshed initially when they are created. This is done by default but can be disabled by setting the `refresh_now` config option to `false`.
51 |
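52 | For example, to skip the initial refresh (a minimal sketch; everything except `refresh_now` follows the examples below):
53 | 
54 | ```yaml hl_lines="5" title="dbt_project.yaml"
55 | models:
56 |   your_project_name:
57 |     model_name:
58 |       +materialized: continuous_aggregate
59 |       +refresh_now: false
60 | ```
61 | 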
52 | ### TimescaleDB continuous aggregate options
53 |
54 | All [TimescaleDB continuous aggregate configuration options](https://docs.timescale.com/api/latest/continuous-aggregates/create_materialized_view/#parameters) as of version 2.12 are supported through model configuration as well:
55 |
56 | * `materialized_only` (make sure to quote the value when setting it to false, e.g. "False"; see the sketch after this list)
57 | * `create_group_indexes`
58 |
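59 | As a minimal sketch, a real-time aggregate with the boolean quoted as noted above:
60 | 
61 | ```sql+jinja hl_lines="4" title="models/my_aggregate.sql"
62 | {{
63 |     config(
64 |         materialized='continuous_aggregate',
65 |         materialized_only="False")
66 | }}
67 | select
68 |     count(*),
69 |     time_bucket(interval '1 day', time_column) as bucket
70 | from {{ source('a_hypertable') }}
71 | group by 2
72 | ```
73 | 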
59 | ### TimescaleDB refresh policy options
60 |
61 | A continuous aggregate is usually used with a refresh policy. This is configured using the `refresh_policy` config option. The following options are supported:
62 |
63 | * `start_offset`
64 | * `end_offset`
65 | * `schedule_interval`
66 | * `initial_start`
67 | * `timezone`
68 |
69 | === "SQL"
70 |
71 | ```sql+jinja hl_lines="2" title="models/my_aggregate.sql"
72 | {{
73 | config(
74 | materialized='continuous_aggregate',
75 | refresh_policy={
76 | 'start_offset': "interval '1 month'",
77 | 'end_offset': "interval '1 hour'",
78 | 'schedule_interval': "interval '1 hour'",
79 | })
80 | }}
81 | select
82 | count(*),
83 | time_bucket(interval '1 day', time_column) as bucket
84 | from {{ source('a_hypertable') }}
85 | group by 2
86 | ```
87 |
88 | === "YAML"
89 |
90 | ```yaml hl_lines="4" title="dbt_project.yaml"
91 | models:
92 | your_project_name:
93 | model_name:
94 | +materialized: continuous_aggregate
95 | +refresh_policy:
96 | start_offset: "interval '1 month'"
97 | end_offset: "interval '1 hour'"
98 | schedule_interval: "interval '1 hour'"
99 | # ...
100 | ```
101 |
102 | !!! info
103 | Consult the [Timescale docs](https://docs.timescale.com/api/latest/continuous-aggregates/add_continuous_aggregate_policy/) for more information regarding these settings.
104 |
--------------------------------------------------------------------------------
/dbt/include/timescaledb/macros/materializations/models/hypertable.sql:
--------------------------------------------------------------------------------
1 | {% materialization hypertable, adapter="timescaledb" %}
2 |
3 | {%- set existing_relation = load_cached_relation(this) -%}
4 | {%- set target_relation = this.incorporate(type='table') %}
5 | {%- set intermediate_relation = make_intermediate_relation(target_relation) -%}
6 | -- the intermediate_relation should not already exist in the database; get_relation
7 | -- will return None in that case. Otherwise, we get a relation that we can drop
8 | -- later, before we try to use this name for the current operation
9 | {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}
10 | /*
11 | See ../view/view.sql for more information about this relation.
12 | */
13 | {%- set backup_relation_type = 'table' if existing_relation is none else existing_relation.type -%}
14 | {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}
15 | -- as above, the backup_relation should not already exist
16 | {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}
17 | -- grab the current table's grants config for comparison later on
18 | {%- set grant_config = config.get('grants') -%}
19 | {%- set should_truncate = config.get("empty_hypertable", false) -%}
20 | {%- set dimensions_count = config.get("dimensions", []) | length -%}
21 | {% if dimensions_count > 0 and not should_truncate %}
22 | {{ exceptions.raise_compiler_error("The hypertable should always be empty when adding dimensions. Make sure empty_hypertable is set in your model configuration.") }}
23 | {% endif %}
24 |
25 | -- drop the temp relations if they exist already in the database
26 | {{ drop_relation_if_exists(preexisting_intermediate_relation) }}
27 | {{ drop_relation_if_exists(preexisting_backup_relation) }}
28 |
29 | {{ run_hooks(pre_hooks, inside_transaction=False) }}
30 |
31 | -- `BEGIN` happens here:
32 | {{ run_hooks(pre_hooks, inside_transaction=True) }}
33 |
34 | -- build model
35 | {% call statement('main') -%}
36 | {{ get_create_table_as_sql(False, intermediate_relation, sql) }}
37 |
38 | {%- if should_truncate %}
39 | truncate {{ intermediate_relation }};
40 | {% endif -%}
41 |
42 | {{- get_create_hypertable_as_sql(intermediate_relation) }}
43 |
44 | {{ set_compression(intermediate_relation, config.get("compression")) }}
45 | {%- if config.get('compression') %}
46 | {{ add_compression_policy(intermediate_relation, config.get("compression")) }}
47 | {% endif -%}
48 |
49 | {%- if config.get("integer_now_func") %}
50 | {{ set_integer_now_func(intermediate_relation, config.get("integer_now_func"), config.get("integer_now_func_sql")) }}
51 | {% endif -%}
52 |
53 | {%- if config.get("chunk_time_interval") %}
54 | {{ set_chunk_time_interval(intermediate_relation, config.get("chunk_time_interval")) }}
55 | {% endif -%}
56 |
57 | {%- endcall %}
58 |
59 | -- cleanup
60 | {% if existing_relation is not none %}
61 | /* Do the equivalent of rename_if_exists. 'existing_relation' could have been dropped
62 | since the variable was first set. */
63 | {% set existing_relation = load_cached_relation(existing_relation) %}
64 | {% if existing_relation is not none %}
65 | {{ adapter.rename_relation(existing_relation, backup_relation) }}
66 | {% endif %}
67 | {% endif %}
68 |
69 | {{ adapter.rename_relation(intermediate_relation, target_relation) }}
70 |
71 | {% do create_indexes(target_relation) %}
72 |
73 | {%- if config.get("reorder_policy") %}
74 | {% call statement("reorder_policy") %}
75 | {{ add_reorder_policy(target_relation, config.get("reorder_policy")) }}
76 | {% endcall %}
77 | {% endif -%}
78 |
79 | {%- if config.get("retention_policy") %}
80 | {% call statement("retention_policy") %}
81 | {{ add_retention_policy(target_relation, config.get("retention_policy")) }}
82 | {% endcall %}
83 | {% endif -%}
84 |
85 | {% do create_dimensions(target_relation) %}
86 |
87 | {{ run_hooks(post_hooks, inside_transaction=True) }}
88 |
89 | {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}
90 | {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}
91 |
92 | {% do persist_docs(target_relation, model) %}
93 |
94 | -- `COMMIT` happens here
95 | {{ adapter.commit() }}
96 |
97 | -- finally, drop the existing/backup relation after the commit
98 | {{ drop_relation_if_exists(backup_relation) }}
99 |
100 | {{ run_hooks(post_hooks, inside_transaction=False) }}
101 |
102 | {{ return({'relations': [target_relation]}) }}
103 | {% endmaterialization %}
104 |
--------------------------------------------------------------------------------
/tests/functional/adapter/hypertable/test_hypertable_compression.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import pytest
4 |
5 | from dbt.tests.fixtures.project import TestProjInfo
6 | from dbt.tests.util import check_result_nodes_by_name, run_dbt
7 |
8 |
9 | class BaseTestHypertableCompression:
10 | def base_compression_settings(self) -> dict[str, Any]:
11 | return {"after": "interval '1 day'", "schedule_interval": "interval '6 day'"}
12 |
13 | @pytest.fixture(scope="class")
14 | def compression_settings(self) -> dict[str, Any]:
15 | return self.base_compression_settings()
16 |
17 | @pytest.fixture(scope="class")
18 | def model_config(self, compression_settings: dict[str, Any]) -> dict[str, Any]:
19 | return {
20 | "+materialized": "hypertable",
21 | "+main_dimension": "time_column",
22 | "+compression": compression_settings,
23 | }
24 |
25 | @pytest.fixture(scope="class")
26 | def project_config_update(self, model_config: dict[str, Any]) -> dict[str, Any]:
27 | return {
28 | "name": "hypertable_tests",
29 | "models": {
30 | "hypertable_tests": {
31 | "test_model": model_config,
32 | }
33 | },
34 | }
35 |
36 | @pytest.fixture(scope="class")
37 | def models(self) -> dict[str, Any]:
38 | return {
39 | "test_model.sql": """
40 | select
41 | current_timestamp as time_column,
42 | 1 as col_1
43 | """,
44 | }
45 |
46 | def validate_compression(self, compression_settings: list) -> None:
47 | assert len(compression_settings) == 1
48 | time_column = [x for x in compression_settings if x[2] == "time_column"][0]
49 |
50 |         assert time_column[3] is None  # segmentby_column_index: not a segmentby column
51 |         assert not time_column[5]  # orderby_asc: default compression order is descending
52 |         assert time_column[6]  # orderby_nullsfirst
53 |
54 | def validate_jobs(self, timescale_jobs: list) -> None:
55 | assert len(timescale_jobs) == 1
56 | job = timescale_jobs[0]
57 |         assert job[9]  # "scheduled" flag on the compression policy job
58 |
59 | def test_hypertable(self, project: TestProjInfo, unique_schema: str) -> None:
60 | results = run_dbt(["run"])
61 | assert len(results) == 1
62 | check_result_nodes_by_name(results, ["test_model"])
63 | assert results[0].node.node_info["materialized"] == "hypertable"
64 |
65 | hypertables = project.run_sql(
66 | f"""
67 | select *
68 | from timescaledb_information.hypertables
69 | where hypertable_name = 'test_model'
70 | and hypertable_schema = '{unique_schema}'""",
71 | fetch="all",
72 | )
73 | assert len(hypertables) == 1
74 | hypertable = hypertables[0]
75 | assert hypertable[5] # compression_enabled
76 |
77 | compression_settings = project.run_sql(
78 | f"""
79 | select *
80 | from timescaledb_information.compression_settings
81 | where hypertable_name = 'test_model'
82 | and hypertable_schema = '{unique_schema}'""",
83 | fetch="all",
84 | )
85 | self.validate_compression(compression_settings)
86 | timescale_jobs = project.run_sql(
87 | f"""
88 | select *
89 | from timescaledb_information.jobs
90 | where hypertable_name = 'test_model'
91 | and hypertable_schema = '{unique_schema}'
92 | and application_name like 'Compression Policy%'
93 | and schedule_interval = interval '6 day'""",
94 | fetch="all",
95 | )
96 | self.validate_jobs(timescale_jobs)
97 |
98 |
99 | class TestHypertableCompressionSegmentBy(BaseTestHypertableCompression):
100 | @pytest.fixture(scope="class")
101 | def compression_settings(self) -> dict[str, Any]:
102 | return super().base_compression_settings() | {"segmentby": ["col_1"]}
103 |
104 | def validate_compression(self, compression_settings: list) -> None:
105 | assert len(compression_settings) == 2
106 | time_column = [x for x in compression_settings if x[2] == "time_column"][0]
107 | col_1 = [x for x in compression_settings if x[2] == "col_1"][0]
108 |
109 | assert time_column[3] is None
110 | assert not time_column[5]
111 | assert time_column[6]
112 |
113 |         assert col_1[3] == 1  # segmentby_column_index: first segmentby column
114 |         assert col_1[4] is None  # no orderby settings for a segmentby column
115 |         assert col_1[5] is None
116 |         assert col_1[6] is None
117 |
118 |
119 | class TestHypertableCompressionChunkTimeInterval(BaseTestHypertableCompression):
120 | @pytest.fixture(scope="class")
121 | def compression_settings(self) -> dict[str, Any]:
122 | return super().base_compression_settings() | {"chunk_time_interval": "1 day"}
123 |
124 |
125 | class TestHypertableCompressionOrderBy(BaseTestHypertableCompression):
126 | @pytest.fixture(scope="class")
127 | def compression_settings(self) -> dict[str, Any]:
128 | return super().base_compression_settings() | {"orderby": "col_1 asc"}
129 |
130 | def validate_compression(self, compression_settings: list) -> None:
131 | assert len(compression_settings) == 2
132 | time_column = [x for x in compression_settings if x[2] == "time_column"][0]
133 | col_1 = [x for x in compression_settings if x[2] == "col_1"][0]
134 |
135 | assert time_column[3] is None
136 | assert not time_column[5]
137 | assert time_column[6]
138 |
139 |         assert col_1[3] is None  # not a segmentby column
140 |         assert col_1[4] == 1  # orderby_column_index: first orderby column
141 |         assert col_1[5]  # orderby_asc, per "col_1 asc"
142 |         assert not col_1[6]  # orderby_nullsfirst: nulls last for ascending order
143 |
144 |
145 | class TestHypertableCompressionDefault(BaseTestHypertableCompression):
146 | pass
147 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by https://www.toptal.com/developers/gitignore/api/windows,macos,visualstudiocode,python,dbt
2 | # Edit at https://www.toptal.com/developers/gitignore?templates=windows,macos,visualstudiocode,python,dbt
3 |
4 | ### dbt ###
5 | target/
6 | dbt_modules/
7 | dbt_packages/
8 | logs/
9 |
10 | ### macOS ###
11 | # General
12 | .DS_Store
13 | .AppleDouble
14 | .LSOverride
15 |
16 | # Icon must end with two \r
17 | Icon
18 |
19 |
20 | # Thumbnails
21 | ._*
22 |
23 | # Files that might appear in the root of a volume
24 | .DocumentRevisions-V100
25 | .fseventsd
26 | .Spotlight-V100
27 | .TemporaryItems
28 | .Trashes
29 | .VolumeIcon.icns
30 | .com.apple.timemachine.donotpresent
31 |
32 | # Directories potentially created on remote AFP share
33 | .AppleDB
34 | .AppleDesktop
35 | Network Trash Folder
36 | Temporary Items
37 | .apdisk
38 |
39 | ### macOS Patch ###
40 | # iCloud generated files
41 | *.icloud
42 |
43 | ### Python ###
44 | # Byte-compiled / optimized / DLL files
45 | __pycache__/
46 | *.py[cod]
47 | *$py.class
48 |
49 | # C extensions
50 | *.so
51 |
52 | # Distribution / packaging
53 | .Python
54 | build/
55 | develop-eggs/
56 | dist/
57 | downloads/
58 | eggs/
59 | .eggs/
60 | lib/
61 | lib64/
62 | parts/
63 | sdist/
64 | var/
65 | wheels/
66 | share/python-wheels/
67 | *.egg-info/
68 | .installed.cfg
69 | *.egg
70 | MANIFEST
71 |
72 | # PyInstaller
73 | # Usually these files are written by a python script from a template
74 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
75 | *.manifest
76 | *.spec
77 |
78 | # Installer logs
79 | pip-log.txt
80 | pip-delete-this-directory.txt
81 |
82 | # Unit test / coverage reports
83 | htmlcov/
84 | .tox/
85 | .nox/
86 | .coverage
87 | .coverage.*
88 | .cache
89 | nosetests.xml
90 | coverage.xml
91 | *.cover
92 | *.py,cover
93 | .hypothesis/
94 | .pytest_cache/
95 | cover/
96 |
97 | # Translations
98 | *.mo
99 | *.pot
100 |
101 | # Django stuff:
102 | *.log
103 | local_settings.py
104 | db.sqlite3
105 | db.sqlite3-journal
106 |
107 | # Flask stuff:
108 | instance/
109 | .webassets-cache
110 |
111 | # Scrapy stuff:
112 | .scrapy
113 |
114 | # Sphinx documentation
115 | docs/_build/
116 |
117 | # PyBuilder
118 | .pybuilder/
119 |
120 | # Jupyter Notebook
121 | .ipynb_checkpoints
122 |
123 | # IPython
124 | profile_default/
125 | ipython_config.py
126 |
127 | # pyenv
128 | # For a library or package, you might want to ignore these files since the code is
129 | # intended to run in multiple environments; otherwise, check them in:
130 | # .python-version
131 |
132 | # pipenv
133 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
134 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
135 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
136 | # install all needed dependencies.
137 | #Pipfile.lock
138 |
139 | # poetry
140 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
141 | # This is especially recommended for binary packages to ensure reproducibility, and is more
142 | # commonly ignored for libraries.
143 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
144 | #poetry.lock
145 |
146 | # pdm
147 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
148 | #pdm.lock
149 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
150 | # in version control.
151 | # https://pdm.fming.dev/#use-with-ide
152 | .pdm.toml
153 |
154 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
155 | __pypackages__/
156 |
157 | # Celery stuff
158 | celerybeat-schedule
159 | celerybeat.pid
160 |
161 | # SageMath parsed files
162 | *.sage.py
163 |
164 | # Environments
165 | .env
166 | .venv
167 | env/
168 | venv/
169 | ENV/
170 | env.bak/
171 | venv.bak/
172 |
173 | # Spyder project settings
174 | .spyderproject
175 | .spyproject
176 |
177 | # Rope project settings
178 | .ropeproject
179 |
180 | # mkdocs documentation
181 | /site
182 |
183 | # mypy
184 | .mypy_cache/
185 | .dmypy.json
186 | dmypy.json
187 |
188 | # Pyre type checker
189 | .pyre/
190 |
191 | # pytype static type analyzer
192 | .pytype/
193 |
194 | # Cython debug symbols
195 | cython_debug/
196 |
197 | # PyCharm
198 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
199 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
200 | # and can be added to the global gitignore or merged into this file. For a more nuclear
201 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
202 | #.idea/
203 |
204 | # ruff
205 | .ruff_cache/
206 |
207 | # LSP config files
208 | pyrightconfig.json
209 |
210 | ### VisualStudioCode ###
211 | .vscode/*
212 | !.vscode/settings.json
213 | !.vscode/tasks.json
214 | !.vscode/launch.json
215 | !.vscode/extensions.json
216 | !.vscode/*.code-snippets
217 |
218 | # Local History for Visual Studio Code
219 | .history/
220 |
221 | # Built Visual Studio Code Extensions
222 | *.vsix
223 |
224 | ### VisualStudioCode Patch ###
225 | # Ignore all local history of files
226 | .history
227 | .ionide
228 |
229 | ### Windows ###
230 | # Windows thumbnail cache files
231 | Thumbs.db
232 | Thumbs.db:encryptable
233 | ehthumbs.db
234 | ehthumbs_vista.db
235 |
236 | # Dump file
237 | *.stackdump
238 |
239 | # Folder config file
240 | [Dd]esktop.ini
241 |
242 | # Recycle Bin used on file shares
243 | $RECYCLE.BIN/
244 |
245 | # Windows Installer files
246 | *.cab
247 | *.msi
248 | *.msix
249 | *.msm
250 | *.msp
251 |
252 | # Windows shortcuts
253 | *.lnk
254 |
255 | # End of https://www.toptal.com/developers/gitignore/api/windows,macos,visualstudiocode,python,dbt
256 | .pdm-python
257 | .pdm-build/
258 | test.env
259 | dbt/adapters/timescaledb/__version__.py
260 |
--------------------------------------------------------------------------------