├── .changes
│   ├── 0.0.0.md
│   ├── 1.9.0.md
│   ├── header.tpl.md
│   └── unreleased
│       └── .gitkeep
├── .changie.yaml
├── .github
│   ├── CODEOWNERS
│   ├── ISSUE_TEMPLATE
│   │   └── config.yml
│   ├── pull_request_template.md
│   ├── scripts
│   │   ├── psycopg2-check.sh
│   │   └── update_dev_dependency_branches.sh
│   └── workflows
│       ├── changelog-entry-check.yml
│       ├── code-quality.yml
│       ├── docs-issue.yml
│       ├── integration-tests.yml
│       ├── release-internal.yml
│       ├── release.yml
│       ├── release_prep_hatch.yml
│       ├── stale.yml
│       ├── triage-labels.yml
│       ├── unit-tests.yml
│       └── version-bump.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CHANGELOG.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── dbt
│   ├── __init__.py
│   ├── adapters
│   │   └── postgres
│   │       ├── __init__.py
│   │       ├── __version__.py
│   │       ├── column.py
│   │       ├── connections.py
│   │       ├── impl.py
│   │       ├── record
│   │       │   ├── __init__.py
│   │       │   ├── cursor
│   │       │   │   ├── cursor.py
│   │       │   │   └── status.py
│   │       │   └── handle.py
│   │       ├── relation.py
│   │       └── relation_configs
│   │           ├── __init__.py
│   │           ├── constants.py
│   │           ├── index.py
│   │           └── materialized_view.py
│   └── include
│       └── postgres
│           ├── __init__.py
│           ├── dbt_project.yml
│           ├── macros
│           │   ├── adapters.sql
│           │   ├── catalog.sql
│           │   ├── materializations
│           │   │   ├── incremental_strategies.sql
│           │   │   └── snapshot_merge.sql
│           │   ├── relations.sql
│           │   ├── relations
│           │   │   ├── materialized_view
│           │   │   │   ├── alter.sql
│           │   │   │   ├── create.sql
│           │   │   │   ├── describe.sql
│           │   │   │   ├── drop.sql
│           │   │   │   ├── refresh.sql
│           │   │   │   └── rename.sql
│           │   │   ├── table
│           │   │   │   ├── drop.sql
│           │   │   │   ├── rename.sql
│           │   │   │   └── replace.sql
│           │   │   └── view
│           │   │       ├── drop.sql
│           │   │       ├── rename.sql
│           │   │       └── replace.sql
│           │   ├── timestamps.sql
│           │   └── utils
│           │       ├── any_value.sql
│           │       ├── columns_spec_ddl.sql
│           │       ├── dateadd.sql
│           │       ├── datediff.sql
│           │       ├── last_day.sql
│           │       ├── listagg.sql
│           │       └── split_part.sql
│           ├── profile_template.yml
│           └── sample_profiles.yml
├── docker
│   ├── Dockerfile
│   ├── README.md
│   └── dev.Dockerfile
├── pyproject.toml
├── scripts
│   └── setup_test_database.sql
├── test.env.example
└── tests
    ├── __init__.py
    ├── conftest.py
    ├── functional
    │   ├── README.md
    │   ├── __init__.py
    │   ├── adapter
    │   │   ├── __init__.py
    │   │   ├── test_aliases.py
    │   │   ├── test_basic.py
    │   │   ├── test_caching.py
    │   │   ├── test_clone.py
    │   │   ├── test_column_types.py
    │   │   ├── test_concurrency.py
    │   │   ├── test_constraints.py
    │   │   ├── test_data_types.py
    │   │   ├── test_debug.py
    │   │   ├── test_empty.py
    │   │   ├── test_ephemeral.py
    │   │   ├── test_grants.py
    │   │   ├── test_hooks
    │   │   │   ├── data
    │   │   │   │   ├── seed_model.sql
    │   │   │   │   └── seed_run.sql
    │   │   │   └── test_hooks.py
    │   │   ├── test_incremental.py
    │   │   ├── test_incremental_microbatch.py
    │   │   ├── test_persist_docs.py
    │   │   ├── test_query_comment.py
    │   │   ├── test_relations.py
    │   │   ├── test_show.py
    │   │   ├── test_simple_copy.py
    │   │   ├── test_simple_seed
    │   │   │   ├── seed_bom.csv
    │   │   │   └── test_simple_seed.py
    │   │   ├── test_simple_snapshot.py
    │   │   ├── test_store_test_failures.py
    │   │   ├── test_unit_testing.py
    │   │   └── test_utils.py
    │   ├── basic
    │   │   ├── data
    │   │   │   ├── seed-initial.csv
    │   │   │   ├── seed-update.csv
    │   │   │   ├── summary_expected.csv
    │   │   │   ├── summary_expected_update.csv
    │   │   │   ├── varchar10_seed.sql
    │   │   │   └── varchar300_seed.sql
    │   │   ├── test_basic.py
    │   │   ├── test_invalid_reference.py
    │   │   ├── test_jaffle_shop.py
    │   │   ├── test_mixed_case_db.py
    │   │   ├── test_project.py
    │   │   ├── test_simple_reference.py
    │   │   └── test_varchar_widening.py
    │   ├── compile
    │   │   ├── fixtures.py
    │   │   └── test_compile.py
    │   ├── conftest.py
    │   ├── contracts
    │   │   ├── test_contract_enforcement.py
    │   │   ├── test_contract_precision.py
    │   │   └── test_nonstandard_data_type.py
    │   ├── custom_aliases
    │   │   ├── fixtures.py
    │   │   └── test_custom_aliases.py
    │   ├── custom_singular_tests
    │   │   ├── data
    │   │   │   └── seed_expected.sql
    │   │   └── test_custom_singular_tests.py
    │   ├── dbt_debug
    │   │   └── test_dbt_debug.py
    │   ├── dbt_runner.py
    │   ├── exit_codes
    │   │   ├── fixtures.py
    │   │   └── test_exit_codes.py
    │   ├── exposures
    │   │   ├── fixtures.py
    │   │   ├── test_exposure_configs.py
    │   │   └── test_exposures.py
    │   ├── graph_selection
    │   │   ├── test_graph_selection.py
    │   │   ├── test_group_selection.py
    │   │   ├── test_intersection_syntax.py
    │   │   ├── test_schema_test_graph_selection.py
    │   │   ├── test_tag_selection.py
    │   │   └── test_version_selection.py
    │   ├── incremental_schema_tests
    │   │   ├── fixtures.py
    │   │   └── test_incremental_schema.py
    │   ├── invalid_model_tests
    │   │   ├── test_invalid_models.py
    │   │   └── test_model_warning.py
    │   ├── macros
    │   │   ├── data
    │   │   │   └── seed.sql
    │   │   ├── fixtures.py
    │   │   ├── package_macro_overrides
    │   │   │   ├── dbt_project.yml
    │   │   │   └── macros
    │   │   │       └── macros.sql
    │   │   └── test_macros.py
    │   ├── materializations
    │   │   ├── conftest.py
    │   │   ├── fixtures.py
    │   │   ├── materialized_view_tests
    │   │   │   ├── test_materialized_view.py
    │   │   │   ├── test_postgres_materialized_view.py
    │   │   │   └── utils.py
    │   │   ├── test_incremental.py
    │   │   ├── test_runtime_materialization.py
    │   │   └── test_supported_languages.py
    │   ├── postgres
    │   │   ├── fixtures.py
    │   │   └── test_indexes.py
    │   ├── projects
    │   │   ├── __init__.py
    │   │   ├── dbt_integration
    │   │   │   ├── __init__.py
    │   │   │   ├── macros
    │   │   │   │   └── do_something.sql
    │   │   │   ├── models
    │   │   │   │   ├── incremental.sql
    │   │   │   │   ├── table.sql
    │   │   │   │   └── view.sql
    │   │   │   └── schemas
    │   │   │       ├── project.yml
    │   │   │       └── schema.yml
    │   │   ├── graph_selection
    │   │   │   ├── __init__.py
    │   │   │   ├── data
    │   │   │   │   ├── seed.csv
    │   │   │   │   └── summary_expected.csv
    │   │   │   ├── models
    │   │   │   │   ├── alternative_users.sql
    │   │   │   │   ├── base_users.sql
    │   │   │   │   ├── emails.sql
    │   │   │   │   ├── emails_alt.sql
    │   │   │   │   ├── nested_users.sql
    │   │   │   │   ├── never_selected.sql
    │   │   │   │   ├── subdir.sql
    │   │   │   │   ├── users.sql
    │   │   │   │   ├── users_rollup.sql
    │   │   │   │   └── users_rollup_dependency.sql
    │   │   │   └── schemas
    │   │   │       ├── patch_path_selection.yml
    │   │   │       ├── properties.yml
    │   │   │       └── schema.yml
    │   │   ├── jaffle_shop
    │   │   │   ├── __init__.py
    │   │   │   ├── data
    │   │   │   │   ├── raw_customers.csv
    │   │   │   │   ├── raw_orders.csv
    │   │   │   │   └── raw_payments.csv
    │   │   │   ├── docs
    │   │   │   │   ├── docs.md
    │   │   │   │   └── overview.md
    │   │   │   ├── models
    │   │   │   │   ├── customers.sql
    │   │   │   │   └── orders.sql
    │   │   │   ├── schemas
    │   │   │   │   ├── jaffle_shop.yml
    │   │   │   │   └── staging.yml
    │   │   │   └── staging
    │   │   │       ├── stg_customers.sql
    │   │   │       ├── stg_orders.sql
    │   │   │       └── stg_payments.sql
    │   │   └── utils.py
    │   ├── retry
    │   │   ├── fixtures.py
    │   │   └── test_retry.py
    │   ├── schema
    │   │   ├── fixtures
    │   │   │   ├── macros.py
    │   │   │   └── sql.py
    │   │   └── test_custom_schema.py
    │   ├── selected_resources
    │   │   ├── fixtures.py
    │   │   └── test_selected_resources.py
    │   ├── semantic_models
    │   │   ├── fixtures.py
    │   │   ├── test_semantic_model_configs.py
    │   │   ├── test_semantic_model_parsing.py
    │   │   └── test_semantic_models.py
    │   ├── show
    │   │   ├── fixtures.py
    │   │   └── test_show.py
    │   ├── sources
    │   │   ├── common_source_setup.py
    │   │   ├── data
    │   │   │   └── seed.sql
    │   │   ├── fixtures.py
    │   │   ├── test_simple_source.py
    │   │   ├── test_source_configs.py
    │   │   ├── test_source_fresher_state.py
    │   │   └── test_source_freshness.py
    │   ├── statements
    │   │   ├── fixtures.py
    │   │   └── test_statements.py
    │   ├── test_access.py
    │   ├── test_analyses.py
    │   ├── test_catalog.py
    │   ├── test_clean.py
    │   ├── test_colors.py
    │   ├── test_column_quotes.py
    │   ├── test_config.py
    │   ├── test_connection_manager.py
    │   ├── test_custom_target_path.py
    │   ├── test_cycles.py
    │   ├── test_default_selectors.py
    │   ├── test_events.py
    │   ├── test_external_reference.py
    │   ├── test_fail_fast.py
    │   ├── test_multiple_indexes.py
    │   ├── test_ref_override.py
    │   ├── test_relation_name.py
    │   ├── test_severity.py
    │   ├── test_store_test_failures.py
    │   ├── test_thread_count.py
    │   ├── test_timezones.py
    │   ├── test_types.py
    │   ├── test_unlogged_table.py
    │   ├── unit_testing
    │   │   ├── fixtures.py
    │   │   ├── test_csv_fixtures.py
    │   │   ├── test_state.py
    │   │   ├── test_unit_testing.py
    │   │   ├── test_ut_dependency.py
    │   │   └── test_ut_sources.py
    │   └── utils.py
    └── unit
        ├── test_adapter.py
        ├── test_adapter_conversions.py
        ├── test_connection.py
        ├── test_filter_catalog.py
        ├── test_materialized_view.py
        ├── test_renamed_relations.py
        └── utils.py
/.changes/0.0.0.md:
--------------------------------------------------------------------------------
1 | ## Previous Releases
2 | For information on prior major and minor releases, see their changelogs:
3 | - [1.8](https://github.com/dbt-labs/dbt-postgres/blob/1.8.latest/CHANGELOG.md)
4 |
--------------------------------------------------------------------------------
/.changes/1.9.0.md:
--------------------------------------------------------------------------------
1 | ## dbt-postgres 1.9.0 - December 09, 2024
2 |
3 | ### Breaking Changes
4 |
5 | - Drop support for Python 3.8 ([#161](https://github.com/dbt-labs/dbt-postgres/issues/161))
6 |
7 | ### Features
8 |
9 | - Add tests for cross-database `cast` macro ([#76](https://github.com/dbt-labs/dbt-postgres/issues/76))
10 | - Cross-database `date` macro ([#82](https://github.com/dbt-labs/dbt-postgres/issues/82))
11 | - Add support for Python 3.12 ([#17](https://github.com/dbt-labs/dbt-postgres/issues/17))
12 | - Allow configuring snapshot column names ([#144](https://github.com/dbt-labs/dbt-postgres/issues/144))
13 | - Microbatch incremental strategy implementation: merge ([#149](https://github.com/dbt-labs/dbt-postgres/issues/149))
14 | - Enable setting current value of dbt_valid_to ([#151](https://github.com/dbt-labs/dbt-postgres/issues/151))
15 |
16 | ### Fixes
17 |
18 | - Fix the semicolon semantics for indexes while respecting another bug fix ([#85](https://github.com/dbt-labs/dbt-postgres/issues/85))
19 | - Default to psycopg2-binary and allow overriding to psycopg2 via DBT_PSYCOPG2_NAME (restores previous behavior) ([#96](https://github.com/dbt-labs/dbt-postgres/issues/96))
20 | - Fix `persist_docs` for `materialized_view` materializations. Previously, using this configuration with materialized view models would lead to an error. ([#120](https://github.com/dbt-labs/dbt-postgres/issues/120))
21 |
22 | ### Under the Hood
23 |
24 | - Add support for experimental record/replay testing. ([#123](https://github.com/dbt-labs/dbt-postgres/issues/123))
25 | - Updating changie.yaml to add contributors and PR links ([#109](https://github.com/dbt-labs/dbt-postgres/issues/109))
26 |
27 | ### Contributors
28 | - [@dbeatty10](https://github.com/dbeatty10) ([#76](https://github.com/dbt-labs/dbt-postgres/issues/76), [#82](https://github.com/dbt-labs/dbt-postgres/issues/82))
29 | - [@gshank](https://github.com/gshank) ([#144](https://github.com/dbt-labs/dbt-postgres/issues/144), [#151](https://github.com/dbt-labs/dbt-postgres/issues/151))
30 | - [@leahwicz](https://github.com/leahwicz) ([#109](https://github.com/dbt-labs/dbt-postgres/issues/109))
31 | - [@michelleark](https://github.com/michelleark) ([#149](https://github.com/dbt-labs/dbt-postgres/issues/149))
32 | - [@mikealfare](https://github.com/mikealfare) ([#161](https://github.com/dbt-labs/dbt-postgres/issues/161), [#17](https://github.com/dbt-labs/dbt-postgres/issues/17), [#96](https://github.com/dbt-labs/dbt-postgres/issues/96))
33 | - [@morsapaes](https://github.com/morsapaes) ([#120](https://github.com/dbt-labs/dbt-postgres/issues/120))
34 | - [@peterallenwebb](https://github.com/peterallenwebb) ([#123](https://github.com/dbt-labs/dbt-postgres/issues/123))
35 | - [@versusfacit](https://github.com/versusfacit) ([#85](https://github.com/dbt-labs/dbt-postgres/issues/85))
36 |
--------------------------------------------------------------------------------
/.changes/header.tpl.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 | All notable changes to this project will be documented in this file.
3 |
4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
5 | adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html),
6 | and is generated by [Changie](https://github.com/miniscruff/changie).
7 |
--------------------------------------------------------------------------------
/.changes/unreleased/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dbt-labs/dbt-postgres/4ceebd5939034123e77ad5b714d45545a7962d90/.changes/unreleased/.gitkeep
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # This codeowners file is used to ensure all PRs require reviews from the adapters team
2 |
3 | * @dbt-labs/adapters
4 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 | contact_links:
3 | - name: Ask the community for help
4 | url: https://github.com/dbt-labs/docs.getdbt.com/discussions
5 | about: Need help troubleshooting? Check out our guide on how to ask
6 | - name: Contact dbt Cloud support
7 | url: mailto:support@getdbt.com
8 | about: Are you using dbt Cloud? Contact our support team for help!
9 | - name: Participate in Discussions
10 | url: https://github.com/dbt-labs/dbt-adapters/discussions
11 | about: Do you have a Big Idea for dbt-postgres? Read open discussions, or start a new one
12 | - name: Create an issue for dbt-postgres
13 | url: https://github.com/dbt-labs/dbt-adapters/issues/new/choose
14 | about: Report a bug or request a feature for dbt-postgres
15 | - name: Create an issue for dbt-core
16 | url: https://github.com/dbt-labs/dbt-core/issues/new/choose
17 | about: Report a bug or request a feature for dbt-core
18 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 |
4 |
--------------------------------------------------------------------------------
/.github/scripts/psycopg2-check.sh:
--------------------------------------------------------------------------------
1 | python -m venv venv
2 | source venv/bin/activate
3 | python -m pip install .
4 |
5 | if [[ "$PSYCOPG2_WORKAROUND" == true ]]; then
6 | if [[ $(pip show psycopg2-binary) ]]; then
7 | PSYCOPG2_VERSION=$(pip show psycopg2-binary | grep Version | cut -d " " -f 2)
8 | pip uninstall -y psycopg2-binary
9 | pip install psycopg2==$PSYCOPG2_VERSION
10 | fi
11 | fi
12 |
13 | PSYCOPG2_NAME=$((pip show psycopg2 || pip show psycopg2-binary) | grep Name | cut -d " " -f 2)
14 | if [[ "$PSYCOPG2_NAME" != "$PSYCOPG2_EXPECTED_NAME" ]]; then
15 |     echo "Expected: \"$PSYCOPG2_EXPECTED_NAME\" but found: \"$PSYCOPG2_NAME\""
16 | exit 1
17 | fi
18 |
19 | deactivate
20 | rm -r ./venv
21 |
--------------------------------------------------------------------------------
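
For reference, the probe this script performs with `pip show` can also be done from Python. A minimal sketch, assuming Python 3.9+ (where importlib.metadata is in the standard library); this is illustrative, not part of the repo:

    # Detect which psycopg2 distribution is installed, mirroring the script's
    # `pip show psycopg2 || pip show psycopg2-binary` fallback.
    from importlib.metadata import PackageNotFoundError, version

    def installed_psycopg2() -> str:
        for name in ("psycopg2", "psycopg2-binary"):
            try:
                return f"{name}=={version(name)}"
            except PackageNotFoundError:
                continue
        raise RuntimeError("neither psycopg2 nor psycopg2-binary is installed")

    print(installed_psycopg2())
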
/.github/scripts/update_dev_dependency_branches.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 | set -e
3 |
4 |
5 | dbt_adapters_branch=$1
6 | dbt_core_branch=$2
7 | dbt_common_branch=$3
8 | target_req_file="pyproject.toml"
9 | core_req_sed_pattern="s|dbt-core.git.*#subdirectory=core|dbt-core.git@${dbt_core_branch}#subdirectory=core|g"
10 | adapters_req_sed_pattern="s|dbt-adapters.git|dbt-adapters.git@${dbt_adapters_branch}|g"
11 | common_req_sed_pattern="s|dbt-common.git|dbt-common.git@${dbt_common_branch}|g"
12 | if [[ "$OSTYPE" == darwin* ]]; then
13 |     # macOS ships with BSD sed, whose -i flag requires an explicit (here empty) backup-suffix argument
14 | sed -i "" "$adapters_req_sed_pattern" $target_req_file
15 | sed -i "" "$core_req_sed_pattern" $target_req_file
16 | sed -i "" "$common_req_sed_pattern" $target_req_file
17 | else
18 | sed -i "$adapters_req_sed_pattern" $target_req_file
19 | sed -i "$core_req_sed_pattern" $target_req_file
20 | sed -i "$common_req_sed_pattern" $target_req_file
21 | fi
22 |
--------------------------------------------------------------------------------
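
The sed patterns above rewrite git-sourced dependency pins in pyproject.toml to point at feature branches. A minimal Python sketch of the same rewrite (the pyproject line and branch name below are made-up examples):

    import re

    line = "dbt-adapters @ git+https://github.com/dbt-labs/dbt-adapters.git"
    branch = "my-feature-branch"  # hypothetical branch name
    # Append `@<branch>` to the git URL, exactly like the sed substitution.
    print(re.sub(r"dbt-adapters\.git", f"dbt-adapters.git@{branch}", line))
    # -> dbt-adapters @ git+https://github.com/dbt-labs/dbt-adapters.git@my-feature-branch
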
/.github/workflows/changelog-entry-check.yml:
--------------------------------------------------------------------------------
1 | name: Changelog entry check
2 |
3 | on:
4 | pull_request:
5 | types:
6 | - opened
7 | - reopened
8 | - labeled
9 | - unlabeled
10 | - synchronize
11 |
12 | defaults:
13 | run:
14 | shell: bash
15 |
16 | permissions:
17 | contents: read
18 | pull-requests: write
19 |
20 | jobs:
21 | changelog-entry-check:
22 | uses: dbt-labs/actions/.github/workflows/changelog-existence.yml@main
23 | with:
24 | changelog_comment: >-
25 | Thank you for your pull request! We could not find a changelog entry for this change.
26 | For details on how to document a change, see the
27 | [dbt-postgres contributing guide](https://github.com/dbt-labs/dbt-postgres/blob/main/CONTRIBUTING.md).
28 | skip_label: "Skip Changelog"
29 | secrets: inherit
30 |
--------------------------------------------------------------------------------
/.github/workflows/code-quality.yml:
--------------------------------------------------------------------------------
1 | name: Code Quality
2 |
3 | on:
4 | push:
5 | branches:
6 | - "main"
7 | - "*.latest"
8 | pull_request:
9 | workflow_dispatch:
10 | inputs:
11 | dbt_adapters_branch:
12 | description: "The branch of dbt-adapters to evaluate"
13 | type: string
14 | default: "main"
15 | workflow_call:
16 | inputs:
17 | dbt_adapters_branch:
18 | description: "The branch of dbt-adapters to evaluate"
19 | type: string
20 | default: "main"
21 |
22 | permissions: read-all
23 |
24 | # cancels in-progress runs triggered by the same event: per PR head ref for pull requests, per commit SHA otherwise
25 | concurrency:
26 | group: ${{ github.workflow }}-${{ github.event_name }}-${{ contains(github.event_name, 'pull_request') && github.event.pull_request.head.ref || github.sha }}
27 | cancel-in-progress: true
28 |
29 | jobs:
30 | code-quality:
31 | name: Code Quality
32 | runs-on: ubuntu-latest
33 |
34 | steps:
35 | - name: Check out repository
36 | uses: actions/checkout@v4
37 | with:
38 | persist-credentials: false
39 |
40 | - name: Update Adapters and Core branches
41 | if: ${{ contains(github.event_name, 'workflow_') }}
42 | shell: bash
43 |         run: ./.github/scripts/update_dev_dependency_branches.sh ${{ inputs.dbt_adapters_branch }} "main" "main"
44 |
45 | - name: Setup `hatch`
46 | uses: dbt-labs/dbt-adapters/.github/actions/setup-hatch@main
47 |
48 | - name: Run code quality
49 | shell: bash
50 | run: hatch run code-quality
51 |
--------------------------------------------------------------------------------
/.github/workflows/docs-issue.yml:
--------------------------------------------------------------------------------
1 | # **what?**
2 | # Open an issue in docs.getdbt.com when an issue is labeled `user docs` and closed as completed
3 |
4 | # **why?**
5 | # To reduce barriers for keeping docs up to date
6 |
7 | # **when?**
8 | # When an issue is labeled `user docs` and is closed as completed. Can be labeled before or after the issue is closed.
9 |
10 |
11 | name: Open issues in docs.getdbt.com repo when an issue is labeled
12 | run-name: "Open an issue in docs.getdbt.com for issue #${{ github.event.issue.number }}"
13 |
14 | on:
15 | issues:
16 | types: [labeled, closed]
17 |
18 | defaults:
19 | run:
20 | shell: bash
21 |
22 | permissions:
23 | issues: write # comments on issues
24 |
25 | jobs:
26 | open_issues:
27 |     # we only want to run this when the issue is closed as completed and the label `user docs` has been assigned.
28 |     # Without this guard, the workflow risks creating duplicate issues: the close and the label
29 |     # events can each trigger a run before the other has generated its comment.
30 |     # This check lives here instead of in the shared workflow because this is where we
31 |     # decide whether it should run or not.
32 | if: |
33 | (github.event.issue.state == 'closed' && github.event.issue.state_reason == 'completed') && (
34 | (github.event.action == 'closed' && contains(github.event.issue.labels.*.name, 'user docs')) ||
35 | (github.event.action == 'labeled' && github.event.label.name == 'user docs'))
36 | uses: dbt-labs/actions/.github/workflows/open-issue-in-repo.yml@main
37 | with:
38 | issue_repository: "dbt-labs/docs.getdbt.com"
39 | issue_title: "Docs Changes Needed from ${{ github.event.repository.name }} Issue #${{ github.event.issue.number }}"
40 |       issue_body: "At a minimum, update the body to include a link to the page on docs.getdbt.com requiring updates and what part(s) of the page you would like to see updated."
41 | secrets: inherit
42 |
--------------------------------------------------------------------------------
/.github/workflows/release-internal.yml:
--------------------------------------------------------------------------------
1 | # What?
2 | #
3 | # Tag and release an arbitrary ref. Uploads to an internal archive for further processing.
4 | #
5 | # How?
6 | #
7 | # After the provided ref is checked out and tested, the image is built and uploaded.
8 | #
9 | # When?
10 | #
11 | # Manual trigger.
12 |
13 | name: "Release internal patch"
14 |
15 | on:
16 | workflow_dispatch:
17 | inputs:
18 | ref:
19 | description: "The ref (sha or branch name) to use"
20 | type: string
21 | default: "main"
22 | required: true
23 | package_test_command:
24 | description: "Package test command"
25 | type: string
26 | default: "python -c \"import dbt.adapters.postgres\""
27 | required: true
28 | skip_tests:
29 | description: "Should the tests be skipped? (default to false)"
30 | type: boolean
31 | required: true
32 | default: false
33 |
34 | defaults:
35 | run:
36 | shell: "bash"
37 |
38 | jobs:
39 | invoke-reusable-workflow:
40 | name: "Build and Release Internally"
41 |
42 | uses: "dbt-labs/dbt-release/.github/workflows/internal-archive-release.yml@main"
43 |
44 | with:
45 | package_test_command: "${{ inputs.package_test_command }}"
46 | dbms_name: "postgres"
47 | ref: "${{ inputs.ref }}"
48 | skip_tests: "${{ inputs.skip_tests }}"
49 |
50 | secrets: "inherit"
51 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | # **what?**
2 | # For issues that have been open for a while without activity, label
3 | # them as stale with a warning that they will be closed out. If
4 | # anyone comments to keep the issue open, it will automatically
5 | # remove the stale label and keep it open.
6 |
7 | # Stale label rules:
8 | # awaiting_response, more_information_needed -> 90 days
9 | # good_first_issue, help_wanted -> 360 days (a year)
10 | #  tech_debt -> 720 days (2 years)
11 | # all else defaults -> 180 days (6 months)
12 |
13 | # **why?**
14 | # To keep the repo in a clean state from issues that aren't relevant anymore
15 |
16 | # **when?**
17 | # Once a day
18 |
19 | name: "Close stale issues and PRs"
20 | on:
21 | schedule:
22 | - cron: "30 1 * * *"
23 |
24 | permissions:
25 | issues: write
26 | pull-requests: write
27 |
28 | jobs:
29 | stale:
30 | uses: dbt-labs/actions/.github/workflows/stale-bot-matrix.yml@main
31 |
--------------------------------------------------------------------------------
/.github/workflows/triage-labels.yml:
--------------------------------------------------------------------------------
1 | # **what?**
2 | # When the maintenance team triages, we sometimes need more information from the issue creator. In
3 | # those cases we remove the `triage` label and add the `awaiting_response` label. Once we
4 | # receive a response in the form of a comment, we want the `awaiting_response` label removed
5 | # in favor of the `triage` label so we are aware that the issue needs action.
6 |
7 | # **why?**
8 | # To help our team track issues that are awaiting triage
9 |
10 | # **when?**
11 | # This will run when a comment is added to an issue and that issue has the `awaiting_response` label.
12 |
13 | name: Update Triage Label
14 |
15 | on: issue_comment
16 |
17 | defaults:
18 | run:
19 | shell: bash
20 |
21 | permissions:
22 | issues: write
23 |
24 | jobs:
25 | triage_label:
26 | if: contains(github.event.issue.labels.*.name, 'awaiting_response')
27 | uses: dbt-labs/actions/.github/workflows/swap-labels.yml@main
28 | with:
29 | add_label: "triage"
30 | remove_label: "awaiting_response"
31 | secrets: inherit
32 |
--------------------------------------------------------------------------------
/.github/workflows/unit-tests.yml:
--------------------------------------------------------------------------------
1 | name: Unit Tests
2 |
3 | on:
4 | push:
5 | branches:
6 | - "main"
7 | - "*.latest"
8 | pull_request:
9 | workflow_dispatch:
10 |
11 | permissions: read-all
12 |
13 | # cancels in-progress runs triggered by the same event: per PR head ref for pull requests, per commit SHA otherwise
14 | concurrency:
15 | group: ${{ github.workflow }}-${{ github.event_name }}-${{ contains(github.event_name, 'pull_request') && github.event.pull_request.head.ref || github.sha }}
16 | cancel-in-progress: true
17 |
18 | jobs:
19 | unit:
20 | name: Unit Tests
21 | runs-on: ubuntu-latest
22 |
23 | strategy:
24 | fail-fast: false
25 | matrix:
26 | python-version: ["3.9", "3.10", "3.11", "3.12"]
27 |
28 | steps:
29 | - name: Check out repository
30 | uses: actions/checkout@v4
31 | with:
32 | persist-credentials: false
33 |
34 | - name: Setup `hatch`
35 | uses: dbt-labs/dbt-adapters/.github/actions/setup-hatch@main
36 | with:
37 | python-version: ${{ matrix.python-version }}
38 |
39 | - name: Run unit tests
40 | run: hatch run unit-tests
41 | shell: bash
42 |
--------------------------------------------------------------------------------
/.github/workflows/version-bump.yml:
--------------------------------------------------------------------------------
1 | # **what?**
2 | # This workflow will take the new version number to bump to. With that
3 | # it will run versionbump to update the version number everywhere in the
4 | # code base and then run changie to create the corresponding changelog.
5 | # A PR will be created with the changes so they can be reviewed before merging.
6 |
7 | # **why?**
8 | # This is to aid in releasing dbt and making sure we have updated
9 | # the version in all places and generated the changelog.
10 |
11 | # **when?**
12 | # This is triggered manually
13 |
14 | name: Version Bump
15 |
16 | on:
17 | workflow_dispatch:
18 | inputs:
19 | version_number:
20 | description: 'The version number to bump to (ex. 1.2.0, 1.3.0b1)'
21 | required: true
22 |
23 | jobs:
24 | version_bump_and_changie:
25 | uses: dbt-labs/actions/.github/workflows/version-bump.yml@main
26 | with:
27 | version_number: ${{ inputs.version_number }}
28 | secrets: inherit # ok since what we are calling is internally maintained
29 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | default_language_version:
2 | python: python3
3 |
4 | repos:
5 | - repo: https://github.com/pre-commit/pre-commit-hooks
6 | rev: v4.4.0
7 | hooks:
8 | - id: check-yaml
9 | args: [--unsafe]
10 | - id: check-json
11 | - id: end-of-file-fixer
12 | - id: trailing-whitespace
13 | - id: check-case-conflict
14 |
15 | - repo: https://github.com/dbt-labs/pre-commit-hooks
16 | rev: v0.1.0a1
17 | hooks:
18 | - id: dbt-core-in-adapters-check
19 |
20 | - repo: https://github.com/psf/black
21 | rev: 24.4.0
22 | hooks:
23 | - id: black
24 | args:
25 | - --line-length=99
26 | - --target-version=py39
27 | - --target-version=py310
28 | - --target-version=py311
29 | - --target-version=py312
30 |
31 | - repo: https://github.com/pycqa/flake8
32 | rev: 7.0.0
33 | hooks:
34 | - id: flake8
35 | exclude: tests/
36 | args:
37 | - --max-line-length=99
38 | - --select=E,F,W
39 | - --ignore=E203,E501,E741,W503,W504
40 | - --per-file-ignores=*/__init__.py:F401
41 |
42 | - repo: https://github.com/pre-commit/mirrors-mypy
43 | rev: v1.9.0
44 | hooks:
45 | - id: mypy
46 | args:
47 | - --explicit-package-bases
48 | - --ignore-missing-imports
49 | - --pretty
50 | - --show-error-codes
51 | files: ^dbt/adapters/postgres
52 | additional_dependencies:
53 | - types-PyYAML
54 | - types-protobuf
55 | - types-pytz
56 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 | All notable changes to this project will be documented in this file.
3 |
4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
5 | adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html),
6 | and is generated by [Changie](https://github.com/miniscruff/changie).
7 |
8 | ## dbt-postgres 1.9.0 - December 09, 2024
9 |
10 | ### Breaking Changes
11 |
12 | - Drop support for Python 3.8 ([#161](https://github.com/dbt-labs/dbt-postgres/issues/161))
13 |
14 | ### Features
15 |
16 | - Add tests for cross-database `cast` macro ([#76](https://github.com/dbt-labs/dbt-postgres/issues/76))
17 | - Cross-database `date` macro ([#82](https://github.com/dbt-labs/dbt-postgres/issues/82))
18 | - Add support for Python 3.12 ([#17](https://github.com/dbt-labs/dbt-postgres/issues/17))
19 | - Allow configuring snapshot column names ([#144](https://github.com/dbt-labs/dbt-postgres/issues/144))
20 | - Microbatch incremental strategy implementation: merge ([#149](https://github.com/dbt-labs/dbt-postgres/issues/149))
21 | - Enable setting current value of dbt_valid_to ([#151](https://github.com/dbt-labs/dbt-postgres/issues/151))
22 |
23 | ### Fixes
24 |
25 | - Fix the semicolon semantics for indexes while respecting another bug fix ([#85](https://github.com/dbt-labs/dbt-postgres/issues/85))
26 | - Default to psycopg2-binary and allow overriding to psycopg2 via DBT_PSYCOPG2_NAME (restores previous behavior) ([#96](https://github.com/dbt-labs/dbt-postgres/issues/96))
27 | - Fix `persist_docs` for `materialized_view` materializations. Previously, using this configuration with materialized view models would lead to an error. ([#120](https://github.com/dbt-labs/dbt-postgres/issues/120))
28 |
29 | ### Under the Hood
30 |
31 | - Add support for experimental record/replay testing. ([#123](https://github.com/dbt-labs/dbt-postgres/issues/123))
32 | - Updating changie.yaml to add contributors and PR links ([#109](https://github.com/dbt-labs/dbt-postgres/issues/109))
33 |
34 | ### Contributors
35 | - [@dbeatty10](https://github.com/dbeatty10) ([#76](https://github.com/dbt-labs/dbt-postgres/issues/76), [#82](https://github.com/dbt-labs/dbt-postgres/issues/82))
36 | - [@gshank](https://github.com/gshank) ([#144](https://github.com/dbt-labs/dbt-postgres/issues/144), [#151](https://github.com/dbt-labs/dbt-postgres/issues/151))
37 | - [@leahwicz](https://github.com/leahwicz) ([#109](https://github.com/dbt-labs/dbt-postgres/issues/109))
38 | - [@michelleark](https://github.com/michelleark) ([#149](https://github.com/dbt-labs/dbt-postgres/issues/149))
39 | - [@mikealfare](https://github.com/mikealfare) ([#161](https://github.com/dbt-labs/dbt-postgres/issues/161), [#17](https://github.com/dbt-labs/dbt-postgres/issues/17), [#96](https://github.com/dbt-labs/dbt-postgres/issues/96))
40 | - [@morsapaes](https://github.com/morsapaes) ([#120](https://github.com/dbt-labs/dbt-postgres/issues/120))
41 | - [@peterallenwebb](https://github.com/peterallenwebb) ([#123](https://github.com/dbt-labs/dbt-postgres/issues/123))
42 | - [@versusfacit](https://github.com/versusfacit) ([#85](https://github.com/dbt-labs/dbt-postgres/issues/85))
43 |
44 |
45 | ## Previous Releases
46 | For information on prior major and minor releases, see their changelogs:
47 | - [1.8](https://github.com/dbt-labs/dbt-postgres/blob/1.8.latest/CHANGELOG.md)
48 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to `dbt-postgres`
2 |
3 | This repository has moved into [dbt-labs/dbt-adapters](https://www.github.com/dbt-labs/dbt-adapters).
4 | Please refer to that repo for a guide on how to contribute to `dbt-postgres`.
5 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | This repository has moved into [dbt-labs/dbt-adapters](https://www.github.com/dbt-labs/dbt-adapters).
6 | Please refer to that repo for information about `dbt-postgres`.
7 |
--------------------------------------------------------------------------------
/dbt/__init__.py:
--------------------------------------------------------------------------------
1 | from pkgutil import extend_path
2 |
3 | __path__ = extend_path(__path__, __name__)
4 |
--------------------------------------------------------------------------------
/dbt/adapters/postgres/__init__.py:
--------------------------------------------------------------------------------
1 | from dbt.adapters.base import AdapterPlugin
2 |
3 | from dbt.adapters.postgres.column import PostgresColumn
4 | from dbt.adapters.postgres.connections import PostgresConnectionManager, PostgresCredentials
5 | from dbt.adapters.postgres.impl import PostgresAdapter
6 | from dbt.adapters.postgres.relation import PostgresRelation
7 | from dbt.include import postgres
8 |
9 |
10 | Plugin = AdapterPlugin(
11 | adapter=PostgresAdapter, # type: ignore
12 | credentials=PostgresCredentials,
13 | include_path=postgres.PACKAGE_PATH,
14 | )
15 |
--------------------------------------------------------------------------------
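
A quick sanity check of the plugin wiring above, assuming dbt-postgres is installed in the active environment and that AdapterPlugin exposes its constructor arguments as attributes:

    from dbt.adapters.postgres import Plugin, PostgresAdapter, PostgresCredentials

    assert Plugin.adapter is PostgresAdapter
    assert Plugin.credentials is PostgresCredentials
    print(Plugin.include_path)  # filesystem path to dbt/include/postgres
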
/dbt/adapters/postgres/__version__.py:
--------------------------------------------------------------------------------
1 | version = "1.9.0"
2 |
--------------------------------------------------------------------------------
/dbt/adapters/postgres/column.py:
--------------------------------------------------------------------------------
1 | from dbt.adapters.base import Column
2 |
3 |
4 | class PostgresColumn(Column):
5 | @property
6 | def data_type(self):
7 | # on postgres, do not convert 'text' or 'varchar' to 'varchar()'
8 | if self.dtype.lower() == "text" or (
9 | self.dtype.lower() == "character varying" and self.char_size is None
10 | ):
11 | return self.dtype
12 | return super().data_type
13 |
--------------------------------------------------------------------------------
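
A behavior sketch for the override above, assuming the base Column constructor from dbt-adapters takes (column, dtype, char_size); unsized text/varchar stays as-is, while sized varchar still renders with its length:

    from dbt.adapters.postgres import PostgresColumn

    print(PostgresColumn("note", "text").data_type)                    # text
    print(PostgresColumn("name", "character varying").data_type)      # character varying
    print(PostgresColumn("code", "character varying", 10).data_type)  # character varying(10)
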
/dbt/adapters/postgres/record/__init__.py:
--------------------------------------------------------------------------------
1 | from dbt.adapters.postgres.record.cursor.cursor import PostgresRecordReplayCursor
2 | from dbt.adapters.postgres.record.handle import PostgresRecordReplayHandle
3 |
--------------------------------------------------------------------------------
/dbt/adapters/postgres/record/cursor/cursor.py:
--------------------------------------------------------------------------------
1 | from dbt_common.record import record_function
2 |
3 | from dbt.adapters.record import RecordReplayCursor
4 |
5 | from dbt.adapters.postgres.record.cursor.status import CursorGetStatusMessageRecord
6 |
7 |
8 | class PostgresRecordReplayCursor(RecordReplayCursor):
9 | """A custom extension of RecordReplayCursor that adds the statusmessage
10 | property which is specific to psycopg."""
11 |
12 | @property
13 | @record_function(CursorGetStatusMessageRecord, method=True, id_field_name="connection_name")
14 | def statusmessage(self):
15 | return self.native_cursor.statusmessage
16 |
--------------------------------------------------------------------------------
/dbt/adapters/postgres/record/cursor/status.py:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | from typing import Optional
3 |
4 | from dbt_common.record import Record, Recorder
5 |
6 |
7 | @dataclasses.dataclass
8 | class CursorGetStatusMessageParams:
9 | connection_name: str
10 |
11 |
12 | @dataclasses.dataclass
13 | class CursorGetStatusMessageResult:
14 | msg: Optional[str]
15 |
16 |
17 | @Recorder.register_record_type
18 | class CursorGetStatusMessageRecord(Record):
19 | params_cls = CursorGetStatusMessageParams
20 | result_cls = CursorGetStatusMessageResult
21 | group = "Database"
22 |
--------------------------------------------------------------------------------
/dbt/adapters/postgres/record/handle.py:
--------------------------------------------------------------------------------
1 | from dbt.adapters.record import RecordReplayHandle
2 |
3 | from dbt.adapters.postgres.record.cursor.cursor import PostgresRecordReplayCursor
4 |
5 |
6 | class PostgresRecordReplayHandle(RecordReplayHandle):
7 | """A custom extension of RecordReplayHandle that returns
8 | a psycopg-specific PostgresRecordReplayCursor object."""
9 |
10 | def cursor(self):
11 | cursor = None if self.native_handle is None else self.native_handle.cursor()
12 | return PostgresRecordReplayCursor(cursor, self.connection)
13 |
--------------------------------------------------------------------------------
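
The record/replay idea these classes plug into, in miniature (illustrative only; the real dbt_common.record API is richer): each intercepted call is captured as a (params, result) pair so a later replay can serve results without a live database.

    import functools

    LOG = []  # captured (name, params, result) tuples

    def record_call(name):
        def wrap(fn):
            @functools.wraps(fn)
            def inner(*args, **kwargs):
                result = fn(*args, **kwargs)
                LOG.append((name, {"args": args[1:], "kwargs": kwargs}, result))
                return result
            return inner
        return wrap

    class Cursor:
        @property
        @record_call("CursorGetStatusMessage")
        def statusmessage(self):
            return "SELECT 1"  # stand-in for the native psycopg cursor attribute

    print(Cursor().statusmessage)  # SELECT 1
    print(LOG)                     # [('CursorGetStatusMessage', ..., 'SELECT 1')]
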
/dbt/adapters/postgres/relation_configs/__init__.py:
--------------------------------------------------------------------------------
1 | from dbt.adapters.postgres.relation_configs.constants import (
2 | MAX_CHARACTERS_IN_IDENTIFIER,
3 | )
4 | from dbt.adapters.postgres.relation_configs.index import (
5 | PostgresIndexConfig,
6 | PostgresIndexConfigChange,
7 | )
8 | from dbt.adapters.postgres.relation_configs.materialized_view import (
9 | PostgresMaterializedViewConfig,
10 | PostgresMaterializedViewConfigChangeCollection,
11 | )
12 |
--------------------------------------------------------------------------------
/dbt/adapters/postgres/relation_configs/constants.py:
--------------------------------------------------------------------------------
1 | MAX_CHARACTERS_IN_IDENTIFIER = 63
2 |
--------------------------------------------------------------------------------
/dbt/include/postgres/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | PACKAGE_PATH = os.path.dirname(__file__)
4 |
--------------------------------------------------------------------------------
/dbt/include/postgres/dbt_project.yml:
--------------------------------------------------------------------------------
1 | config-version: 2
2 | name: dbt_postgres
3 | version: 1.0
4 |
5 | macro-paths: ["macros"]
6 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/catalog.sql:
--------------------------------------------------------------------------------
1 |
2 | {% macro postgres__get_catalog_relations(information_schema, relations) -%}
3 | {%- call statement('catalog', fetch_result=True) -%}
4 |
5 | {#
6 | If the user has multiple databases set and the first one is wrong, this will fail.
7 | But we won't fail in the case where there are multiple quoting-difference-only dbs, which is better.
8 | #}
9 | {% set database = information_schema.database %}
10 | {{ adapter.verify_database(database) }}
11 |
12 | select
13 | '{{ database }}' as table_database,
14 | sch.nspname as table_schema,
15 | tbl.relname as table_name,
16 | case tbl.relkind
17 | when 'v' then 'VIEW'
18 | when 'm' then 'MATERIALIZED VIEW'
19 | else 'BASE TABLE'
20 | end as table_type,
21 | tbl_desc.description as table_comment,
22 | col.attname as column_name,
23 | col.attnum as column_index,
24 | pg_catalog.format_type(col.atttypid, col.atttypmod) as column_type,
25 | col_desc.description as column_comment,
26 | pg_get_userbyid(tbl.relowner) as table_owner
27 |
28 | from pg_catalog.pg_namespace sch
29 | join pg_catalog.pg_class tbl on tbl.relnamespace = sch.oid
30 | join pg_catalog.pg_attribute col on col.attrelid = tbl.oid
31 | left outer join pg_catalog.pg_description tbl_desc on (tbl_desc.objoid = tbl.oid and tbl_desc.objsubid = 0)
32 | left outer join pg_catalog.pg_description col_desc on (col_desc.objoid = tbl.oid and col_desc.objsubid = col.attnum)
33 | where (
34 | {%- for relation in relations -%}
35 | {%- if relation.identifier -%}
36 | (upper(sch.nspname) = upper('{{ relation.schema }}') and
37 | upper(tbl.relname) = upper('{{ relation.identifier }}'))
38 |             {%- else -%}
39 | upper(sch.nspname) = upper('{{ relation.schema }}')
40 | {%- endif -%}
41 | {%- if not loop.last %} or {% endif -%}
42 | {%- endfor -%}
43 | )
44 | and not pg_is_other_temp_schema(sch.oid) -- not a temporary schema belonging to another session
45 | and tbl.relpersistence in ('p', 'u') -- [p]ermanent table or [u]nlogged table. Exclude [t]emporary tables
46 | and tbl.relkind in ('r', 'v', 'f', 'p', 'm') -- o[r]dinary table, [v]iew, [f]oreign table, [p]artitioned table, [m]aterialized view. Other values are [i]ndex, [S]equence, [c]omposite type, [t]OAST table
47 | and col.attnum > 0 -- negative numbers are used for system columns such as oid
48 |         and not col.attisdropped -- column has not been dropped
49 |
50 | order by
51 | sch.nspname,
52 | tbl.relname,
53 | col.attnum
54 |
55 | {%- endcall -%}
56 |
57 | {{ return(load_result('catalog').table) }}
58 | {%- endmacro %}
59 |
60 |
61 | {% macro postgres__get_catalog(information_schema, schemas) -%}
62 | {%- set relations = [] -%}
63 | {%- for schema in schemas -%}
64 | {%- set dummy = relations.append({'schema': schema}) -%}
65 | {%- endfor -%}
66 | {{ return(postgres__get_catalog_relations(information_schema, relations)) }}
67 | {%- endmacro %}
68 |
--------------------------------------------------------------------------------
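
A hedged sketch of running a pared-down version of the catalog query above with psycopg2; the connection parameters are placeholders for a local database:

    import psycopg2

    with psycopg2.connect(host="localhost", dbname="postgres", user="postgres") as conn:
        with conn.cursor() as cur:
            cur.execute(
                """
                select sch.nspname, tbl.relname, tbl.relkind
                from pg_catalog.pg_namespace sch
                join pg_catalog.pg_class tbl on tbl.relnamespace = sch.oid
                where tbl.relkind in ('r', 'v', 'f', 'p', 'm')
                  and not pg_is_other_temp_schema(sch.oid)
                limit 5
                """
            )
            for nspname, relname, relkind in cur.fetchall():
                print(nspname, relname, relkind)
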
/dbt/include/postgres/macros/materializations/incremental_strategies.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__get_incremental_default_sql(arg_dict) %}
2 |
3 | {% if arg_dict["unique_key"] %}
4 | {% do return(get_incremental_delete_insert_sql(arg_dict)) %}
5 | {% else %}
6 | {% do return(get_incremental_append_sql(arg_dict)) %}
7 | {% endif %}
8 |
9 | {% endmacro %}
10 |
11 |
12 | {% macro postgres__get_incremental_microbatch_sql(arg_dict) %}
13 |
14 | {% if arg_dict["unique_key"] %}
15 | {% do return(adapter.dispatch('get_incremental_merge_sql', 'dbt')(arg_dict)) %}
16 | {% else %}
17 | {{ exceptions.raise_compiler_error("dbt-postgres 'microbatch' requires a `unique_key` config") }}
18 | {% endif %}
19 |
20 | {% endmacro %}
21 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/materializations/snapshot_merge.sql:
--------------------------------------------------------------------------------
1 |
2 | {% macro postgres__snapshot_merge_sql(target, source, insert_cols) -%}
3 | {%- set insert_cols_csv = insert_cols | join(', ') -%}
4 |
5 | {%- set columns = config.get("snapshot_table_column_names") or get_snapshot_table_column_names() -%}
6 |
7 | update {{ target }}
8 | set {{ columns.dbt_valid_to }} = DBT_INTERNAL_SOURCE.{{ columns.dbt_valid_to }}
9 | from {{ source }} as DBT_INTERNAL_SOURCE
10 | where DBT_INTERNAL_SOURCE.{{ columns.dbt_scd_id }}::text = {{ target }}.{{ columns.dbt_scd_id }}::text
11 | and DBT_INTERNAL_SOURCE.dbt_change_type::text in ('update'::text, 'delete'::text)
12 | {% if config.get("dbt_valid_to_current") %}
13 | and ({{ target }}.{{ columns.dbt_valid_to }} = {{ config.get('dbt_valid_to_current') }} or {{ target }}.{{ columns.dbt_valid_to }} is null);
14 | {% else %}
15 | and {{ target }}.{{ columns.dbt_valid_to }} is null;
16 | {% endif %}
17 |
18 |
19 | insert into {{ target }} ({{ insert_cols_csv }})
20 | select {% for column in insert_cols -%}
21 | DBT_INTERNAL_SOURCE.{{ column }} {%- if not loop.last %}, {%- endif %}
22 | {%- endfor %}
23 | from {{ source }} as DBT_INTERNAL_SOURCE
24 | where DBT_INTERNAL_SOURCE.dbt_change_type::text = 'insert'::text;
25 | {% endmacro %}
26 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/relations.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__get_relations() -%}
2 |
3 | {#
4 | -- in pg_depend, objid is the dependent, refobjid is the referenced object
5 | -- > a pg_depend entry indicates that the referenced object cannot be
6 | -- > dropped without also dropping the dependent object.
7 | #}
8 |
9 | {%- call statement('relations', fetch_result=True) -%}
10 | with relation as (
11 | select
12 | pg_rewrite.ev_class as class,
13 | pg_rewrite.oid as id
14 | from pg_rewrite
15 | ),
16 | class as (
17 | select
18 | oid as id,
19 | relname as name,
20 | relnamespace as schema,
21 | relkind as kind
22 | from pg_class
23 | ),
24 | dependency as (
25 | select distinct
26 | pg_depend.objid as id,
27 | pg_depend.refobjid as ref
28 | from pg_depend
29 | ),
30 | schema as (
31 | select
32 | pg_namespace.oid as id,
33 | pg_namespace.nspname as name
34 | from pg_namespace
35 | where nspname != 'information_schema' and nspname not like 'pg\_%'
36 | ),
37 | referenced as (
38 | select
39 |             relation.id as id,
40 |             referenced_class.name,
41 |             referenced_class.schema,
42 |             referenced_class.kind
43 | from relation
44 | join class as referenced_class on relation.class=referenced_class.id
45 | where referenced_class.kind in ('r', 'v', 'm')
46 | ),
47 | relationships as (
48 | select
49 | referenced.name as referenced_name,
50 | referenced.schema as referenced_schema_id,
51 | dependent_class.name as dependent_name,
52 | dependent_class.schema as dependent_schema_id,
53 | referenced.kind as kind
54 | from referenced
55 | join dependency on referenced.id=dependency.id
56 | join class as dependent_class on dependency.ref=dependent_class.id
57 | where
58 | (referenced.name != dependent_class.name or
59 | referenced.schema != dependent_class.schema)
60 | )
61 |
62 | select
63 | referenced_schema.name as referenced_schema,
64 | relationships.referenced_name as referenced_name,
65 | dependent_schema.name as dependent_schema,
66 | relationships.dependent_name as dependent_name
67 | from relationships
68 | join schema as dependent_schema on relationships.dependent_schema_id=dependent_schema.id
69 | join schema as referenced_schema on relationships.referenced_schema_id=referenced_schema.id
70 | group by referenced_schema, referenced_name, dependent_schema, dependent_name
71 | order by referenced_schema, referenced_name, dependent_schema, dependent_name;
72 |
73 | {%- endcall -%}
74 |
75 | {{ return(load_result('relations').table) }}
76 | {% endmacro %}
77 |
78 | {% macro postgres_get_relations() %}
79 | {{ return(postgres__get_relations()) }}
80 | {% endmacro %}
81 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/relations/materialized_view/alter.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__get_alter_materialized_view_as_sql(
2 | relation,
3 | configuration_changes,
4 | sql,
5 | existing_relation,
6 | backup_relation,
7 | intermediate_relation
8 | ) %}
9 |
10 | -- apply a full refresh immediately if needed
11 | {% if configuration_changes.requires_full_refresh %}
12 |
13 | {{ get_replace_sql(existing_relation, relation, sql) }}
14 |
15 | -- otherwise apply individual changes as needed
16 | {% else %}
17 |
18 | {{ postgres__update_indexes_on_materialized_view(relation, configuration_changes.indexes) }}
19 |
20 | {%- endif -%}
21 |
22 | {% endmacro %}
23 |
24 |
25 | {%- macro postgres__update_indexes_on_materialized_view(relation, index_changes) -%}
26 | {{- log("Applying UPDATE INDEXES to: " ~ relation) -}}
27 |
28 | {%- for _index_change in index_changes -%}
29 | {%- set _index = _index_change.context -%}
30 |
31 | {%- if _index_change.action == "drop" -%}
32 |
33 | {{ postgres__get_drop_index_sql(relation, _index.name) }}
34 |
35 | {%- elif _index_change.action == "create" -%}
36 |
37 | {{ postgres__get_create_index_sql(relation, _index.as_node_config) }}
38 |
39 | {%- endif -%}
40 | {{ ';' if not loop.last else "" }}
41 |
42 | {%- endfor -%}
43 |
44 | {%- endmacro -%}
45 |
46 |
47 | {% macro postgres__get_materialized_view_configuration_changes(existing_relation, new_config) %}
48 | {% set _existing_materialized_view = postgres__describe_materialized_view(existing_relation) %}
49 | {% set _configuration_changes = existing_relation.get_materialized_view_config_change_collection(_existing_materialized_view, new_config.model) %}
50 | {% do return(_configuration_changes) %}
51 | {% endmacro %}
52 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/relations/materialized_view/create.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__get_create_materialized_view_as_sql(relation, sql) %}
2 | create materialized view if not exists {{ relation }} as {{ sql }};
3 |
4 | {% for _index_dict in config.get('indexes', []) -%}
5 | {{- get_create_index_sql(relation, _index_dict) -}}{{ ';' if not loop.last else "" }}
6 | {%- endfor -%}
7 |
8 | {% endmacro %}
9 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/relations/materialized_view/describe.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__describe_materialized_view(relation) %}
2 | -- for now just get the indexes, we don't need the name or the query yet
3 | {% set _indexes = run_query(get_show_indexes_sql(relation)) %}
4 | {% do return({'indexes': _indexes}) %}
5 | {% endmacro %}
6 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/relations/materialized_view/drop.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__drop_materialized_view(relation) -%}
2 | drop materialized view if exists {{ relation }} cascade
3 | {%- endmacro %}
4 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/relations/materialized_view/refresh.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__refresh_materialized_view(relation) %}
2 | refresh materialized view {{ relation }}
3 | {% endmacro %}
4 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/relations/materialized_view/rename.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__get_rename_materialized_view_sql(relation, new_name) %}
2 | alter materialized view {{ relation }} rename to {{ new_name }}
3 | {% endmacro %}
4 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/relations/table/drop.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__drop_table(relation) -%}
2 | drop table if exists {{ relation }} cascade
3 | {%- endmacro %}
4 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/relations/table/rename.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__get_rename_table_sql(relation, new_name) %}
2 | alter table {{ relation }} rename to {{ new_name }}
3 | {% endmacro %}
4 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/relations/table/replace.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__get_replace_table_sql(relation, sql) -%}
2 |
3 | {%- set sql_header = config.get('sql_header', none) -%}
4 | {{ sql_header if sql_header is not none }}
5 |
6 | create or replace table {{ relation }}
7 | {% set contract_config = config.get('contract') %}
8 | {% if contract_config.enforced %}
9 | {{ get_assert_columns_equivalent(sql) }}
10 | {{ get_table_columns_and_constraints() }}
11 | {%- set sql = get_select_subquery(sql) %}
12 | {% endif %}
13 | as (
14 | {{ sql }}
15 | );
16 |
17 | {%- endmacro %}
18 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/relations/view/drop.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__drop_view(relation) -%}
2 | drop view if exists {{ relation }} cascade
3 | {%- endmacro %}
4 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/relations/view/rename.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__get_rename_view_sql(relation, new_name) %}
2 | alter view {{ relation }} rename to {{ new_name }}
3 | {% endmacro %}
4 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/relations/view/replace.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__get_replace_view_sql(relation, sql) -%}
2 |
3 | {%- set sql_header = config.get('sql_header', none) -%}
4 | {{ sql_header if sql_header is not none }}
5 |
6 | create or replace view {{ relation }}
7 | {% set contract_config = config.get('contract') %}
8 | {% if contract_config.enforced %}
9 | {{ get_assert_columns_equivalent(sql) }}
10 | {%- endif %}
11 | as (
12 | {{ sql }}
13 | );
14 |
15 | {%- endmacro %}
16 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/timestamps.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__current_timestamp() -%}
2 | now()
3 | {%- endmacro %}
4 |
5 | {% macro postgres__snapshot_string_as_time(timestamp) -%}
6 | {%- set result = "'" ~ timestamp ~ "'::timestamp without time zone" -%}
7 | {{ return(result) }}
8 | {%- endmacro %}
9 |
10 | {% macro postgres__snapshot_get_time() -%}
11 | {{ current_timestamp() }}::timestamp without time zone
12 | {%- endmacro %}
13 |
14 | {% macro postgres__current_timestamp_backcompat() %}
15 | current_timestamp::{{ type_timestamp() }}
16 | {% endmacro %}
17 |
18 | {% macro postgres__current_timestamp_in_utc_backcompat() %}
19 | (current_timestamp at time zone 'utc')::{{ type_timestamp() }}
20 | {% endmacro %}
21 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/utils/any_value.sql:
--------------------------------------------------------------------------------
1 | {#- /*Postgres doesn't support any_value, so we're using min() to get the same result*/ -#}
2 |
3 | {% macro postgres__any_value(expression) -%}
4 |
5 | min({{ expression }})
6 |
7 | {%- endmacro %}
8 |
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/utils/columns_spec_ddl.sql:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dbt-labs/dbt-postgres/4ceebd5939034123e77ad5b714d45545a7962d90/dbt/include/postgres/macros/utils/columns_spec_ddl.sql
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/utils/dateadd.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__dateadd(datepart, interval, from_date_or_timestamp) %}
2 |
3 | {{ from_date_or_timestamp }} + ((interval '1 {{ datepart }}') * ({{ interval }}))
4 |
5 | {% endmacro %}
6 |
--------------------------------------------------------------------------------
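
Since the macro is plain Jinja plus SQL text, it can be rendered standalone; a sketch assuming vanilla jinja2 (dbt's Jinja adds more context, but none is needed here):

    from jinja2 import Environment

    macro = """
    {% macro postgres__dateadd(datepart, interval, from_date_or_timestamp) %}
        {{ from_date_or_timestamp }} + ((interval '1 {{ datepart }}') * ({{ interval }}))
    {% endmacro %}
    """

    tmpl = Environment().from_string(macro + "{{ postgres__dateadd('day', 7, 'order_date') }}")
    print(tmpl.render().strip())  # order_date + ((interval '1 day') * (7))
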
/dbt/include/postgres/macros/utils/datediff.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__datediff(first_date, second_date, datepart) -%}
2 |
3 | {% if datepart == 'year' %}
4 | (date_part('year', ({{second_date}})::date) - date_part('year', ({{first_date}})::date))
5 | {% elif datepart == 'quarter' %}
6 | ({{ datediff(first_date, second_date, 'year') }} * 4 + date_part('quarter', ({{second_date}})::date) - date_part('quarter', ({{first_date}})::date))
7 | {% elif datepart == 'month' %}
8 | ({{ datediff(first_date, second_date, 'year') }} * 12 + date_part('month', ({{second_date}})::date) - date_part('month', ({{first_date}})::date))
9 | {% elif datepart == 'day' %}
10 | (({{second_date}})::date - ({{first_date}})::date)
11 | {% elif datepart == 'week' %}
12 | ({{ datediff(first_date, second_date, 'day') }} / 7 + case
13 | when date_part('dow', ({{first_date}})::timestamp) <= date_part('dow', ({{second_date}})::timestamp) then
14 | case when {{first_date}} <= {{second_date}} then 0 else -1 end
15 | else
16 | case when {{first_date}} <= {{second_date}} then 1 else 0 end
17 | end)
18 | {% elif datepart == 'hour' %}
19 | ({{ datediff(first_date, second_date, 'day') }} * 24 + date_part('hour', ({{second_date}})::timestamp) - date_part('hour', ({{first_date}})::timestamp))
20 | {% elif datepart == 'minute' %}
21 | ({{ datediff(first_date, second_date, 'hour') }} * 60 + date_part('minute', ({{second_date}})::timestamp) - date_part('minute', ({{first_date}})::timestamp))
22 | {% elif datepart == 'second' %}
23 | ({{ datediff(first_date, second_date, 'minute') }} * 60 + floor(date_part('second', ({{second_date}})::timestamp)) - floor(date_part('second', ({{first_date}})::timestamp)))
24 | {% elif datepart == 'millisecond' %}
25 | ({{ datediff(first_date, second_date, 'minute') }} * 60000 + floor(date_part('millisecond', ({{second_date}})::timestamp)) - floor(date_part('millisecond', ({{first_date}})::timestamp)))
26 | {% elif datepart == 'microsecond' %}
27 | ({{ datediff(first_date, second_date, 'minute') }} * 60000000 + floor(date_part('microsecond', ({{second_date}})::timestamp)) - floor(date_part('microsecond', ({{first_date}})::timestamp)))
28 | {% else %}
29 | {{ exceptions.raise_compiler_error("Unsupported datepart for macro datediff in postgres: {!r}".format(datepart)) }}
30 | {% endif %}
31 |
32 | {%- endmacro %}
33 |
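The `week` branch is the subtle one: it counts week boundaries crossed rather than elapsed seven-day spans, using Postgres' Sunday-based `date_part('dow', ...)` (Sunday = 0). A worked example with date literals:

```sql
-- {{ dbt.datediff("'2024-01-06'", "'2024-01-08'", 'week') }}  -- Saturday -> Monday
-- day diff = 2, so 2 / 7 = 0 under integer division;
-- dow(first) = 6 > dow(second) = 1, and first <= second, so the case adds 1;
-- result: 1, because one Sunday (2024-01-07) was crossed
select (('2024-01-08'::date - '2024-01-06'::date) / 7) + 1;  -- evaluates to 1
```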
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/utils/last_day.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__last_day(date, datepart) -%}
2 |
3 | {%- if datepart == 'quarter' -%}
4 | -- postgres dateadd does not support quarter interval.
5 | cast(
6 | {{dbt.dateadd('day', '-1',
7 | dbt.dateadd('month', '3', dbt.date_trunc(datepart, date))
8 | )}}
9 | as date)
10 | {%- else -%}
11 | {{dbt.default_last_day(date, datepart)}}
12 | {%- endif -%}
13 |
14 | {%- endmacro %}
15 |
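Expanding the quarter branch by hand for a hypothetical `order_date` column, the nested `dateadd`/`date_trunc` calls render roughly to:

```sql
cast(
    date_trunc('quarter', order_date)
        + ((interval '1 month') * (3))
        + ((interval '1 day') * (-1))
    as date
)
-- i.e. the first day of the quarter, plus three months, minus one day
```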
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/utils/listagg.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__listagg(measure, delimiter_text, order_by_clause, limit_num) -%}
2 |
3 | {% if limit_num -%}
4 | array_to_string(
5 | (array_agg(
6 | {{ measure }}
7 | {% if order_by_clause -%}
8 | {{ order_by_clause }}
9 | {%- endif %}
10 | ))[1:{{ limit_num }}],
11 | {{ delimiter_text }}
12 | )
13 | {%- else %}
14 | string_agg(
15 | {{ measure }},
16 | {{ delimiter_text }}
17 | {% if order_by_clause -%}
18 | {{ order_by_clause }}
19 | {%- endif %}
20 | )
21 | {%- endif %}
22 |
23 | {%- endmacro %}
24 |
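Postgres' `string_agg` has no result limit, so the limited branch aggregates into an array, slices it, and joins. Hypothetical calls and their rendered SQL:

```sql
-- {{ dbt.listagg('name', "', '", 'order by name', 3) }} renders to:
array_to_string(
    (array_agg(
        name
        order by name
    ))[1:3],
    ', '
)

-- without limit_num, {{ dbt.listagg('name', "', '", 'order by name') }} renders to:
string_agg(
    name,
    ', '
    order by name
)
```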
--------------------------------------------------------------------------------
/dbt/include/postgres/macros/utils/split_part.sql:
--------------------------------------------------------------------------------
1 | {% macro postgres__split_part(string_text, delimiter_text, part_number) %}
2 |
3 | {% if part_number >= 0 %}
4 | {{ dbt.default__split_part(string_text, delimiter_text, part_number) }}
5 | {% else %}
6 | {{ dbt._split_part_negative(string_text, delimiter_text, part_number) }}
7 | {% endif %}
8 |
9 | {% endmacro %}
10 |
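Before PostgreSQL 14, the native `split_part` rejected negative field positions, hence the branch. A sketch of both paths:

```sql
-- {{ dbt.split_part("'a,b,c'", "','", 2) }} takes the default path and renders to:
split_part('a,b,c', ',', 2)  -- returns 'b'

-- a negative part_number (e.g. -1 for the last field) dispatches to the shared
-- dbt._split_part_negative helper, which derives the equivalent positive index
-- from the number of delimiters in the string.
```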
--------------------------------------------------------------------------------
/dbt/include/postgres/profile_template.yml:
--------------------------------------------------------------------------------
1 | fixed:
2 | type: postgres
3 | prompts:
4 | host:
5 | hint: 'hostname for the instance'
6 | port:
7 | default: 5432
8 | type: 'int'
9 | user:
10 | hint: 'dev username'
11 | pass:
12 | hint: 'dev password'
13 | hide_input: true
14 | dbname:
15 | hint: 'default database that dbt will build objects in'
16 | schema:
17 | hint: 'default schema that dbt will build objects in'
18 | threads:
19 | hint: '1 or more'
20 | type: 'int'
21 | default: 1
22 |
--------------------------------------------------------------------------------
/dbt/include/postgres/sample_profiles.yml:
--------------------------------------------------------------------------------
1 | default:
2 | outputs:
3 |
4 | dev:
5 | type: postgres
6 | threads: [1 or more]
7 | host: [host]
8 | port: [port]
9 | user: [dev_username]
10 | pass: [dev_password]
11 | dbname: [dbname]
12 | schema: [dev_schema]
13 |
14 | prod:
15 | type: postgres
16 | threads: [1 or more]
17 | host: [host]
18 | port: [port]
19 | user: [prod_username]
20 | pass: [prod_password]
21 | dbname: [dbname]
22 | schema: [prod_schema]
23 |
24 | target: dev
25 |
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | # this image gets published to GHCR for production use
2 | ARG py_version=3.12.4
3 |
4 | FROM python:$py_version-slim-bullseye AS base
5 |
6 | RUN apt-get update \
7 | && apt-get dist-upgrade -y \
8 | && apt-get install -y --no-install-recommends \
9 | build-essential=12.9 \
10 | ca-certificates=20210119 \
11 | git=1:2.30.2-1+deb11u2 \
12 | libpq-dev=13.18-0+deb11u1 \
13 | make=4.3-4.1 \
14 | openssh-client=1:8.4p1-5+deb11u3 \
15 | software-properties-common=0.96.20.2-2.1 \
16 | && apt-get clean \
17 | && rm -rf \
18 | /var/lib/apt/lists/* \
19 | /tmp/* \
20 | /var/tmp/*
21 |
22 | ENV PYTHONIOENCODING=utf-8
23 | ENV LANG=C.UTF-8
24 |
25 | RUN python -m pip install --upgrade "pip==24.0" "setuptools==69.2.0" "wheel==0.43.0" --no-cache-dir
26 |
27 |
28 | FROM base AS dbt-postgres
29 |
30 | ARG commit_ref=main
31 |
32 | HEALTHCHECK CMD dbt --version || exit 1
33 |
34 | WORKDIR /usr/app/dbt/
35 | ENTRYPOINT ["dbt"]
36 |
37 | RUN python -m pip install --no-cache-dir "dbt-postgres @ git+https://github.com/dbt-labs/dbt-postgres@${commit_ref}"
38 |
--------------------------------------------------------------------------------
/docker/README.md:
--------------------------------------------------------------------------------
1 | # Docker for dbt
2 | This Dockerfile is suitable for building dbt Docker images locally or with CI/CD to automate populating a container registry.
3 |
4 |
5 | ## Building an image:
6 | This Dockerfile can create images for the following target: `dbt-postgres`
7 |
8 | To build a new image, run the following docker command, substituting your own image name and build context:
9 | ```shell
10 | docker build --tag <my_image_name> --target dbt-postgres <path/to/dockerfile>
11 | ```
12 | ---
13 | > **Note:** Docker must be configured to use [BuildKit](https://docs.docker.com/develop/develop-images/build_enhancements/) in order for images to build properly!
14 |
15 | ---
16 |
17 | By default, the image will be populated with the latest version of `dbt-postgres` on `main`.
18 | If you need to use a different version, you can specify it by git ref using the `--build-arg` flag:
19 | ```shell
20 | docker build --tag <my_image_name> \
21 | --target dbt-postgres \
22 | --build-arg commit_ref=<commit_ref> \
23 | <path/to/dockerfile>
24 | ```
25 |
26 | ### Examples:
27 | To build an image named "my-dbt" that supports Postgres using the latest releases:
28 | ```shell
29 | docker build --tag my-dbt --target dbt-postgres .
30 | ```
31 |
32 | To build an image named "my-other-dbt" that supports Postgres using the adapter version 1.8.0:
33 | ```shell
34 | cd dbt-postgres/docker
35 | docker build \
36 | --tag my-other-dbt \
37 | --target dbt-postgres \
38 | --build-arg commit_ref=v1.8.0 \
39 | .
40 | ```
41 |
42 | ## Running an image in a container:
43 | The `ENTRYPOINT` for this Dockerfile is the command `dbt`, so you can bind-mount your project to `/usr/app` and use dbt as normal:
44 | ```shell
45 | docker run \
46 | --network=host \
47 | --mount type=bind,source=path/to/project,target=/usr/app \
48 | --mount type=bind,source=path/to/profiles.yml,target=/root/.dbt/profiles.yml \
49 | my-dbt \
50 | ls
51 | ```
52 | ---
53 | **Notes:**
54 | * Bind-mount sources _must_ be absolute paths
55 | * You may need to adjust the Docker networking settings depending on the specifics of your data warehouse/database host.
56 |
57 | ---
58 |
--------------------------------------------------------------------------------
/docker/dev.Dockerfile:
--------------------------------------------------------------------------------
1 | # this image does not get published; it is intended for local development only. See `Makefile` for usage.
2 | FROM ubuntu:24.04 AS base
3 |
4 | # prevent python installation from asking for time zone region
5 | ARG DEBIAN_FRONTEND=noninteractive
6 |
7 | # add python repository
8 | RUN apt-get update \
9 | && apt-get install -y software-properties-common=0.99.48 \
10 | && add-apt-repository -y ppa:deadsnakes/ppa \
11 | && apt-get clean \
12 | && rm -rf \
13 | /var/lib/apt/lists/* \
14 | /tmp/* \
15 | /var/tmp/*
16 |
17 | # install python
18 | RUN apt-get update \
19 | && apt-get install -y --no-install-recommends \
20 | build-essential=12.10ubuntu1 \
21 | git-all=1:2.43.0-1ubuntu7.1 \
22 | libpq-dev=16.4-0ubuntu0.24.04.2 \
23 | python3.9=3.9.20-1+noble1 \
24 | python3.9-dev=3.9.20-1+noble1 \
25 | python3.9-distutils=3.9.20-1+noble1 \
26 | python3.9-venv=3.9.20-1+noble1 \
27 | python3-pip=24.0+dfsg-1ubuntu1 \
28 | python3-wheel=0.42.0-2 \
29 | && apt-get clean \
30 | && rm -rf \
31 | /var/lib/apt/lists/* \
32 | /tmp/* \
33 | /var/tmp/*
34 |
35 | # update the default system interpreter to the newly installed version
36 | RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.9 1
37 |
38 | # install python dependencies
39 | RUN python -m pip install --upgrade "hatch==1.13.0" --no-cache-dir --compile
40 |
41 |
42 | FROM base AS dbt-postgres-dev
43 |
44 | HEALTHCHECK CMD python --version || exit 1
45 |
46 | # send stdout/stderr to terminal
47 | ENV PYTHONUNBUFFERED=1
48 |
49 | # setup mount for local code
50 | WORKDIR /opt/code
51 | VOLUME /opt/code
52 |
53 | # setup hatch virtual envs
54 | RUN hatch config set dirs.env.virtual ".hatch"
55 |
--------------------------------------------------------------------------------
/scripts/setup_test_database.sql:
--------------------------------------------------------------------------------
1 | CREATE DATABASE dbt;
2 |
3 | CREATE ROLE root WITH PASSWORD 'password';
4 | ALTER ROLE root WITH LOGIN;
5 | GRANT CREATE, CONNECT ON DATABASE dbt TO root WITH GRANT OPTION;
6 |
7 | CREATE ROLE noaccess WITH PASSWORD 'password' NOSUPERUSER;
8 | ALTER ROLE noaccess WITH LOGIN;
9 | GRANT CONNECT ON DATABASE dbt TO noaccess;
10 |
11 | CREATE ROLE dbt_test_user_1;
12 | CREATE ROLE dbt_test_user_2;
13 | CREATE ROLE dbt_test_user_3;
14 |
15 | CREATE DATABASE "dbtMixedCase";
16 | GRANT CREATE, CONNECT ON DATABASE "dbtMixedCase" TO root WITH GRANT OPTION;
17 |
--------------------------------------------------------------------------------
/test.env.example:
--------------------------------------------------------------------------------
1 | POSTGRES_TEST_HOST=
2 | POSTGRES_TEST_PORT=
3 | POSTGRES_TEST_USER=
4 | POSTGRES_TEST_PASS=
5 | POSTGRES_TEST_DATABASE=
6 | POSTGRES_TEST_THREADS=
7 |
8 | DBT_TEST_USER_1=dbt_test_user_1
9 | DBT_TEST_USER_2=dbt_test_user_2
10 | DBT_TEST_USER_3=dbt_test_user_3
11 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dbt-labs/dbt-postgres/4ceebd5939034123e77ad5b714d45545a7962d90/tests/__init__.py
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | # In order to call dbt's internal profile rendering, we need to set the
2 | # flags globally. This is a bit of a hack, but it's the best way to do it.
3 | from dbt.flags import set_from_args
4 | from argparse import Namespace
5 |
6 | set_from_args(Namespace(), None)
7 |
8 | pytest_plugins = "dbt.tests.fixtures.project"
9 |
--------------------------------------------------------------------------------
/tests/functional/README.md:
--------------------------------------------------------------------------------
1 | # This is where we put the pytest conversions of test/integration
2 |
3 | # Goals of moving tests to pytest
4 | * Readability
5 | * Modularity
6 | * Easier to create and debug
7 | * Ability to create a project for external debugging
8 |
9 | # TODO
10 | * Create the ability to export a project
11 | * Explore using:
12 | * https://github.com/pytest-docker-compose/pytest-docker-compose or
13 | * https://github.com/avast/pytest-docker for automatically managing a postgres instance running in a docker container
14 | * Track test coverage (https://pytest-cov.readthedocs.io/en/latest)
15 |
--------------------------------------------------------------------------------
/tests/functional/__init__.py:
--------------------------------------------------------------------------------
1 | # Functional tests focus on the business requirements of an application. They
2 | # only verify the output of an action and do not check the intermediate states
3 | # of the system when performing that action.
4 |
--------------------------------------------------------------------------------
/tests/functional/adapter/__init__.py:
--------------------------------------------------------------------------------
1 | # this file namespaces the test files within this directory to avoid naming collisions for the test collector
2 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_aliases.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.aliases.test_aliases import (
2 | BaseAliases,
3 | BaseAliasErrors,
4 | BaseSameAliasDifferentSchemas,
5 | BaseSameAliasDifferentDatabases,
6 | )
7 |
8 |
9 | class TestAliases(BaseAliases):
10 | pass
11 |
12 |
13 | class TestAliasErrors(BaseAliasErrors):
14 | pass
15 |
16 |
17 | class TestSameAliasDifferentSchemas(BaseSameAliasDifferentSchemas):
18 | pass
19 |
20 |
21 | class TestSameAliasDifferentDatabases(BaseSameAliasDifferentDatabases):
22 | pass
23 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_basic.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.basic.test_adapter_methods import BaseAdapterMethod
2 | from dbt.tests.adapter.basic.test_base import BaseSimpleMaterializations
3 | from dbt.tests.adapter.basic.test_docs_generate import (
4 | BaseDocsGenerate,
5 | BaseDocsGenReferences,
6 | )
7 | from dbt.tests.adapter.basic.test_empty import BaseEmpty
8 | from dbt.tests.adapter.basic.test_ephemeral import BaseEphemeral
9 | from dbt.tests.adapter.basic.test_generic_tests import BaseGenericTests
10 | from dbt.tests.adapter.basic.test_incremental import (
11 | BaseIncremental,
12 | BaseIncrementalNotSchemaChange,
13 | BaseIncrementalBadStrategy,
14 | )
15 | from dbt.tests.adapter.basic.test_singular_tests import BaseSingularTests
16 | from dbt.tests.adapter.basic.test_singular_tests_ephemeral import BaseSingularTestsEphemeral
17 | from dbt.tests.adapter.basic.test_snapshot_check_cols import BaseSnapshotCheckCols
18 | from dbt.tests.adapter.basic.test_snapshot_timestamp import BaseSnapshotTimestamp
19 | from dbt.tests.adapter.basic.test_table_materialization import BaseTableMaterialization
20 | from dbt.tests.adapter.basic.test_validate_connection import BaseValidateConnection
21 |
22 |
23 | class TestBaseCaching(BaseAdapterMethod):
24 | pass
25 |
26 |
27 | class TestSimpleMaterializations(BaseSimpleMaterializations):
28 | pass
29 |
30 |
31 | class TestDocsGenerate(BaseDocsGenerate):
32 | pass
33 |
34 |
35 | class TestDocsGenReferences(BaseDocsGenReferences):
36 | pass
37 |
38 |
39 | class TestEmpty(BaseEmpty):
40 | pass
41 |
42 |
43 | class TestEphemeral(BaseEphemeral):
44 | pass
45 |
46 |
47 | class TestGenericTests(BaseGenericTests):
48 | pass
49 |
50 |
51 | class TestIncremental(BaseIncremental):
52 | pass
53 |
54 |
55 | class TestBaseIncrementalNotSchemaChange(BaseIncrementalNotSchemaChange):
56 | pass
57 |
58 |
59 | class TestBaseIncrementalBadStrategy(BaseIncrementalBadStrategy):
60 | pass
61 |
62 |
63 | class TestSingularTests(BaseSingularTests):
64 | pass
65 |
66 |
67 | class TestSingularTestsEphemeral(BaseSingularTestsEphemeral):
68 | pass
69 |
70 |
71 | class TestSnapshotCheckCols(BaseSnapshotCheckCols):
72 | pass
73 |
74 |
75 | class TestSnapshotTimestamp(BaseSnapshotTimestamp):
76 | pass
77 |
78 |
79 | class TestTableMat(BaseTableMaterialization):
80 | pass
81 |
82 |
83 | class TestValidateConnection(BaseValidateConnection):
84 | pass
85 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_caching.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.caching.test_caching import (
2 | BaseCachingLowercaseModel,
3 | BaseCachingSelectedSchemaOnly,
4 | BaseCachingUppercaseModel,
5 | BaseNoPopulateCache,
6 | )
7 |
8 |
9 | class TestCachingLowerCaseModel(BaseCachingLowercaseModel):
10 | pass
11 |
12 |
13 | class TestCachingUppercaseModel(BaseCachingUppercaseModel):
14 | pass
15 |
16 |
17 | class TestCachingSelectedSchemaOnly(BaseCachingSelectedSchemaOnly):
18 | pass
19 |
20 |
21 | class TestNoPopulateCache(BaseNoPopulateCache):
22 | pass
23 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_clone.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from dbt.tests.adapter.dbt_clone.test_dbt_clone import (
4 | BaseCloneNotPossible,
5 | BaseClonePossible,
6 | BaseCloneSameTargetAndState,
7 | )
8 |
9 |
10 | class TestBaseCloneNotPossible(BaseCloneNotPossible):
11 | pass
12 |
13 |
14 | @pytest.mark.skip("Cloning is not possible in Postgres")
15 | class TestBaseClonePossible(BaseClonePossible):
16 | pass
17 |
18 |
19 | class TestCloneSameTargetAndState(BaseCloneSameTargetAndState):
20 | pass
21 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_column_types.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.column_types.test_column_types import BasePostgresColumnTypes
2 |
3 |
4 | class TestPostgresColumnTypes(BasePostgresColumnTypes):
5 | pass
6 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_concurrency.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.concurrency.test_concurrency import BaseConcurrency
2 |
3 |
4 | class TestConcurrency(BaseConcurrency):
5 | pass
6 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_constraints.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.constraints.test_constraints import (
2 | BaseTableConstraintsColumnsEqual,
3 | BaseViewConstraintsColumnsEqual,
4 | BaseIncrementalConstraintsColumnsEqual,
5 | BaseConstraintsRuntimeDdlEnforcement,
6 | BaseConstraintsRollback,
7 | BaseIncrementalConstraintsRuntimeDdlEnforcement,
8 | BaseIncrementalConstraintsRollback,
9 | BaseTableContractSqlHeader,
10 | BaseIncrementalContractSqlHeader,
11 | BaseModelConstraintsRuntimeEnforcement,
12 | BaseConstraintQuotedColumn,
13 | BaseIncrementalForeignKeyConstraint,
14 | )
15 |
16 |
17 | class TestTableConstraintsColumnsEqual(BaseTableConstraintsColumnsEqual):
18 | pass
19 |
20 |
21 | class TestViewConstraintsColumnsEqual(BaseViewConstraintsColumnsEqual):
22 | pass
23 |
24 |
25 | class TestIncrementalConstraintsColumnsEqual(BaseIncrementalConstraintsColumnsEqual):
26 | pass
27 |
28 |
29 | class TestTableConstraintsRuntimeDdlEnforcement(BaseConstraintsRuntimeDdlEnforcement):
30 | pass
31 |
32 |
33 | class TestTableConstraintsRollback(BaseConstraintsRollback):
34 | pass
35 |
36 |
37 | class TestIncrementalConstraintsRuntimeDdlEnforcement(
38 | BaseIncrementalConstraintsRuntimeDdlEnforcement
39 | ):
40 | pass
41 |
42 |
43 | class TestIncrementalConstraintsRollback(BaseIncrementalConstraintsRollback):
44 | pass
45 |
46 |
47 | class TestTableContractSqlHeader(BaseTableContractSqlHeader):
48 | pass
49 |
50 |
51 | class TestIncrementalContractSqlHeader(BaseIncrementalContractSqlHeader):
52 | pass
53 |
54 |
55 | class TestModelConstraintsRuntimeEnforcement(BaseModelConstraintsRuntimeEnforcement):
56 | pass
57 |
58 |
59 | class TestConstraintQuotedColumn(BaseConstraintQuotedColumn):
60 | pass
61 |
62 |
63 | class TestIncrementalForeignKeyConstraint(BaseIncrementalForeignKeyConstraint):
64 | pass
65 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_data_types.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.utils.data_types.test_type_bigint import BaseTypeBigInt
2 | from dbt.tests.adapter.utils.data_types.test_type_boolean import BaseTypeBoolean
3 | from dbt.tests.adapter.utils.data_types.test_type_float import BaseTypeFloat
4 | from dbt.tests.adapter.utils.data_types.test_type_int import BaseTypeInt
5 | from dbt.tests.adapter.utils.data_types.test_type_numeric import BaseTypeNumeric
6 | from dbt.tests.adapter.utils.data_types.test_type_string import BaseTypeString
7 | from dbt.tests.adapter.utils.data_types.test_type_timestamp import BaseTypeTimestamp
8 |
9 |
10 | class TestTypeBigInt(BaseTypeBigInt):
11 | pass
12 |
13 |
14 | class TestTypeBoolean(BaseTypeBoolean):
15 | pass
16 |
17 |
18 | class TestTypeFloat(BaseTypeFloat):
19 | pass
20 |
21 |
22 | class TestTypeInt(BaseTypeInt):
23 | pass
24 |
25 |
26 | class TestTypeNumeric(BaseTypeNumeric):
27 | pass
28 |
29 |
30 | class TestTypeString(BaseTypeString):
31 | pass
32 |
33 |
34 | class TestTypeTimestamp(BaseTypeTimestamp):
35 | pass
36 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_debug.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.dbt_debug.test_dbt_debug import (
2 | BaseDebugPostgres,
3 | BaseDebugInvalidProjectPostgres,
4 | )
5 |
6 |
7 | class TestDebugPostgres(BaseDebugPostgres):
8 | pass
9 |
10 |
11 | class TestDebugInvalidProjectPostgres(BaseDebugInvalidProjectPostgres):
12 | pass
13 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_empty.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.empty.test_empty import BaseTestEmpty
2 |
3 |
4 | class TestEmpty(BaseTestEmpty):
5 | pass
6 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_ephemeral.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.ephemeral.test_ephemeral import (
2 | BaseEphemeralMulti,
3 | BaseEphemeralNested,
4 | BaseEphemeralErrorHandling,
5 | )
6 |
7 |
8 | class TestEphemeralMulti(BaseEphemeralMulti):
9 | pass
10 |
11 |
12 | class TestEphemeralNested(BaseEphemeralNested):
13 | pass
14 |
15 |
16 | class TestEphemeralErrorHandling(BaseEphemeralErrorHandling):
17 | pass
18 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_grants.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.grants.test_incremental_grants import BaseIncrementalGrants
2 | from dbt.tests.adapter.grants.test_invalid_grants import BaseInvalidGrants
3 | from dbt.tests.adapter.grants.test_model_grants import BaseModelGrants
4 | from dbt.tests.adapter.grants.test_seed_grants import BaseSeedGrants
5 | from dbt.tests.adapter.grants.test_snapshot_grants import BaseSnapshotGrants
6 |
7 |
8 | class TestIncrementalGrants(BaseIncrementalGrants):
9 | pass
10 |
11 |
12 | class TestInvalidGrants(BaseInvalidGrants):
13 | pass
14 |
15 |
16 | class TestModelGrants(BaseModelGrants):
17 | pass
18 |
19 |
20 | class TestSeedGrants(BaseSeedGrants):
21 | pass
22 |
23 |
24 | class TestSnapshotGrants(BaseSnapshotGrants):
25 | pass
26 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_hooks/data/seed_model.sql:
--------------------------------------------------------------------------------
1 | drop table if exists {schema}.on_model_hook;
2 |
3 | create table {schema}.on_model_hook (
4 | test_state TEXT, -- start|end
5 | target_dbname TEXT,
6 | target_host TEXT,
7 | target_name TEXT,
8 | target_schema TEXT,
9 | target_type TEXT,
10 | target_user TEXT,
11 | target_pass TEXT,
12 | target_threads INTEGER,
13 | run_started_at TEXT,
14 | invocation_id TEXT,
15 | thread_id TEXT
16 | );
17 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_hooks/data/seed_run.sql:
--------------------------------------------------------------------------------
1 | drop table if exists {schema}.on_run_hook;
2 |
3 | create table {schema}.on_run_hook (
4 | test_state TEXT, -- start|end
5 | target_dbname TEXT,
6 | target_host TEXT,
7 | target_name TEXT,
8 | target_schema TEXT,
9 | target_type TEXT,
10 | target_user TEXT,
11 | target_pass TEXT,
12 | target_threads INTEGER,
13 | run_started_at TEXT,
14 | invocation_id TEXT,
15 | thread_id TEXT
16 | );
17 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_hooks/test_hooks.py:
--------------------------------------------------------------------------------
1 | """
2 | This file needs to be in its own directory because it uses a `data` directory.
3 | Placing this file in its own directory avoids collisions.
4 | """
5 |
6 | from dbt.tests.adapter.hooks.test_model_hooks import (
7 | BasePrePostModelHooks,
8 | BaseHookRefs,
9 | BasePrePostModelHooksOnSeeds,
10 | BaseHooksRefsOnSeeds,
11 | BasePrePostModelHooksOnSeedsPlusPrefixed,
12 | BasePrePostModelHooksOnSeedsPlusPrefixedWhitespace,
13 | BasePrePostModelHooksOnSnapshots,
14 | BasePrePostModelHooksInConfig,
15 | BasePrePostModelHooksInConfigWithCount,
16 | BasePrePostModelHooksInConfigKwargs,
17 | BasePrePostSnapshotHooksInConfigKwargs,
18 | BaseDuplicateHooksInConfigs,
19 | )
20 | from dbt.tests.adapter.hooks.test_run_hooks import (
21 | BasePrePostRunHooks,
22 | BaseAfterRunHooks,
23 | )
24 |
25 |
26 | class TestPrePostModelHooks(BasePrePostModelHooks):
27 | pass
28 |
29 |
30 | class TestHookRefs(BaseHookRefs):
31 | pass
32 |
33 |
34 | class TestPrePostModelHooksOnSeeds(BasePrePostModelHooksOnSeeds):
35 | pass
36 |
37 |
38 | class TestHooksRefsOnSeeds(BaseHooksRefsOnSeeds):
39 | pass
40 |
41 |
42 | class TestPrePostModelHooksOnSeedsPlusPrefixed(BasePrePostModelHooksOnSeedsPlusPrefixed):
43 | pass
44 |
45 |
46 | class TestPrePostModelHooksOnSeedsPlusPrefixedWhitespace(
47 | BasePrePostModelHooksOnSeedsPlusPrefixedWhitespace
48 | ):
49 | pass
50 |
51 |
52 | class TestPrePostModelHooksOnSnapshots(BasePrePostModelHooksOnSnapshots):
53 | pass
54 |
55 |
56 | class TestPrePostModelHooksInConfig(BasePrePostModelHooksInConfig):
57 | pass
58 |
59 |
60 | class TestPrePostModelHooksInConfigWithCount(BasePrePostModelHooksInConfigWithCount):
61 | pass
62 |
63 |
64 | class TestPrePostModelHooksInConfigKwargs(BasePrePostModelHooksInConfigKwargs):
65 | pass
66 |
67 |
68 | class TestPrePostSnapshotHooksInConfigKwargs(BasePrePostSnapshotHooksInConfigKwargs):
69 | pass
70 |
71 |
72 | class TestDuplicateHooksInConfigs(BaseDuplicateHooksInConfigs):
73 | pass
74 |
75 |
76 | class TestPrePostRunHooks(BasePrePostRunHooks):
77 | pass
78 |
79 |
80 | class TestAfterRunHooks(BaseAfterRunHooks):
81 | pass
82 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_incremental.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.incremental.test_incremental_merge_exclude_columns import (
2 | BaseMergeExcludeColumns,
3 | )
4 | from dbt.tests.adapter.incremental.test_incremental_on_schema_change import (
5 | BaseIncrementalOnSchemaChange,
6 | )
7 | from dbt.tests.adapter.incremental.test_incremental_predicates import BaseIncrementalPredicates
8 | from dbt.tests.adapter.incremental.test_incremental_unique_id import BaseIncrementalUniqueKey
9 |
10 |
11 | class TestBaseMergeExcludeColumns(BaseMergeExcludeColumns):
12 | pass
13 |
14 |
15 | class TestIncrementalOnSchemaChange(BaseIncrementalOnSchemaChange):
16 | pass
17 |
18 |
19 | class TestIncrementalPredicatesDeleteInsert(BaseIncrementalPredicates):
20 | pass
21 |
22 |
23 | class TestIncrementalUniqueKey(BaseIncrementalUniqueKey):
24 | pass
25 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_incremental_microbatch.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.incremental.test_incremental_microbatch import (
2 | BaseMicrobatch,
3 | )
4 |
5 |
6 | class TestPostgresMicrobatch(BaseMicrobatch):
7 | pass
8 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_persist_docs.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import json
3 |
4 | from dbt.tests.adapter.materialized_view import files
5 | from dbt.tests.adapter.persist_docs.test_persist_docs import (
6 | BasePersistDocs,
7 | BasePersistDocsColumnMissing,
8 | BasePersistDocsCommentOnQuotedColumn,
9 | )
10 | from tests.functional.utils import run_dbt
11 |
12 | _MATERIALIZED_VIEW_PROPERTIES__SCHEMA_YML = """
13 | version: 2
14 |
15 | models:
16 | - name: my_materialized_view
17 | description: |
18 | Materialized view model description "with double quotes"
19 | and with 'single quotes' as well as other;
20 | '''abc123'''
21 | reserved -- characters
22 | 80% of statistics are made up on the spot
23 | --
24 | /* comment */
25 | Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting
26 | """
27 |
28 |
29 | class TestPersistDocs(BasePersistDocs):
30 | pass
31 |
32 |
33 | class TestPersistDocsColumnMissing(BasePersistDocsColumnMissing):
34 | pass
35 |
36 |
37 | class TestPersistDocsCommentOnQuotedColumn(BasePersistDocsCommentOnQuotedColumn):
38 | pass
39 |
40 |
41 | class TestPersistDocsWithMaterializedView(BasePersistDocs):
42 | @pytest.fixture(scope="class", autouse=True)
43 | def seeds(self):
44 | return {"my_seed.csv": files.MY_SEED}
45 |
46 | @pytest.fixture(scope="class")
47 | def models(self):
48 | return {
49 | "my_materialized_view.sql": files.MY_MATERIALIZED_VIEW,
50 | }
51 |
52 | @pytest.fixture(scope="class")
53 | def properties(self):
54 | return {
55 | "schema.yml": _MATERIALIZED_VIEW_PROPERTIES__SCHEMA_YML,
56 | }
57 |
58 | def test_has_comments_pglike(self, project):
59 | run_dbt(["docs", "generate"])
60 | with open("target/catalog.json") as fp:
61 | catalog_data = json.load(fp)
62 | assert "nodes" in catalog_data
63 | assert len(catalog_data["nodes"]) == 2
64 | view_node = catalog_data["nodes"]["model.test.my_materialized_view"]
65 | assert view_node["metadata"]["comment"].startswith("Materialized view model description")
66 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_query_comment.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.query_comment.test_query_comment import (
2 | BaseQueryComments,
3 | BaseMacroQueryComments,
4 | BaseMacroArgsQueryComments,
5 | BaseMacroInvalidQueryComments,
6 | BaseNullQueryComments,
7 | BaseEmptyQueryComments,
8 | )
9 | import pytest
10 |
11 |
12 | class TestQueryComments(BaseQueryComments):
13 | pass
14 |
15 |
16 | class TestMacroQueryComments(BaseMacroQueryComments):
17 | pass
18 |
19 |
20 | class TestMacroArgsQueryComments(BaseMacroArgsQueryComments):
21 | @pytest.mark.skip(
22 | "This test is incorrectly comparing the version of `dbt-core`"
23 | "to the version of `dbt-postgres`, which is not always the same."
24 | )
25 | def test_matches_comment(self, project, get_package_version):
26 | pass
27 |
28 |
29 | class TestMacroInvalidQueryComments(BaseMacroInvalidQueryComments):
30 | pass
31 |
32 |
33 | class TestNullQueryComments(BaseNullQueryComments):
34 | pass
35 |
36 |
37 | class TestEmptyQueryComments(BaseEmptyQueryComments):
38 | pass
39 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_relations.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.relations.test_changing_relation_type import BaseChangeRelationTypeValidator
2 | from dbt.tests.adapter.relations.test_dropping_schema_named import BaseDropSchemaNamed
3 |
4 |
5 | class TestChangeRelationTypes(BaseChangeRelationTypeValidator):
6 | pass
7 |
8 |
9 | class TestDropSchemaNamed(BaseDropSchemaNamed):
10 | pass
11 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_show.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.dbt_show.test_dbt_show import (
2 | BaseShowLimit,
3 | BaseShowSqlHeader,
4 | BaseShowDoesNotHandleDoubleLimit,
5 | )
6 |
7 |
8 | class TestPostgresShowSqlHeader(BaseShowSqlHeader):
9 | pass
10 |
11 |
12 | class TestPostgresShowLimit(BaseShowLimit):
13 | pass
14 |
15 |
16 | class TestPostgresShowDoesNotHandleDoubleLimit(BaseShowDoesNotHandleDoubleLimit):
17 | pass
18 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_simple_copy.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.simple_copy.test_copy_uppercase import BaseSimpleCopyUppercase
2 | from dbt.tests.adapter.simple_copy.test_simple_copy import (
3 | SimpleCopyBase,
4 | EmptyModelsArentRunBase,
5 | )
6 |
7 |
8 | class TestSimpleCopyUppercase(BaseSimpleCopyUppercase):
9 | pass
10 |
11 |
12 | class TestSimpleCopyBase(SimpleCopyBase):
13 | pass
14 |
15 |
16 | class TestEmptyModelsArentRun(EmptyModelsArentRunBase):
17 | pass
18 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_simple_seed/test_simple_seed.py:
--------------------------------------------------------------------------------
1 | """
2 | This file needs to be in its own directory because it creates a `data` directory at run time.
3 | Placing this file in its own directory avoids collisions.
4 | """
5 |
6 | from dbt.tests.adapter.simple_seed.test_seed import (
7 | BaseBasicSeedTests,
8 | BaseSeedConfigFullRefreshOn,
9 | BaseSeedConfigFullRefreshOff,
10 | BaseSeedCustomSchema,
11 | BaseSeedWithUniqueDelimiter,
12 | BaseSeedWithWrongDelimiter,
13 | BaseSeedWithEmptyDelimiter,
14 | BaseSimpleSeedEnabledViaConfig,
15 | BaseSeedParsing,
16 | BaseSimpleSeedWithBOM,
17 | BaseSeedSpecificFormats,
18 | BaseTestEmptySeed,
19 | )
20 | from dbt.tests.adapter.simple_seed.test_seed_type_override import (
21 | BaseSimpleSeedColumnOverride,
22 | )
23 |
24 |
25 | class TestBasicSeedTests(BaseBasicSeedTests):
26 | pass
27 |
28 |
29 | class TestSeedConfigFullRefreshOn(BaseSeedConfigFullRefreshOn):
30 | pass
31 |
32 |
33 | class TestSeedConfigFullRefreshOff(BaseSeedConfigFullRefreshOff):
34 | pass
35 |
36 |
37 | class TestSeedCustomSchema(BaseSeedCustomSchema):
38 | pass
39 |
40 |
41 | class TestSeedWithUniqueDelimiter(BaseSeedWithUniqueDelimiter):
42 | pass
43 |
44 |
45 | class TestSeedWithWrongDelimiter(BaseSeedWithWrongDelimiter):
46 | pass
47 |
48 |
49 | class TestSeedWithEmptyDelimiter(BaseSeedWithEmptyDelimiter):
50 | pass
51 |
52 |
53 | class TestSimpleSeedEnabledViaConfig(BaseSimpleSeedEnabledViaConfig):
54 | pass
55 |
56 |
57 | class TestSeedParsing(BaseSeedParsing):
58 | pass
59 |
60 |
61 | class TestSimpleSeedWithBOM(BaseSimpleSeedWithBOM):
62 | pass
63 |
64 |
65 | class TestSeedSpecificFormats(BaseSeedSpecificFormats):
66 | pass
67 |
68 |
69 | class TestEmptySeed(BaseTestEmptySeed):
70 | pass
71 |
72 |
73 | class TestSimpleSeedColumnOverride(BaseSimpleSeedColumnOverride):
74 | pass
75 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_simple_snapshot.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.simple_snapshot.test_snapshot import (
2 | BaseSimpleSnapshot,
3 | BaseSnapshotCheck,
4 | )
5 |
6 |
7 | class TestSnapshot(BaseSimpleSnapshot):
8 | pass
9 |
10 |
11 | class TestSnapshotCheck(BaseSnapshotCheck):
12 | pass
13 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_store_test_failures.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.store_test_failures_tests.test_store_test_failures import (
2 | BaseStoreTestFailures,
3 | )
4 |
5 |
6 | class TestStoreTestFailures(BaseStoreTestFailures):
7 | pass
8 |
--------------------------------------------------------------------------------
/tests/functional/adapter/test_unit_testing.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.unit_testing.test_case_insensitivity import BaseUnitTestCaseInsensivity
2 | from dbt.tests.adapter.unit_testing.test_invalid_input import BaseUnitTestInvalidInput
3 | from dbt.tests.adapter.unit_testing.test_types import BaseUnitTestingTypes
4 |
5 |
6 | class TestPostgresUnitTestCaseInsensitivity(BaseUnitTestCaseInsensivity):
7 | pass
8 |
9 |
10 | class TestPostgresUnitTestInvalidInput(BaseUnitTestInvalidInput):
11 | pass
12 |
13 |
14 | class TestPostgresUnitTestingTypes(BaseUnitTestingTypes):
15 | pass
16 |
--------------------------------------------------------------------------------
/tests/functional/basic/data/summary_expected.csv:
--------------------------------------------------------------------------------
1 | gender,ct
2 | Female,40
3 | Male,60
4 |
--------------------------------------------------------------------------------
/tests/functional/basic/data/summary_expected_update.csv:
--------------------------------------------------------------------------------
1 | gender,ct
2 | Female,94
3 | Male,106
4 |
--------------------------------------------------------------------------------
/tests/functional/basic/test_basic.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import get_manifest
2 | import pytest
3 |
4 | from tests.functional.utils import run_dbt
5 |
6 |
7 | @pytest.fixture(scope="class")
8 | def models():
9 | return {"my_model.sql": "select 1 as fun"}
10 |
11 |
12 | def test_basic(project):
13 | # Tests that a project with a single model works
14 | results = run_dbt(["run"])
15 | assert len(results) == 1
16 | manifest = get_manifest(project.project_root)
17 | assert "model.test.my_model" in manifest.nodes
18 |
--------------------------------------------------------------------------------
/tests/functional/basic/test_invalid_reference.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import run_dbt
2 | from dbt_common.exceptions import CompilationError
3 | import pytest
4 |
5 |
6 | descendant_sql = """
7 | -- should be ref('model')
8 | select * from {{ ref(model) }}
9 | """
10 |
11 |
12 | model_sql = """
13 | select 1 as id
14 | """
15 |
16 |
17 | @pytest.fixture(scope="class")
18 | def models():
19 | return {
20 | "descendant.sql": descendant_sql,
21 | "model.sql": model_sql,
22 | }
23 |
24 |
25 | def test_undefined_value(project):
26 | # Tests that a project with an invalid reference fails
27 | with pytest.raises(CompilationError):
28 | run_dbt(["compile"])
29 |
--------------------------------------------------------------------------------
/tests/functional/basic/test_jaffle_shop.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import get_manifest, write_file
2 |
3 | from tests.functional.projects import JaffleShop
4 | from tests.functional.utils import run_dbt, run_dbt_and_capture
5 |
6 |
7 | class TestBasic(JaffleShop):
8 | def test_basic(self, project):
9 | # test .dbtignore works
10 | write_file("models/ignore*.sql\nignore_folder", project.project_root, ".dbtignore")
11 | # Create the data from seeds
12 | results = run_dbt(["seed"])
13 |
14 | # Tests that the jaffle_shop project runs
15 | results = run_dbt(["run"])
16 | assert len(results) == 5
17 | manifest = get_manifest(project.project_root)
18 | assert "model.jaffle_shop.orders" in manifest.nodes
19 |
20 | def test_execution_time_format_is_humanized(self, project):
21 | # Create the data from seeds
22 | run_dbt(["seed"])
23 | _, log_output = run_dbt_and_capture(["run"])
24 |
25 | assert " in 0 hours 0 minutes and " in log_output
26 | assert " seconds" in log_output
27 |
--------------------------------------------------------------------------------
/tests/functional/basic/test_mixed_case_db.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import get_manifest
2 | import pytest
3 |
4 | from tests.functional.utils import run_dbt
5 |
6 |
7 | model_sql = """
8 | select 1 as id
9 | """
10 |
11 |
12 | @pytest.fixture(scope="class")
13 | def models():
14 | return {"model.sql": model_sql}
15 |
16 |
17 | @pytest.fixture(scope="class")
18 | def dbt_profile_data(unique_schema):
19 | return {
20 | "test": {
21 | "outputs": {
22 | "default": {
23 | "type": "postgres",
24 | "threads": 4,
25 | "host": "localhost",
26 | "port": 5432,
27 | "user": "root",
28 | "pass": "password",
29 | "dbname": "dbtMixedCase",
30 | "schema": unique_schema,
31 | },
32 | },
33 | "target": "default",
34 | },
35 | }
36 |
37 |
38 | def test_basic(project_root, project):
39 | assert project.database == "dbtMixedCase"
40 |
41 | # Tests that a project with a single model works
42 | results = run_dbt(["run"])
43 | assert len(results) == 1
44 | manifest = get_manifest(project_root)
45 | assert "model.test.model" in manifest.nodes
46 | # Running a second time works
47 | run_dbt(["run"])
48 |
--------------------------------------------------------------------------------
/tests/functional/basic/test_varchar_widening.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from dbt.tests.util import check_relations_equal
4 | import pytest
5 |
6 | from tests.functional.utils import run_dbt
7 |
8 |
9 | incremental_sql = """
10 | {{
11 | config(
12 | materialized = "incremental"
13 | )
14 | }}
15 |
16 | select * from {{ this.schema }}.seed
17 |
18 | {% if is_incremental() %}
19 |
20 | where id > (select max(id) from {{this}})
21 |
22 | {% endif %}
23 | """
24 |
25 | materialized_sql = """
26 | {{
27 | config(
28 | materialized = "table"
29 | )
30 | }}
31 |
32 | select * from {{ this.schema }}.seed
33 | """
34 |
35 |
36 | @pytest.fixture(scope="class")
37 | def models():
38 | return {"incremental.sql": incremental_sql, "materialized.sql": materialized_sql}
39 |
40 |
41 | def test_varchar_widening(project):
42 | path = os.path.join(project.test_data_dir, "varchar10_seed.sql")
43 | project.run_sql_file(path)
44 |
45 | results = run_dbt(["run"])
46 | assert len(results) == 2
47 |
48 | check_relations_equal(project.adapter, ["seed", "incremental"])
49 | check_relations_equal(project.adapter, ["seed", "materialized"])
50 |
51 | path = os.path.join(project.test_data_dir, "varchar300_seed.sql")
52 | project.run_sql_file(path)
53 |
54 | results = run_dbt(["run"])
55 | assert len(results) == 2
56 |
57 | check_relations_equal(project.adapter, ["seed", "incremental"])
58 | check_relations_equal(project.adapter, ["seed", "materialized"])
59 |
--------------------------------------------------------------------------------
/tests/functional/compile/fixtures.py:
--------------------------------------------------------------------------------
1 | first_model_sql = """
2 | select 1 as fun
3 | """
4 |
5 | second_model_sql = """
6 | {%- set columns = adapter.get_columns_in_relation(ref('first_model')) -%}
7 | select
8 | *,
9 | {{ this.schema }} as schema
10 | from {{ ref('first_model') }}
11 | """
12 |
13 | first_ephemeral_model_sql = """
14 | {{ config(materialized = 'ephemeral') }}
15 | select 1 as fun
16 | """
17 |
18 | second_ephemeral_model_sql = """
19 | {{ config(materialized = 'ephemeral') }}
20 | select * from {{ ref('first_ephemeral_model') }}
21 | """
22 |
23 | third_ephemeral_model_sql = """
24 | select * from {{ ref('second_ephemeral_model')}}
25 | union all
26 | select 2 as fun
27 | """
28 |
29 | model_multiline_jinja = """
30 | select {{
31 | 1 + 1
32 | }} as fun
33 | """
34 |
35 | with_recursive_model_sql = """
36 | {{ config(materialized = 'ephemeral') }}
37 | with recursive t(n) as (
38 | select * from {{ ref('first_ephemeral_model') }}
39 | union all
40 | select n+1 from t where n < 100
41 | )
42 | select sum(n) from t;
43 | """
44 |
45 | schema_yml = """
46 | version: 2
47 |
48 | models:
49 | - name: second_model
50 | description: "The second model"
51 | columns:
52 | - name: fun
53 | data_tests:
54 | - not_null
55 | - name: schema
56 | data_tests:
57 | - unique
58 | """
59 |
--------------------------------------------------------------------------------
/tests/functional/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 |
5 | from tests.functional.projects import dbt_integration
6 |
7 |
8 | @pytest.fixture(scope="class")
9 | def dbt_integration_project():
10 | return dbt_integration()
11 |
12 |
13 | @pytest.fixture(scope="class")
14 | def dbt_profile_target():
15 | return {
16 | "type": "postgres",
17 | "host": os.getenv("POSTGRES_TEST_HOST", "localhost"),
18 | "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)),
19 | "user": os.getenv("POSTGRES_TEST_USER", "root"),
20 | "pass": os.getenv("POSTGRES_TEST_PASS", "password"),
21 | "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"),
22 | "threads": int(os.getenv("POSTGRES_TEST_THREADS", 4)),
23 | }
24 |
--------------------------------------------------------------------------------
/tests/functional/contracts/test_contract_enforcement.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import run_dbt, write_file
2 | import pytest
3 |
4 |
5 | my_model_sql = """
6 | select 'some string' as string_column
7 | """
8 |
9 | my_model_int_sql = """
10 | select 123 as int_column
11 | """
12 |
13 | model_schema_yml = """
14 | models:
15 | - name: my_model
16 | config:
17 | materialized: incremental
18 | on_schema_change: append_new_columns
19 | contract: {enforced: true}
20 | columns:
21 | - name: string_column
22 | data_type: text
23 | """
24 |
25 |
26 | class TestIncrementalModelContractEnforcement:
27 | @pytest.fixture(scope="class")
28 | def models(self):
29 | return {
30 | "my_model.sql": my_model_sql,
31 | "schema.yml": model_schema_yml,
32 | }
33 |
34 | def test_contracted_incremental(self, project):
35 | results = run_dbt()
36 | assert len(results) == 1
37 | # now update the column type in the model to break the contract
38 | write_file(my_model_int_sql, project.project_root, "models", "my_model.sql")
39 |
40 | expected_msg = "This model has an enforced contract that failed."
41 | results = run_dbt(expect_pass=False)
42 | assert len(results) == 1
43 | msg = results[0].message
44 | assert expected_msg in msg
45 |
--------------------------------------------------------------------------------
/tests/functional/contracts/test_contract_precision.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from tests.functional.utils import run_dbt_and_capture
4 |
5 |
6 | my_numeric_model_sql = """
7 | select
8 | 1.234 as non_integer
9 | """
10 |
11 | model_schema_numerics_yml = """
12 | version: 2
13 | models:
14 | - name: my_numeric_model
15 | config:
16 | contract:
17 | enforced: true
18 | columns:
19 | - name: non_integer
20 | data_type: numeric
21 | """
22 |
23 | model_schema_numerics_precision_yml = """
24 | version: 2
25 | models:
26 | - name: my_numeric_model
27 | config:
28 | contract:
29 | enforced: true
30 | columns:
31 | - name: non_integer
32 | data_type: numeric(38,3)
33 | """
34 |
35 |
36 | class TestModelContractNumericNoPrecision:
37 | @pytest.fixture(scope="class")
38 | def models(self):
39 | return {
40 | "my_numeric_model.sql": my_numeric_model_sql,
41 | "schema.yml": model_schema_numerics_yml,
42 | }
43 |
44 | def test_contracted_numeric_without_precision(self, project):
45 | expected_msg = "Detected columns with numeric type and unspecified precision/scale, this can lead to unintended rounding: ['non_integer']"
46 | _, logs = run_dbt_and_capture(["run"], expect_pass=True)
47 | assert expected_msg in logs
48 | _, logs = run_dbt_and_capture(["--warn-error", "run"], expect_pass=False)
49 | assert "Compilation Error in model my_numeric_model" in logs
50 | assert expected_msg in logs
51 |
52 |
53 | class TestModelContractNumericPrecision:
54 | @pytest.fixture(scope="class")
55 | def models(self):
56 | return {
57 | "my_numeric_model.sql": my_numeric_model_sql,
58 | "schema.yml": model_schema_numerics_precision_yml,
59 | }
60 |
61 | def test_contracted_numeric_with_precision(self, project):
62 | expected_msg = "Detected columns with numeric type and unspecified precision/scale, this can lead to unintended rounding: ['non_integer']"
63 | _, logs = run_dbt_and_capture(["run"], expect_pass=True)
64 | assert expected_msg not in logs
65 |
--------------------------------------------------------------------------------
/tests/functional/contracts/test_nonstandard_data_type.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from tests.functional.utils import run_dbt_and_capture
4 |
5 |
6 | my_numeric_model_sql = """
7 | select
8 | 12.34 as price
9 | """
10 |
11 | my_money_model_sql = """
12 | select
13 | cast('12.34' as money) as price
14 | """
15 |
16 | model_schema_money_yml = """
17 | models:
18 | - name: my_model
19 | config:
20 | contract:
21 | enforced: true
22 | columns:
23 | - name: price
24 | data_type: money
25 | """
26 |
27 | model_schema_numeric_yml = """
28 | models:
29 | - name: my_model
30 | config:
31 | contract:
32 | enforced: true
33 | columns:
34 | - name: price
35 | data_type: numeric
36 | """
37 |
38 |
39 | class TestModelContractUnrecognizedTypeCode1:
40 | @pytest.fixture(scope="class")
41 | def models(self):
42 | return {
43 | "my_model.sql": my_money_model_sql,
44 | "schema.yml": model_schema_money_yml,
45 | }
46 |
47 | def test_nonstandard_data_type(self, project):
48 | expected_debug_msg = "The `type_code` 790 was not recognized"
49 | _, logs = run_dbt_and_capture(["--debug", "run"], expect_pass=True)
50 | assert expected_debug_msg in logs
51 |
52 |
53 | class TestModelContractUnrecognizedTypeCodeActualMismatch:
54 | @pytest.fixture(scope="class")
55 | def models(self):
56 | return {
57 | "my_model.sql": my_money_model_sql,
58 | "schema.yml": model_schema_numeric_yml,
59 | }
60 |
61 | def test_nonstandard_data_type(self, project):
62 | expected_msg = "unknown type_code 790 | DECIMAL | data type mismatch"
63 | expected_debug_msg = "The `type_code` 790 was not recognized"
64 | _, logs = run_dbt_and_capture(["--debug", "run"], expect_pass=False)
65 | assert expected_msg in logs
66 | assert expected_debug_msg in logs
67 |
68 |
69 | class TestModelContractUnrecognizedTypeCodeExpectedMismatch:
70 | @pytest.fixture(scope="class")
71 | def models(self):
72 | return {
73 | "my_model.sql": my_numeric_model_sql,
74 | "schema.yml": model_schema_money_yml,
75 | }
76 |
77 | def test_nonstandard_data_type(self, project):
78 | expected_msg = "DECIMAL | unknown type_code 790 | data type mismatch"
79 | expected_debug_msg = "The `type_code` 790 was not recognized"
80 | _, logs = run_dbt_and_capture(["--debug", "run"], expect_pass=False)
81 | assert expected_msg in logs
82 | assert expected_debug_msg in logs
83 |
--------------------------------------------------------------------------------
/tests/functional/custom_aliases/fixtures.py:
--------------------------------------------------------------------------------
1 | model1_sql = """
2 | {{ config(materialized='table', alias='alias') }}
3 |
4 | select {{ string_literal(this.name) }} as model_name
5 | """
6 |
7 | model2_sql = """
8 | {{ config(materialized='table') }}
9 |
10 | select {{ string_literal(this.name) }} as model_name
11 | """
12 |
13 | macros_sql = """
14 | {% macro generate_alias_name(custom_alias_name, node) -%}
15 | {%- if custom_alias_name is none -%}
16 | {{ node.name }}
17 | {%- else -%}
18 | custom_{{ custom_alias_name | trim }}
19 | {%- endif -%}
20 | {%- endmacro %}
21 |
22 |
23 | {% macro string_literal(s) -%}
24 | {{ adapter.dispatch('string_literal', macro_namespace='test')(s) }}
25 | {%- endmacro %}
26 |
27 | {% macro default__string_literal(s) %}
28 | '{{ s }}'::text
29 | {% endmacro %}
30 | """
31 |
32 | macros_config_sql = """
33 | {#-- Verify that the config['alias'] key is present #}
34 | {% macro generate_alias_name(custom_alias_name, node) -%}
35 | {%- if custom_alias_name is none -%}
36 | {{ node.name }}
37 | {%- else -%}
38 | custom_{{ node.config['alias'] if 'alias' in node.config else '' | trim }}
39 | {%- endif -%}
40 | {%- endmacro %}
41 |
42 | {% macro string_literal(s) -%}
43 | {{ adapter.dispatch('string_literal', macro_namespace='test')(s) }}
44 | {%- endmacro %}
45 |
46 | {% macro default__string_literal(s) %}
47 | '{{ s }}'::text
48 | {% endmacro %}
49 | """
50 |
51 | schema_yml = """
52 | version: 2
53 |
54 | models:
55 | - name: model1
56 | columns:
57 | - name: model_name
58 | data_tests:
59 | - accepted_values:
60 | values: ['custom_alias']
61 | - name: model2
62 | columns:
63 | - name: model_name
64 | data_tests:
65 | - accepted_values:
66 | values: ['model2']
67 |
68 | """
69 |
--------------------------------------------------------------------------------
/tests/functional/custom_aliases/test_custom_aliases.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from dbt.tests.util import run_dbt
3 |
4 | from tests.functional.custom_aliases import fixtures
5 |
6 |
7 | class TestAliases:
8 | @pytest.fixture(scope="class")
9 | def models(self):
10 | return {
11 | "model1.sql": fixtures.model1_sql,
12 | "model2.sql": fixtures.model2_sql,
13 | "schema.yml": fixtures.schema_yml,
14 | }
15 |
16 | @pytest.fixture(scope="class")
17 | def macros(self):
18 | return {"macros.sql": fixtures.macros_sql}
19 |
20 | def test_customer_alias_name(self, project):
21 | results = run_dbt(["run"])
22 | assert len(results) == 2
23 |
24 | results = run_dbt(["test"])
25 | assert len(results) == 2
26 |
27 |
28 | class TestAliasesWithConfig(TestAliases):
29 | @pytest.fixture(scope="class")
30 | def macros(self):
31 | return {"macros.sql": fixtures.macros_config_sql}
32 |
--------------------------------------------------------------------------------
/tests/functional/custom_singular_tests/test_custom_singular_tests.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | from dbt.tests.util import run_dbt
4 | import pytest
5 |
6 |
7 | # from `test/integration/009_data_test`
8 |
9 | #
10 | # Models
11 | #
12 |
13 | models__table_copy = """
14 | {{
15 | config(
16 | materialized='table'
17 | )
18 | }}
19 |
20 | select * from {{ this.schema }}.seed
21 | """
22 |
23 | #
24 | # Tests
25 | #
26 |
27 | tests__fail_email_is_always_null = """
28 | select *
29 | from {{ ref('table_copy') }}
30 | where email is not null
31 | """
32 |
33 | tests__fail_no_ref = """
34 | select 1
35 | """
36 |
37 | tests__dotted_path_pass_id_not_null = """
38 | {# Same as `pass_id_not_null` but with dots in its name #}
39 |
40 | select *
41 | from {{ ref('table_copy') }}
42 | where id is null
43 | """
44 |
45 | tests__pass_id_not_null = """
46 | select *
47 | from {{ ref('table_copy') }}
48 | where id is null
49 | """
50 |
51 | tests__pass_no_ref = """
52 | select 1 limit 0
53 | """
54 |
55 |
56 | class CustomSingularTestsBase(object):
57 | @pytest.fixture(scope="class", autouse=True)
58 | def setUp(self, project):
59 | """Create seed and downstream model tests are to be run on"""
60 | project.run_sql_file(project.test_data_dir / Path("seed_expected.sql"))
61 |
62 | results = run_dbt()
63 | assert len(results) == 1
64 |
65 | @pytest.fixture(scope="class")
66 | def models(self):
67 | return {"table_copy.sql": models__table_copy}
68 |
69 |
70 | class TestPassingTests(CustomSingularTestsBase):
71 | @pytest.fixture(scope="class")
72 | def tests(self):
73 | return {
74 | "my_db.my_schema.table_copy.pass_id_not_null.sql": tests__dotted_path_pass_id_not_null,
75 | "tests__pass_id_not_null.sql": tests__pass_id_not_null,
76 | "tests__pass_no_ref.sql": tests__pass_no_ref,
77 | }
78 |
79 | def test_data_tests(self, project, tests):
80 | test_results = run_dbt(["test"])
81 | assert len(test_results) == len(tests)
82 |
83 | for result in test_results:
84 | assert result.status == "pass"
85 | assert not result.skipped
86 | assert result.failures == 0
87 |
88 |
89 | class TestFailingTests(CustomSingularTestsBase):
90 | @pytest.fixture(scope="class")
91 | def tests(self):
92 | return {
93 | "tests__fail_email_is_always_null.sql": tests__fail_email_is_always_null,
94 | "tests__fail_no_ref.sql": tests__fail_no_ref,
95 | }
96 |
97 | def test_data_tests(self, project, tests):
98 | """assert that all deliberately failing tests actually fail"""
99 | test_results = run_dbt(["test"], expect_pass=False)
100 | assert len(test_results) == len(tests)
101 |
102 | for result in test_results:
103 | assert result.status == "fail"
104 | assert not result.skipped
105 | assert result.failures > 0
106 | assert result.adapter_response == {
107 | "_message": "SELECT 1",
108 | "code": "SELECT",
109 | "rows_affected": 1,
110 | }
111 |
--------------------------------------------------------------------------------
/tests/functional/dbt_debug/test_dbt_debug.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from tests.functional.utils import run_dbt_and_capture
4 |
5 | MODELS__MODEL_SQL = """
6 | select 1 as id
7 | """
8 |
9 |
10 | class BaseDebug:
11 | @pytest.fixture(scope="class")
12 | def models(self):
13 | return {"model.sql": MODELS__MODEL_SQL}
14 |
15 | @pytest.fixture(autouse=True)
16 | def capsys(self, capsys):
17 | self.capsys = capsys
18 |
19 | def assertGotValue(self, linepat, result):
20 | found = False
21 | output = self.capsys.readouterr().out
22 | for line in output.split("\n"):
23 | if linepat.match(line):
24 | found = True
25 | assert result in line
26 | if not found:
27 | # fail with the captured output when the pattern never matched
28 | msg = f"linepat {linepat} not found in stdout: {output}"
29 | pytest.fail(msg)
30 |
31 | def check_project(self, splitout, msg="ERROR invalid"):
32 | for line in splitout:
33 | if line.strip().startswith("dbt_project.yml file"):
34 | assert msg in line
35 | elif line.strip().startswith("profiles.yml file"):
36 | assert "ERROR invalid" not in line
37 |
38 |
39 | class BaseDebugProfileVariable(BaseDebug):
40 | @pytest.fixture(scope="class")
41 | def project_config_update(self):
42 | return {"config-version": 2, "profile": '{{ "te" ~ "st" }}'}
43 |
44 |
45 | class TestDebugPostgres(BaseDebug):
46 | def test_ok(self, project):
47 | result, log = run_dbt_and_capture(["debug"])
48 | assert "ERROR" not in log
49 |
50 |
51 | class TestDebugProfileVariablePostgres(BaseDebugProfileVariable):
52 | pass
53 |
--------------------------------------------------------------------------------
/tests/functional/dbt_runner.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Callable, List, Optional
3 |
4 | from dbt.cli.main import dbtRunner, dbtRunnerResult
5 | from dbt.contracts.graph.manifest import Manifest
6 | from dbt.tests.util import get_run_results
7 | from dbt_common.events.base_types import EventMsg
8 |
9 |
10 | def assert_run_results_have_compiled_node_attributes(
11 | args: List[str], result: dbtRunnerResult
12 | ) -> None:
13 | commands_with_run_results = ["build", "compile", "docs", "run", "test"]
14 | if not [a for a in args if a in commands_with_run_results] or not result.success:
15 | return
16 |
17 | run_results = get_run_results(os.getcwd())
18 | for r in run_results["results"]:
19 | if r["unique_id"].startswith("model") and r["status"] == "success":
20 | assert "compiled_code" in r
21 | assert "compiled" in r
22 |
23 |
24 | _STANDARD_ASSERTIONS = [assert_run_results_have_compiled_node_attributes]
25 |
26 |
27 | class dbtTestRunner(dbtRunner):
28 | exit_assertions: List[Callable[[List[str], dbtRunnerResult], None]]
29 |
30 | def __init__(
31 | self,
32 | manifest: Optional[Manifest] = None,
33 | callbacks: Optional[List[Callable[[EventMsg], None]]] = None,
34 | exit_assertions: Optional[List[Callable[[List[str], dbtRunnerResult], None]]] = None,
35 | ):
36 | self.exit_assertions = exit_assertions if exit_assertions else _STANDARD_ASSERTIONS # type: ignore
37 | super().__init__(manifest, callbacks)
38 |
39 | def invoke(self, args: List[str], **kwargs) -> dbtRunnerResult:
40 | result = super().invoke(args, **kwargs)
41 |
42 | for assertion in self.exit_assertions:
43 | assertion(args, result)
44 |
45 | return result
46 |
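47 | # Illustrative usage (a sketch, assuming a dbt project and profile are
48 | # already set up): the standard exit assertions run after every invocation.
49 | #   runner = dbtTestRunner()
50 | #   result = runner.invoke(["run"])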
--------------------------------------------------------------------------------
/tests/functional/exit_codes/fixtures.py:
--------------------------------------------------------------------------------
1 | bad_sql = """
2 | select bad sql here
3 | """
4 |
5 | dupe_sql = """
6 | select 1 as id, current_date as updated_at
7 | union all
8 | select 2 as id, current_date as updated_at
9 | union all
10 | select 3 as id, current_date as updated_at
11 | union all
12 | select 4 as id, current_date as updated_at
13 | """
14 |
15 | good_sql = """
16 | select 1 as id, current_date as updated_at
17 | union all
18 | select 2 as id, current_date as updated_at
19 | union all
20 | select 3 as id, current_date as updated_at
21 | union all
22 | select 4 as id, current_date as updated_at
23 | """
24 |
25 | snapshots_good_sql = """
26 | {% snapshot good_snapshot %}
27 | {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at')}}
28 | select * from {{ schema }}.good
29 | {% endsnapshot %}
30 | """
31 |
32 | snapshots_bad_sql = """
33 | {% snapshot good_snapshot %}
34 | {{ config(target_schema=schema, target_database=database, strategy='timestamp', unique_key='id', updated_at='updated_at_not_real')}}
35 | select * from {{ schema }}.good
36 | {% endsnapshot %}
37 | """
38 |
39 | schema_yml = """
40 | version: 2
41 | models:
42 | - name: good
43 | columns:
44 | - name: updated_at
45 | data_tests:
46 | - not_null
47 | - name: bad
48 | columns:
49 | - name: updated_at
50 | data_tests:
51 | - not_null
52 | - name: dupe
53 | columns:
54 | - name: updated_at
55 | data_tests:
56 | - unique
57 | """
58 |
59 | data_seed_good_csv = """a,b,c
60 | 1,2,3
61 | """
62 |
63 | data_seed_bad_csv = """a,b,c
64 | 1,\2,3,a,a,a
65 | """
66 |
--------------------------------------------------------------------------------
/tests/functional/exposures/test_exposures.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import get_manifest, run_dbt
2 | import pytest
3 |
4 | from tests.functional.exposures import fixtures
5 |
6 |
7 | class TestBasicExposures:
8 | @pytest.fixture(scope="class")
9 | def models(self):
10 | return {
11 | "exposure.yml": fixtures.simple_exposure_yml,
12 | "model.sql": fixtures.models_sql,
13 | "metricflow_time_spine.sql": fixtures.metricflow_time_spine_sql,
14 | "second_model.sql": fixtures.second_model_sql,
15 | "schema.yml": fixtures.source_schema_yml,
16 | "semantic_models.yml": fixtures.semantic_models_schema_yml,
17 | "metrics.yml": fixtures.metrics_schema_yml,
18 | }
19 |
20 | def test_names_with_spaces(self, project):
21 | run_dbt(["run"])
22 | manifest = get_manifest(project.project_root)
23 | exposure_ids = list(manifest.exposures.keys())
24 | expected_exposure_ids = [
25 | "exposure.test.simple_exposure",
26 | "exposure.test.notebook_exposure",
27 | ]
28 | assert exposure_ids == expected_exposure_ids
29 | assert manifest.exposures["exposure.test.simple_exposure"].label == "simple exposure label"
30 |
31 | def test_depends_on(self, project):
32 | run_dbt(["run"])
33 | manifest = get_manifest(project.project_root)
34 | exposure_depends_on = manifest.exposures["exposure.test.simple_exposure"].depends_on.nodes
35 | expected_exposure_depends_on = [
36 | "source.test.test_source.test_table",
37 | "model.test.model",
38 | "metric.test.metric",
39 | ]
40 | assert sorted(exposure_depends_on) == sorted(expected_exposure_depends_on)
41 |
--------------------------------------------------------------------------------
/tests/functional/invalid_model_tests/test_model_warning.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import run_dbt
2 | import pytest
3 |
4 |
5 | warnings_sql = """
6 | {% do exceptions.warn('warning: everything is terrible but not that terrible') %}
7 | {{ exceptions.warn("warning: everything is terrible but not that terrible") }}
8 | select 1 as id
9 | """
10 |
11 |
12 | class TestEmitWarning:
13 | @pytest.fixture(scope="class")
14 | def models(self):
15 | return {"warnings.sql": warnings_sql}
16 |
17 | def test_warn(self, project):
18 | run_dbt(["run"], expect_pass=True)
19 |
--------------------------------------------------------------------------------
/tests/functional/macros/data/seed.sql:
--------------------------------------------------------------------------------
1 | create table {schema}.expected_dep_macro (
2 | foo TEXT,
3 | bar TEXT
4 | );
5 |
6 | create table {schema}.expected_local_macro (
7 | foo2 TEXT,
8 | bar2 TEXT
9 | );
10 |
11 | create table {schema}.seed (
12 | id integer,
13 | updated_at timestamp
14 | );
15 |
16 | insert into {schema}.expected_dep_macro (foo, bar)
17 | values ('arg1', 'arg2');
18 |
19 | insert into {schema}.expected_local_macro (foo2, bar2)
20 | values ('arg1', 'arg2'), ('arg3', 'arg4');
21 |
22 | insert into {schema}.seed (id, updated_at)
23 | values (1, '2017-01-01'), (2, '2017-01-02');
24 |
--------------------------------------------------------------------------------
/tests/functional/macros/package_macro_overrides/dbt_project.yml:
--------------------------------------------------------------------------------
1 | name: 'package_macro_overrides'
2 | version: '1.0'
3 | config-version: 2
4 |
5 | profile: 'default'
6 |
7 | macro-paths: ["macros"]
8 |
--------------------------------------------------------------------------------
/tests/functional/macros/package_macro_overrides/macros/macros.sql:
--------------------------------------------------------------------------------
1 | {% macro get_columns_in_relation(relation) %}
2 | {{ return('a string') }}
3 | {% endmacro %}
4 |
--------------------------------------------------------------------------------
/tests/functional/materializations/materialized_view_tests/test_postgres_materialized_view.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from dbt.tests.util import run_dbt
3 |
4 | SEED = """
5 | order_id,customer_id,total_amount,order_date
6 | 1,101,50.00,2024-04-01
7 | 2,102,75.00,2024-04-02
8 | 3,103,100.00,2024-04-03
9 | 4,101,30.00,2024-04-04
10 | 5,104,45.00,2024-04-05
11 | """.strip()
12 |
13 | ORDERS = """
14 | -- models/orders.sql
15 | {{
16 | config(
17 | materialized='materialized_view'
18 | )
19 | }}
20 | SELECT
21 | order_id,
22 | customer_id,
23 | total_amount,
24 | order_date
25 | FROM
26 | {{ ref('source_orders') }}
27 | """
28 |
29 | PRODUCT_SALES = """
30 | {{
31 | config(
32 | materialized='materialized_view'
33 | )
34 | }}
35 | SELECT
36 | order_id,
37 | SUM(total_amount) AS total_sales_amount
38 | FROM
39 | {{ ref('orders') }}
40 | GROUP BY
41 | order_id
42 | """
43 |
44 |
45 | class TestPostgresRefreshMaterializedView:
46 | """
47 | This test addresses an issue in Postgres around materialized views:
48 | renaming a relation whose dependent models are also materialized views.
49 | related pr: https://github.com/dbt-labs/dbt-core/pull/9959
50 | """
51 |
52 | @pytest.fixture(scope="class")
53 | def models(self):
54 | yield {"orders.sql": ORDERS, "product_sales.sql": PRODUCT_SALES}
55 |
56 | @pytest.fixture(scope="class")
57 | def seeds(self):
58 | yield {"source_orders.csv": SEED}
59 |
60 | def test_postgres_refresh_dependent_materialized_views(self, project):
61 | run_dbt(["seed"])
62 | run_dbt(["run", "--full-refresh"])
63 | run_dbt(["run", "--full-refresh"])  # the second full refresh replaces existing matviews, exercising the rename path
64 |
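65 | # Illustrative sketch (assumed SQL, not executed by this test) of the
66 | # failure mode guarded above: a full refresh swaps the relation via a
67 | # rename along the lines of
68 | #   alter materialized view orders rename to orders__dbt_backup;
69 | # which previously misbehaved when a dependent materialized view such as
70 | # product_sales still referenced the relation being swapped.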
--------------------------------------------------------------------------------
/tests/functional/materializations/materialized_view_tests/utils.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List, Optional
2 |
3 | from dbt.adapters.base.relation import BaseRelation
4 |
5 | from dbt.adapters.postgres.relation import PostgresRelation
6 |
7 |
8 | def query_relation_type(project, relation: BaseRelation) -> Optional[str]:
9 | assert isinstance(relation, PostgresRelation)
10 | sql = f"""
11 | select
12 | 'table' as relation_type
13 | from pg_tables
14 | where schemaname = '{relation.schema}'
15 | and tablename = '{relation.identifier}'
16 | union all
17 | select
18 | 'view' as relation_type
19 | from pg_views
20 | where schemaname = '{relation.schema}'
21 | and viewname = '{relation.identifier}'
22 | union all
23 | select
24 | 'materialized_view' as relation_type
25 | from pg_matviews
26 | where schemaname = '{relation.schema}'
27 | and matviewname = '{relation.identifier}'
28 | """
29 | results = project.run_sql(sql, fetch="all")
30 | if len(results) == 0:
31 | return None
32 | elif len(results) > 1:
33 | raise ValueError(f"More than one instance of {relation.name} found!")
34 | else:
35 | return results[0][0]
36 |
37 |
38 | def query_indexes(project, relation: BaseRelation) -> List[Dict[str, str]]:
39 | assert isinstance(relation, PostgresRelation)
40 | # pulled directly from `postgres__describe_indexes_template` and manually verified
41 | sql = f"""
42 | select
43 | i.relname as name,
44 | m.amname as method,
45 | ix.indisunique as "unique",
46 | array_to_string(array_agg(a.attname), ',') as column_names
47 | from pg_index ix
48 | join pg_class i
49 | on i.oid = ix.indexrelid
50 | join pg_am m
51 | on m.oid=i.relam
52 | join pg_class t
53 | on t.oid = ix.indrelid
54 | join pg_namespace n
55 | on n.oid = t.relnamespace
56 | join pg_attribute a
57 | on a.attrelid = t.oid
58 | and a.attnum = ANY(ix.indkey)
59 | where t.relname ilike '{ relation.identifier }'
60 | and n.nspname ilike '{ relation.schema }'
61 | and t.relkind in ('r', 'm')
62 | group by 1, 2, 3
63 | order by 1, 2, 3
64 | """
65 | raw_indexes = project.run_sql(sql, fetch="all")
66 | indexes = [
67 | {
68 | header: value
69 | for header, value in zip(["name", "method", "unique", "column_names"], index)
70 | }
71 | for index in raw_indexes
72 | ]
73 | return indexes
74 |
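75 | # Illustrative usage from a test (assumed relation fixture):
76 | #   query_relation_type(project, relation)  # -> "table", "view", "materialized_view", or None
77 | #   query_indexes(project, relation)  # -> e.g. [{"name": ..., "method": "btree", "unique": ..., "column_names": "id"}]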
--------------------------------------------------------------------------------
/tests/functional/materializations/test_incremental.py:
--------------------------------------------------------------------------------
1 | from dbt.context.providers import generate_runtime_model_context
2 | from dbt.tests.util import get_manifest, run_dbt
3 | from dbt_common.exceptions import DbtRuntimeError
4 | import pytest
5 |
6 |
7 | @pytest.fixture(scope="class")
8 | def models():
9 | return {"my_model.sql": "select 1 as fun"}
10 |
11 |
12 | def test_basic(project):
13 | results = run_dbt(["run"])
14 | assert len(results) == 1
15 |
16 | manifest = get_manifest(project.project_root)
17 | model = manifest.nodes["model.test.my_model"]
18 |
19 | # Normally the context will be provided by the macro that calls the
20 | # get_incremental_strategy_macro method, but for testing purposes
21 | # we create a runtime_model_context.
22 | context = generate_runtime_model_context(
23 | model,
24 | project.adapter.config,
25 | manifest,
26 | )
27 |
28 | macro_func = project.adapter.get_incremental_strategy_macro(context, "default")
29 | assert macro_func
30 | assert type(macro_func).__name__ == "MacroGenerator"
31 |
32 | macro_func = project.adapter.get_incremental_strategy_macro(context, "append")
33 | assert macro_func
34 | assert type(macro_func).__name__ == "MacroGenerator"
35 |
36 | macro_func = project.adapter.get_incremental_strategy_macro(context, "delete+insert")
37 | assert macro_func
38 | assert type(macro_func).__name__ == "MacroGenerator"
39 |
40 | # This incremental strategy only works for Postgres >= 15
41 | macro_func = project.adapter.get_incremental_strategy_macro(context, "merge")
42 | assert macro_func
43 | assert type(macro_func).__name__ == "MacroGenerator"
44 |
45 | # This incremental strategy is not valid for Postgres
46 | with pytest.raises(DbtRuntimeError) as excinfo:
47 | macro_func = project.adapter.get_incremental_strategy_macro(context, "insert_overwrite")
48 | assert "insert_overwrite" in str(excinfo.value)
49 |
--------------------------------------------------------------------------------
/tests/functional/materializations/test_supported_languages.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import run_dbt
2 | import pytest
3 |
4 |
5 | custom_mat_tmpl = """
6 | {% materialization custom_mat{} %}
7 | {%- set target_relation = this.incorporate(type='table') %}
8 | {% call statement('main') -%}
9 | select 1 as column1
10 | {%- endcall %}
11 | {{ return({'relations': [target_relation]}) }}
12 | {% endmaterialization %}
13 | """
14 |
15 | models__sql_model = """
16 | {{ config(materialized='custom_mat') }}
17 | select 1 as fun
18 | """
19 |
20 | models__py_model = """
21 | def model(dbt, session):
22 | dbt.config(materialized='custom_mat')
23 | return
24 | """
25 |
26 |
27 | class SupportedLanguageBase:
28 | model_map = {
29 | "sql": ("sql_model.sql", models__sql_model),
30 | "python": ("py_model.py", models__py_model),
31 | }
32 |
33 | @pytest.fixture(scope="class")
34 | def macros(self):
35 | custom_mat = custom_mat_tmpl.replace("{}", "")  # default: no supported_languages argument
36 |
37 | if hasattr(self, "supported_langs"):
38 | custom_mat = custom_mat_tmpl.replace(
39 | "{}", f", supported_languages=[{self.lang_list()}]"
40 | )
41 | return {"custom_mat.sql": custom_mat}
42 |
43 | @pytest.fixture(scope="class")
44 | def models(self):
45 | file_name, model = self.model_map[self.use_lang]
46 | return {file_name: model}
47 |
48 | def lang_list(self):
49 | return ", ".join([f"'{l}'" for l in self.supported_langs])
50 |
51 | def test_language(self, project):
52 | result = run_dbt(["run"], expect_pass=self.expect_pass)
53 | if not self.expect_pass:
54 | assert "only supports languages" in result.results[0].message
55 |
56 |
57 | class TestSupportedLanguages_SupportsDefault_UsingSql(SupportedLanguageBase):
58 | use_lang = "sql"
59 | expect_pass = True
60 |
61 |
62 | class TestSupportedLanguages_SupportsDefault_UsingPython(SupportedLanguageBase):
63 | use_lang = "python"
64 | expect_pass = False
65 |
66 |
67 | class TestSupportedLanguages_SupportsSql_UsingSql(SupportedLanguageBase):
68 | supported_langs = ["sql"]
69 | use_lang = "sql"
70 | expect_pass = True
71 |
72 |
73 | class TestSupportedLanguages_SupportsSql_UsingPython(SupportedLanguageBase):
74 | supported_langs = ["sql"]
75 | use_lang = "python"
76 | expect_pass = False
77 |
78 |
79 | class TestSupportedLanguages_SupportsPython_UsingSql(SupportedLanguageBase):
80 | supported_langs = ["python"]
81 | use_lang = "sql"
82 | expect_pass = False
83 |
84 |
85 | class TestSupportedLanguages_SupportsPython_UsingPython(SupportedLanguageBase):
86 | supported_langs = ["python"]
87 | use_lang = "python"
88 | expect_pass = True
89 |
90 |
91 | class TestSupportedLanguages_SupportsSqlAndPython_UsingSql(SupportedLanguageBase):
92 | supported_langs = ["sql", "python"]
93 | use_lang = "sql"
94 | expect_pass = True
95 |
96 |
97 | class TestSupportedLanguages_SupportsSqlAndPython_UsingPython(SupportedLanguageBase):
98 | supported_langs = ["sql", "python"]
99 | use_lang = "python"
100 | expect_pass = True
101 |
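102 | # For illustration: with supported_langs = ["sql"], the macros fixture
103 | # above renders the materialization header as
104 | #   {% materialization custom_mat, supported_languages=['sql'] %}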
--------------------------------------------------------------------------------
/tests/functional/postgres/fixtures.py:
--------------------------------------------------------------------------------
1 | models__incremental_sql = """
2 | {{
3 | config(
4 | materialized = "incremental",
5 | indexes=[
6 | {'columns': ['column_a'], 'type': 'hash'},
7 | {'columns': ['column_a', 'column_b'], 'unique': True},
8 | ]
9 | )
10 | }}
11 |
12 | select *
13 | from (
14 | select 1 as column_a, 2 as column_b
15 | ) t
16 |
17 | {% if is_incremental() %}
18 | where column_a > (select max(column_a) from {{this}})
19 | {% endif %}
20 |
21 | """
22 |
23 | models__table_sql = """
24 | {{
25 | config(
26 | materialized = "table",
27 | indexes=[
28 | {'columns': ['column_a']},
29 | {'columns': ['column_b']},
30 | {'columns': ['column_a', 'column_b']},
31 | {'columns': ['column_b', 'column_a'], 'type': 'btree', 'unique': True},
32 | {'columns': ['column_a'], 'type': 'hash'}
33 | ]
34 | )
35 | }}
36 |
37 | select 1 as column_a, 2 as column_b
38 |
39 | """
40 |
41 | models_invalid__invalid_columns_type_sql = """
42 | {{
43 | config(
44 | materialized = "table",
45 | indexes=[
46 | {'columns': 'column_a, column_b'},
47 | ]
48 | )
49 | }}
50 |
51 | select 1 as column_a, 2 as column_b
52 |
53 | """
54 |
55 | models_invalid__invalid_type_sql = """
56 | {{
57 | config(
58 | materialized = "table",
59 | indexes=[
60 | {'columns': ['column_a'], 'type': 'non_existent_type'},
61 | ]
62 | )
63 | }}
64 |
65 | select 1 as column_a, 2 as column_b
66 |
67 | """
68 |
69 | models_invalid__invalid_unique_config_sql = """
70 | {{
71 | config(
72 | materialized = "table",
73 | indexes=[
74 | {'columns': ['column_a'], 'unique': 'yes'},
75 | ]
76 | )
77 | }}
78 |
79 | select 1 as column_a, 2 as column_b
80 |
81 | """
82 |
83 | models_invalid__missing_columns_sql = """
84 | {{
85 | config(
86 | materialized = "table",
87 | indexes=[
88 | {'unique': True},
89 | ]
90 | )
91 | }}
92 |
93 | select 1 as column_a, 2 as column_b
94 |
95 | """
96 |
97 | snapshots__colors_sql = """
98 | {% snapshot colors %}
99 |
100 | {{
101 | config(
102 | target_database=database,
103 | target_schema=schema,
104 | unique_key='id',
105 | strategy='check',
106 | check_cols=['color'],
107 | indexes=[
108 | {'columns': ['id'], 'type': 'hash'},
109 | {'columns': ['id', 'color'], 'unique': True},
110 | ]
111 | )
112 | }}
113 |
114 | {% if var('version') == 1 %}
115 |
116 | select 1 as id, 'red' as color union all
117 | select 2 as id, 'green' as color
118 |
119 | {% else %}
120 |
121 | select 1 as id, 'blue' as color union all
122 | select 2 as id, 'green' as color
123 |
124 | {% endif %}
125 |
126 | {% endsnapshot %}
127 |
128 | """
129 |
130 | seeds__seed_csv = """country_code,country_name
131 | US,United States
132 | CA,Canada
133 | GB,United Kingdom
134 | """
135 |
--------------------------------------------------------------------------------
/tests/functional/projects/__init__.py:
--------------------------------------------------------------------------------
1 | from tests.functional.projects.dbt_integration import dbt_integration
2 | from tests.functional.projects.graph_selection import GraphSelection
3 | from tests.functional.projects.jaffle_shop import JaffleShop
4 |
--------------------------------------------------------------------------------
/tests/functional/projects/dbt_integration/__init__.py:
--------------------------------------------------------------------------------
1 | from functools import partial
2 |
3 | from tests.functional.projects.utils import read
4 |
5 |
6 | read_macro = partial(read, "dbt_integration", "macros")
7 | read_model = partial(read, "dbt_integration", "models")
8 | read_schema = partial(read, "dbt_integration", "schemas")
9 |
10 |
11 | def dbt_integration():
12 | return {
13 | "dbt_project.yml": read_schema("project"),
14 | "macros": {"do_something.sql": read_macro("do_something")},
15 | "models": {
16 | "schema.yml": read_schema("schema"),
17 | "incremental.sql": read_model("incremental"),
18 | "table_model.sql": read_model("table"),
19 | "view_model.sql": read_model("view"),
20 | },
21 | }
22 |
--------------------------------------------------------------------------------
/tests/functional/projects/dbt_integration/macros/do_something.sql:
--------------------------------------------------------------------------------
1 | {% macro do_something(foo, bar) %}
2 |
3 | select
4 | '{{ foo }}'::text as foo,
5 | '{{ bar }}'::text as bar
6 |
7 | {% endmacro %}
8 |
--------------------------------------------------------------------------------
/tests/functional/projects/dbt_integration/models/incremental.sql:
--------------------------------------------------------------------------------
1 | {{ config(
2 | materialized = 'incremental',
3 | unique_key = 'id',
4 | ) }}
5 |
6 | select * from {{ this.schema }}.seed
7 |
8 | {% if is_incremental() %}
9 | where updated_at > (select max(updated_at) from {{ this }})
10 | {% endif %}
11 |
--------------------------------------------------------------------------------
/tests/functional/projects/dbt_integration/models/table.sql:
--------------------------------------------------------------------------------
1 | {{ config(materialized = 'table') }}
2 | select * from {{ this.schema }}.seed
3 |
--------------------------------------------------------------------------------
/tests/functional/projects/dbt_integration/models/view.sql:
--------------------------------------------------------------------------------
1 | {{ config(materialized = 'view') }}
2 | select * from {{ this.schema }}.seed
3 |
--------------------------------------------------------------------------------
/tests/functional/projects/dbt_integration/schemas/project.yml:
--------------------------------------------------------------------------------
1 | name: dbt_integration_project
2 | version: '1.0'
3 | config-version: 2
4 |
5 | model-paths: ["models"] # paths to models
6 | analysis-paths: ["analyses"] # path with analysis files which are compiled, but not run
7 | target-path: "target" # path for compiled code
8 | clean-targets: ["target"] # directories removed by the clean task
9 | test-paths: ["tests"] # where to store test results
10 | seed-paths: ["seeds"] # load CSVs from this directory with `dbt seed`
11 | macro-paths: ["macros"] # where to find macros
12 |
13 | profile: user
14 |
15 | models:
16 | dbt_integration_project:
17 |
--------------------------------------------------------------------------------
/tests/functional/projects/dbt_integration/schemas/schema.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | models:
4 | - name: table_model
5 | columns:
6 | - name: id
7 | data_tests:
8 | - unique
9 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/__init__.py:
--------------------------------------------------------------------------------
1 | from functools import partial
2 |
3 | import pytest
4 |
5 | from tests.functional.projects.utils import read
6 |
7 |
8 | read_data = partial(read, "graph_selection", "data")
9 | read_model = partial(read, "graph_selection", "models")
10 | read_schema = partial(read, "graph_selection", "schemas")
11 |
12 |
13 | class GraphSelection:
14 | @pytest.fixture(scope="class")
15 | def models(self):
16 | return {
17 | "schema.yml": read_schema("schema"),
18 | "patch_path_selection_schema.yml": read_schema("patch_path_selection"),
19 | "base_users.sql": read_model("base_users"),
20 | "users.sql": read_model("users"),
21 | "versioned_v3.sql": read_model("base_users"),
22 | "users_rollup.sql": read_model("users_rollup"),
23 | "users_rollup_dependency.sql": read_model("users_rollup_dependency"),
24 | "emails.sql": read_model("emails"),
25 | "emails_alt.sql": read_model("emails_alt"),
26 | "alternative.users.sql": read_model("alternative_users"),
27 | "never_selected.sql": read_model("never_selected"),
28 | "test": {
29 | "subdir.sql": read_model("subdir"),
30 | "versioned_v2.sql": read_model("subdir"),
31 | "subdir": {
32 | "nested_users.sql": read_model("nested_users"),
33 | "versioned_v1.sql": read_model("nested_users"),
34 | },
35 | },
36 | }
37 |
38 | @pytest.fixture(scope="class")
39 | def seeds(self, test_data_dir):
40 | return {
41 | "properties.yml": read_schema("properties"),
42 | "seed.csv": read_data("seed"),
43 | "summary_expected.csv": read_data("summary_expected"),
44 | }
45 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/data/summary_expected.csv:
--------------------------------------------------------------------------------
1 | gender,ct
2 | Female,40
3 | Male,60
4 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/models/alternative_users.sql:
--------------------------------------------------------------------------------
1 | {# Same as `users` model, but with dots in the model name #}
2 | {{ config(
3 | materialized = 'table',
4 | tags=['dots']
5 | ) }}
6 |
7 | select * from {{ ref('base_users') }}
8 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/models/base_users.sql:
--------------------------------------------------------------------------------
1 | {{ config(
2 | materialized = 'ephemeral',
3 | tags = ['base']
4 | ) }}
5 |
6 | select * from {{ source('raw', 'seed') }}
7 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/models/emails.sql:
--------------------------------------------------------------------------------
1 | {{ config(
2 | materialized='ephemeral',
3 | tags=['base']
4 | ) }}
5 |
6 | select distinct email from {{ ref('base_users') }}
7 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/models/emails_alt.sql:
--------------------------------------------------------------------------------
1 | select distinct email from {{ ref('users') }}
2 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/models/nested_users.sql:
--------------------------------------------------------------------------------
1 | select 1 as id
2 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/models/never_selected.sql:
--------------------------------------------------------------------------------
1 | {{ config(schema='_and_then') }}
2 | select * from {{ this.schema }}.seed
3 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/models/subdir.sql:
--------------------------------------------------------------------------------
1 | select 1 as id
2 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/models/users.sql:
--------------------------------------------------------------------------------
1 | {{ config(
2 | materialized = 'table',
3 | tags=['bi', 'users']
4 | ) }}
5 |
6 | select * from {{ ref('base_users') }}
7 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/models/users_rollup.sql:
--------------------------------------------------------------------------------
1 | {{ config(
2 | materialized = 'view',
3 | tags = 'bi'
4 | ) }}
5 |
6 | with users as (
7 | select * from {{ ref('users') }}
8 | )
9 |
10 | select
11 | gender,
12 | count(*) as ct
13 | from users
14 | group by 1
15 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/models/users_rollup_dependency.sql:
--------------------------------------------------------------------------------
1 | {{ config(materialized='table') }}
2 | select * from {{ ref('users_rollup') }}
3 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/schemas/patch_path_selection.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | models:
4 | - name: subdir
5 | description: submarine sandwich directory
6 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/schemas/properties.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | seeds:
4 | - name: summary_expected
5 | config:
6 | column_types:
7 | ct: BIGINT
8 | gender: text
9 |
--------------------------------------------------------------------------------
/tests/functional/projects/graph_selection/schemas/schema.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | groups:
4 | - name: emails_group
5 | owner:
6 | name: Jeremy
7 | email: data@jer.co
8 | slack: talk-jerco-memes
9 | github: jtcohen6
10 | whatever: you want
11 | - name: users_group
12 | owner:
13 | name: Jeremy
14 | email: data@jer.co
15 | slack: talk-jerco-memes
16 | github: jtcohen6
17 | whatever: you want
18 | - name: users_rollup_group
19 | owner:
20 | name: Jeremy
21 | email: data@jer.co
22 | slack: talk-jerco-memes
23 | github: jtcohen6
24 | whatever: you want
25 |
26 | models:
27 | - name: emails
28 | group: emails_group
29 | columns:
30 | - name: email
31 | data_tests:
32 | - not_null:
33 | severity: warn
34 | - name: users
35 | group: users_group
36 | columns:
37 | - name: id
38 | data_tests:
39 | - unique
40 | - name: users_rollup
41 | group: users_rollup_group
42 | columns:
43 | - name: gender
44 | data_tests:
45 | - unique
46 | - name: versioned
47 | latest_version: 2
48 | versions:
49 | - v: 0
50 | - v: 1
51 | - v: 2
52 | - v: 3
53 | - v: 4.5
54 | - v: "5.0"
55 | - v: 21
56 | - v: "test"
57 |
58 | sources:
59 | - name: raw
60 | schema: '{{ target.schema }}'
61 | tables:
62 | - name: seed
63 |
64 | exposures:
65 | - name: user_exposure
66 | type: dashboard
67 | depends_on:
68 | - ref('users')
69 | - ref('users_rollup')
70 | - ref('versioned', v=3)
71 | owner:
72 | email: nope@example.com
73 | - name: seed_ml_exposure
74 | type: ml
75 | depends_on:
76 | - source('raw', 'seed')
77 | owner:
78 | email: nope@example.com
79 |
--------------------------------------------------------------------------------
/tests/functional/projects/jaffle_shop/__init__.py:
--------------------------------------------------------------------------------
1 | from functools import partial
2 |
3 | import pytest
4 |
5 | from tests.functional.projects.utils import read
6 |
7 |
8 | read_data = partial(read, "jaffle_shop", "data")
9 | read_doc = partial(read, "jaffle_shop", "docs")
10 | read_model = partial(read, "jaffle_shop", "models")
11 | read_schema = partial(read, "jaffle_shop", "schemas")
12 | read_staging = partial(read, "jaffle_shop", "staging")
13 |
14 |
15 | class JaffleShop:
16 | @pytest.fixture(scope="class")
17 | def models(self):
18 | return {
19 | "customers.sql": read_model("customers"),
20 | "docs.md": read_doc("docs"),
21 | "orders.sql": read_model("orders"),
22 | "ignored_model1.sql": "select 1 as id",
23 | "ignored_model2.sql": "select 1 as id",
24 | "overview.md": read_doc("overview"),
25 | "schema.yml": read_schema("jaffle_shop"),
26 | "ignore_folder": {
27 | "model1.sql": "select 1 as id",
28 | "model2.sql": "select 1 as id",
29 | },
30 | "staging": {
31 | "schema.yml": read_schema("staging"),
32 | "stg_customers.sql": read_staging("stg_customers"),
33 | "stg_orders.sql": read_staging("stg_orders"),
34 | "stg_payments.sql": read_staging("stg_payments"),
35 | },
36 | }
37 |
38 | @pytest.fixture(scope="class")
39 | def seeds(self):
40 | return {
41 | "raw_customers.csv": read_data("raw_customers"),
42 | "raw_orders.csv": read_data("raw_orders"),
43 | "raw_payments.csv": read_data("raw_payments"),
44 | }
45 |
46 | @pytest.fixture(scope="class")
47 | def project_config_update(self):
48 | return {
49 | "name": "jaffle_shop",
50 | "models": {
51 | "jaffle_shop": {
52 | "materialized": "table",
53 | "staging": {
54 | "materialized": "view",
55 | },
56 | }
57 | },
58 | }
59 |
--------------------------------------------------------------------------------
/tests/functional/projects/jaffle_shop/data/raw_customers.csv:
--------------------------------------------------------------------------------
1 | id,first_name,last_name
2 | 1,Michael,P.
3 | 2,Shawn,M.
4 | 3,Kathleen,P.
5 | 4,Jimmy,C.
6 | 5,Katherine,R.
7 | 6,Sarah,R.
8 | 7,Martin,M.
9 | 8,Frank,R.
10 | 9,Jennifer,F.
11 | 10,Henry,W.
12 | 11,Fred,S.
13 | 12,Amy,D.
14 | 13,Kathleen,M.
15 | 14,Steve,F.
16 | 15,Teresa,H.
17 | 16,Amanda,H.
18 | 17,Kimberly,R.
19 | 18,Johnny,K.
20 | 19,Virginia,F.
21 | 20,Anna,A.
22 | 21,Willie,H.
23 | 22,Sean,H.
24 | 23,Mildred,A.
25 | 24,David,G.
26 | 25,Victor,H.
27 | 26,Aaron,R.
28 | 27,Benjamin,B.
29 | 28,Lisa,W.
30 | 29,Benjamin,K.
31 | 30,Christina,W.
32 | 31,Jane,G.
33 | 32,Thomas,O.
34 | 33,Katherine,M.
35 | 34,Jennifer,S.
36 | 35,Sara,T.
37 | 36,Harold,O.
38 | 37,Shirley,J.
39 | 38,Dennis,J.
40 | 39,Louise,W.
41 | 40,Maria,A.
42 | 41,Gloria,C.
43 | 42,Diana,S.
44 | 43,Kelly,N.
45 | 44,Jane,R.
46 | 45,Scott,B.
47 | 46,Norma,C.
48 | 47,Marie,P.
49 | 48,Lillian,C.
50 | 49,Judy,N.
51 | 50,Billy,L.
52 | 51,Howard,R.
53 | 52,Laura,F.
54 | 53,Anne,B.
55 | 54,Rose,M.
56 | 55,Nicholas,R.
57 | 56,Joshua,K.
58 | 57,Paul,W.
59 | 58,Kathryn,K.
60 | 59,Adam,A.
61 | 60,Norma,W.
62 | 61,Timothy,R.
63 | 62,Elizabeth,P.
64 | 63,Edward,G.
65 | 64,David,C.
66 | 65,Brenda,W.
67 | 66,Adam,W.
68 | 67,Michael,H.
69 | 68,Jesse,E.
70 | 69,Janet,P.
71 | 70,Helen,F.
72 | 71,Gerald,C.
73 | 72,Kathryn,O.
74 | 73,Alan,B.
75 | 74,Harry,A.
76 | 75,Andrea,H.
77 | 76,Barbara,W.
78 | 77,Anne,W.
79 | 78,Harry,H.
80 | 79,Jack,R.
81 | 80,Phillip,H.
82 | 81,Shirley,H.
83 | 82,Arthur,D.
84 | 83,Virginia,R.
85 | 84,Christina,R.
86 | 85,Theresa,M.
87 | 86,Jason,C.
88 | 87,Phillip,B.
89 | 88,Adam,T.
90 | 89,Margaret,J.
91 | 90,Paul,P.
92 | 91,Todd,W.
93 | 92,Willie,O.
94 | 93,Frances,R.
95 | 94,Gregory,H.
96 | 95,Lisa,P.
97 | 96,Jacqueline,A.
98 | 97,Shirley,D.
99 | 98,Nicole,M.
100 | 99,Mary,G.
101 | 100,Jean,M.
102 |
--------------------------------------------------------------------------------
/tests/functional/projects/jaffle_shop/data/raw_orders.csv:
--------------------------------------------------------------------------------
1 | id,user_id,order_date,status
2 | 1,1,2018-01-01,returned
3 | 2,3,2018-01-02,completed
4 | 3,94,2018-01-04,completed
5 | 4,50,2018-01-05,completed
6 | 5,64,2018-01-05,completed
7 | 6,54,2018-01-07,completed
8 | 7,88,2018-01-09,completed
9 | 8,2,2018-01-11,returned
10 | 9,53,2018-01-12,completed
11 | 10,7,2018-01-14,completed
12 | 11,99,2018-01-14,completed
13 | 12,59,2018-01-15,completed
14 | 13,84,2018-01-17,completed
15 | 14,40,2018-01-17,returned
16 | 15,25,2018-01-17,completed
17 | 16,39,2018-01-18,completed
18 | 17,71,2018-01-18,completed
19 | 18,64,2018-01-20,returned
20 | 19,54,2018-01-22,completed
21 | 20,20,2018-01-23,completed
22 | 21,71,2018-01-23,completed
23 | 22,86,2018-01-24,completed
24 | 23,22,2018-01-26,return_pending
25 | 24,3,2018-01-27,completed
26 | 25,51,2018-01-28,completed
27 | 26,32,2018-01-28,completed
28 | 27,94,2018-01-29,completed
29 | 28,8,2018-01-29,completed
30 | 29,57,2018-01-31,completed
31 | 30,69,2018-02-02,completed
32 | 31,16,2018-02-02,completed
33 | 32,28,2018-02-04,completed
34 | 33,42,2018-02-04,completed
35 | 34,38,2018-02-06,completed
36 | 35,80,2018-02-08,completed
37 | 36,85,2018-02-10,completed
38 | 37,1,2018-02-10,completed
39 | 38,51,2018-02-10,completed
40 | 39,26,2018-02-11,completed
41 | 40,33,2018-02-13,completed
42 | 41,99,2018-02-14,completed
43 | 42,92,2018-02-16,completed
44 | 43,31,2018-02-17,completed
45 | 44,66,2018-02-17,completed
46 | 45,22,2018-02-17,completed
47 | 46,6,2018-02-19,completed
48 | 47,50,2018-02-20,completed
49 | 48,27,2018-02-21,completed
50 | 49,35,2018-02-21,completed
51 | 50,51,2018-02-23,completed
52 | 51,71,2018-02-24,completed
53 | 52,54,2018-02-25,return_pending
54 | 53,34,2018-02-26,completed
55 | 54,54,2018-02-26,completed
56 | 55,18,2018-02-27,completed
57 | 56,79,2018-02-28,completed
58 | 57,93,2018-03-01,completed
59 | 58,22,2018-03-01,completed
60 | 59,30,2018-03-02,completed
61 | 60,12,2018-03-03,completed
62 | 61,63,2018-03-03,completed
63 | 62,57,2018-03-05,completed
64 | 63,70,2018-03-06,completed
65 | 64,13,2018-03-07,completed
66 | 65,26,2018-03-08,completed
67 | 66,36,2018-03-10,completed
68 | 67,79,2018-03-11,completed
69 | 68,53,2018-03-11,completed
70 | 69,3,2018-03-11,completed
71 | 70,8,2018-03-12,completed
72 | 71,42,2018-03-12,shipped
73 | 72,30,2018-03-14,shipped
74 | 73,19,2018-03-16,completed
75 | 74,9,2018-03-17,shipped
76 | 75,69,2018-03-18,completed
77 | 76,25,2018-03-20,completed
78 | 77,35,2018-03-21,shipped
79 | 78,90,2018-03-23,shipped
80 | 79,52,2018-03-23,shipped
81 | 80,11,2018-03-23,shipped
82 | 81,76,2018-03-23,shipped
83 | 82,46,2018-03-24,shipped
84 | 83,54,2018-03-24,shipped
85 | 84,70,2018-03-26,placed
86 | 85,47,2018-03-26,shipped
87 | 86,68,2018-03-26,placed
88 | 87,46,2018-03-27,placed
89 | 88,91,2018-03-27,shipped
90 | 89,21,2018-03-28,placed
91 | 90,66,2018-03-30,shipped
92 | 91,47,2018-03-31,placed
93 | 92,84,2018-04-02,placed
94 | 93,66,2018-04-03,placed
95 | 94,63,2018-04-03,placed
96 | 95,27,2018-04-04,placed
97 | 96,90,2018-04-06,placed
98 | 97,89,2018-04-07,placed
99 | 98,41,2018-04-07,placed
100 | 99,85,2018-04-09,placed
101 |
--------------------------------------------------------------------------------
/tests/functional/projects/jaffle_shop/data/raw_payments.csv:
--------------------------------------------------------------------------------
1 | id,order_id,payment_method,amount
2 | 1,1,credit_card,1000
3 | 2,2,credit_card,2000
4 | 3,3,coupon,100
5 | 4,4,coupon,2500
6 | 5,5,bank_transfer,1700
7 | 6,6,credit_card,600
8 | 7,7,credit_card,1600
9 | 8,8,credit_card,2300
10 | 9,9,gift_card,2300
11 | 10,9,bank_transfer,0
12 | 11,10,bank_transfer,2600
13 | 12,11,credit_card,2700
14 | 13,12,credit_card,100
15 | 14,13,credit_card,500
16 | 15,13,bank_transfer,1400
17 | 16,14,bank_transfer,300
18 | 17,15,coupon,2200
19 | 18,16,credit_card,1000
20 | 19,17,bank_transfer,200
21 | 20,18,credit_card,500
22 | 21,18,credit_card,800
23 | 22,19,gift_card,600
24 | 23,20,bank_transfer,1500
25 | 24,21,credit_card,1200
26 | 25,22,bank_transfer,800
27 | 26,23,gift_card,2300
28 | 27,24,coupon,2600
29 | 28,25,bank_transfer,2000
30 | 29,25,credit_card,2200
31 | 30,25,coupon,1600
32 | 31,26,credit_card,3000
33 | 32,27,credit_card,2300
34 | 33,28,bank_transfer,1900
35 | 34,29,bank_transfer,1200
36 | 35,30,credit_card,1300
37 | 36,31,credit_card,1200
38 | 37,32,credit_card,300
39 | 38,33,credit_card,2200
40 | 39,34,bank_transfer,1500
41 | 40,35,credit_card,2900
42 | 41,36,bank_transfer,900
43 | 42,37,credit_card,2300
44 | 43,38,credit_card,1500
45 | 44,39,bank_transfer,800
46 | 45,40,credit_card,1400
47 | 46,41,credit_card,1700
48 | 47,42,coupon,1700
49 | 48,43,gift_card,1800
50 | 49,44,gift_card,1100
51 | 50,45,bank_transfer,500
52 | 51,46,bank_transfer,800
53 | 52,47,credit_card,2200
54 | 53,48,bank_transfer,300
55 | 54,49,credit_card,600
56 | 55,49,credit_card,900
57 | 56,50,credit_card,2600
58 | 57,51,credit_card,2900
59 | 58,51,credit_card,100
60 | 59,52,bank_transfer,1500
61 | 60,53,credit_card,300
62 | 61,54,credit_card,1800
63 | 62,54,bank_transfer,1100
64 | 63,55,credit_card,2900
65 | 64,56,credit_card,400
66 | 65,57,bank_transfer,200
67 | 66,58,coupon,1800
68 | 67,58,gift_card,600
69 | 68,59,gift_card,2800
70 | 69,60,credit_card,400
71 | 70,61,bank_transfer,1600
72 | 71,62,gift_card,1400
73 | 72,63,credit_card,2900
74 | 73,64,bank_transfer,2600
75 | 74,65,credit_card,0
76 | 75,66,credit_card,2800
77 | 76,67,bank_transfer,400
78 | 77,67,credit_card,1900
79 | 78,68,credit_card,1600
80 | 79,69,credit_card,1900
81 | 80,70,credit_card,2600
82 | 81,71,credit_card,500
83 | 82,72,credit_card,2900
84 | 83,73,bank_transfer,300
85 | 84,74,credit_card,3000
86 | 85,75,credit_card,1900
87 | 86,76,coupon,200
88 | 87,77,credit_card,0
89 | 88,77,bank_transfer,1900
90 | 89,78,bank_transfer,2600
91 | 90,79,credit_card,1800
92 | 91,79,credit_card,900
93 | 92,80,gift_card,300
94 | 93,81,coupon,200
95 | 94,82,credit_card,800
96 | 95,83,credit_card,100
97 | 96,84,bank_transfer,2500
98 | 97,85,bank_transfer,1700
99 | 98,86,coupon,2300
100 | 99,87,gift_card,3000
101 | 100,87,credit_card,2600
102 | 101,88,credit_card,2900
103 | 102,89,bank_transfer,2200
104 | 103,90,bank_transfer,200
105 | 104,91,credit_card,1900
106 | 105,92,bank_transfer,1500
107 | 106,92,coupon,200
108 | 107,93,gift_card,2600
109 | 108,94,coupon,700
110 | 109,95,coupon,2400
111 | 110,96,gift_card,1700
112 | 111,97,bank_transfer,1400
113 | 112,98,bank_transfer,1000
114 | 113,99,credit_card,2400
115 |
--------------------------------------------------------------------------------
/tests/functional/projects/jaffle_shop/docs/docs.md:
--------------------------------------------------------------------------------
1 | {% docs orders_status %}
2 |
3 | Orders can be one of the following statuses:
4 |
5 | | status | description |
6 | |----------------|------------------------------------------------------------------------------------------------------------------------|
7 | | placed | The order has been placed but has not yet left the warehouse |
8 | | shipped | The order has been shipped to the customer and is currently in transit |
9 | | completed | The order has been received by the customer |
10 | | return_pending | The customer has indicated that they would like to return the order, but it has not yet been received at the warehouse |
11 | | returned | The order has been returned by the customer and received at the warehouse |
12 |
13 |
14 | {% enddocs %}
15 |
--------------------------------------------------------------------------------
/tests/functional/projects/jaffle_shop/docs/overview.md:
--------------------------------------------------------------------------------
1 | {% docs __overview__ %}
2 |
3 | ## Data Documentation for Jaffle Shop
4 |
5 | `jaffle_shop` is a fictional ecommerce store.
6 |
7 | This [dbt](https://www.getdbt.com/) project is for testing out code.
8 |
9 | The source code can be found [here](https://github.com/clrcrl/jaffle_shop).
10 |
11 | {% enddocs %}
12 |
--------------------------------------------------------------------------------
/tests/functional/projects/jaffle_shop/models/customers.sql:
--------------------------------------------------------------------------------
1 | with customers as (
2 |
3 | select * from {{ ref('stg_customers') }}
4 |
5 | ),
6 |
7 | orders as (
8 |
9 | select * from {{ ref('stg_orders') }}
10 |
11 | ),
12 |
13 | payments as (
14 |
15 | select * from {{ ref('stg_payments') }}
16 |
17 | ),
18 |
19 | customer_orders as (
20 |
21 | select
22 | customer_id,
23 |
24 | min(order_date) as first_order,
25 | max(order_date) as most_recent_order,
26 | count(order_id) as number_of_orders
27 | from orders
28 |
29 | group by customer_id
30 |
31 | ),
32 |
33 | customer_payments as (
34 |
35 | select
36 | orders.customer_id,
37 | sum(amount) as total_amount
38 |
39 | from payments
40 |
41 | left join orders on
42 | payments.order_id = orders.order_id
43 |
44 | group by orders.customer_id
45 |
46 | ),
47 |
48 | final as (
49 |
50 | select
51 | customers.customer_id,
52 | customers.first_name,
53 | customers.last_name,
54 | customer_orders.first_order,
55 | customer_orders.most_recent_order,
56 | customer_orders.number_of_orders,
57 | customer_payments.total_amount as customer_lifetime_value
58 |
59 | from customers
60 |
61 | left join customer_orders
62 | on customers.customer_id = customer_orders.customer_id
63 |
64 | left join customer_payments
65 | on customers.customer_id = customer_payments.customer_id
66 |
67 | )
68 |
69 | select * from final
70 |
--------------------------------------------------------------------------------
/tests/functional/projects/jaffle_shop/models/orders.sql:
--------------------------------------------------------------------------------
1 | {% set payment_methods = ['credit_card', 'coupon', 'bank_transfer', 'gift_card'] %}
2 |
3 | with orders as (
4 |
5 | select * from {{ ref('stg_orders') }}
6 |
7 | ),
8 |
9 | payments as (
10 |
11 | select * from {{ ref('stg_payments') }}
12 |
13 | ),
14 |
15 | order_payments as (
16 |
17 | select
18 | order_id,
19 |
20 | {% for payment_method in payment_methods -%}
21 | sum(case when payment_method = '{{ payment_method }}' then amount else 0 end) as {{ payment_method }}_amount,
22 | {% endfor -%}
23 |
24 | sum(amount) as total_amount
25 |
26 | from payments
27 |
28 | group by order_id
29 |
30 | ),
31 |
32 | final as (
33 |
34 | select
35 | orders.order_id,
36 | orders.customer_id,
37 | orders.order_date,
38 | orders.status,
39 |
40 | {% for payment_method in payment_methods -%}
41 |
42 | order_payments.{{ payment_method }}_amount,
43 |
44 | {% endfor -%}
45 |
46 | order_payments.total_amount as amount
47 |
48 | from orders
49 |
50 |
51 | left join order_payments
52 | on orders.order_id = order_payments.order_id
53 |
54 | )
55 |
56 | select * from final
57 |
--------------------------------------------------------------------------------
/tests/functional/projects/jaffle_shop/schemas/jaffle_shop.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | models:
4 | - name: customers
5 | description: This table has basic information about a customer, as well as some derived facts based on a customer's orders
6 |
7 | columns:
8 | - name: customer_id
9 | description: This is a unique identifier for a customer
10 | data_tests:
11 | - unique
12 | - not_null
13 |
14 | - name: first_name
15 | description: Customer's first name. PII.
16 |
17 | - name: last_name
18 | description: Customer's last name. PII.
19 |
20 | - name: first_order
21 | description: Date (UTC) of a customer's first order
22 |
23 | - name: most_recent_order
24 | description: Date (UTC) of a customer's most recent order
25 |
26 | - name: number_of_orders
27 | description: Count of the number of orders a customer has placed
28 |
29 | - name: total_order_amount
30 | description: Total value (AUD) of a customer's orders
31 |
32 | - name: orders
33 | description: This table has basic information about orders, as well as some derived facts based on payments
34 |
35 | columns:
36 | - name: order_id
37 | data_tests:
38 | - unique
39 | - not_null
40 | description: This is a unique identifier for an order
41 |
42 | - name: customer_id
43 | description: Foreign key to the customers table
44 | data_tests:
45 | - not_null
46 | - relationships:
47 | to: ref('customers')
48 | field: customer_id
49 |
50 | - name: order_date
51 | description: Date (UTC) that the order was placed
52 |
53 | - name: status
54 | description: '{{ doc("orders_status") }}'
55 | data_tests:
56 | - accepted_values:
57 | values: ['placed', 'shipped', 'completed', 'return_pending', 'returned']
58 |
59 | - name: amount
60 | description: Total amount (AUD) of the order
61 | data_tests:
62 | - not_null
63 |
64 | - name: credit_card_amount
65 | description: Amount of the order (AUD) paid for by credit card
66 | data_tests:
67 | - not_null
68 |
69 | - name: coupon_amount
70 | description: Amount of the order (AUD) paid for by coupon
71 | data_tests:
72 | - not_null
73 |
74 | - name: bank_transfer_amount
75 | description: Amount of the order (AUD) paid for by bank transfer
76 | data_tests:
77 | - not_null
78 |
79 | - name: gift_card_amount
80 | description: Amount of the order (AUD) paid for by gift card
81 | data_tests:
82 | - not_null
83 |
--------------------------------------------------------------------------------
/tests/functional/projects/jaffle_shop/schemas/staging.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | models:
4 | - name: stg_customers
5 | columns:
6 | - name: customer_id
7 | data_tests:
8 | - unique
9 | - not_null
10 |
11 | - name: stg_orders
12 | columns:
13 | - name: order_id
14 | data_tests:
15 | - unique
16 | - not_null
17 | - name: status
18 | data_tests:
19 | - accepted_values:
20 | values: ['placed', 'shipped', 'completed', 'return_pending', 'returned']
21 |
22 | - name: stg_payments
23 | columns:
24 | - name: payment_id
25 | data_tests:
26 | - unique
27 | - not_null
28 | - name: payment_method
29 | data_tests:
30 | - accepted_values:
31 | values: ['credit_card', 'coupon', 'bank_transfer', 'gift_card']
32 |
--------------------------------------------------------------------------------
/tests/functional/projects/jaffle_shop/staging/stg_customers.sql:
--------------------------------------------------------------------------------
1 | with source as (
2 |
3 | {#-
4 | Normally we would select from the table here, but we are using seeds to load
5 | our data in this project
6 | #}
7 | select * from {{ ref('raw_customers') }}
8 |
9 | ),
10 |
11 | renamed as (
12 |
13 | select
14 | id as customer_id,
15 | first_name,
16 | last_name
17 |
18 | from source
19 |
20 | )
21 |
22 | select * from renamed
23 |
--------------------------------------------------------------------------------
/tests/functional/projects/jaffle_shop/staging/stg_orders.sql:
--------------------------------------------------------------------------------
1 | with source as (
2 |
3 | {#-
4 | Normally we would select from the table here, but we are using seeds to load
5 | our data in this project
6 | #}
7 | select * from {{ ref('raw_orders') }}
8 |
9 | ),
10 |
11 | renamed as (
12 |
13 | select
14 | id as order_id,
15 | user_id as customer_id,
16 | order_date,
17 | status
18 |
19 | from source
20 |
21 | )
22 |
23 | select * from renamed
24 |
--------------------------------------------------------------------------------
/tests/functional/projects/jaffle_shop/staging/stg_payments.sql:
--------------------------------------------------------------------------------
1 | with source as (
2 |
3 | {#-
4 | Normally we would select from the table here, but we are using seeds to load
5 | our data in this project
6 | #}
7 | select * from {{ ref('raw_payments') }}
8 |
9 | ),
10 |
11 | renamed as (
12 |
13 | select
14 | id as payment_id,
15 | order_id,
16 | payment_method,
17 |
18 | -- `amount` is currently stored in cents, so we convert it to dollars
19 | amount / 100 as amount
20 |
21 | from source
22 |
23 | )
24 |
25 | select * from renamed
26 |
--------------------------------------------------------------------------------
/tests/functional/projects/utils.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 |
4 | FILE_TYPES = {
5 | "data": "csv",
6 | "docs": "md",
7 | "macros": "sql",
8 | "models": "sql",
9 | "schemas": "yml",
10 | "staging": "sql",
11 | }
12 |
13 |
14 | def read(project: str, file_type: str, file_name: str) -> str:
15 | root = Path(__file__).parent / project
16 | extension = FILE_TYPES[file_type]
17 | file = root / file_type / f"{file_name}.{extension}"
18 | contents = file.read_text()
19 | if file_type == "data":
20 | return contents.strip()
21 | return contents
22 |
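23 | # Worked example (illustrative): read("jaffle_shop", "models", "customers")
24 | # resolves to jaffle_shop/models/customers.sql relative to this file and
25 | # returns its text; "data" files also have surrounding whitespace stripped.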
--------------------------------------------------------------------------------
/tests/functional/retry/fixtures.py:
--------------------------------------------------------------------------------
1 | models__sample_model = """select 1 as id, baz as foo"""  # intentionally invalid: baz is undefined, so the first run fails
2 | models__second_model = """select 1 as id, 2 as bar"""
3 |
4 | models__union_model = """
5 | select foo + bar as sum3 from {{ ref('sample_model') }}
6 | left join {{ ref('second_model') }} on sample_model.id = second_model.id
7 | """
8 |
9 | schema_yml = """
10 | models:
11 | - name: sample_model
12 | columns:
13 | - name: foo
14 | data_tests:
15 | - accepted_values:
16 | values: [3]
17 | quote: false
18 | config:
19 | severity: warn
20 | - name: second_model
21 | columns:
22 | - name: bar
23 | data_tests:
24 | - accepted_values:
25 | values: [3]
26 | quote: false
27 | config:
28 | severity: warn
29 | - name: union_model
30 | columns:
31 | - name: sum3
32 | data_tests:
33 | - accepted_values:
34 | values: [3]
35 | quote: false
36 | """
37 |
38 | macros__alter_timezone_sql = """
39 | {% macro alter_timezone(timezone='America/Los_Angeles') %}
40 | {% set sql %}
41 | SET TimeZone='{{ timezone }}';
42 | {% endset %}
43 |
44 | {% do run_query(sql) %}
45 | {% do log("Timezone set to: " + timezone, info=True) %}
46 | {% endmacro %}
47 | """
48 |
49 | simple_model = """
50 | select null as id
51 | """
52 |
53 | simple_schema = """
54 | models:
55 | - name: some_model
56 | columns:
57 | - name: id
58 | data_tests:
59 | - not_null
60 | """
61 |
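62 | # For illustration (assumed invocation): alter_timezone can be called via
63 | #   dbt run-operation alter_timezone --args '{timezone: Etc/UTC}'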
--------------------------------------------------------------------------------
/tests/functional/schema/fixtures/macros.py:
--------------------------------------------------------------------------------
1 | _CUSTOM_MACRO = """
2 | {% macro generate_schema_name(schema_name, node) %}
3 |
4 | {{ schema_name }}_{{ target.schema }}_macro
5 |
6 | {% endmacro %}
7 | """
8 |
9 | _CUSTOM_MACRO_W_CONFIG = """
10 | {% macro generate_schema_name(schema_name, node) %}
11 |
12 | {{ node.config['schema'] }}_{{ target.schema }}_macro
13 |
14 | {% endmacro %}
15 | """
16 |
17 | _CUSTOM_MACRO_MULTI_SCHEMA = """
18 | {% macro generate_alias_name(custom_alias_name=none, node=none) -%}
19 | {%- set node_name = node.name | trim -%}
20 | {%- set split_name = node_name.split('.') -%}
21 | {%- set n_parts = split_name | length -%}
22 |
23 | {{ split_name[1] if n_parts>1 else node_name }}
24 |
25 | {%- endmacro -%}
26 |
27 |
28 | {% macro generate_schema_name(custom_schema_name=none, node=none) -%}
29 | {%- set default_schema = target.schema -%}
30 | {%- set node_name = node.name | trim -%}
31 | {%- set split_name = node_name.split('.') -%}
32 | {%- set n_parts = split_name | length -%}
33 |
34 | {{ split_name[0] if n_parts>1 else default_schema }}
35 |
36 | {%- endmacro -%}
37 | """
38 |
--------------------------------------------------------------------------------
/tests/functional/schema/fixtures/sql.py:
--------------------------------------------------------------------------------
1 | _TABLE_ONE = """
2 | select * from {{ ref('seed') }}
3 | """
4 | _TABLE_ONE_DOT_MODEL_SCHEMA = "first_schema"
5 | _TABLE_ONE_DOT_MODEL_NAME = f"{_TABLE_ONE_DOT_MODEL_SCHEMA}.view_1"
6 | _TABLE_ONE_DOT_MODEL = """
7 | select * from {{ target.schema }}.seed
8 | """
9 |
10 | _TABLE_TWO_SCHEMA = "custom"
11 | _TABLE_TWO = (
12 | """
13 | {{ config(schema='"""
14 | + _TABLE_TWO_SCHEMA
15 | + """') }}
16 | select * from {{ ref('view_1') }}
17 | """
18 | )
19 | _TABLE_TWO_DOT_MODEL_SCHEMA = "second_schema"
20 | _TABLE_TWO_DOT_MODEL_NAME = f"{_TABLE_TWO_DOT_MODEL_SCHEMA}.view_2"
21 | _TABLE_TWO_DOT_MODEL = "select * from {{ ref('" + _TABLE_ONE_DOT_MODEL_NAME + "') }}"
22 |
23 | _TABLE_THREE_SCHEMA = "test"
24 | _TABLE_THREE = (
25 | """
26 | {{ config(materialized='table', schema='"""
27 | + _TABLE_THREE_SCHEMA
28 | + """') }}
29 |
30 |
31 | with v1 as (
32 |
33 | select * from {{ ref('view_1') }}
34 |
35 | ),
36 |
37 | v2 as (
38 |
39 | select * from {{ ref('view_2') }}
40 |
41 | ),
42 |
43 | combined as (
44 |
45 | select last_name from v1
46 | union all
47 | select last_name from v2
48 |
49 | )
50 |
51 | select
52 | last_name,
53 | count(*) as count
54 |
55 | from combined
56 | group by 1
57 | """
58 | )
59 |
60 | _TABLE_THREE_DOT_MODEL = """
61 | {{ config(materialized='table') }}
62 |
63 |
64 | with v1 as (
65 |
66 | select * from {{ ref('first_schema.view_1') }}
67 |
68 | ),
69 |
70 | v2 as (
71 |
72 | select * from {{ ref('second_schema.view_2') }}
73 |
74 | ),
75 |
76 | combined as (
77 |
78 | select last_name from v1
79 | union all
80 | select last_name from v2
81 |
82 | )
83 |
84 | select
85 | last_name,
86 | count(*) as count
87 |
88 | from combined
89 | group by 1
90 | """
91 |
92 | _SEED_CSV = """id,first_name,last_name,email,gender,ip_address
93 | 1,Jack,Hunter,jhunter0@pbs.org,Male,59.80.20.168
94 | 2,Kathryn,Walker,kwalker1@ezinearticles.com,Female,194.121.179.35
95 | 3,Gerald,Ryan,gryan2@com.com,Male,11.3.212.243"""
96 |
97 | _CUSTOM_CONFIG = """
98 | {{ config(schema='custom') }}
99 |
100 | select * from {{ ref('view_1') }}
101 | """
102 |
103 | _VALIDATION_SQL = """
104 | drop table if exists {database}.{schema}.seed cascade;
105 | create table {database}.{schema}.seed (
106 | id BIGSERIAL PRIMARY KEY,
107 | first_name VARCHAR(50),
108 | last_name VARCHAR(50),
109 | email VARCHAR(50),
110 | gender VARCHAR(50),
111 | ip_address VARCHAR(20)
112 | );
113 |
114 | drop table if exists {database}.{schema}.agg cascade;
115 | create table {database}.{schema}.agg (
116 | last_name VARCHAR(50),
117 | count BIGINT
118 | );
119 |
120 |
121 | insert into {database}.{schema}.seed (first_name, last_name, email, gender, ip_address) values
122 | ('Jack', 'Hunter', 'jhunter0@pbs.org', 'Male', '59.80.20.168'),
123 | ('Kathryn', 'Walker', 'kwalker1@ezinearticles.com', 'Female', '194.121.179.35'),
124 | ('Gerald', 'Ryan', 'gryan2@com.com', 'Male', '11.3.212.243');
125 |
126 | insert into {database}.{schema}.agg (last_name, count) values
127 | ('Hunter', 2), ('Walker', 2), ('Ryan', 2);
128 | """
129 |
--------------------------------------------------------------------------------
/tests/functional/selected_resources/fixtures.py:
--------------------------------------------------------------------------------
1 | on_run_start_macro_assert_selected_models_expected_list = """
2 | {% macro assert_selected_models_expected_list(expected_list) %}
3 |
4 | {% if execute and (expected_list is not none) %}
5 |
6 | {% set sorted_selected_resources = selected_resources | sort %}
7 | {% set sorted_expected_list = expected_list | sort %}
8 |
9 | {% if sorted_selected_resources != sorted_expected_list %}
10 | {{ exceptions.raise_compiler_error("FAIL: sorted_selected_resources " ~ sorted_selected_resources ~ " is different from " ~ sorted_expected_list) }}
11 | {% endif %}
12 |
13 | {% endif %}
14 |
15 | {% endmacro %}
16 | """
17 |
18 |
19 | my_model1 = """
20 | select 1 as id
21 | """
22 |
23 | my_model2 = """
24 | select * from {{ ref('model1') }}
25 | """
26 |
27 | my_snapshot = """
28 | {% snapshot cc_all_snapshot %}
29 | {{ config(
30 | check_cols='all', unique_key='id', strategy='check',
31 | target_database=database, target_schema=schema
32 | ) }}
33 | select * from {{ ref('model2') }}
34 | {% endsnapshot %}
35 | """
36 |
--------------------------------------------------------------------------------
/tests/functional/selected_resources/test_selected_resources.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import run_dbt
2 | import pytest
3 |
4 | from tests.functional.selected_resources.fixtures import (
5 | my_model1,
6 | my_model2,
7 | my_snapshot,
8 | on_run_start_macro_assert_selected_models_expected_list,
9 | )
10 |
11 |
12 | @pytest.fixture(scope="class")
13 | def macros():
14 | return {
15 | "assert_selected_models_expected_list.sql": on_run_start_macro_assert_selected_models_expected_list,
16 | }
17 |
18 |
19 | @pytest.fixture(scope="class")
20 | def models():
21 | return {"model1.sql": my_model1, "model2.sql": my_model2}
22 |
23 |
24 | @pytest.fixture(scope="class")
25 | def snapshots():
26 | return {
27 | "my_snapshot.sql": my_snapshot,
28 | }
29 |
30 |
31 | @pytest.fixture(scope="class")
32 | def project_config_update():
33 | return {
34 | "on-run-start": "{{ assert_selected_models_expected_list(var('expected_list',None)) }}",
35 | }
36 |
37 |
38 | @pytest.fixture
39 | def build_all(project):
40 | run_dbt(["build"])
41 |
42 |
43 | @pytest.mark.usefixtures("build_all")
44 | class TestSelectedResources:
45 | def test_selected_resources_build_selector(self, project):
46 | results = run_dbt(
47 | [
48 | "build",
49 | "--select",
50 | "model1+",
51 | "--vars",
52 | '{"expected_list": ["model.test.model1", "model.test.model2", "snapshot.test.cc_all_snapshot"]}',
53 | ]
54 | )
55 | assert results[0].status == "success"
56 |
57 | def test_selected_resources_build_selector_subgraph(self, project):
58 | results = run_dbt(
59 | [
60 | "build",
61 | "--select",
62 | "model2+",
63 | "--vars",
64 | '{"expected_list": ["model.test.model2", "snapshot.test.cc_all_snapshot"]}',
65 | ]
66 | )
67 | assert results[0].status == "success"
68 |
69 | def test_selected_resources_run(self, project):
70 | results = run_dbt(
71 | [
72 | "run",
73 | "--select",
74 | "model1+",
75 | "--vars",
76 | '{"expected_list": ["model.test.model2", "model.test.model1"]}',
77 | ]
78 | )
79 | assert results[0].status == "success"
80 |
81 | def test_selected_resources_build_no_selector(self, project):
82 | results = run_dbt(
83 | [
84 | "build",
85 | "--vars",
86 | '{"expected_list": ["model.test.model1", "model.test.model2", "snapshot.test.cc_all_snapshot"]}',
87 | ]
88 | )
89 | assert results[0].status == "success"
90 |
91 | def test_selected_resources_build_no_model(self, project):
92 | results = run_dbt(
93 | [
94 | "build",
95 | "--select",
96 | "model_that_does_not_exist",
97 | "--vars",
98 | '{"expected_list": []}',
99 | ]
100 | )
101 | assert not results
102 |
103 | def test_selected_resources_test_no_model(self, project):
104 | results = run_dbt(["test", "--select", "model1+", "--vars", '{"expected_list": []}'])
105 | assert not results
106 |
--------------------------------------------------------------------------------
/tests/functional/show/fixtures.py:
--------------------------------------------------------------------------------
1 | models__sample_model = """
2 | select * from {{ ref('sample_seed') }}
3 | """
4 |
5 | models__sample_number_model = """
6 | select
7 | cast(1.0 as int) as float_to_int_field,
8 | 3.0 as float_field,
9 | 4.3 as float_with_dec_field,
10 | 5 as int_field
11 | """
12 |
13 | models__sample_number_model_with_nulls = """
14 | select
15 | cast(1.0 as int) as float_to_int_field,
16 | 3.0 as float_field,
17 | 4.3 as float_with_dec_field,
18 | 5 as int_field
19 |
20 | union all
21 |
22 | select
23 | cast(null as int) as float_to_int_field,
24 | cast(null as float) as float_field,
25 | cast(null as float) as float_with_dec_field,
26 | cast(null as int) as int_field
27 |
28 | """
29 |
30 | models__second_model = """
31 | select
32 | sample_num as col_one,
33 | sample_bool as col_two,
34 | 42 as answer
35 | from {{ ref('sample_model') }}
36 | """
37 |
38 | models__sql_header = """
39 | {% call set_sql_header(config) %}
40 | set session time zone '{{ var("timezone", "Europe/Paris") }}';
41 | {%- endcall %}
42 | select current_setting('timezone') as timezone
43 | """
44 |
45 | private_model_yml = """
46 | groups:
47 | - name: my_cool_group
48 | owner: {name: me}
49 |
50 | models:
51 | - name: private_model
52 | access: private
53 | config:
54 | group: my_cool_group
55 | """
56 |
57 |
58 | schema_yml = """
59 | models:
60 | - name: sample_model
61 | latest_version: 1
62 |
63 | # declare the versions, and fully specify them
64 | versions:
65 | - v: 2
66 | config:
67 | materialized: table
68 | columns:
69 | - name: sample_num
70 | data_type: int
71 | - name: sample_bool
72 | data_type: bool
73 | - name: answer
74 | data_type: int
75 |
76 | - v: 1
77 | config:
78 | materialized: table
79 | contract: {enforced: true}
80 | columns:
81 | - name: sample_num
82 | data_type: int
83 | - name: sample_bool
84 | data_type: bool
85 | """
86 |
87 | models__ephemeral_model = """
88 | {{ config(materialized = 'ephemeral') }}
89 | select
90 | coalesce(sample_num, 0) + 10 as col_deci
91 | from {{ ref('sample_model') }}
92 | """
93 |
94 | models__second_ephemeral_model = """
95 | {{ config(materialized = 'ephemeral') }}
96 | select
97 | col_deci + 100 as col_hundo
98 | from {{ ref('ephemeral_model') }}
99 | """
100 |
101 | seeds__sample_seed = """sample_num,sample_bool
102 | 1,true
103 | 2,false
104 | 3,true
105 | 4,false
106 | 5,true
107 | 6,false
108 | 7,true
109 | """
110 |
--------------------------------------------------------------------------------
/tests/functional/sources/common_source_setup.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from dbt.tests.util import run_dbt
4 | import pytest
5 | import yaml
6 |
7 | from tests.functional.sources.fixtures import (
8 | models_descendant_model_sql,
9 | models_ephemeral_model_sql,
10 | models_multi_source_model_sql,
11 | models_nonsource_descendant_sql,
12 | models_schema_yml,
13 | models_view_model_sql,
14 | seeds_expected_multi_source_csv,
15 | seeds_other_source_table_csv,
16 | seeds_other_table_csv,
17 | seeds_source_csv,
18 | )
19 |
20 |
21 | class BaseSourcesTest:
22 | @pytest.fixture(scope="class", autouse=True)
23 | def setEnvVars(self):
24 | os.environ["DBT_TEST_SCHEMA_NAME_VARIABLE"] = "test_run_schema"
25 |
26 | yield
27 |
28 | del os.environ["DBT_TEST_SCHEMA_NAME_VARIABLE"]
29 |
30 | @pytest.fixture(scope="class")
31 | def models(self):
32 | return {
33 | "schema.yml": models_schema_yml,
34 | "view_model.sql": models_view_model_sql,
35 | "ephemeral_model.sql": models_ephemeral_model_sql,
36 | "descendant_model.sql": models_descendant_model_sql,
37 | "multi_source_model.sql": models_multi_source_model_sql,
38 | "nonsource_descendant.sql": models_nonsource_descendant_sql,
39 | }
40 |
41 | @pytest.fixture(scope="class")
42 | def seeds(self):
43 | return {
44 | "source.csv": seeds_source_csv,
45 | "other_table.csv": seeds_other_table_csv,
46 | "expected_multi_source.csv": seeds_expected_multi_source_csv,
47 | "other_source_table.csv": seeds_other_source_table_csv,
48 | }
49 |
50 | @pytest.fixture(scope="class")
51 | def project_config_update(self):
52 | return {
53 | "config-version": 2,
54 | "seed-paths": ["seeds"],
55 | "quoting": {"database": True, "schema": True, "identifier": True},
56 | "seeds": {
57 | "quote_columns": True,
58 | },
59 | }
60 |
61 | def run_dbt_with_vars(self, project, cmd, *args, **kwargs):
62 | vars_dict = {
63 | "test_run_schema": project.test_schema,
64 | "test_loaded_at": project.adapter.quote("updated_at"),
65 | }
66 | cmd.extend(["--vars", yaml.safe_dump(vars_dict)])
67 | return run_dbt(cmd, *args, **kwargs)
68 |
--------------------------------------------------------------------------------
/tests/functional/statements/test_statements.py:
--------------------------------------------------------------------------------
1 | import pathlib
2 |
3 | from dbt.tests.util import check_relations_equal, run_dbt, write_file
4 | import pytest
5 |
6 | from tests.functional.statements.fixtures import (
7 | models__statement_actual,
8 | models__statement_duplicated_load,
9 | models__statement_load_main_twice,
10 | seeds__statement_actual,
11 | seeds__statement_expected,
12 | )
13 |
14 |
15 | class TestStatements:
16 | @pytest.fixture(scope="class", autouse=True)
17 | def setUp(self, project):
18 | # put seeds in 'seed' not 'seeds' directory
19 | (pathlib.Path(project.project_root) / "seed").mkdir(parents=True, exist_ok=True)
20 | write_file(seeds__statement_actual, project.project_root, "seed", "seed.csv")
21 | write_file(
22 | seeds__statement_expected, project.project_root, "seed", "statement_expected.csv"
23 | )
24 |
25 | @pytest.fixture(scope="class")
26 | def models(self):
27 | return {
28 | "statement_actual.sql": models__statement_actual,
29 | "statement_duplicated_load.sql": models__statement_duplicated_load,
30 | "statement_load_main_twice.sql": models__statement_load_main_twice,
31 | }
32 |
33 | @pytest.fixture(scope="class")
34 | def project_config_update(self):
35 | return {
36 | "seeds": {
37 | "quote_columns": False,
38 | },
39 | "seed-paths": ["seed"],
40 | }
41 |
42 | def test_postgres_statements(self, project):
43 | results = run_dbt(["seed"])
44 | assert len(results) == 2
45 | results = run_dbt(["run", "-m", "statement_actual"])
46 | assert len(results) == 1
47 |
48 | check_relations_equal(project.adapter, ["statement_actual", "statement_expected"])
49 |
50 | def test_duplicated_load_statements(self, project):
51 | run_dbt(["seed"])
52 | results = run_dbt(["run", "-m", "statement_duplicated_load"], False)
53 | assert len(results) == 1
54 | assert results.results[0].status == "error"
55 | assert (
56 | "The 'statement' result named 'test_statement' has already been loaded into a variable"
57 | in results.results[0].message
58 | )
59 |
60 | def test_load_statement_on_main_twice(self, project):
61 | run_dbt(["seed"])
62 | results = run_dbt(["run", "-m", "statement_load_main_twice"])
63 | assert len(results) == 1
64 | check_relations_equal(project.adapter, ["statement_load_main_twice", "statement_expected"])
65 |
--------------------------------------------------------------------------------
/tests/functional/test_analyses.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from dbt.tests.util import get_manifest
4 | import pytest
5 |
6 | from tests.functional.utils import run_dbt
7 |
8 |
9 | my_model_sql = """
10 | select 1 as id
11 | """
12 |
13 | raw_stuff_sql = """
14 | {% raw %}
15 | {% invalid jinja stuff %}
16 | {% endraw %}
17 | """
18 |
19 | schema_yml = """
20 | version: 2
21 |
22 | analyses:
23 | - name: my_analysis
24 | description: "This is my analysis"
25 | """
26 |
27 | my_analysis_sql = """
28 | select * from {{ ref('my_model') }}
29 | """
30 |
31 |
32 | class TestAnalyses:
33 | @pytest.fixture(scope="class")
34 | def models(self):
35 | return {"my_model.sql": my_model_sql}
36 |
37 | @pytest.fixture(scope="class")
38 | def analyses(self):
39 | return {
40 | "raw_stuff.sql": raw_stuff_sql,
41 | "schema.yml": schema_yml,
42 | "my_analysis.sql": my_analysis_sql,
43 | }
44 |
45 | def assert_contents_equal(self, path, expected):
46 | with open(path) as fp:
47 | assert fp.read().strip() == expected
48 |
49 | def test_postgres_analyses(self, project):
50 | compiled_analysis_path = os.path.normpath("target/compiled/test/analyses")
51 | path_1 = os.path.join(compiled_analysis_path, "my_analysis.sql")
52 | path_2 = os.path.join(compiled_analysis_path, "raw_stuff.sql")
53 |
54 | run_dbt(["clean"])
55 | assert not (os.path.exists(compiled_analysis_path))
56 |
57 | results = run_dbt(["compile"])
58 | assert len(results) == 3
59 |
60 | manifest = get_manifest(project.project_root)
61 | analysis_id = "analysis.test.my_analysis"
62 | assert analysis_id in manifest.nodes
63 |
64 | node = manifest.nodes[analysis_id]
65 | assert node.description == "This is my analysis"
66 |
67 | assert os.path.exists(path_1)
68 | assert os.path.exists(path_2)
69 |
70 | expected_sql = 'select * from "{}"."{}"."my_model"'.format(
71 | project.database, project.test_schema
72 | )
73 | self.assert_contents_equal(path_1, expected_sql)
74 | self.assert_contents_equal(path_2, "{% invalid jinja stuff %}")
75 |
--------------------------------------------------------------------------------
/tests/functional/test_catalog.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.adapter.catalog.relation_types import CatalogRelationTypes
2 |
3 |
4 | class TestCatalogRelationTypes(CatalogRelationTypes):
5 | pass
6 |
--------------------------------------------------------------------------------
/tests/functional/test_clean.py:
--------------------------------------------------------------------------------
1 | from dbt_common.exceptions import DbtRuntimeError
2 | import pytest
3 |
4 | from tests.functional.utils import run_dbt
5 |
6 |
7 | class TestCleanSourcePath:
8 | @pytest.fixture(scope="class")
9 | def project_config_update(self):
10 | return "clean-targets: ['models']"
11 |
12 | def test_clean_source_path(self, project):
13 | with pytest.raises(DbtRuntimeError, match="dbt will not clean the following source paths"):
14 | run_dbt(["clean"])
15 |
16 |
17 | class TestCleanPathOutsideProjectRelative:
18 | @pytest.fixture(scope="class")
19 | def project_config_update(self):
20 | return "clean-targets: ['..']"
21 |
22 | def test_clean_path_outside_project(self, project):
23 | with pytest.raises(
24 | DbtRuntimeError,
25 | match="dbt will not clean the following directories outside the project",
26 | ):
27 | run_dbt(["clean"])
28 |
29 |
30 | class TestCleanPathOutsideProjectAbsolute:
31 | @pytest.fixture(scope="class")
32 | def project_config_update(self):
33 | return "clean-targets: ['/']"
34 |
35 | def test_clean_path_outside_project(self, project):
36 | with pytest.raises(
37 | DbtRuntimeError,
38 | match="dbt will not clean the following directories outside the project",
39 | ):
40 | run_dbt(["clean"])
41 |
42 |
43 | class TestCleanPathOutsideProjectWithFlag:
44 | @pytest.fixture(scope="class")
45 | def project_config_update(self):
46 | return "clean-targets: ['/tmp/foo']"
47 |
48 | def test_clean_path_outside_project(self, project):
49 | # Doesn't fail because flag is set
50 | run_dbt(["clean", "--no-clean-project-files-only"])
51 |
52 | with pytest.raises(
53 | DbtRuntimeError,
54 | match="dbt will not clean the following directories outside the project",
55 | ):
56 | run_dbt(["clean", "--clean-project-files-only"])
57 |
--------------------------------------------------------------------------------
/tests/functional/test_colors.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | import pytest
4 |
5 | from tests.functional.utils import run_dbt_and_capture
6 |
7 |
8 | models__do_nothing_then_fail_sql = """
9 | select 1,
10 |
11 | """
12 |
13 |
14 | @pytest.fixture(scope="class")
15 | def models():
16 | return {"do_nothing_then_fail.sql": models__do_nothing_then_fail_sql}
17 |
18 |
19 | @pytest.fixture(scope="class")
20 | def project_config_update():
21 | return {"config-version": 2}
22 |
23 |
24 | class TestColors:
25 | def test_use_colors(self, project):
26 | self.assert_colors_used(
27 | "--use-colors",
28 | expect_colors=True,
29 | )
30 |
31 | def test_no_use_colors(self, project):
32 | self.assert_colors_used(
33 | "--no-use-colors",
34 | expect_colors=False,
35 | )
36 |
37 | def assert_colors_used(self, flag, expect_colors):
38 | _, stdout = run_dbt_and_capture(args=[flag, "run"], expect_pass=False)
39 | # match ANSI color escape sequences in the log output (31 = red, 33 = yellow)
40 | pattern = re.compile(r"\[31m.*|\[33m.*")
41 | stdout_contains_formatting_characters = bool(pattern.search(stdout))
42 | if expect_colors:
43 | assert stdout_contains_formatting_characters
44 | else:
45 | assert not stdout_contains_formatting_characters
46 |
--------------------------------------------------------------------------------
/tests/functional/test_column_quotes.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import run_dbt
2 | import pytest
3 |
4 |
5 | _MODELS__COLUMN_QUOTING_DEFAULT = """
6 | {% set col_a = '"col_A"' %}
7 | {% set col_b = '"col_B"' %}
8 |
9 | {{
10 | config(
11 | materialized = 'incremental',
12 | unique_key = col_a,
13 | )
14 | }}
15 |
16 | select
17 | {{ col_a }},
18 | {{ col_b }}
19 | from {{ref('seed')}}
20 | """
21 |
22 | _MODELS__COLUMN_QUOTING_NO_QUOTING = """
23 | {% set col_a = '"col_a"' %}
24 | {% set col_b = '"col_b"' %}
25 |
26 | {{
27 | config(
28 | materialized = 'incremental',
29 | unique_key = col_a,
30 | )
31 | }}
32 |
33 | select
34 | {{ col_a }},
35 | {{ col_b }}
36 | from {{ref('seed')}}
37 | """
38 |
39 | _SEEDS_BASIC_SEED = """col_A,col_B
40 | 1,2
41 | 3,4
42 | 5,6
43 | """
44 |
45 |
46 | class BaseColumnQuotingTest:
47 | @pytest.fixture(scope="class")
48 | def models(self):
49 | return {"model.sql": _MODELS__COLUMN_QUOTING_DEFAULT}
50 |
51 | @pytest.fixture(scope="class")
52 | def seeds(self):
53 | return {"seed.csv": _SEEDS_BASIC_SEED}
54 |
55 | @pytest.fixture(scope="function")
56 | def run_column_quotes(self, project):
57 | def fixt():
58 | results = run_dbt(["seed"])
59 | assert len(results) == 1
60 | results = run_dbt(["run"])
61 | assert len(results) == 1
62 | results = run_dbt(["run"])
63 | assert len(results) == 1
64 |
65 | return fixt
66 |
67 |
68 | class TestColumnQuotingDefault(BaseColumnQuotingTest):
69 | def test_column_quotes(self, run_column_quotes):
70 | run_column_quotes()
71 |
72 |
73 | class TestColumnQuotingEnabled(BaseColumnQuotingTest):
74 | @pytest.fixture(scope="class")
75 | def project_config_update(self):
76 | return {
77 | "seeds": {
78 | "quote_columns": True,
79 | },
80 | }
81 |
82 | def test_column_quotes(self, run_column_quotes):
83 | run_column_quotes()
84 |
85 |
86 | class TestColumnQuotingDisabled(BaseColumnQuotingTest):
87 | @pytest.fixture(scope="class")
88 | def models(self):
89 | return {"model.sql": _MODELS__COLUMN_QUOTING_NO_QUOTING}
90 |
91 | @pytest.fixture(scope="class")
92 | def project_config_update(self):
93 | return {
94 | "seeds": {
95 | "quote_columns": False,
96 | },
97 | }
98 |
99 | def test_column_quotes(self, run_column_quotes):
100 | run_column_quotes()
101 |
--------------------------------------------------------------------------------
/tests/functional/test_connection_manager.py:
--------------------------------------------------------------------------------
1 | from unittest import TestCase, mock
2 |
3 | from dbt.adapters.contracts.connection import Connection, Identifier
4 | from dbt_common.helper_types import Port
5 | import psycopg2
6 |
7 | from dbt.adapters.postgres import PostgresCredentials, PostgresConnectionManager
8 |
9 |
10 | class TestConnectionManagerOpen(TestCase):
11 | connection = None
12 |
13 | # Postgres-specific
14 | def setUp(self):
15 | self.connection = self.get_connection()
16 |
17 | def get_connection(self) -> Connection:
18 | # reuse the existing connection if this test case already created one
19 | if self.connection is not None:
20 | return self.connection
21 | credentials = PostgresCredentials(
22 | host="localhost",
23 | user="test-user",
24 | port=Port(1111),
25 | password="test-password",
26 | database="test-db",
27 | schema="test-schema",
28 | retries=2,
29 | )
30 | connection = Connection(Identifier("postgres"), None, credentials)
31 | return connection
32 |
33 | def test_open(self):
34 | """Test opening a Postgres Connection with failures in the first 3 attempts.
35 |
36 | This test uses a Connection populated with test PostgresCredentials values, and
37 | expects a mock connect to raise a psycopg2.errors.ConnectionFailuer
38 | in the first 3 invocations, after which the mock should return True. As a result:
39 | * The Connection state should be "open" and the handle True, as connect
40 | returns in the 4th attempt.
41 | * The resulting attempt count should be 4.
42 | """
43 | conn = self.connection
44 | attempt = 0
45 |
46 | def connect(*args, **kwargs):
47 | nonlocal attempt
48 | attempt += 1
49 |
50 | if attempt <= 2:
51 | raise psycopg2.errors.ConnectionFailure("Connection has failed")
52 |
53 | return True
54 |
55 | with mock.patch("psycopg2.connect", wraps=connect) as mock_connect:
56 | PostgresConnectionManager.open(conn)
57 |
58 | assert mock_connect.call_count == 3
59 |
60 | assert attempt == 3
61 | assert conn.state == "open"
62 | assert conn.handle is True
63 |
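64 | # With retries=2 in the credentials, psycopg2.connect is attempted at most
65 | # three times (the initial attempt plus two retries), which is exactly the
66 | # path this test exercises.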
--------------------------------------------------------------------------------
/tests/functional/test_custom_target_path.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | from dbt.tests.util import run_dbt
4 | import pytest
5 |
6 |
7 | class TestTargetPathConfig:
8 | @pytest.fixture(scope="class")
9 | def project_config_update(self):
10 | return {"config-version": 2, "target-path": "project_target"}
11 |
12 | def test_target_path(self, project):
13 | run_dbt(["run"])
14 | assert Path("project_target").is_dir()
15 | assert not Path("target").is_dir()
16 |
17 |
18 | class TestTargetPathEnvVar:
19 | def test_target_path(self, project, monkeypatch):
20 | monkeypatch.setenv("DBT_TARGET_PATH", "env_target")
21 | run_dbt(["run"])
22 | assert Path("env_target").is_dir()
23 | assert not Path("project_target").is_dir()
24 | assert not Path("target").is_dir()
25 |
26 |
27 | class TestTargetPathCliArg:
28 | def test_target_path(self, project, monkeypatch):
29 | monkeypatch.setenv("DBT_TARGET_PATH", "env_target")
30 | run_dbt(["run", "--target-path", "cli_target"])
31 | assert Path("cli_target").is_dir()
32 | assert not Path("env_target").is_dir()
33 | assert not Path("project_target").is_dir()
34 | assert not Path("target").is_dir()
35 |
--------------------------------------------------------------------------------
/tests/functional/test_cycles.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import run_dbt
2 | import pytest
3 |
4 |
5 | model_a_sql = """
6 | select * from {{ ref('model_b') }}
7 | """
8 |
9 | model_b_sql = """
10 | select * from {{ ref('model_a') }}
11 | """
12 |
13 | complex_cycle__model_a_sql = """
14 | select 1 as id
15 | """
16 |
17 | complex_cycle__model_b_sql = """
18 | select * from {{ ref('model_a') }}
19 | union all
20 | select * from {{ ref('model_e') }}
21 | """
22 |
23 | complex_cycle__model_c_sql = """
24 | select * from {{ ref('model_b') }}
25 | """
26 |
27 | complex_cycle__model_d_sql = """
28 | select * from {{ ref('model_c') }}
29 | """
30 |
31 | complex_cycle__model_e_sql = """
32 | select * from {{ ref('model_d') }}
33 | """
34 |
35 |
36 | class TestSimpleCycle:
37 | @pytest.fixture(scope="class")
38 | def models(self):
39 | return {"model_a.sql": model_a_sql, "model_b.sql": model_b_sql}
40 |
41 | def test_simple_cycle(self, project):
42 | with pytest.raises(RuntimeError) as exc:
43 | run_dbt(["run"])
44 | expected_msg = "Found a cycle"
45 | assert expected_msg in str(exc.value)
46 |
47 |
48 | class TestComplexCycle:
49 | @pytest.fixture(scope="class")
50 | def models(self):
51 | # The cycle in this graph is:
52 | #   B -> C -> D -> E -> B
53 | # A is an upstream root feeding B
54 | # (model_a has no refs of its own,
55 | # so it sits outside the cycle)
56 | return {
57 | "model_a.sql": complex_cycle__model_a_sql,
58 | "model_b.sql": complex_cycle__model_b_sql,
59 | "model_c.sql": complex_cycle__model_c_sql,
60 | "model_d.sql": complex_cycle__model_d_sql,
61 | "model_e.sql": complex_cycle__model_e_sql,
62 | }
63 |
64 | def test_complex_cycle(self, project):
65 | with pytest.raises(RuntimeError) as exc:
66 | run_dbt(["run"])
67 | expected_msg = "Found a cycle"
68 | assert expected_msg in str(exc.value)
69 |
--------------------------------------------------------------------------------
/tests/functional/test_default_selectors.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import run_dbt
2 | import pytest
3 |
4 |
5 | models__schema_yml = """
6 | version: 2
7 |
8 | sources:
9 | - name: src
10 | schema: "{{ target.schema }}"
11 | freshness:
12 | warn_after: {count: 24, period: hour}
13 | loaded_at_field: _loaded_at
14 | tables:
15 | - name: source_a
16 | identifier: model_c
17 | columns:
18 | - name: fun
19 | - name: _loaded_at
20 | - name: src
21 | schema: "{{ target.schema }}"
22 | freshness:
23 | warn_after: {count: 24, period: hour}
24 | loaded_at_field: _loaded_at
25 | tables:
26 | - name: source_b
27 | identifier: model_c
28 | columns:
29 | - name: fun
30 | - name: _loaded_at
31 |
32 | models:
33 | - name: model_a
34 | columns:
35 | - name: fun
36 | tags: [marketing]
37 | - name: model_b
38 | columns:
39 | - name: fun
40 | tags: [finance]
41 | """
42 |
43 | models__model_a_sql = """
44 | SELECT 1 AS fun
45 | """
46 |
47 | models__model_b_sql = """
48 | SELECT 1 AS fun
49 | """
50 |
51 | seeds__model_c_csv = """fun,_loaded_at
52 | 1,2021-04-19 01:00:00"""
53 |
54 |
55 | @pytest.fixture(scope="class")
56 | def models():
57 | return {
58 | "schema.yml": models__schema_yml,
59 | "model_b.sql": models__model_b_sql,
60 | "model_a.sql": models__model_a_sql,
61 | }
62 |
63 |
64 | @pytest.fixture(scope="class")
65 | def seeds():
66 | return {"model_c.csv": seeds__model_c_csv}
67 |
68 |
69 | @pytest.fixture(scope="class")
70 | def selectors():
71 | return """
72 | selectors:
73 | - name: default_selector
74 | description: test default selector
75 | definition:
76 | union:
77 | - method: source
78 | value: "test.src.source_a"
79 | - method: fqn
80 | value: "model_a"
81 | default: true
82 | """
83 |
84 |
85 | class TestDefaultSelectors:
86 | def test_model__list(self, project):
87 | result = run_dbt(["ls", "--resource-type", "model"])
88 | assert "test.model_a" in result
89 |
90 | def test_model__compile(self, project):
91 | result = run_dbt(["compile"])
92 | assert len(result) == 1
93 | assert result.results[0].node.name == "model_a"
94 |
95 | def test_source__freshness(self, project):
96 | run_dbt(["seed", "-s", "test.model_c"])
97 | result = run_dbt(["source", "freshness"])
98 | assert len(result) == 1
99 | assert result.results[0].node.name == "source_a"
100 |
--------------------------------------------------------------------------------
/tests/functional/test_events.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from dbt.cli.main import dbtRunner
4 | from dbt_common.events.base_types import EventLevel
5 |
6 |
7 | def test_performance_report(project):
8 | resource_report_level = None
9 |
10 | def check_for_report(e):
11 | # If we see a ResourceReport event, record its level
12 | if e.info.name == "ResourceReport":
13 | nonlocal resource_report_level
14 | resource_report_level = e.info.level
15 |
16 | runner = dbtRunner(callbacks=[check_for_report])
17 |
18 | runner.invoke(["run"])
19 |
20 | # With no CLI flag or env var set, ResourceReport should be debug level.
21 | assert resource_report_level == EventLevel.DEBUG
22 |
23 | try:
24 | os.environ["DBT_SHOW_RESOURCE_REPORT"] = "1"
25 | runner.invoke(["run"])
26 |
27 | # With the appropriate env var set, ResourceReport should be info level.
28 | # This allows this fairly technical log line to be omitted by default
29 | # but still available in production scenarios.
30 | assert resource_report_level == EventLevel.INFO
31 | finally:
32 | del os.environ["DBT_SHOW_RESOURCE_REPORT"]
33 |
--------------------------------------------------------------------------------
/tests/functional/test_external_reference.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import run_dbt
2 | import pytest
3 |
4 |
5 | external_model_sql = """
6 | {{
7 | config(
8 | materialized = "view"
9 | )
10 | }}
11 |
12 | select * from "{{ this.schema + 'z' }}"."external"
13 | """
14 |
15 | model_sql = """
16 | select 1 as id
17 | """
18 |
19 |
20 | class TestExternalReference:
21 | @pytest.fixture(scope="class")
22 | def models(self):
23 | return {"model.sql": external_model_sql}
24 |
25 | def test_external_reference(self, project, unique_schema):
26 | external_schema = unique_schema + "z"
27 | project.run_sql(f'create schema "{external_schema}"')
28 | project.run_sql(f'create table "{external_schema}"."external" (id integer)')
29 | project.run_sql(f'insert into "{external_schema}"."external" values (1), (2)')
30 |
31 | results = run_dbt(["run"])
32 | assert len(results) == 1
33 |
34 | # running it again should succeed
35 | results = run_dbt(["run"])
36 | assert len(results) == 1
37 |
38 |
39 | # The opposite of the test above -- check that external relations that
40 | # depend on a dbt model do not create issues with caching
41 | class TestExternalDependency:
42 | @pytest.fixture(scope="class")
43 | def models(self):
44 | return {"model.sql": model_sql}
45 |
46 | def test_external_reference(self, project, unique_schema):
47 | results = run_dbt(["run"])
48 | assert len(results) == 1
49 |
50 | external_schema = unique_schema + "z"
51 | project.run_sql(f'create schema "{external_schema}"')
52 | project.run_sql(
53 | f'create view "{external_schema}"."external" as (select * from {unique_schema}.model)'
54 | )
55 |
56 | # running it again should succeed
57 | results = run_dbt(["run"])
58 | assert len(results) == 1
59 |
--------------------------------------------------------------------------------
/tests/functional/test_fail_fast.py:
--------------------------------------------------------------------------------
1 | import json
2 | from pathlib import Path
3 |
4 | from dbt.tests.util import run_dbt
5 | import pytest
6 |
7 |
8 | models__one_sql = """
9 | select 1
10 | """
11 |
12 | models__two_sql = """
13 | -- depends_on: {{ ref('one') }}
14 | select 1 /failed
15 | """
16 |
17 |
18 | class FailFastBase:
19 | @pytest.fixture(scope="class")
20 | def models(self):
21 | return {"one.sql": models__one_sql, "two.sql": models__two_sql}
22 |
23 |
24 | class TestFastFailingDuringRun(FailFastBase):
25 | def test_fail_fast_run(
26 | self,
27 | project,
28 | models, # noqa: F811
29 | ):
30 | res = run_dbt(["run", "--fail-fast", "--threads", "1"], expect_pass=False)
31 | assert {r.node.unique_id: r.status for r in res.results} == {
32 | "model.test.one": "success",
33 | "model.test.two": "error",
34 | }
35 |
36 | run_results_file = Path(project.project_root) / "target/run_results.json"
37 | assert run_results_file.is_file()
38 | with run_results_file.open() as run_results_str:
39 | run_results = json.loads(run_results_str.read())
40 | assert len(run_results["results"]) == 2
41 | assert run_results["results"][0]["status"] == "success"
42 | assert run_results["results"][1]["status"] == "error"
43 |
44 |
45 | class TestFailFastFromConfig(FailFastBase):
46 | @pytest.fixture(scope="class")
47 | def project_config_update(self):
48 | return {
49 | "flags": {
50 | "send_anonymous_usage_stats": False,
51 | "fail_fast": True,
52 | }
53 | }
54 |
55 | def test_fail_fast_run_project_flags(
56 | self,
57 | project,
58 | models, # noqa: F811
59 | ):
60 | res = run_dbt(["run", "--threads", "1"], expect_pass=False)
61 | assert {r.node.unique_id: r.status for r in res.results} == {
62 | "model.test.one": "success",
63 | "model.test.two": "error",
64 | }
65 |
--------------------------------------------------------------------------------
/tests/functional/test_multiple_indexes.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from tests.functional.utils import run_dbt
4 |
5 |
6 | REF_MULTIPLE_INDEX_MODEL = """
7 | {{
8 | config(
9 | materialized="materialized_view",
10 | indexes=[
11 | {"columns": ["foo"], "type": "btree"},
12 | {"columns": ["bar"], "type": "btree"},
13 | ],
14 | )
15 | }}
16 |
17 | SELECT 1 AS foo, 2 AS bar
18 | """
19 |
20 |
21 | class TestMultipleIndexes:
22 | @pytest.fixture(scope="class")
23 | def models(self):
24 | return {"index_test.sql": REF_MULTIPLE_INDEX_MODEL}
25 |
26 | def test_multiple_indexes(self, project):
27 | run_dbt()
28 |
--------------------------------------------------------------------------------
/tests/functional/test_store_test_failures.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from dbt.tests.adapter.store_test_failures_tests.basic import (
4 | StoreTestFailuresAsExceptions,
5 | StoreTestFailuresAsGeneric,
6 | StoreTestFailuresAsInteractions,
7 | StoreTestFailuresAsProjectLevelEphemeral,
8 | StoreTestFailuresAsProjectLevelOff,
9 | StoreTestFailuresAsProjectLevelView,
10 | )
11 |
12 |
13 | class PostgresMixin:
14 | audit_schema: str
15 |
16 | @pytest.fixture(scope="function", autouse=True)
17 | def setup_audit_schema(self, project, setup_method):
18 | # postgres only supports schema names of at most 63 characters
19 | # a schema with a longer name still gets created, but the name gets truncated
20 | self.audit_schema = self.audit_schema[:63]
21 |
22 |
23 | class TestStoreTestFailuresAsInteractions(StoreTestFailuresAsInteractions, PostgresMixin):
24 | pass
25 |
26 |
27 | class TestStoreTestFailuresAsProjectLevelOff(StoreTestFailuresAsProjectLevelOff, PostgresMixin):
28 | pass
29 |
30 |
31 | class TestStoreTestFailuresAsProjectLevelView(StoreTestFailuresAsProjectLevelView, PostgresMixin):
32 | pass
33 |
34 |
35 | class TestStoreTestFailuresAsProjectLevelEphemeral(
36 | StoreTestFailuresAsProjectLevelEphemeral, PostgresMixin
37 | ):
38 | pass
39 |
40 |
41 | class TestStoreTestFailuresAsGeneric(StoreTestFailuresAsGeneric, PostgresMixin):
42 | pass
43 |
44 |
45 | class TestStoreTestFailuresAsExceptions(StoreTestFailuresAsExceptions, PostgresMixin):
46 | pass
47 |
--------------------------------------------------------------------------------
/tests/functional/test_thread_count.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import run_dbt
2 | import pytest
3 |
4 |
5 | class TestThreadCount:
6 | @pytest.fixture(scope="class")
7 | def models(self):
8 | sql = "with x as (select pg_sleep(1)) select 1"
9 | independent_models = {f"do_nothing_{num}.sql": sql for num in range(1, 21)}
10 | return independent_models
11 |
12 | @pytest.fixture(scope="class")
13 | def project_config_update(self):
14 | return {"config-version": 2}
15 |
16 | @pytest.fixture(scope="class")
17 | def profiles_config_update(self):
18 | return {"threads": 2}
19 |
20 | def test_threading_8x(self, project):
21 | results = run_dbt(args=["run", "--threads", "16"])
22 | assert len(results) == 20
23 |
--------------------------------------------------------------------------------
/tests/functional/test_timezones.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from dbt.tests.util import run_dbt
4 | import pytest
5 |
6 |
7 | # Canada/Saskatchewan does not observe DST, so its UTC offset is the same year-round
8 | model_sql = """
9 | {{ config(materialized='table') }}
10 |
11 | select
12 | '{{ run_started_at.astimezone(modules.pytz.timezone("Canada/Saskatchewan")) }}' as run_started_at_saskatchewan,
13 | '{{ run_started_at }}' as run_started_at_utc
14 | """
15 |
16 |
17 | class TestTimezones:
18 | @pytest.fixture(scope="class")
19 | def models(self):
20 | return {"timezones.sql": model_sql}
21 |
22 | @pytest.fixture(scope="class")
23 | def dbt_profile_data(self, unique_schema):
24 | return {
25 | "test": {
26 | "outputs": {
27 | "dev": {
28 | "type": "postgres",
29 | "threads": 1,
30 | "host": "localhost",
31 | "port": int(os.getenv("POSTGRES_TEST_PORT", 5432)),
32 | "user": os.getenv("POSTGRES_TEST_USER", "root"),
33 | "pass": os.getenv("POSTGRES_TEST_PASS", "password"),
34 | "dbname": os.getenv("POSTGRES_TEST_DATABASE", "dbt"),
35 | "schema": unique_schema,
36 | },
37 | },
38 | "target": "dev",
39 | }
40 | }
41 |
42 | @pytest.fixture(scope="class")
43 | def query(self, project):
44 | return """
45 | select
46 | run_started_at_saskatchewan,
47 | run_started_at_utc
48 | from {schema}.timezones
49 | """.format(
50 | schema=project.test_schema
51 | )
52 |
53 | # This test used to use freeze_time, but that doesn't work
54 | # with our timestamp fields in proto messages.
55 | def test_run_started_at(self, project, query):
56 | results = run_dbt(["run"])
57 |
58 | assert len(results) == 1
59 |
60 | result = project.run_sql(query, fetch="all")[0]
61 | saskatchewan, utc = result
62 |
63 | assert "+00:00" in utc
64 | assert "-06:00" in saskatchewan
65 |
--------------------------------------------------------------------------------
/tests/functional/test_types.py:
--------------------------------------------------------------------------------
1 | from dbt.contracts.results import NodeStatus
2 | from dbt.tests.util import run_dbt
3 | import pytest
4 |
5 |
6 | macros_sql = """
7 | {% macro test_array_results() %}
8 |
9 | {% set sql %}
10 | select ARRAY[1, 2, 3, 4] as mydata
11 | {% endset %}
12 |
13 | {% set result = run_query(sql) %}
14 | {% set value = result.columns['mydata'][0] %}
15 |
16 | {# This will be json-stringified #}
17 | {% if value != "[1, 2, 3, 4]" %}
18 | {% do exceptions.raise_compiler_error("Value was " ~ value) %}
19 | {% endif %}
20 |
21 | {% endmacro %}
22 | """
23 |
24 |
25 | class TestTypes:
26 | @pytest.fixture(scope="class")
27 | def macros(self):
28 | return {
29 | "macros.sql": macros_sql,
30 | }
31 |
32 | def test_nested_types(self, project):
33 | result = run_dbt(["run-operation", "test_array_results"])
34 | assert result.results[0].status == NodeStatus.Success
35 |
--------------------------------------------------------------------------------
/tests/functional/test_unlogged_table.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.util import run_dbt
2 | import pytest
3 |
4 |
5 | schema_yml = """
6 | version: 2
7 | models:
8 | - name: table_unlogged
9 | description: "Unlogged table model"
10 | columns:
11 | - name: column_a
12 | description: "Sample description"
13 | quote: true
14 | """
15 |
16 | table_unlogged_sql = """
17 | {{ config(materialized = 'table', unlogged = True) }}
18 |
19 | select 1 as column_a
20 | """
21 |
22 |
23 | class TestPostgresUnloggedTable:
24 | @pytest.fixture(scope="class")
25 | def models(self):
26 | return {
27 | "schema.yml": schema_yml,
28 | "table_unlogged.sql": table_unlogged_sql,
29 | }
30 |
31 | @pytest.fixture(scope="class")
32 | def project_config_update(self):
33 | return {
34 | "models": {
35 | "test": {
36 | "materialized": "table",
37 | "+persist_docs": {
38 | "relation": True,
39 | "columns": True,
40 | },
41 | }
42 | }
43 | }
44 |
45 | def test_postgres_unlogged_table_catalog(self, project):
46 | table_name = "table_unlogged"
47 |
48 | results = run_dbt(["run", "--models", table_name])
49 | assert len(results) == 1
50 |
51 | result = self.get_table_persistence(project, table_name)
52 | assert result == "u"
53 |
54 | catalog = run_dbt(["docs", "generate"])
55 |
56 | assert len(catalog.nodes) == 1
57 |
58 | table_node = catalog.nodes["model.test.table_unlogged"]
59 | assert table_node
60 | assert "column_a" in table_node.columns
61 |
62 | def get_table_persistence(self, project, table_name):
63 | sql = """
64 | SELECT
65 | relpersistence
66 | FROM pg_class
67 | WHERE relname = '{table_name}' AND relnamespace = '{schema}'::regnamespace
68 | """
69 | sql = sql.format(table_name=table_name, schema=project.test_schema)
70 | result = project.run_sql(sql, fetch="one")
71 | assert len(result) == 1
72 |
73 | return result[0]
74 |
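75 | # pg_class.relpersistence is 'p' for permanent, 'u' for unlogged and
76 | # 't' for temporary relations, so the unlogged model should report 'u'.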
--------------------------------------------------------------------------------
/tests/functional/unit_testing/test_ut_dependency.py:
--------------------------------------------------------------------------------
1 | from dbt.tests.fixtures.project import write_project_files
2 | from dbt.tests.util import get_unique_ids_in_results, run_dbt
3 | import pytest
4 |
5 |
6 | local_dependency__dbt_project_yml = """
7 |
8 | name: 'local_dep'
9 | version: '1.0'
10 |
11 | seeds:
12 | quote_columns: False
13 |
14 | """
15 |
16 | local_dependency__schema_yml = """
17 | sources:
18 | - name: seed_source
19 | schema: "{{ var('schema_override', target.schema) }}"
20 | tables:
21 | - name: "seed"
22 | columns:
23 | - name: id
24 | data_tests:
25 | - unique
26 |
27 | unit_tests:
28 | - name: test_dep_model_id
29 | model: dep_model
30 | given:
31 | - input: ref('seed')
32 | rows:
33 | - {id: 1, name: Joe}
34 | expect:
35 | rows:
36 | - {name_id: Joe_1}
37 |
38 |
39 | """
40 |
41 | local_dependency__dep_model_sql = """
42 | select name || '_' || id as name_id from {{ ref('seed') }}
43 |
44 | """
45 |
46 | local_dependency__seed_csv = """id,name
47 | 1,Mary
48 | 2,Sam
49 | 3,John
50 | """
51 |
52 | my_model_sql = """
53 | select * from {{ ref('dep_model') }}
54 | """
55 |
56 | my_model_schema_yml = """
57 | unit_tests:
58 | - name: test_my_model_name_id
59 | model: my_model
60 | given:
61 | - input: ref('dep_model')
62 | rows:
63 | - {name_id: Joe_1}
64 | expect:
65 | rows:
66 | - {name_id: Joe_1}
67 | """
68 |
69 |
70 | class TestUnitTestingInDependency:
71 | @pytest.fixture(scope="class", autouse=True)
72 | def setUp(self, project_root):
73 | local_dependency_files = {
74 | "dbt_project.yml": local_dependency__dbt_project_yml,
75 | "models": {
76 | "schema.yml": local_dependency__schema_yml,
77 | "dep_model.sql": local_dependency__dep_model_sql,
78 | },
79 | "seeds": {"seed.csv": local_dependency__seed_csv},
80 | }
81 | write_project_files(project_root, "local_dependency", local_dependency_files)
82 |
83 | @pytest.fixture(scope="class")
84 | def packages(self):
85 | return {"packages": [{"local": "local_dependency"}]}
86 |
87 | @pytest.fixture(scope="class")
88 | def models(self):
89 | return {
90 | "my_model.sql": my_model_sql,
91 | "schema.yml": my_model_schema_yml,
92 | }
93 |
94 | def test_unit_test_in_dependency(self, project):
95 | run_dbt(["deps"])
96 | run_dbt(["seed"])
97 | results = run_dbt(["run"])
98 | assert len(results) == 2
99 |
100 | results = run_dbt(["test"])
101 | assert len(results) == 3
102 | unique_ids = get_unique_ids_in_results(results)
103 | assert "unit_test.local_dep.dep_model.test_dep_model_id" in unique_ids
104 |
105 | results = run_dbt(["test", "--select", "test_type:unit"])
106 | # two unit tests: one in the root package, one in the local_dep package
107 | assert len(results) == 2
108 |
109 | results = run_dbt(["test", "--select", "local_dep"])
110 | # 2 tests in local_dep package
111 | assert len(results) == 2
112 |
113 | results = run_dbt(["test", "--select", "test"])
114 | # 1 test in root package
115 | assert len(results) == 1
116 |
--------------------------------------------------------------------------------
/tests/functional/unit_testing/test_ut_sources.py:
--------------------------------------------------------------------------------
1 | from dbt.contracts.results import RunStatus, TestStatus
2 | from dbt.tests.util import run_dbt, write_file
3 | import pytest
4 |
5 |
6 | raw_customers_csv = """id,first_name,last_name,email
7 | 1,Michael,Perez,mperez0@chronoengine.com
8 | 2,Shawn,Mccoy,smccoy1@reddit.com
9 | 3,Kathleen,Payne,kpayne2@cargocollective.com
10 | 4,Jimmy,Cooper,jcooper3@cargocollective.com
11 | 5,Katherine,Rice,krice4@typepad.com
12 | 6,Sarah,Ryan,sryan5@gnu.org
13 | 7,Martin,Mcdonald,mmcdonald6@opera.com
14 | 8,Frank,Robinson,frobinson7@wunderground.com
15 | 9,Jennifer,Franklin,jfranklin8@mail.ru
16 | 10,Henry,Welch,hwelch9@list-manage.com
17 | """
18 |
19 | schema_sources_yml = """
20 | sources:
21 | - name: seed_sources
22 | schema: "{{ target.schema }}"
23 | tables:
24 | - name: raw_customers
25 | columns:
26 | - name: id
27 | data_tests:
28 | - not_null:
29 | severity: "{{ 'error' if target.name == 'prod' else 'warn' }}"
30 | - unique
31 | - name: first_name
32 | - name: last_name
33 | - name: email
34 | unit_tests:
35 | - name: test_customers
36 | model: customers
37 | given:
38 | - input: source('seed_sources', 'raw_customers')
39 | rows:
40 | - {id: 1, first_name: Emily}
41 | expect:
42 | rows:
43 | - {id: 1, first_name: Emily}
44 | """
45 |
46 | customers_sql = """
47 | select * from {{ source('seed_sources', 'raw_customers') }}
48 | """
49 |
50 | failing_test_schema_yml = """
51 | - name: fail_test_customers
52 | model: customers
53 | given:
54 | - input: source('seed_sources', 'raw_customers')
55 | rows:
56 | - {id: 1, first_name: Emily}
57 | expect:
58 | rows:
59 | - {id: 1, first_name: Joan}
60 | """
61 |
62 |
63 | class TestUnitTestSourceInput:
64 | @pytest.fixture(scope="class")
65 | def seeds(self):
66 | return {
67 | "raw_customers.csv": raw_customers_csv,
68 | }
69 |
70 | @pytest.fixture(scope="class")
71 | def models(self):
72 | return {
73 | "customers.sql": customers_sql,
74 | "sources.yml": schema_sources_yml,
75 | }
76 |
77 | def test_source_input(self, project):
78 | results = run_dbt(["seed"])
79 | results = run_dbt(["run"])
80 | assert len(results) == 1
81 |
82 | results = run_dbt(["test", "--select", "test_type:unit"])
83 | assert len(results) == 1
84 |
85 | results = run_dbt(["build"])
86 | assert len(results) == 5
87 | result_unique_ids = [result.node.unique_id for result in results]
88 | assert len(result_unique_ids) == 5
89 | assert "unit_test.test.customers.test_customers" in result_unique_ids
90 |
91 | # write failing unit test
92 | write_file(
93 | schema_sources_yml + failing_test_schema_yml,
94 | project.project_root,
95 | "models",
96 | "sources.yml",
97 | )
98 | results = run_dbt(["build"], expect_pass=False)
99 | for result in results:
100 | if result.node.unique_id == "model.test.customers":
101 | assert result.status == RunStatus.Skipped
102 | elif result.node.unique_id == "unit_test.test.customers.fail_test_customers":
103 | assert result.status == TestStatus.Fail
104 | assert len(results) == 6
105 |
--------------------------------------------------------------------------------
/tests/functional/utils.py:
--------------------------------------------------------------------------------
1 | from contextlib import contextmanager
2 | from os import chdir
3 | from os.path import normcase, normpath
4 | from pathlib import Path
5 | from typing import List, Optional
6 |
7 | from dbt.tests.util import (
8 | run_dbt as _run_dbt,
9 | run_dbt_and_capture as _run_dbt_and_capture,
10 | )
11 |
12 |
13 | @contextmanager
14 | def up_one(return_path: Optional[Path] = None):
15 | current_path = Path.cwd()
16 | chdir("../")
17 | try:
18 | yield
19 | finally:
20 | chdir(return_path or current_path)
21 |
22 |
23 | def normalize(path):
24 | """On windows, neither is enough on its own:
25 |
26 | >>> normcase('C:\\documents/ALL CAPS/subdir\\..')
27 | 'c:\\documents\\all caps\\subdir\\..'
28 | >>> normpath('C:\\documents/ALL CAPS/subdir\\..')
29 | 'C:\\documents\\ALL CAPS'
30 | >>> normpath(normcase('C:\\documents/ALL CAPS/subdir\\..'))
31 | 'c:\\documents\\all caps'
32 | """
33 | return normcase(normpath(path))
34 |
35 |
36 | def run_dbt(args: Optional[List[str]] = None, expect_pass: bool = True):
37 | _set_flags()
38 | return _run_dbt(args, expect_pass)
39 |
40 |
41 | def run_dbt_and_capture(args: Optional[List[str]] = None, expect_pass: bool = True):
42 | _set_flags()
43 | return _run_dbt_and_capture(args, expect_pass)
44 |
45 |
46 | def _set_flags():
47 | # in order to call dbt's internal profile rendering, we need to set the
48 | # flags global. This is a bit of a hack, but it's the best way to do it.
49 | from dbt.flags import set_from_args
50 | from argparse import Namespace
51 |
52 | set_from_args(Namespace(), None)
53 |
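54 | # Example (illustrative): run a dbt command from the parent directory while
55 | # guaranteeing the original working directory is restored afterwards:
56 | #
57 | #     with up_one():
58 | #         run_dbt(["deps"])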
--------------------------------------------------------------------------------
/tests/unit/test_filter_catalog.py:
--------------------------------------------------------------------------------
1 | import decimal
2 | from unittest import TestCase
3 |
4 | import agate
5 | from dbt_common.clients import agate_helper
6 |
7 | from dbt.adapters.postgres import PostgresAdapter
8 |
9 |
10 | class TestPostgresFilterCatalog(TestCase):
11 | def test__catalog_filter_table(self):
12 | used_schemas = [["a", "B"], ["a", "1234"]]
13 | column_names = ["table_name", "table_database", "table_schema", "something"]
14 | rows = [
15 | ["foo", "a", "b", "1234"], # include
16 | ["foo", "a", "1234", "1234"], # include, w/ table schema as str
17 | ["foo", "c", "B", "1234"], # skip
18 | ["1234", "A", "B", "1234"], # include, w/ table name as str
19 | ]
20 | table = agate.Table(rows, column_names, agate_helper.DEFAULT_TYPE_TESTER)
21 |
22 | result = PostgresAdapter._catalog_filter_table(table, used_schemas)
23 | assert len(result) == 3
24 | for row in result.rows:
25 | assert isinstance(row["table_schema"], str)
26 | assert isinstance(row["table_database"], str)
27 | assert isinstance(row["table_name"], str)
28 | assert isinstance(row["something"], decimal.Decimal)
29 |
--------------------------------------------------------------------------------
/tests/unit/test_materialized_view.py:
--------------------------------------------------------------------------------
1 | from copy import deepcopy
2 |
3 | from dbt.adapters.contracts.relation import RelationType
4 | from dbt.adapters.relation_configs.config_change import RelationConfigChangeAction
5 |
6 | from dbt.adapters.postgres.relation import PostgresRelation
7 | from dbt.adapters.postgres.relation_configs import PostgresIndexConfig
8 |
9 |
10 | def test_index_config_changes():
11 | index_0_old = {
12 | "name": "my_index_0",
13 | "column_names": {"column_0"},
14 | "unique": True,
15 | "method": "btree",
16 | }
17 | index_1_old = {
18 | "name": "my_index_1",
19 | "column_names": {"column_1"},
20 | "unique": True,
21 | "method": "btree",
22 | }
23 | index_2_old = {
24 | "name": "my_index_2",
25 | "column_names": {"column_2"},
26 | "unique": True,
27 | "method": "btree",
28 | }
29 | existing_indexes = frozenset(
30 | PostgresIndexConfig.from_dict(index) for index in [index_0_old, index_1_old, index_2_old]
31 | )
32 |
33 | index_0_new = deepcopy(index_0_old)
34 | index_2_new = deepcopy(index_2_old)
35 | index_2_new.update(method="hash")
36 | index_3_new = {
37 | "name": "my_index_3",
38 | "column_names": {"column_3"},
39 | "unique": True,
40 | "method": "hash",
41 | }
42 | new_indexes = frozenset(
43 | PostgresIndexConfig.from_dict(index) for index in [index_0_new, index_2_new, index_3_new]
44 | )
45 |
46 | relation = PostgresRelation.create(
47 | database="my_database",
48 | schema="my_schema",
49 | identifier="my_materialized_view",
50 | type=RelationType.MaterializedView,
51 | )
52 |
53 | index_changes = relation._get_index_config_changes(existing_indexes, new_indexes)
54 |
55 | assert isinstance(index_changes, list)
56 | assert len(index_changes) == len(["drop 1", "drop 2", "create 2", "create 3"])
57 | assert index_changes[0].action == RelationConfigChangeAction.drop
58 | assert index_changes[1].action == RelationConfigChangeAction.drop
59 | assert index_changes[2].action == RelationConfigChangeAction.create
60 | assert index_changes[3].action == RelationConfigChangeAction.create
61 |
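62 | # Expected diff: index_0 is unchanged (no action), index_1 was removed (drop),
63 | # index_2 changed its method (modelled as a drop plus a create), and index_3
64 | # is new (create); hence two drops followed by two creates.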
--------------------------------------------------------------------------------
/tests/unit/test_renamed_relations.py:
--------------------------------------------------------------------------------
1 | from dbt.adapters.postgres.relation import PostgresRelation
2 | from dbt.adapters.contracts.relation import RelationType
3 |
4 |
5 | def test_renameable_relation():
6 | relation = PostgresRelation.create(
7 | database="my_db",
8 | schema="my_schema",
9 | identifier="my_table",
10 | type=RelationType.Table,
11 | )
12 | assert relation.renameable_relations == frozenset(
13 | {
14 | RelationType.View,
15 | RelationType.Table,
16 | }
17 | )
18 |
--------------------------------------------------------------------------------