├── .github └── workflows │ ├── bump-version.yml │ ├── release-package.yml │ ├── reset-test-account.yml │ └── run-tests.yml ├── .gitignore ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── conftest.py ├── docs ├── .gitkeep ├── README.md ├── SUMMARY.md ├── blueprint.md ├── getting-started.md ├── resources │ ├── account_parameter.md │ ├── aggregation_policy.md │ ├── alert.md │ ├── api_authentication_security_integration.md │ ├── api_integration.md │ ├── authentication_policy.md │ ├── azure_storage_integration.md │ ├── compute_pool.md │ ├── database.md │ ├── database_role.md │ ├── dynamic_table.md │ ├── email_notification_integration.md │ ├── event_table.md │ ├── external_access_integration.md │ ├── external_stage.md │ ├── failover_group.md │ ├── future_grant.md │ ├── gcs_storage_integration.md │ ├── generic_secret.md │ ├── glue_catalog_integration.md │ ├── grant.md │ ├── grant_on_all.md │ ├── hybrid_table.md │ ├── image_repository.md │ ├── internal_stage.md │ ├── javascript_udf.md │ ├── json_file_format.md │ ├── materialized_view.md │ ├── network_policy.md │ ├── network_rule.md │ ├── oauth_secret.md │ ├── object_store_catalog_integration.md │ ├── packages_policy.md │ ├── parquet_file_format.md │ ├── password_policy.md │ ├── password_secret.md │ ├── pipe.md │ ├── python_stored_procedure.md │ ├── python_udf.md │ ├── replication_group.md │ ├── resource_monitor.md │ ├── role.md │ ├── role_grant.md │ ├── s3storage_integration.md │ ├── schema.md │ ├── secret.md │ ├── sequence.md │ ├── service.md │ ├── session_policy.md │ ├── share.md │ ├── snowflake_partner_oauth_security_integration.md │ ├── snowservices_oauth_security_integration.md │ ├── stage_stream.md │ ├── table.md │ ├── table_stream.md │ ├── tag.md │ ├── task.md │ ├── user.md │ ├── view.md │ ├── view_stream.md │ └── warehouse.md ├── titan-core-github-action.md └── working-with-resources.md ├── examples ├── UNC5537-account-hardening.yml ├── account-parameters.yml ├── dbt-labs-how-we-configure-snowflake.yml 
├── dbt-with-schema-access-role-tree.yml ├── dicom-image-classification-to-detect-pneumonia.yml ├── for-each-example.yml ├── sfquickstarts-dota2-game-replay-parser.yml ├── snowflake-tutorials-create-your-first-iceberg-table.yml └── terraform-tags-example.yml ├── images └── github-explainer.png ├── prompts ├── generate_docstring_prompt.txt └── generate_new_resource_prompt.txt ├── pyproject.toml ├── requirements.dev.txt ├── requirements.txt ├── scripts └── install ├── setup.py ├── tests ├── __init__.py ├── fixtures │ ├── adapters │ │ └── permifrost.yml │ ├── json │ │ ├── account_parameter.json │ │ ├── aggregation_policy.json │ │ ├── alert.json │ │ ├── api_integration.json │ │ ├── authentication_policy.json │ │ ├── azure_storage_integration.json │ │ ├── column.json │ │ ├── compute_pool.json │ │ ├── csv_file_format.json │ │ ├── database.json │ │ ├── database_role.json │ │ ├── database_role_grant.json │ │ ├── dynamic_table.json │ │ ├── event_table.json │ │ ├── external_access_integration.json │ │ ├── external_function.json │ │ ├── external_stage.json │ │ ├── external_table_stream.json │ │ ├── external_volume.json │ │ ├── failover_group.json │ │ ├── file_format.json │ │ ├── future_grant.json │ │ ├── gcs_storage_integration.json │ │ ├── generic_secret.json │ │ ├── glue_catalog_integration.json │ │ ├── grant.json │ │ ├── image_repository.json │ │ ├── internal_stage.json │ │ ├── javascript_udf.json │ │ ├── json_file_format.json │ │ ├── masking_policy.json │ │ ├── materialized_view.json │ │ ├── network_policy.json │ │ ├── network_rule.json │ │ ├── notebook.json │ │ ├── notification_integration.json │ │ ├── oauth_secret.json │ │ ├── object_store_catalog_integration.json │ │ ├── packages_policy.json │ │ ├── parquet_file_format.json │ │ ├── password_policy.json │ │ ├── password_secret.json │ │ ├── pipe.json │ │ ├── python_stored_procedure.json │ │ ├── python_udf.json │ │ ├── replication_group.json │ │ ├── resource_monitor.json │ │ ├── role.json │ │ ├── role_grant.json │ │ ├── 
s3_storage_integration.json │ │ ├── scanner_package.json │ │ ├── schema.json │ │ ├── sequence.json │ │ ├── service.json │ │ ├── session_policy.json │ │ ├── share.json │ │ ├── stage.json │ │ ├── stage_stream.json │ │ ├── table.json │ │ ├── table_stream.json │ │ ├── tag.json │ │ ├── task.json │ │ ├── user.json │ │ ├── view.json │ │ ├── view_stream.json │ │ └── warehouse.json │ └── sql │ │ ├── account_parameter.sql │ │ ├── aggregation_policy.sql │ │ ├── alert.sql │ │ ├── api_integration.sql │ │ ├── authentication_policy.sql │ │ ├── azure_storage_integration.sql │ │ ├── column.sql │ │ ├── compute_pool.sql │ │ ├── csv_file_format.sql │ │ ├── database.sql │ │ ├── database_role.sql │ │ ├── dynamic_table.sql │ │ ├── event_table.sql │ │ ├── external_access_integration.sql │ │ ├── external_function.sql │ │ ├── external_stage.sql │ │ ├── external_volume.sql │ │ ├── failover_group.sql │ │ ├── future_grant.sql │ │ ├── gcs_storage_integration.sql │ │ ├── generic_secret.sql │ │ ├── glue_catalog_integration.sql │ │ ├── grant.sql │ │ ├── image_repository.sql │ │ ├── internal_stage.sql │ │ ├── javascript_udf.sql │ │ ├── json_file_format.sql │ │ ├── materialized_view.sql │ │ ├── network_policy.sql │ │ ├── network_rule.sql │ │ ├── notebook.sql │ │ ├── notification_integration.sql │ │ ├── oauth_secret.sql │ │ ├── object_store_catalog_integration.sql │ │ ├── packages_policy.sql │ │ ├── parquet_file_format.sql │ │ ├── password_policy.sql │ │ ├── password_secret.sql │ │ ├── pipe.sql │ │ ├── python_stored_procedure.sql │ │ ├── python_udf.sql │ │ ├── replication_group.sql │ │ ├── resource_monitor.sql │ │ ├── role.sql │ │ ├── role_grant.sql │ │ ├── s3_storage_integration.sql │ │ ├── schema.sql │ │ ├── security_integration.sql │ │ ├── sequence.sql │ │ ├── service.sql │ │ ├── session_policy.sql │ │ ├── share.sql │ │ ├── snowflake_iceberg_table.sql │ │ ├── stage_stream.sql │ │ ├── table.sql │ │ ├── table_stream.sql │ │ ├── tag.sql │ │ ├── task.sql │ │ ├── user.sql │ │ ├── view.sql │ │ ├── 
view_stream.sql │ │ └── warehouse.sql ├── helpers.py ├── integration │ ├── data_provider │ │ ├── test_fetch.py │ │ ├── test_fetch_future_grant.py │ │ ├── test_fetch_owner.py │ │ ├── test_fetch_resource.py │ │ ├── test_fetch_resource_simple.py │ │ ├── test_fetch_table.py │ │ └── test_list_resource.py │ ├── test_blueprint.py │ ├── test_examples.py │ ├── test_export.py │ ├── test_lifecycle.py │ ├── test_resources.py │ └── test_update.py ├── test_adapters.py ├── test_blueprint.py ├── test_blueprint_finalize.py ├── test_blueprint_merging.py ├── test_blueprint_ownership.py ├── test_container_service.py ├── test_from_sql.py ├── test_gitops.py ├── test_grant.py ├── test_identifiers.py ├── test_identities.py ├── test_parse.py ├── test_parse_collection.py ├── test_plan.py ├── test_polymorphic_resources.py ├── test_privs.py ├── test_props.py ├── test_resource_containers.py ├── test_resource_pointer.py ├── test_resource_refs.py ├── test_resource_rendering.py ├── test_resources.py ├── test_topological_sort.py └── test_vars.py ├── titan ├── __init__.py ├── __main__.py ├── adapters │ ├── __init__.py │ ├── permifrost.py │ └── py.typed ├── api.py ├── blueprint.py ├── blueprint_config.py ├── builder.py ├── builtins.py ├── cli.py ├── client.py ├── data_provider.py ├── data_types.py ├── enums.py ├── exceptions.py ├── gitops.py ├── identifiers.py ├── lifecycle.py ├── operations │ ├── __init__.py │ ├── blueprint.py │ ├── connector.py │ └── export.py ├── parse.py ├── parse_primitives.py ├── policy.py ├── privs.py ├── props.py ├── py.typed ├── resource_name.py ├── resource_tags.py ├── resources │ ├── __init__.py │ ├── account.py │ ├── account_parameter.py │ ├── aggregation_policy.py │ ├── alert.py │ ├── api_integration.py │ ├── authentication_policy.py │ ├── catalog_integration.py │ ├── column.py │ ├── compute_pool.py │ ├── database.py │ ├── dynamic_table.py │ ├── event_table.py │ ├── external_access_integration.py │ ├── external_function.py │ ├── external_volume.py │ ├── 
failover_group.py │ ├── file_format.py │ ├── function.py │ ├── grant.py │ ├── hybrid_table.py │ ├── iceberg_table.py │ ├── image_repository.py │ ├── masking_policy.py │ ├── materialized_view.py │ ├── network_policy.py │ ├── network_rule.py │ ├── notebook.py │ ├── notification_integration.py │ ├── packages_policy.py │ ├── password_policy.py │ ├── pipe.py │ ├── procedure.py │ ├── py.typed │ ├── replication_group.py │ ├── resource.py │ ├── resource_monitor.py │ ├── role.py │ ├── scanner_package.py │ ├── schema.py │ ├── secret.py │ ├── security_integration.py │ ├── sequence.py │ ├── service.py │ ├── session_policy.py │ ├── share.py │ ├── shared_database.py │ ├── stage.py │ ├── storage_integration.py │ ├── stream.py │ ├── table.py │ ├── tag.py │ ├── task.py │ ├── user.py │ ├── view.py │ └── warehouse.py ├── role_ref.py ├── scope.py ├── sql.py ├── titan.code-workspace └── var.py ├── tools ├── __reset_test_account.py ├── benchmark_export.py ├── check_resource_coverage.py ├── detect_privs.py ├── generate_resource.py ├── generate_resource_docs.py ├── manage_test_account.py ├── show_global_privs.py └── test_account_configs │ ├── aws.yml │ ├── azure.yml │ ├── base.yml │ ├── business_critical.yml │ ├── compute_pools.yml │ ├── enterprise.yml │ └── gcp.yml └── version.md /.github/workflows/bump-version.yml: -------------------------------------------------------------------------------- 1 | name: Bump package version on push 2 | on: 3 | push: 4 | branches: 5 | - main 6 | paths-ignore: 7 | - version.md 8 | 9 | jobs: 10 | bump-version: 11 | name: Bump package version 12 | if: "!contains(github.event.head_commit.message, 'Bump version')" 13 | runs-on: ubuntu-20.04 14 | steps: 15 | - name: actions/checkout 16 | uses: actions/checkout@v4 17 | with: 18 | persist-credentials: false 19 | - name: current_version 20 | run: echo "current_version=$(grep '# version' version.md | cut -d ' ' -f3)" >> $GITHUB_ENV 21 | - name: FragileTech/bump-version 22 | uses: FragileTech/bump-version@main 23 
| with: 24 | current_version: "${{ env.current_version }}" 25 | files: version.md 26 | commit_name: Titan Systems Bot 27 | commit_email: info@applytitan.com 28 | login: titan-systems-bot 29 | token: "${{ secrets.BOT_TOKEN }}" 30 | -------------------------------------------------------------------------------- /.github/workflows/release-package.yml: -------------------------------------------------------------------------------- 1 | name: Build and release package to PyPi 2 | 3 | on: 4 | workflow_dispatch: 5 | workflow_call: 6 | secrets: 7 | PYPI_USERNAME: 8 | required: true 9 | PYPI_PASSWORD: 10 | required: true 11 | 12 | jobs: 13 | build-and-release: 14 | runs-on: ubuntu-20.04 15 | steps: 16 | - name: actions/checkout 17 | uses: actions/checkout@v4 18 | - name: Set up Python 19 | uses: actions/setup-python@v5 20 | with: 21 | python-version: '3.9' 22 | - name: Create a virtual environment 23 | run: | 24 | python -m venv .venv 25 | - name: Install dependencies 26 | run: | 27 | source ./.venv/bin/activate 28 | python -m pip install --upgrade pip 29 | make install-dev 30 | - name: Build and upload to PyPI 31 | run: | 32 | source ./.venv/bin/activate 33 | make submit 34 | env: 35 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} 36 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include version.md -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: install install-dev test integration style check clean build docs coverage 2 | EDITION ?= standard or enterprise 3 | 4 | install: 5 | pip install -e . 
6 | 7 | install-dev: 8 | pip install -e ".[dev]" 9 | 10 | test: 11 | python -m pytest 12 | 13 | integration: 14 | python -m pytest --snowflake -m "$(EDITION)" 15 | 16 | style: 17 | python -m black . 18 | codespell . 19 | 20 | 21 | typecheck: 22 | mypy --exclude="titan/resources/.*" --exclude="titan/sql.py" --follow-imports=skip titan/ 23 | 24 | check: style typecheck test 25 | 26 | clean: 27 | rm -rf build dist *.egg-info 28 | find . -name "__pycache__" -type d -exec rm -rf {} + 29 | 30 | build: 31 | mkdir -p dist 32 | zip -vrX dist/titan-$(shell python setup.py -V).zip titan/ 33 | 34 | docs: 35 | python tools/generate_resource_docs.py 36 | 37 | coverage: clean 38 | python tools/check_resource_coverage.py 39 | 40 | package: clean 41 | python -m build 42 | 43 | submit: package 44 | python -m twine upload dist/* -------------------------------------------------------------------------------- /docs/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/docs/.gitkeep -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # `titan core` - Snowflake infrastructure as code 2 | 3 | Titan Core helps you provision, deploy, and secure resources in Snowflake. It replaces tools like Terraform, Schemachange, or Permifrost. 4 | 5 | Deploy any Snowflake resource, including users, roles, schemas, databases, integrations, pipes, stages, functions, stored procedures, and more. Convert adhoc, bug-prone SQL management scripts into simple, repeatable configuration. 6 | 7 | ## Titan Core is for 8 | 9 | * DevOps engineers looking to automate and manage Snowflake infrastructure. 10 | * Analytics engineers working with dbt who want to manage Snowflake resources without macros. 
11 | * Data platform teams who need to reliably manage Snowflake with CI/CD. 12 | * Organizations that prefer a git-based workflow for infrastructure management. 13 | * Teams seeking to replace Terraform for Snowflake-related tasks. 14 | 15 | 16 | ## Key Features 17 | 18 | * **Declarative** » Generates the right SQL to make your config and account match 19 | 20 | * **Comprehensive** » Nearly every Snowflake resource is supported 21 | 22 | * **Flexible** » Write resource configuration in YAML or Python 23 | 24 | * **Fast** » Titan Core runs 50-90% faster than Terraform and Permifrost 25 | 26 | * **Migration-friendly** » Generate config automatically with the export CLI 27 | 28 | ## Contents 29 | 30 | * [Getting Started](getting-started.md): Installation and initial setup guide. 31 | * [Blueprint](blueprint.md): Customize and control how resources are deployed to Snowflake 32 | * [GitHub Action](titan-core-github-action.md) - For git-based workflows, including dbt. 33 | 34 | -------------------------------------------------------------------------------- /docs/resources/account_parameter.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # AccountParameter 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/alter-account) 9 | 10 | An account parameter in Snowflake that allows you to set or alter account-level parameters. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | account_parameter = AccountParameter( 19 | name="some_parameter", 20 | value="some_value", 21 | ) 22 | ``` 23 | 24 | 25 | ### YAML 26 | 27 | ```yaml 28 | account_parameters: 29 | - name: some_parameter 30 | value: some_value 31 | ``` 32 | 33 | 34 | ## Fields 35 | 36 | * `name` (string, required) - The name of the account parameter. 37 | * `value` ([Any](any.md), required) - The value to set for the account parameter. 
38 | 39 | 40 | -------------------------------------------------------------------------------- /docs/resources/aggregation_policy.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # AggregationPolicy 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-aggregation-policy) 9 | 10 | Represents an aggregation policy in Snowflake, which defines constraints on aggregation operations. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | aggregation_policy = AggregationPolicy( 19 | name="some_aggregation_policy", 20 | body="AGGREGATION_CONSTRAINT(MIN_GROUP_SIZE => 5)", 21 | owner="SYSADMIN" 22 | ) 23 | ``` 24 | 25 | 26 | ### YAML 27 | 28 | ```yaml 29 | aggregation_policies: 30 | - name: some_aggregation_policy 31 | body: AGGREGATION_CONSTRAINT(MIN_GROUP_SIZE => 5) 32 | owner: SYSADMIN 33 | ``` 34 | 35 | 36 | ## Fields 37 | 38 | * `name` (string, required) - The name of the aggregation policy. 39 | * `body` (string, required) - The SQL expression defining the aggregation constraint. 40 | * `owner` (string or [Role](role.md)) - The owner of the aggregation policy. Defaults to "SYSADMIN". 41 | 42 | 43 | -------------------------------------------------------------------------------- /docs/resources/alert.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # Alert 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-alert) 9 | 10 | Alerts trigger notifications when certain conditions are met. 
11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | alert = Alert( 19 | name="some_alert", 20 | warehouse="some_warehouse", 21 | schedule="USING CRON * * * * * UTC", 22 | condition="SELECT COUNT(*) FROM some_table", 23 | then="CALL SYSTEM$SEND_EMAIL('example@example.com', 'Alert Triggered', 'The alert condition was met.')", 24 | ) 25 | ``` 26 | 27 | 28 | ### YAML 29 | 30 | ```yaml 31 | alerts: 32 | - name: some_alert 33 | warehouse: some_warehouse 34 | schedule: USING CRON * * * * * UTC 35 | condition: SELECT COUNT(*) FROM some_table 36 | then: CALL SYSTEM$SEND_EMAIL('example@example.com', 'Alert Triggered', 'The alert condition was met.') 37 | ``` 38 | 39 | 40 | ## Fields 41 | 42 | * `name` (string, required) - The name of the alert. 43 | * `warehouse` (string or [Warehouse](warehouse.md)) - The name of the warehouse to run the query on. 44 | * `schedule` (string) - The schedule for the alert to run on. 45 | * `condition` (string) - The condition for the alert to trigger on. 46 | * `then` (string) - The query to run when the alert triggers. 47 | * `owner` (string or [Role](role.md)) - The owner role of the alert. Defaults to "SYSADMIN". 48 | * `comment` (string) - A comment for the alert. Defaults to None. 49 | * `tags` (dict) - Tags for the alert. Defaults to None. 50 | 51 | 52 | -------------------------------------------------------------------------------- /docs/resources/api_integration.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # APIIntegration 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-api-integration) 9 | 10 | Manages API integrations in Snowflake, allowing external services to interact with Snowflake resources securely. 11 | This class supports creating, replacing, and checking the existence of API integrations with various configurations. 
12 | 13 | 14 | ## Examples 15 | 16 | ### Python 17 | 18 | ```python 19 | api_integration = APIIntegration( 20 | name="some_api_integration", 21 | api_provider="AWS_API_GATEWAY", 22 | api_aws_role_arn="arn:aws:iam::123456789012:role/MyRole", 23 | enabled=True, 24 | api_allowed_prefixes=["/prod/", "/dev/"], 25 | api_blocked_prefixes=["/test/"], 26 | api_key="ABCD1234", 27 | comment="Example API integration" 28 | ) 29 | ``` 30 | 31 | 32 | ### YAML 33 | 34 | ```yaml 35 | api_integrations: 36 | - name: some_api_integration 37 | api_provider: AWS_API_GATEWAY 38 | api_aws_role_arn: "arn:aws:iam::123456789012:role/MyRole" 39 | enabled: true 40 | api_allowed_prefixes: ["/prod/", "/dev/"] 41 | api_blocked_prefixes: ["/test/"] 42 | api_key: "ABCD1234" 43 | comment: "Example API integration" 44 | ``` 45 | 46 | 47 | ## Fields 48 | 49 | * `name` (string, required) - The unique name of the API integration. 50 | * `api_provider` (string or [ApiProvider](api_provider.md), required) - The provider of the API service. Defaults to AWS_API_GATEWAY. 51 | * `api_aws_role_arn` (string, required) - The AWS IAM role ARN associated with the API integration. 52 | * `api_key` (string) - The API key used for authentication. 53 | * `api_allowed_prefixes` (list) - The list of allowed prefixes for the API endpoints. 54 | * `api_blocked_prefixes` (list) - The list of blocked prefixes for the API endpoints. 55 | * `enabled` (bool, required) - Specifies if the API integration is enabled. Defaults to TRUE. 56 | * `comment` (string) - A comment or description for the API integration. 
57 | 58 | 59 | -------------------------------------------------------------------------------- /docs/resources/authentication_policy.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # AuthenticationPolicy 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-authentication-policy) 9 | 10 | Defines the rules and constraints for authentication within the system, ensuring they meet specific security standards. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | authentication_policy = AuthenticationPolicy( 19 | name="some_authentication_policy", 20 | authentication_methods=["PASSWORD", "SAML"], 21 | mfa_authentication_methods=["PASSWORD"], 22 | mfa_enrollment="REQUIRED", 23 | client_types=["SNOWFLAKE_UI"], 24 | security_integrations=["ALL"], 25 | comment="Policy for secure authentication." 26 | ) 27 | ``` 28 | 29 | 30 | ### YAML 31 | 32 | ```yaml 33 | authentication_policies: 34 | - name: some_authentication_policy 35 | authentication_methods: 36 | - PASSWORD 37 | - SAML 38 | mfa_authentication_methods: 39 | - PASSWORD 40 | mfa_enrollment: REQUIRED 41 | client_types: 42 | - SNOWFLAKE_UI 43 | security_integrations: 44 | - ALL 45 | comment: Policy for secure authentication. 46 | ``` 47 | 48 | 49 | ## Fields 50 | 51 | * `name` (string, required) - The name of the authentication policy. 52 | * `authentication_methods` (list) - A list of allowed authentication methods. 53 | * `mfa_authentication_methods` (list) - A list of authentication methods that enforce multi-factor authentication (MFA). 54 | * `mfa_enrollment` (string) - Determines whether a user must enroll in multi-factor authentication. Defaults to OPTIONAL. 55 | * `client_types` (list) - A list of clients that can authenticate with Snowflake. 56 | * `security_integrations` (list) - A list of security integrations the authentication policy is associated with. 
57 | * `comment` (string) - A comment or description for the authentication policy. 58 | * `owner` (string or [Role](role.md)) - The owner role of the authentication policy. Defaults to SECURITYADMIN. 59 | 60 | 61 | -------------------------------------------------------------------------------- /docs/resources/azure_storage_integration.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # AzureStorageIntegration 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-storage-integration) 9 | 10 | Represents an Azure storage integration in Snowflake, which allows Snowflake to access external cloud storage using Azure credentials. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | azure_storage_integration = AzureStorageIntegration( 19 | name="some_azure_storage_integration", 20 | enabled=True, 21 | azure_tenant_id="some_tenant_id", 22 | storage_allowed_locations=["azure://somebucket/somepath/"], 23 | storage_blocked_locations=["azure://someotherbucket/somepath/"], 24 | comment="This is an Azure storage integration." 25 | ) 26 | ``` 27 | 28 | 29 | ### YAML 30 | 31 | ```yaml 32 | azure_storage_integrations: 33 | - name: some_azure_storage_integration 34 | enabled: true 35 | azure_tenant_id: some_tenant_id 36 | storage_allowed_locations: 37 | - azure://somebucket/somepath/ 38 | storage_blocked_locations: 39 | - azure://someotherbucket/somepath/ 40 | comment: This is an Azure storage integration. 41 | ``` 42 | 43 | 44 | ## Fields 45 | 46 | * `name` (string, required) - The name of the storage integration. 47 | * `enabled` (bool, required) - Specifies whether the storage integration is enabled. 48 | * `azure_tenant_id` (string, required) - The Azure tenant ID associated with the storage integration. 49 | * `storage_allowed_locations` (list) - The cloud storage locations that are allowed. 
50 | * `storage_blocked_locations` (list) - The cloud storage locations that are blocked. 51 | * `owner` (string or [Role](role.md)) - The owner role of the storage integration. Defaults to "ACCOUNTADMIN". 52 | * `comment` (string) - A comment about the storage integration. 53 | 54 | 55 | -------------------------------------------------------------------------------- /docs/resources/compute_pool.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # ComputePool 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-compute-pool) 9 | 10 | A compute pool is a group of compute resources in Snowflake used to run Snowpark Container Services workloads (services and jobs). 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | compute_pool = ComputePool( 19 | name="some_compute_pool", 20 | owner="ACCOUNTADMIN", 21 | min_nodes=2, 22 | max_nodes=10, 23 | instance_family="CPU_X64_S", 24 | auto_resume=True, 25 | initially_suspended=False, 26 | auto_suspend_secs=1800, 27 | comment="Example compute pool" 28 | ) 29 | ``` 30 | 31 | 32 | ### YAML 33 | 34 | ```yaml 35 | compute_pools: 36 | - name: some_compute_pool 37 | owner: ACCOUNTADMIN 38 | min_nodes: 2 39 | max_nodes: 10 40 | instance_family: CPU_X64_S 41 | auto_resume: true 42 | initially_suspended: false 43 | auto_suspend_secs: 1800 44 | comment: Example compute pool 45 | ``` 46 | 47 | 48 | ## Fields 49 | 50 | * `name` (string, required) - The unique name of the compute pool. 51 | * `owner` (string or [Role](role.md)) - The owner of the compute pool. Defaults to "ACCOUNTADMIN". 52 | * `min_nodes` (int) - The minimum number of nodes in the compute pool. 53 | * `max_nodes` (int) - The maximum number of nodes in the compute pool. 54 | * `instance_family` (string or [InstanceFamily](instance_family.md)) - The family of instances to use for the compute nodes. 
55 | * `auto_resume` (bool) - Whether the compute pool should automatically resume when queries are submitted. Defaults to True. 56 | * `initially_suspended` (bool) - Whether the compute pool should start in a suspended state. 57 | * `auto_suspend_secs` (int) - The number of seconds of inactivity after which the compute pool should automatically suspend. Defaults to 3600. 58 | * `comment` (string) - An optional comment about the compute pool. 59 | 60 | 61 | -------------------------------------------------------------------------------- /docs/resources/database_role.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # DatabaseRole 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-database-role) 9 | 10 | A database role in Snowflake is a collection of privileges that can be assigned to users or other roles within a specific database context. It is used to manage access control and permissions at the database level. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | database_role = DatabaseRole( 19 | name="some_database_role", 20 | database="some_database", 21 | owner="USERADMIN", 22 | tags={"department": "finance"}, 23 | comment="This role is for database-specific access control." 24 | ) 25 | ``` 26 | 27 | 28 | ### YAML 29 | 30 | ```yaml 31 | database_roles: 32 | - name: some_database_role 33 | database: some_database 34 | owner: USERADMIN 35 | tags: 36 | department: finance 37 | comment: This role is for database-specific access control. 38 | ``` 39 | 40 | 41 | ## Fields 42 | 43 | * `name` (string, required) - The name of the database role. 44 | * `database` (string) - The database this role is associated with. This is derived from the fully qualified name. 45 | * `owner` (string) - The owner of the database role. Defaults to "USERADMIN". 46 | * `tags` (dict) - Tags associated with the database role. 
47 | * `comment` (string) - A comment about the database role. 48 | 49 | 50 | -------------------------------------------------------------------------------- /docs/resources/dynamic_table.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # DynamicTable 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-dynamic-table) 9 | 10 | Represents a dynamic table in Snowflake, which can be configured to refresh automatically, 11 | fully, or incrementally, and initialized on creation or on a schedule. 12 | 13 | 14 | ## Examples 15 | 16 | ### Python 17 | 18 | ```python 19 | dynamic_table = DynamicTable( 20 | name="some_dynamic_table", 21 | columns=[{"name": "id"}, {"name": "data"}], 22 | target_lag="1 HOUR", 23 | warehouse="some_warehouse", 24 | refresh_mode="AUTO", 25 | initialize="ON_CREATE", 26 | as_="SELECT id, data FROM source_table", 27 | comment="This is a sample dynamic table", 28 | owner="SYSADMIN" 29 | ) 30 | ``` 31 | 32 | 33 | ### YAML 34 | 35 | ```yaml 36 | dynamic_tables: 37 | - name: some_dynamic_table 38 | columns: 39 | - name: id 40 | - name: data 41 | target_lag: "1 HOUR" 42 | warehouse: some_warehouse 43 | refresh_mode: AUTO 44 | initialize: ON_CREATE 45 | as_: "SELECT id, data FROM source_table" 46 | comment: "This is a sample dynamic table" 47 | owner: SYSADMIN 48 | ``` 49 | 50 | 51 | ## Fields 52 | 53 | * `name` (string, required) - The name of the dynamic table. 54 | * `columns` (list, required) - A list of dicts defining the structure of the table. 55 | * `target_lag` (string) - The acceptable lag (delay) for data in the table. Defaults to "DOWNSTREAM". 56 | * `warehouse` (string or [Warehouse](warehouse.md), required) - The warehouse where the table is stored. 57 | * `as_` (string, required) - The query used to populate the table. 
58 | * `refresh_mode` (string or [RefreshMode](refresh_mode.md)) - The mode of refreshing the table (AUTO, FULL, INCREMENTAL). 59 | * `initialize` (string or [InitializeBehavior](initialize_behavior.md)) - The behavior when the table is initialized (ON_CREATE, ON_SCHEDULE). 60 | * `comment` (string) - An optional comment for the table. 61 | * `owner` (string or [Role](role.md)) - The owner of the table. Defaults to "SYSADMIN". 62 | 63 | 64 | -------------------------------------------------------------------------------- /docs/resources/email_notification_integration.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # EmailNotificationIntegration 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-notification-integration) 9 | 10 | Manages the configuration for email-based notification integrations within Snowflake. This integration 11 | allows specifying recipients who will receive notifications via email. 12 | 13 | 14 | ## Examples 15 | 16 | ### Python 17 | 18 | ```python 19 | email_notification_integration = EmailNotificationIntegration( 20 | name="some_email_notification_integration", 21 | enabled=True, 22 | allowed_recipients=["user1@example.com", "user2@example.com"], 23 | comment="Example email notification integration" 24 | ) 25 | ``` 26 | 27 | 28 | ### YAML 29 | 30 | ```yaml 31 | email_notification_integrations: 32 | - name: some_email_notification_integration 33 | enabled: true 34 | allowed_recipients: 35 | - user1@example.com 36 | - user2@example.com 37 | comment: "Example email notification integration" 38 | ``` 39 | 40 | 41 | ## Fields 42 | 43 | * `name` (string, required) - The name of the email notification integration. 44 | * `enabled` (bool, required) - Specifies whether the notification integration is enabled. 45 | * `allowed_recipients` (list) - A list of email addresses that are allowed to receive notifications. 
46 | * `comment` (string) - An optional comment about the notification integration. 47 | * `owner` (string or [Role](role.md)) - The owner role of the notification integration. Defaults to "ACCOUNTADMIN". 48 | 49 | 50 | -------------------------------------------------------------------------------- /docs/resources/event_table.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # EventTable 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-event-table) 9 | 10 | An event table captures events, including logged messages from functions and procedures. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | event_table = EventTable( 19 | name="some_event_table", 20 | cluster_by=["timestamp", "user_id"], 21 | data_retention_time_in_days=365, 22 | max_data_extension_time_in_days=30, 23 | change_tracking=True, 24 | default_ddl_collation="utf8", 25 | copy_grants=True, 26 | comment="This is a sample event table.", 27 | tags={"department": "analytics"} 28 | ) 29 | ``` 30 | 31 | 32 | ### YAML 33 | 34 | ```yaml 35 | event_tables: 36 | - name: some_event_table 37 | cluster_by: 38 | - timestamp 39 | - user_id 40 | data_retention_time_in_days: 365 41 | max_data_extension_time_in_days: 30 42 | change_tracking: true 43 | default_ddl_collation: utf8 44 | copy_grants: true 45 | comment: This is a sample event table. 46 | tags: 47 | department: analytics 48 | ``` 49 | 50 | 51 | ## Fields 52 | 53 | * `name` (string, required) - The name of the event table. 54 | * `cluster_by` (list) - The expressions to cluster data by. 55 | * `data_retention_time_in_days` (int) - The number of days to retain data. 56 | * `max_data_extension_time_in_days` (int) - The maximum number of days to extend data retention. 57 | * `change_tracking` (bool) - Specifies whether change tracking is enabled. Defaults to False. 
58 | * `default_ddl_collation` (string) - The default collation for DDL operations. 59 | * `copy_grants` (bool) - Specifies whether to copy grants. Defaults to False. 60 | * `comment` (string) - A comment for the event table. 61 | * `tags` (dict) - Tags associated with the event table. 62 | 63 | 64 | -------------------------------------------------------------------------------- /docs/resources/external_access_integration.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # ExternalAccessIntegration 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-external-access-integration) 9 | 10 | External Access Integrations enable code within functions and stored procedures to utilize secrets and establish connections with external networks. This resource configures the rules and secrets that can be accessed by such code. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | external_access_integration = ExternalAccessIntegration( 19 | name="some_external_access_integration", 20 | allowed_network_rules=["rule1", "rule2"], 21 | enabled=True 22 | ) 23 | ``` 24 | 25 | 26 | ### YAML 27 | 28 | ```yaml 29 | external_access_integrations: 30 | - name: some_external_access_integration 31 | allowed_network_rules: 32 | - rule1 33 | - rule2 34 | enabled: true 35 | ``` 36 | 37 | 38 | ## Fields 39 | 40 | * `name` (string, required) - The name of the external access integration. 41 | * `allowed_network_rules` (list, required) - [NetworkRules](network_rule.md) that are allowed for this integration. 42 | * `allowed_api_authentication_integrations` (list) - API authentication integrations that are allowed. 43 | * `allowed_authentication_secrets` (list) - Authentication secrets that are allowed. 44 | * `enabled` (bool) - Specifies if the integration is enabled. Defaults to True. 45 | * `comment` (string) - An optional comment about the integration. 
46 | * `owner` (string or [Role](role.md)) - The owner role of the external access integration. Defaults to "ACCOUNTADMIN". 47 | 48 | 49 | -------------------------------------------------------------------------------- /docs/resources/external_stage.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # ExternalStage 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-stage) 9 | 10 | Manages external stages in Snowflake, which are used to reference external storage locations. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | external_stage = ExternalStage( 19 | name="some_external_stage", 20 | url="https://example.com/storage", 21 | owner="SYSADMIN", 22 | storage_integration="some_integration" 23 | ) 24 | ``` 25 | 26 | 27 | ### YAML 28 | 29 | ```yaml 30 | stages: 31 | - name: some_external_stage 32 | type: external 33 | url: https://example.com/storage 34 | owner: SYSADMIN 35 | storage_integration: some_integration 36 | ``` 37 | 38 | 39 | ## Fields 40 | 41 | * `name` (string, required) - The name of the external stage. 42 | * `url` (string, required) - The URL pointing to the external storage location. 43 | * `owner` (string or [Role](role.md)) - The owner role of the external stage. Defaults to "SYSADMIN". 44 | * `storage_integration` (string) - The name of the storage integration used with this stage. 45 | * `credentials` (dict) - The credentials for accessing the external storage, if required. 46 | * `encryption` (dict) - The encryption settings used for data stored in the external location. 47 | * `directory` (dict) - Settings related to directory handling in the external storage. 48 | * `tags` (dict) - Tags associated with the external stage. 49 | * `comment` (string) - A comment about the external stage. 
50 | 51 | 52 | -------------------------------------------------------------------------------- /docs/resources/future_grant.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # FutureGrant 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/grant-privilege) 9 | 10 | Represents a future grant of privileges on a resource to a role in Snowflake. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | # Database Object Privs: 19 | future_grant = FutureGrant( 20 | priv="CREATE TABLE", 21 | on_future_schemas_in=Database(name="somedb"), 22 | to="somerole", 23 | ) 24 | future_grant = FutureGrant( 25 | priv="CREATE TABLE", 26 | on_future_schemas_in_database="somedb", 27 | to="somerole", 28 | ) 29 | # Schema Object Privs: 30 | future_grant = FutureGrant( 31 | priv="SELECT", 32 | on_future_tables_in=Schema(name="someschema"), 33 | to="somerole", 34 | ) 35 | future_grant = FutureGrant( 36 | priv="READ", 37 | on_future_image_repositories_in_schema="someschema", 38 | to="somerole", 39 | ) 40 | ``` 41 | 42 | 43 | ### YAML 44 | 45 | ```yaml 46 | future_grants: 47 | - priv: SELECT 48 | on_future_tables_in_schema: someschema 49 | to: somerole 50 | ``` 51 | 52 | 53 | ## Fields 54 | 55 | * `priv` (string, required) - The privilege to grant. Examples include 'SELECT', 'INSERT', 'CREATE TABLE'. 56 | * `on_type` (string or [ResourceType](resource_type.md), required) - The type of resource on which the privilege is granted. 57 | * `in_type` (string or [ResourceType](resource_type.md), required) - The type of container resource in which the privilege is granted. 58 | * `in_name` (string, required) - The name of the container resource in which the privilege is granted. 59 | * `to` (string or [Role](role.md), required) - The role to which the privileges are granted. 
60 | * `grant_option` (bool) - Specifies whether the grantee can grant the privileges to other roles. Defaults to False. 61 | 62 | 63 | -------------------------------------------------------------------------------- /docs/resources/gcs_storage_integration.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # GCSStorageIntegration 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-storage-integration) 9 | 10 | Manages the integration of Google Cloud Storage (GCS) as an external stage for storing data. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | gcs_storage_integration = GCSStorageIntegration( 19 | name="some_gcs_storage_integration", 20 | enabled=True, 21 | storage_allowed_locations=['gcs://bucket/path/'], 22 | storage_blocked_locations=['gcs://bucket/blocked_path/'] 23 | ) 24 | ``` 25 | 26 | 27 | ### YAML 28 | 29 | ```yaml 30 | gcs_storage_integrations: 31 | - name: some_gcs_storage_integration 32 | enabled: true 33 | storage_allowed_locations: 34 | - 'gcs://bucket/path/' 35 | storage_blocked_locations: 36 | - 'gcs://bucket/blocked_path/' 37 | ``` 38 | 39 | 40 | ## Fields 41 | 42 | * `name` (string, required) - The name of the storage integration. 43 | * `enabled` (bool, required) - Specifies whether the storage integration is enabled. 44 | * `storage_allowed_locations` (list) - A list of allowed GCS locations for data storage. 45 | * `storage_blocked_locations` (list) - A list of blocked GCS locations for data storage. 46 | * `owner` (string or [Role](role.md)) - The owner role of the storage integration. Defaults to 'ACCOUNTADMIN'. 47 | * `comment` (string) - An optional comment about the storage integration. 
48 | 49 | 50 | -------------------------------------------------------------------------------- /docs/resources/generic_secret.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # GenericSecret 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-secret) 9 | 10 | A Secret defines a set of sensitive data that can be used for authentication or other purposes. 11 | This class defines a generic secret. 12 | 13 | 14 | ## Examples 15 | 16 | ### Python 17 | 18 | ```python 19 | secret = GenericSecret( 20 | name="some_secret", 21 | secret_string="some_secret_string", 22 | comment="some_comment", 23 | owner="SYSADMIN", 24 | ) 25 | ``` 26 | 27 | 28 | ### YAML 29 | 30 | ```yaml 31 | secrets: 32 | - name: some_secret 33 | secret_type: GENERIC_STRING 34 | secret_string: some_secret_string 35 | comment: some_comment 36 | owner: SYSADMIN 37 | ``` 38 | 39 | 40 | ## Fields 41 | 42 | * `name` (string, required) - The name of the secret. 43 | * `secret_string` (string) - The secret string. 44 | * `comment` (string) - A comment for the secret. 45 | * `owner` (string or [Role](role.md)) - The owner of the secret. Defaults to SYSADMIN. 46 | 47 | 48 | -------------------------------------------------------------------------------- /docs/resources/glue_catalog_integration.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # GlueCatalogIntegration 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-catalog-integration) 9 | 10 | Manages the integration of AWS Glue as a catalog in Snowflake, supporting the ICEBERG table format. 
11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | glue_catalog_integration = GlueCatalogIntegration( 19 | name="some_catalog_integration", 20 | table_format="ICEBERG", 21 | glue_aws_role_arn="arn:aws:iam::123456789012:role/SnowflakeAccess", 22 | glue_catalog_id="some_glue_catalog_id", 23 | catalog_namespace="some_namespace", 24 | enabled=True, 25 | glue_region="us-west-2", 26 | comment="Integration for AWS Glue with Snowflake." 27 | ) 28 | ``` 29 | 30 | 31 | ### YAML 32 | 33 | ```yaml 34 | catalog_integrations: 35 | - name: some_catalog_integration 36 | table_format: ICEBERG 37 | glue_aws_role_arn: arn:aws:iam::123456789012:role/SnowflakeAccess 38 | glue_catalog_id: some_glue_catalog_id 39 | catalog_namespace: some_namespace 40 | enabled: true 41 | glue_region: us-west-2 42 | comment: Integration for AWS Glue with Snowflake. 43 | ``` 44 | 45 | 46 | ## Fields 47 | 48 | * `name` (string, required) - The name of the catalog integration. 49 | * `table_format` (string or [CatalogTableFormat](catalog_table_format.md), required) - The format of the table, defaults to ICEBERG. 50 | * `glue_aws_role_arn` (string, required) - The ARN for the AWS role to assume. 51 | * `glue_catalog_id` (string, required) - The Glue catalog ID. 52 | * `catalog_namespace` (string, required) - The namespace of the catalog. 53 | * `enabled` (bool, required) - Specifies whether the catalog integration is enabled. 54 | * `glue_region` (string) - The AWS region of the Glue catalog. Defaults to None. 55 | * `owner` (string or [Role](role.md)) - The owner role of the catalog integration. Defaults to "ACCOUNTADMIN". 56 | * `comment` (string) - An optional comment describing the catalog integration. 
57 | 58 | 59 | -------------------------------------------------------------------------------- /docs/resources/grant.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # Grant 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/grant-privilege) 9 | 10 | Represents a grant of privileges on a resource to a role in Snowflake. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | # Global Privs: 19 | grant = Grant(priv="CREATE WAREHOUSE", on="ACCOUNT", to="somerole") 20 | # Warehouse Privs: 21 | grant = Grant(priv="OPERATE", on=Warehouse(name="foo"), to="somerole") 22 | grant = Grant(priv="OPERATE", on_warehouse="foo", to="somerole") 23 | # Schema Privs: 24 | grant = Grant(priv="CREATE TABLE", on=Schema(name="foo"), to="somerole") 25 | grant = Grant(priv="CREATE TABLE", on_schema="foo", to="somerole") 26 | # Table Privs: 27 | grant = Grant(priv="SELECT", on_table="sometable", to="somerole") 28 | ``` 29 | 30 | 31 | ### YAML 32 | 33 | ```yaml 34 | - Grant: 35 | priv: "SELECT" 36 | on_table: "some_table" 37 | to: "some_role" 38 | grant_option: true 39 | ``` 40 | 41 | 42 | ## Fields 43 | 44 | * `priv` (string, required) - The privilege to grant. Examples include 'SELECT', 'INSERT', 'CREATE TABLE'. 45 | * `on` (string or [Resource](resource.md), required) - The resource on which the privilege is granted. Can be a string like 'ACCOUNT' or a specific resource object. 46 | * `to` (string or [Role](role.md), required) - The role to which the privileges are granted. 47 | * `grant_option` (bool) - Specifies whether the grantee can grant the privileges to other roles. Defaults to False. 48 | * `owner` (string or [Role](role.md)) - The owner role of the grant. Defaults to 'SYSADMIN'. 
49 | 50 | 51 | -------------------------------------------------------------------------------- /docs/resources/grant_on_all.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # GrantOnAll 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/grant-privilege) 9 | 10 | Represents a grant of privileges on all resources of a specified type to a role in Snowflake. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | # Schema Privs: 19 | grant_on_all = GrantOnAll( 20 | priv="CREATE TABLE", 21 | on_all_schemas_in_database="somedb", 22 | to="somerole", 23 | ) 24 | grant_on_all = GrantOnAll( 25 | priv="CREATE VIEW", 26 | on_all_schemas_in=Database(name="somedb"), 27 | to="somerole", 28 | ) 29 | # Schema Object Privs: 30 | grant_on_all = GrantOnAll( 31 | priv="SELECT", 32 | on_all_tables_in_schema="someschema", 33 | to="somerole", 34 | ) 35 | grant_on_all = GrantOnAll( 36 | priv="SELECT", 37 | on_all_views_in_database="somedb", 38 | to="somerole", 39 | ) 40 | ``` 41 | 42 | 43 | ### YAML 44 | 45 | ```yaml 46 | grants_on_all: 47 | - priv: SELECT 48 | on_all_tables_in_schema: someschema 49 | to: somerole 50 | ``` 51 | 52 | 53 | ## Fields 54 | 55 | * `priv` (string, required) - The privilege to grant. Examples include 'SELECT', 'INSERT', 'CREATE TABLE'. 56 | * `on_type` (string or [ResourceType](resource_type.md), required) - The type of resource on which the privileges are granted. 57 | * `in_type` (string or [ResourceType](resource_type.md), required) - The type of container resource in which the privilege is granted. 58 | * `in_name` (string, required) - The name of the container resource in which the privilege is granted. 59 | * `to` (string or [Role](role.md), required) - The role to which the privileges are granted. 60 | * `grant_option` (bool) - Specifies whether the grantee can grant the privileges to other roles. Defaults to False. 
61 | 62 | 63 | -------------------------------------------------------------------------------- /docs/resources/hybrid_table.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # HybridTable 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-hybrid-table) 9 | 10 | `[UNDER DEVELOPMENT]` 11 | A hybrid table is a Snowflake table type that is optimized for hybrid transactional and operational workloads that require low latency and high throughput on small random point reads and writes. 12 | 13 | 14 | ## Examples 15 | 16 | ### Python 17 | 18 | ```python 19 | hybrid_table = HybridTable( 20 | name="some_hybrid_table", 21 | columns=[Column(name="col1", type="STRING")], 22 | owner="SYSADMIN", 23 | comment="This is a hybrid table." 24 | ) 25 | ``` 26 | 27 | 28 | ### YAML 29 | 30 | ```yaml 31 | hybrid_tables: 32 | - name: some_hybrid_table 33 | columns: 34 | - name: col1 35 | type: STRING 36 | owner: SYSADMIN 37 | comment: This is a hybrid table. 38 | ``` 39 | 40 | 41 | ## Fields 42 | 43 | * `name` (string, required) - The name of the hybrid table. 44 | * `columns` (list, required) - The columns of the hybrid table. 45 | * `tags` (dict) - Tags associated with the hybrid table. 46 | * `owner` (string or [Role](role.md)) - The owner role of the hybrid table. Defaults to "SYSADMIN". 47 | * `comment` (string) - A comment for the hybrid table. 48 | 49 | 50 | -------------------------------------------------------------------------------- /docs/resources/image_repository.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # ImageRepository 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-image-repository) 9 | 10 | An image repository in Snowflake is a storage unit within a schema that allows for the management of OCIv2-compliant container images. 
11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | image_repository = ImageRepository( 19 | name="some_image_repository", 20 | ) 21 | ``` 22 | 23 | 24 | ### YAML 25 | 26 | ```yaml 27 | image_repositories: 28 | - name: some_image_repository 29 | ``` 30 | 31 | 32 | ## Fields 33 | 34 | * `name` (string, required) - The unique identifier for the image repository within the schema. 35 | * `owner` (string or [Role](role.md)) - The owner role of the image repository. Defaults to "SYSADMIN". 36 | 37 | 38 | -------------------------------------------------------------------------------- /docs/resources/internal_stage.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # InternalStage 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-stage.html) 9 | 10 | Represents an internal stage in Snowflake, which is a named location used to store data files 11 | that will be loaded into or unloaded from Snowflake tables. 12 | 13 | 14 | ## Examples 15 | 16 | ### Python 17 | 18 | ```python 19 | internal_stage = InternalStage( 20 | name="some_internal_stage", 21 | owner="SYSADMIN", 22 | encryption={"type": "SNOWFLAKE_SSE"}, 23 | directory={"enable": True}, 24 | tags={"department": "finance"}, 25 | comment="Data loading stage" 26 | ) 27 | ``` 28 | 29 | 30 | ### YAML 31 | 32 | ```yaml 33 | stages: 34 | - name: some_internal_stage 35 | type: internal 36 | owner: SYSADMIN 37 | encryption: 38 | type: SNOWFLAKE_SSE 39 | directory: 40 | enable: true 41 | tags: 42 | department: finance 43 | comment: Data loading stage 44 | ``` 45 | 46 | 47 | ## Fields 48 | 49 | * `name` (string, required) - The name of the internal stage. 50 | * `owner` (string or [Role](role.md)) - The owner role of the internal stage. Defaults to "SYSADMIN". 51 | * `encryption` (dict) - A dictionary specifying encryption settings. 
52 | * `directory` (dict) - A dictionary specifying directory usage settings. 53 | * `tags` (dict) - A dictionary of tags associated with the internal stage. 54 | * `comment` (string) - A comment for the internal stage. 55 | 56 | 57 | -------------------------------------------------------------------------------- /docs/resources/materialized_view.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # MaterializedView 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-materialized-view) 9 | 10 | A Materialized View in Snowflake is a database object that contains the results of a query. 11 | It is physically stored and automatically updated as data changes, providing faster access to data. 12 | 13 | 14 | ## Examples 15 | 16 | ### Python 17 | 18 | ```python 19 | materialized_view = MaterializedView( 20 | name="some_materialized_view", 21 | owner="SYSADMIN", 22 | secure=True, 23 | as_="SELECT * FROM some_table", 24 | ) 25 | ``` 26 | 27 | 28 | ### YAML 29 | 30 | ```yaml 31 | materialized_views: 32 | - name: some_materialized_view 33 | owner: SYSADMIN 34 | secure: true 35 | as_: SELECT * FROM some_table 36 | ``` 37 | 38 | 39 | ## Fields 40 | 41 | * `name` (string, required) - The name of the materialized view. 42 | * `owner` (string or [Role](role.md)) - The owner role of the materialized view. Defaults to "SYSADMIN". 43 | * `secure` (bool) - Specifies if the materialized view is secure. Defaults to False. 44 | * `columns` (list) - A list of dictionaries specifying column definitions. 45 | * `tags` (dict) - Tags associated with the materialized view. 46 | * `copy_grants` (bool) - Specifies if grants should be copied from the source. Defaults to False. 47 | * `comment` (string) - A comment for the materialized view. 48 | * `cluster_by` (list) - A list of expressions defining the clustering of the materialized view. 
49 | * `as_` (string, required) - The SELECT statement used to populate the materialized view. 50 | 51 | 52 | -------------------------------------------------------------------------------- /docs/resources/network_policy.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # NetworkPolicy 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-network-policy) 9 | 10 | A Network Policy in Snowflake defines a set of network rules and IP addresses 11 | that are allowed or blocked from accessing a Snowflake account. This helps in 12 | managing network traffic and securing access based on network policies. 13 | 14 | 15 | ## Examples 16 | 17 | ### Python 18 | 19 | ```python 20 | network_policy = NetworkPolicy( 21 | name="some_network_policy", 22 | allowed_network_rule_list=[NetworkRule(name="rule1"), NetworkRule(name="rule2")], 23 | blocked_network_rule_list=[NetworkRule(name="rule3")], 24 | allowed_ip_list=["192.168.1.1", "192.168.1.2"], 25 | blocked_ip_list=["10.0.0.1"], 26 | comment="Example network policy" 27 | ) 28 | ``` 29 | 30 | 31 | ### YAML 32 | 33 | ```yaml 34 | network_policies: 35 | - name: some_network_policy 36 | allowed_network_rule_list: 37 | - rule1 38 | - rule2 39 | blocked_network_rule_list: 40 | - rule3 41 | allowed_ip_list: ["192.168.1.1", "192.168.1.2"] 42 | blocked_ip_list: ["10.0.0.1"] 43 | comment: "Example network policy" 44 | ``` 45 | 46 | 47 | ## Fields 48 | 49 | * `name` (string, required) - The name of the network policy. 50 | * `allowed_network_rule_list` (list) - A list of allowed network rules. 51 | * `blocked_network_rule_list` (list) - A list of blocked network rules. 52 | * `allowed_ip_list` (list) - A list of allowed IP addresses. 53 | * `blocked_ip_list` (list) - A list of blocked IP addresses. 54 | * `comment` (string) - A comment about the network policy. 
55 | * `owner` (string or [Role](role.md)) - The owner role of the network policy. Defaults to "SECURITYADMIN". 56 | 57 | 58 | -------------------------------------------------------------------------------- /docs/resources/network_rule.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # NetworkRule 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-network-rule) 9 | 10 | A Network Rule in Snowflake defines a set of network addresses, such as IP addresses or hostnames, 11 | that can be allowed or denied access to a Snowflake account. This helps in managing network traffic 12 | and securing access based on network policies. 13 | 14 | 15 | ## Examples 16 | 17 | ### Python 18 | 19 | ```python 20 | network_rule = NetworkRule( 21 | name="some_network_rule", 22 | type="IPV4", 23 | value_list=["192.168.1.1", "192.168.1.2"], 24 | mode="INGRESS", 25 | comment="Example network rule" 26 | ) 27 | ``` 28 | 29 | 30 | ### YAML 31 | 32 | ```yaml 33 | network_rules: 34 | - name: some_network_rule 35 | type: IPV4 36 | value_list: ["192.168.1.1", "192.168.1.2"] 37 | mode: INGRESS 38 | comment: "Example network rule" 39 | ``` 40 | 41 | 42 | ## Fields 43 | 44 | * `name` (string, required) - The name of the network rule. 45 | * `type` (string or [NetworkIdentifierType](network_identifier_type.md), required) - The type of network identifier. Defaults to IPV4. 46 | * `value_list` (list) - A list of values associated with the network rule. 47 | * `mode` (string or [NetworkRuleMode](network_rule_mode.md)) - The mode of the network rule. Defaults to INGRESS. 48 | * `comment` (string) - A comment about the network rule. 49 | * `owner` (string or [Role](role.md)) - The owner role of the network rule. Defaults to "SYSADMIN". 
50 | 51 | 52 | -------------------------------------------------------------------------------- /docs/resources/oauth_secret.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # OAuthSecret 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-secret) 9 | 10 | A Secret defines a set of sensitive data that can be used for authentication or other purposes. 11 | This class defines an OAuth secret. 12 | 13 | 14 | ## Examples 15 | 16 | ### Python 17 | 18 | ```python 19 | # OAuth with client credentials flow: 20 | secret = OAuthSecret( 21 | name="some_secret", 22 | api_authentication="some_security_integration", 23 | oauth_scopes=["scope1", "scope2"], 24 | comment="some_comment", 25 | owner="SYSADMIN", 26 | ) 27 | # OAuth with authorization code grant flow: 28 | secret = OAuthSecret( 29 | name="another_secret", 30 | api_authentication="some_security_integration", 31 | oauth_refresh_token="34n;vods4nQsdg09wee4qnfvadH", 32 | oauth_refresh_token_expiry_time="2049-01-06 20:00:00", 33 | comment="some_comment", 34 | owner="SYSADMIN", 35 | ) 36 | ``` 37 | 38 | 39 | ### YAML 40 | 41 | ```yaml 42 | secrets: 43 | - name: some_secret 44 | secret_type: OAUTH2 45 | api_authentication: some_security_integration 46 | oauth_scopes: 47 | - scope1 48 | - scope2 49 | comment: some_comment 50 | owner: SYSADMIN 51 | - name: another_secret 52 | secret_type: OAUTH2 53 | api_authentication: some_security_integration 54 | oauth_refresh_token: 34n;vods4nQsdg09wee4qnfvadH 55 | oauth_refresh_token_expiry_time: 2049-01-06 20:00:00 56 | comment: some_comment 57 | owner: SYSADMIN 58 | ``` 59 | 60 | 61 | ## Fields 62 | 63 | * `name` (string, required) - The name of the secret. 64 | * `api_authentication` (string) - The security integration name for API authentication. 65 | * `oauth_scopes` (list) - The OAuth scopes for the secret. 
66 | * `oauth_refresh_token` (string) - The OAuth refresh token. 67 | * `oauth_refresh_token_expiry_time` (string) - The expiry time of the OAuth refresh token. 68 | * `comment` (string) - A comment for the secret. 69 | * `owner` (string or [Role](role.md)) - The owner of the secret. Defaults to SYSADMIN. 70 | 71 | 72 | -------------------------------------------------------------------------------- /docs/resources/object_store_catalog_integration.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # ObjectStoreCatalogIntegration 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-catalog-integration) 9 | 10 | Manages the integration of an object store as a catalog in Snowflake, supporting the ICEBERG table format. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | object_store_catalog_integration = ObjectStoreCatalogIntegration( 19 | name="some_catalog_integration", 20 | table_format="ICEBERG", 21 | enabled=True, 22 | comment="Integration for object storage." 23 | ) 24 | ``` 25 | 26 | 27 | ### YAML 28 | 29 | ```yaml 30 | catalog_integrations: 31 | - name: some_catalog_integration 32 | table_format: ICEBERG 33 | enabled: true 34 | comment: Integration for object storage. 35 | ``` 36 | 37 | 38 | ## Fields 39 | 40 | * `name` (string, required) - The name of the catalog integration. 41 | * `table_format` (string or [CatalogTableFormat](catalog_table_format.md), required) - The format of the table, defaults to ICEBERG. 42 | * `enabled` (bool) - Specifies whether the catalog integration is enabled. Defaults to True. 43 | * `comment` (string) - An optional comment describing the catalog integration. 
44 | 45 | 46 | -------------------------------------------------------------------------------- /docs/resources/packages_policy.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # PackagesPolicy 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-packages-policy) 9 | 10 | A Packages Policy defines a set of rules for allowed and blocked packages 11 | that are applied to user-defined functions and stored procedures. 12 | 13 | 14 | ## Examples 15 | 16 | ### Python 17 | 18 | ```python 19 | packages_policy = PackagesPolicy( 20 | name="some_packages_policy", 21 | allowlist=["numpy", "pandas"], 22 | blocklist=["os", "sys"], 23 | comment="Policy for data processing packages." 24 | ) 25 | ``` 26 | 27 | 28 | ### YAML 29 | 30 | ```yaml 31 | packages_policy: 32 | - name: some_packages_policy 33 | allowlist: 34 | - numpy 35 | - pandas 36 | blocklist: 37 | - os 38 | - sys 39 | comment: Policy for data processing packages. 40 | ``` 41 | 42 | 43 | ## Fields 44 | 45 | * `name` (string, required) - The name of the packages policy. 46 | * `language` (string or [Language](language.md)) - The programming language for the packages. Defaults to PYTHON. 47 | * `allowlist` (list) - A list of package specifications that are explicitly allowed. 48 | * `blocklist` (list) - A list of package specifications that are explicitly blocked. 49 | * `additional_creation_blocklist` (list) - A list of package specifications that are blocked during creation. 50 | * `comment` (string) - A comment or description for the packages policy. 51 | * `owner` (string or [Role](role.md)) - The owner role of the packages policy. Defaults to SYSADMIN. 
52 | 53 | 54 | -------------------------------------------------------------------------------- /docs/resources/parquet_file_format.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # ParquetFileFormat 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-file-format) 9 | 10 | A Parquet file format in Snowflake. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | file_format = ParquetFileFormat( 19 | name="some_file_format", 20 | owner="SYSADMIN", 21 | compression="AUTO", 22 | binary_as_text=True, 23 | trim_space=False, 24 | replace_invalid_characters=False, 25 | null_if=["NULL"], 26 | comment="This is a Parquet file format." 27 | ) 28 | ``` 29 | 30 | 31 | ### YAML 32 | 33 | ```yaml 34 | file_formats: 35 | - name: some_file_format 36 | owner: SYSADMIN 37 | compression: AUTO 38 | binary_as_text: true 39 | trim_space: false 40 | replace_invalid_characters: false 41 | null_if: 42 | - NULL 43 | comment: This is a Parquet file format. 44 | ``` 45 | 46 | 47 | ## Fields 48 | 49 | * `name` (string, required) - The name of the file format. 50 | * `owner` (string or [Role](role.md)) - The owner role of the file format. Defaults to "SYSADMIN". 51 | * `compression` (string) - The compression type for the file format. Defaults to "AUTO". 52 | * `binary_as_text` (bool) - Whether to interpret binary data as text. Defaults to True. 53 | * `trim_space` (bool) - Whether to trim spaces. Defaults to False. 54 | * `replace_invalid_characters` (bool) - Whether to replace invalid characters. Defaults to False. 55 | * `null_if` (list) - A list of strings to be interpreted as NULL. 56 | * `comment` (string) - A comment for the file format. 
57 | 58 | 59 | -------------------------------------------------------------------------------- /docs/resources/password_secret.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # PasswordSecret 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-secret) 9 | 10 | A Secret defines a set of sensitive data that can be used for authentication or other purposes. 11 | This class defines a password secret. 12 | 13 | 14 | ## Examples 15 | 16 | ### Python 17 | 18 | ```python 19 | secret = PasswordSecret( 20 | name="some_secret", 21 | username="some_username", 22 | password="some_password", 23 | comment="some_comment", 24 | owner="SYSADMIN", 25 | ) 26 | ``` 27 | 28 | 29 | ### YAML 30 | 31 | ```yaml 32 | secrets: 33 | - name: some_secret 34 | secret_type: PASSWORD 35 | username: some_username 36 | password: some_password 37 | comment: some_comment 38 | owner: SYSADMIN 39 | ``` 40 | 41 | 42 | ## Fields 43 | 44 | * `name` (string, required) - The name of the secret. 45 | * `username` (string) - The username for the secret. 46 | * `password` (string) - The password for the secret. 47 | * `comment` (string) - A comment for the secret. 48 | * `owner` (string or [Role](role.md)) - The owner of the secret. Defaults to SYSADMIN. 49 | 50 | 51 | -------------------------------------------------------------------------------- /docs/resources/pipe.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # Pipe 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-pipe) 9 | 10 | Represents a data ingestion pipeline in Snowflake, which automates the loading of data into tables. 
11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | pipe = Pipe( 19 | name="some_pipe", 20 | as_="COPY INTO some_table FROM @%some_stage", 21 | owner="SYSADMIN", 22 | auto_ingest=True, 23 | error_integration="some_integration", 24 | aws_sns_topic="some_topic", 25 | integration="some_integration", 26 | comment="This is a sample pipe" 27 | ) 28 | ``` 29 | 30 | 31 | ### YAML 32 | 33 | ```yaml 34 | pipes: 35 | - name: some_pipe 36 | as_: "COPY INTO some_table FROM @%some_stage" 37 | owner: SYSADMIN 38 | auto_ingest: true 39 | error_integration: some_integration 40 | aws_sns_topic: some_topic 41 | integration: some_integration 42 | comment: "This is a sample pipe" 43 | ``` 44 | 45 | 46 | ## Fields 47 | 48 | * `name` (string, required) - The name of the pipe. 49 | * `as_` (string, required) - The SQL statement that defines the data loading operation. 50 | * `owner` (string or [Role](role.md)) - The owner role of the pipe. Defaults to "SYSADMIN". 51 | * `auto_ingest` (bool) - Specifies if the pipe automatically ingests data when files are added to the stage. Defaults to None. 52 | * `error_integration` (string) - The name of the integration used for error notifications. Defaults to None. 53 | * `aws_sns_topic` (string) - The AWS SNS topic where notifications are sent. Defaults to None. 54 | * `integration` (string) - The integration used for data loading. Defaults to None. 55 | * `comment` (string) - A comment for the pipe. Defaults to None. 56 | 57 | 58 | -------------------------------------------------------------------------------- /docs/resources/replication_group.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # ReplicationGroup 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-replication-group) 9 | 10 | A replication group in Snowflake. 
11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | replication_group = ReplicationGroup( 19 | name="some_replication_group", 20 | object_types=["DATABASES"], 21 | allowed_accounts=["account1", "account2"], 22 | ) 23 | ``` 24 | 25 | 26 | ### YAML 27 | 28 | ```yaml 29 | replication_groups: 30 | - name: some_replication_group 31 | object_types: 32 | - DATABASES 33 | allowed_accounts: 34 | - account1 35 | - account2 36 | ``` 37 | 38 | 39 | ## Fields 40 | 41 | * `name` (string, required) - The name of the replication group. 42 | * `object_types` (list, required) - The object types to be replicated. 43 | * `allowed_accounts` (list, required) - The accounts allowed to replicate. 44 | * `allowed_databases` (list) - The databases allowed to replicate. 45 | * `allowed_shares` (list) - The shares allowed to replicate. 46 | * `allowed_integration_types` (list) - The integration types allowed to replicate. 47 | * `ignore_edition_check` (bool) - Whether to ignore the edition check. 48 | * `replication_schedule` (string) - The replication schedule. 49 | * `owner` (string or [Role](role.md)) - The owner of the replication group. Defaults to "SYSADMIN". 50 | 51 | 52 | -------------------------------------------------------------------------------- /docs/resources/resource_monitor.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # ResourceMonitor 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-resource-monitor) 9 | 10 | Manages the monitoring of resource usage within an account. 
11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | resource_monitor = ResourceMonitor( 19 | name="some_resource_monitor", 20 | credit_quota=1000, 21 | frequency="DAILY", 22 | start_timestamp="2049-01-01 00:00", 23 | end_timestamp="2049-12-31 23:59", 24 | notify_users=["user1", "user2"] 25 | ) 26 | ``` 27 | 28 | 29 | ### YAML 30 | 31 | ```yaml 32 | resource_monitors: 33 | - name: some_resource_monitor 34 | credit_quota: 1000 35 | frequency: DAILY 36 | start_timestamp: "2049-01-01 00:00" 37 | end_timestamp: "2049-12-31 23:59" 38 | notify_users: 39 | - user1 40 | - user2 41 | ``` 42 | 43 | 44 | ## Fields 45 | 46 | * `name` (string, required) - The name of the resource monitor. 47 | * `credit_quota` (int) - The amount of credits that can be used by this monitor. Defaults to None. 48 | * `frequency` (string or [ResourceMonitorFrequency](resource_monitor_frequency.md)) - The frequency of monitoring. Defaults to None. 49 | * `start_timestamp` (string) - The start time for the monitoring period. Defaults to None. 50 | * `end_timestamp` (string) - The end time for the monitoring period. Defaults to None. 51 | * `notify_users` (list) - A list of users to notify when thresholds are reached. Defaults to None. 52 | 53 | 54 | -------------------------------------------------------------------------------- /docs/resources/role.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # Role 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-role) 9 | 10 | A role in Snowflake defines a set of access controls and permissions. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | role = Role( 19 | name="some_role", 20 | owner="USERADMIN", 21 | comment="This is a sample role.", 22 | ) 23 | ``` 24 | 25 | 26 | ### YAML 27 | 28 | ```yaml 29 | roles: 30 | - name: some_role 31 | owner: USERADMIN 32 | comment: This is a sample role. 
33 | ``` 34 | 35 | 36 | ## Fields 37 | 38 | * `name` (string, required) - The name of the role. 39 | * `owner` (string) - The owner of the role. Defaults to "USERADMIN". 40 | * `tags` (dict) - Tags associated with the role. 41 | * `comment` (string) - A comment for the role. 42 | 43 | 44 | -------------------------------------------------------------------------------- /docs/resources/role_grant.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # RoleGrant 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/grant-role) 9 | 10 | Represents a grant of a role to another role or user in Snowflake. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | # Grant to Role: 19 | role_grant = RoleGrant(role="somerole", to_role="someotherrole") 20 | role_grant = RoleGrant(role="somerole", to=Role(name="someotherrole")) 21 | # Grant to User: 22 | role_grant = RoleGrant(role="somerole", to_user="someuser") 23 | role_grant = RoleGrant(role="somerole", to=User(name="someuser")) 24 | ``` 25 | 26 | 27 | ### YAML 28 | 29 | ```yaml 30 | role_grants: 31 | - role: somerole 32 | to_role: someotherrole 33 | - role: somerole 34 | to_user: someuser 35 | ``` 36 | 37 | 38 | ## Fields 39 | 40 | * `role` (string or [Role](role.md), required) - The role to be granted. 41 | * `to_role` (string or [Role](role.md)) - The role to which the role is granted. 42 | * `to_user` (string or [User](user.md)) - The user to which the role is granted. 
43 | 44 | 45 | -------------------------------------------------------------------------------- /docs/resources/s3storage_integration.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # S3StorageIntegration 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-storage-integration) 9 | 10 | Manages the integration of Snowflake with S3 storage. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | s3_storage_integration = S3StorageIntegration( 19 | name="some_s3_storage_integration", 20 | enabled=True, 21 | storage_aws_role_arn="arn:aws:iam::123456789012:role/MyS3AccessRole", 22 | storage_allowed_locations=["s3://mybucket/myfolder/"], 23 | storage_blocked_locations=["s3://mybucket/myblockedfolder/"], 24 | storage_aws_object_acl="bucket-owner-full-control", 25 | comment="This is a sample S3 storage integration." 26 | ) 27 | ``` 28 | 29 | 30 | ### YAML 31 | 32 | ```yaml 33 | s3_storage_integrations: 34 | - name: some_s3_storage_integration 35 | enabled: true 36 | storage_aws_role_arn: "arn:aws:iam::123456789012:role/MyS3AccessRole" 37 | storage_allowed_locations: 38 | - "s3://mybucket/myfolder/" 39 | storage_blocked_locations: 40 | - "s3://mybucket/myblockedfolder/" 41 | storage_aws_object_acl: "bucket-owner-full-control" 42 | comment: "This is a sample S3 storage integration." 43 | ``` 44 | 45 | 46 | ## Fields 47 | 48 | * `name` (string, required) - The name of the storage integration. 49 | * `enabled` (bool, required) - Whether the storage integration is enabled. Defaults to True. 50 | * `storage_aws_role_arn` (string, required) - The AWS IAM role ARN to access the S3 bucket. 51 | * `storage_allowed_locations` (list, required) - A list of allowed locations for storage in the format 's3://&lt;bucket&gt;/&lt;path&gt;/'. 52 | * `storage_blocked_locations` (list) - A list of blocked locations for storage in the format 's3://&lt;bucket&gt;/&lt;path&gt;/'. Defaults to an empty list. 
53 | * `storage_aws_object_acl` (string) - The ACL policy for objects stored in S3. Defaults to 'bucket-owner-full-control'. 54 | * `type` (string) - The type of storage integration. Defaults to 'EXTERNAL_STAGE'. 55 | * `owner` (string or [Role](role.md)) - The owner role of the storage integration. Defaults to 'ACCOUNTADMIN'. 56 | * `comment` (string) - An optional comment about the storage integration. 57 | 58 | 59 | -------------------------------------------------------------------------------- /docs/resources/schema.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # Schema 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-schema) 9 | 10 | Represents a schema in Snowflake, which is a logical grouping of database objects such as tables, views, and stored procedures. Schemas are used to organize and manage such objects within a database. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | schema = Schema( 19 | name="some_schema", 20 | transient=True, 21 | managed_access=True, 22 | data_retention_time_in_days=7, 23 | max_data_extension_time_in_days=28, 24 | default_ddl_collation="utf8", 25 | tags={"project": "analytics"}, 26 | owner="SYSADMIN", 27 | comment="Schema for analytics project." 28 | ) 29 | ``` 30 | 31 | 32 | ### YAML 33 | 34 | ```yaml 35 | schemas: 36 | - name: some_schema 37 | transient: true 38 | managed_access: true 39 | data_retention_time_in_days: 7 40 | max_data_extension_time_in_days: 28 41 | default_ddl_collation: utf8 42 | tags: 43 | project: analytics 44 | owner: SYSADMIN 45 | comment: Schema for analytics project. 46 | ``` 47 | 48 | 49 | ## Fields 50 | 51 | * `name` (string, required) - The name of the schema. 52 | * `transient` (bool) - Specifies if the schema is transient. Defaults to False. 53 | * `managed_access` (bool) - Specifies if the schema has managed access. Defaults to False. 
54 | * `data_retention_time_in_days` (int) - The number of days to retain data. Defaults to 1. 55 | * `max_data_extension_time_in_days` (int) - The maximum number of days to extend data retention. Defaults to 14. 56 | * `default_ddl_collation` (string) - The default DDL collation setting. 57 | * `tags` (dict) - Tags associated with the schema. 58 | * `owner` (string or [Role](role.md)) - The owner of the schema. Defaults to "SYSADMIN". 59 | * `comment` (string) - A comment about the schema. 60 | 61 | 62 | -------------------------------------------------------------------------------- /docs/resources/secret.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # Secret 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-secret) 9 | 10 | A Secret defines a set of sensitive data that can be used for authentication or other purposes. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | secret = Secret( 19 | name="some_secret", 20 | type="OAUTH2", 21 | api_authentication="some_security_integration", 22 | oauth_scopes=["scope1", "scope2"], 23 | oauth_refresh_token="some_refresh_token", 24 | oauth_refresh_token_expiry_time="some_expiry_time", 25 | username="some_username", 26 | password="some_password", 27 | secret_string="some_secret_string", 28 | comment="some_comment", 29 | owner="SYSADMIN", 30 | ) 31 | ``` 32 | 33 | 34 | ### YAML 35 | 36 | ```yaml 37 | secrets: 38 | - name: some_secret 39 | type: OAUTH2 40 | api_authentication: some_security_integration 41 | oauth_scopes: 42 | - scope1 43 | - scope2 44 | oauth_refresh_token: some_refresh_token 45 | oauth_refresh_token_expiry_time: some_expiry_time 46 | username: some_username 47 | password: some_password 48 | secret_string: some_secret_string 49 | comment: some_comment 50 | owner: SYSADMIN 51 | ``` 52 | 53 | 54 | ## Fields 55 | 56 | * `name` (string, required) - The name of the 
secret. 57 | * `type` (string or [SecretType](secret_type.md), required) - The type of the secret. 58 | * `api_authentication` (string) - The security integration name for API authentication. 59 | * `oauth_scopes` (list) - The OAuth scopes for the secret. 60 | * `oauth_refresh_token` (string) - The OAuth refresh token. 61 | * `oauth_refresh_token_expiry_time` (string) - The expiry time of the OAuth refresh token. 62 | * `username` (string) - The username for the secret. 63 | * `password` (string) - The password for the secret. 64 | * `secret_string` (string) - The secret string. 65 | * `comment` (string) - A comment for the secret. 66 | * `owner` (string or [Role](role.md)) - The owner of the secret. Defaults to SYSADMIN. 67 | 68 | 69 | -------------------------------------------------------------------------------- /docs/resources/sequence.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # Sequence 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-sequence) 9 | 10 | Manages the creation and configuration of sequences in Snowflake, which are objects that generate numeric values according to a specified sequence. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | sequence = Sequence( 19 | name="some_sequence", 20 | owner="SYSADMIN", 21 | start=100, 22 | increment=10, 23 | comment="This is a sample sequence." 24 | ) 25 | ``` 26 | 27 | 28 | ### YAML 29 | 30 | ```yaml 31 | sequences: 32 | - name: some_sequence 33 | owner: SYSADMIN 34 | start: 100 35 | increment: 10 36 | comment: This is a sample sequence. 37 | ``` 38 | 39 | 40 | ## Fields 41 | 42 | * `name` (string, required) - The name of the sequence. 43 | * `owner` (string or [Role](role.md)) - The owner role of the sequence. Defaults to "SYSADMIN". 44 | * `start` (int) - The starting value of the sequence. 
45 | * `increment` (int) - The value by which the sequence is incremented. 46 | * `comment` (string) - A comment for the sequence. 47 | 48 | 49 | -------------------------------------------------------------------------------- /docs/resources/session_policy.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # SessionPolicy 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-session-policy) 9 | 10 | Manages session policies in Snowflake, which define timeout settings for user sessions to enhance security. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | session_policy = SessionPolicy( 19 | name="some_session_policy", 20 | session_idle_timeout_mins=30, 21 | session_ui_idle_timeout_mins=10, 22 | comment="Policy for standard users." 23 | ) 24 | ``` 25 | 26 | 27 | ### YAML 28 | 29 | ```yaml 30 | session_policies: 31 | - name: some_session_policy 32 | session_idle_timeout_mins: 30 33 | session_ui_idle_timeout_mins: 10 34 | comment: Policy for standard users. 35 | ``` 36 | 37 | 38 | ## Fields 39 | 40 | * `name` (string, required) - The name of the session policy. 41 | * `session_idle_timeout_mins` (int) - The maximum amount of time a session can remain idle before it is automatically terminated. 42 | * `session_ui_idle_timeout_mins` (int) - The maximum amount of time a user interface session can remain idle before it is automatically terminated. 43 | * `comment` (string) - A description or comment about the session policy. 
44 | 45 | 46 | -------------------------------------------------------------------------------- /docs/resources/share.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # Share 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-share) 9 | 10 | Represents a share resource in Snowflake, which allows sharing data across Snowflake accounts. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | share = Share( 19 | name="some_share", 20 | comment="This is a snowflake share." 21 | ) 22 | ``` 23 | 24 | 25 | ### YAML 26 | 27 | ```yaml 28 | shares: 29 | - name: some_share 30 | comment: This is a snowflake share. 31 | ``` 32 | 33 | 34 | ## Fields 35 | 36 | * `name` (string, required) - The name of the share. 37 | * `owner` (string or [Role](role.md)) - The owner of the share. Defaults to "ACCOUNTADMIN". 38 | * `comment` (string) - A comment about the share. 39 | 40 | 41 | -------------------------------------------------------------------------------- /docs/resources/snowflake_partner_oauth_security_integration.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # SnowflakePartnerOAuthSecurityIntegration 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-security-integration) 9 | 10 | A security integration in Snowflake designed to manage external OAuth clients for authentication purposes. 11 | This integration supports specific OAuth clients such as Looker, Tableau Desktop, and Tableau Server. 
12 | 13 | 14 | ## Examples 15 | 16 | ### Python 17 | 18 | ```python 19 | snowflake_partner_oauth_security_integration = SnowflakePartnerOAuthSecurityIntegration( 20 | name="some_security_integration", 21 | enabled=True, 22 | oauth_client="LOOKER", 23 | oauth_client_secret="secret123", 24 | oauth_redirect_uri="https://example.com/oauth/callback", 25 | oauth_issue_refresh_tokens=True, 26 | oauth_refresh_token_validity=7776000, 27 | comment="Integration for Looker OAuth" 28 | ) 29 | ``` 30 | 31 | 32 | ### YAML 33 | 34 | ```yaml 35 | security_integrations: 36 | - name: some_security_integration 37 | enabled: true 38 | oauth_client: LOOKER 39 | oauth_client_secret: secret123 40 | oauth_redirect_uri: https://example.com/oauth/callback 41 | oauth_issue_refresh_tokens: true 42 | oauth_refresh_token_validity: 7776000 43 | comment: Integration for Looker OAuth 44 | ``` 45 | 46 | 47 | ## Fields 48 | 49 | * `name` (string, required) - The name of the security integration. 50 | * `enabled` (bool) - Specifies if the security integration is enabled. Defaults to True. 51 | * `oauth_client` (string or [OAuthClient](oauth_client.md)) - The OAuth client used for authentication. Supported clients are 'LOOKER', 'TABLEAU_DESKTOP', and 'TABLEAU_SERVER'. 52 | * `oauth_client_secret` (string) - The secret associated with the OAuth client. 53 | * `oauth_redirect_uri` (string) - The redirect URI configured for the OAuth client. 54 | * `oauth_issue_refresh_tokens` (bool) - Indicates if refresh tokens should be issued. Defaults to True. 55 | * `oauth_refresh_token_validity` (int) - The validity period of the refresh token in seconds. 56 | * `comment` (string) - A comment about the security integration. 
57 | 58 | 59 | -------------------------------------------------------------------------------- /docs/resources/snowservices_oauth_security_integration.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # SnowservicesOAuthSecurityIntegration 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-security-integration) 9 | 10 | Manages OAuth security integrations for Snowservices in Snowflake, allowing external authentication mechanisms. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | snowservices_oauth = SnowservicesOAuthSecurityIntegration( 19 | name="some_security_integration", 20 | enabled=True, 21 | comment="Integration for external OAuth services." 22 | ) 23 | ``` 24 | 25 | 26 | ### YAML 27 | 28 | ```yaml 29 | snowservices_oauth: 30 | - name: some_security_integration 31 | enabled: true 32 | comment: Integration for external OAuth services. 33 | ``` 34 | 35 | 36 | ## Fields 37 | 38 | * `name` (string, required) - The name of the security integration. 39 | * `enabled` (bool) - Specifies if the security integration is enabled. Defaults to True. 40 | * `comment` (string) - A comment about the security integration. 41 | 42 | 43 | -------------------------------------------------------------------------------- /docs/resources/stage_stream.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # StageStream 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-stream) 9 | 10 | Represents a stream on a stage in Snowflake, which allows for capturing data changes on the stage. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | stream = StageStream( 19 | name="some_stream", 20 | on_stage="some_stage", 21 | owner="SYSADMIN", 22 | copy_grants=True, 23 | comment="This is a sample stream." 
24 | ) 25 | ``` 26 | 27 | 28 | ### YAML 29 | 30 | ```yaml 31 | streams: 32 | - name: some_stream 33 | on_stage: some_stage 34 | owner: SYSADMIN 35 | copy_grants: true 36 | comment: This is a sample stream. 37 | ``` 38 | 39 | 40 | ## Fields 41 | 42 | * `name` (string, required) - The name of the stream. 43 | * `on_stage` (string, required) - The name of the stage the stream is based on. 44 | * `owner` (string or [Role](role.md)) - The role that owns the stream. Defaults to "SYSADMIN". 45 | * `copy_grants` (bool) - Whether to copy grants from the source stage to the stream. 46 | * `comment` (string) - An optional description for the stream. 47 | 48 | 49 | -------------------------------------------------------------------------------- /docs/resources/table.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # Table 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-table) 9 | 10 | A table in Snowflake. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | table = Table( 19 | name="some_table", 20 | columns=[{"name": "col1", "data_type": "STRING"}], 21 | owner="SYSADMIN", 22 | ) 23 | ``` 24 | 25 | 26 | ### YAML 27 | 28 | ```yaml 29 | tables: 30 | - name: some_table 31 | columns: 32 | - name: col1 33 | data_type: STRING 34 | owner: SYSADMIN 35 | ``` 36 | 37 | 38 | ## Fields 39 | 40 | * `name` (string, required) - The name of the table. 41 | * `columns` (list, required) - The columns of the table. 42 | * `constraints` (list) - The constraints of the table. 43 | * `transient` (bool) - Whether the table is transient. 44 | * `cluster_by` (list) - The clustering keys for the table. 45 | * `enable_schema_evolution` (bool) - Whether schema evolution is enabled. Defaults to False. 46 | * `data_retention_time_in_days` (int) - The data retention time in days. 
47 | * `max_data_extension_time_in_days` (int) - The maximum data extension time in days. 48 | * `change_tracking` (bool) - Whether change tracking is enabled. Defaults to False. 49 | * `default_ddl_collation` (string) - The default DDL collation. 50 | * `copy_grants` (bool) - Whether to copy grants. Defaults to False. 51 | * `row_access_policy` (dict) - The row access policy. 52 | * `tags` (dict) - The tags for the table. 53 | * `owner` (string or [Role](role.md)) - The owner role of the table. Defaults to SYSADMIN. 54 | * `comment` (string) - A comment for the table. 55 | 56 | 57 | -------------------------------------------------------------------------------- /docs/resources/table_stream.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # TableStream 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-stream) 9 | 10 | Represents a stream on a table in Snowflake, which allows for change data capture on the table. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | stream = TableStream( 19 | name="some_stream", 20 | on_table="some_table", 21 | owner="SYSADMIN", 22 | copy_grants=True, 23 | at={"TIMESTAMP": "2022-01-01 00:00:00"}, 24 | before={"STREAM": "some_other_stream"}, 25 | append_only=False, 26 | show_initial_rows=True, 27 | comment="This is a sample stream." 28 | ) 29 | ``` 30 | 31 | 32 | ### YAML 33 | 34 | ```yaml 35 | streams: 36 | - name: some_stream 37 | on_table: some_table 38 | owner: SYSADMIN 39 | copy_grants: true 40 | at: 41 | TIMESTAMP: "2022-01-01 00:00:00" 42 | before: 43 | STREAM: some_other_stream 44 | append_only: false 45 | show_initial_rows: true 46 | comment: This is a sample stream. 47 | ``` 48 | 49 | 50 | ## Fields 51 | 52 | * `name` (string, required) - The name of the stream. 53 | * `on_table` (string, required) - The name of the table the stream is based on. 
54 | * `owner` (string or [Role](role.md)) - The role that owns the stream. Defaults to "SYSADMIN". 55 | * `copy_grants` (bool) - Whether to copy grants from the source table to the stream. 56 | * `at` (dict) - A dictionary specifying the point in time for the stream to start, using keys like TIMESTAMP, OFFSET, STATEMENT, or STREAM. 57 | * `before` (dict) - A dictionary specifying the point in time for the stream to start, similar to 'at' but defining a point before the specified time. 58 | * `append_only` (bool) - If set to True, the stream records only append operations. 59 | * `show_initial_rows` (bool) - If set to True, the stream includes the initial rows of the table at the time of stream creation. 60 | * `comment` (string) - An optional description for the stream. 61 | 62 | 63 | -------------------------------------------------------------------------------- /docs/resources/tag.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # Tag 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-tag) 9 | 10 | Represents a tag in Snowflake, which can be used to label various resources for better management and categorization. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | tag = Tag( 19 | name="cost_center", 20 | allowed_values=["finance", "engineering", "sales"], 21 | comment="This is a sample tag", 22 | ) 23 | ``` 24 | 25 | 26 | ### YAML 27 | 28 | ```yaml 29 | tags: 30 | - name: cost_center 31 | comment: This is a sample tag 32 | allowed_values: 33 | - finance 34 | - engineering 35 | - sales 36 | ``` 37 | 38 | 39 | ## Fields 40 | 41 | * `name` (string, required) - The name of the tag. 42 | * `allowed_values` (list) - A list of allowed values for the tag. 43 | * `comment` (string) - A comment or description for the tag. 
44 | 45 | 46 | -------------------------------------------------------------------------------- /docs/resources/task.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # Task 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-task) 9 | 10 | Represents a scheduled task in Snowflake that performs a specified SQL statement at a recurring interval. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | task = Task( 19 | name="some_task", 20 | warehouse="some_warehouse", 21 | schedule="USING CRON 0 9 * * * UTC", 22 | state="SUSPENDED", 23 | as_="SELECT 1" 24 | ) 25 | ``` 26 | 27 | 28 | ### YAML 29 | 30 | ```yaml 31 | tasks: 32 | - name: some_task 33 | warehouse: some_warehouse 34 | schedule: "USING CRON 0 9 * * * UTC" 35 | state: SUSPENDED 36 | as_: | 37 | SELECT 1 38 | ``` 39 | 40 | 41 | ## Fields 42 | 43 | * `warehouse` (string or [Warehouse](warehouse.md)) - The warehouse used by the task. 44 | * `user_task_managed_initial_warehouse_size` (string or [WarehouseSize](warehouse_size.md)) - The initial warehouse size when the task is managed by the user. Defaults to None. 45 | * `schedule` (string) - The schedule on which the task runs. 46 | * `config` (string) - Configuration settings for the task. 47 | * `allow_overlapping_execution` (bool) - Whether the task can have overlapping executions. 48 | * `user_task_timeout_ms` (int) - The timeout in milliseconds after which the task is aborted. 49 | * `suspend_task_after_num_failures` (int) - The number of consecutive failures after which the task is suspended. 50 | * `error_integration` (string) - The integration used for error handling. 51 | * `copy_grants` (bool) - Whether to copy grants from the referenced objects. 52 | * `comment` (string) - A comment for the task. 53 | * `after` (list) - A list of tasks that must be completed before this task runs. 
54 | * `when` (string) - A conditional expression that determines when the task runs. 55 | * `as_` (string) - The SQL statement that the task executes. 56 | * `state` (string or [TaskState](task_state.md), required) - The initial state of the task. Defaults to SUSPENDED. 57 | 58 | 59 | -------------------------------------------------------------------------------- /docs/resources/user.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # User 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-user) 9 | 10 | A user in Snowflake. 11 | 12 | 13 | ## Examples 14 | 15 | ### Python 16 | 17 | ```python 18 | user = User( 19 | name="some_user", 20 | owner="USERADMIN", 21 | email="some.user@example.com", 22 | type="PERSON", 23 | ) 24 | ``` 25 | 26 | 27 | ### YAML 28 | 29 | ```yaml 30 | users: 31 | - name: some_user 32 | owner: USERADMIN 33 | email: some.user@example.com 34 | type: PERSON 35 | ``` 36 | 37 | 38 | ## Fields 39 | 40 | * `name` (string, required) - The name of the user. 41 | * `owner` (string or [Role](role.md)) - The owner of the user. Defaults to "USERADMIN". 42 | * `password` (string) - The password of the user. 43 | * `login_name` (string) - The login name of the user. Defaults to the name in uppercase. 44 | * `display_name` (string) - The display name of the user. Defaults to the name in lowercase. 45 | * `first_name` (string) - The first name of the user. 46 | * `middle_name` (string) - The middle name of the user. 47 | * `last_name` (string) - The last name of the user. 48 | * `email` (string) - The email of the user. 49 | * `must_change_password` (bool) - Whether the user must change their password. Defaults to False. 50 | * `disabled` (bool) - Whether the user is disabled. Defaults to False. 51 | * `days_to_expiry` (int) - The number of days until the user's password expires. 
52 | * `mins_to_unlock` (int) - The number of minutes until the user's account is unlocked. 53 | * `default_warehouse` (string) - The default warehouse for the user. 54 | * `default_namespace` (string) - The default namespace for the user. 55 | * `default_role` (string) - The default role for the user. 56 | * `default_secondary_roles` (list) - The default secondary roles for the user. 57 | * `mins_to_bypass_mfa` (int) - The number of minutes until the user can bypass Multi-Factor Authentication. 58 | * `rsa_public_key` (string) - The RSA public key for the user. 59 | * `rsa_public_key_2` (string) - The second RSA public key for the user, used for key-pair rotation. 60 | * `comment` (string) - A comment for the user. 61 | * `network_policy` (string) - The network policy for the user. 62 | * `type` (string or [UserType](user_type.md)) - The type of the user. Defaults to "NULL". 63 | * `tags` (dict) - Tags for the user. 64 | 65 | 66 | -------------------------------------------------------------------------------- /docs/resources/view.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # View 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-view) 9 | 10 | Represents a view in Snowflake, which is a virtual table created by a stored query on the data. 11 | Views are used to simplify complex queries, improve security, or enhance performance. 12 | 13 | 14 | ## Examples 15 | 16 | ### Python 17 | 18 | ```python 19 | view = View( 20 | name="some_view", 21 | owner="SYSADMIN", 22 | secure=True, 23 | as_="SELECT * FROM some_table" 24 | ) 25 | ``` 26 | 27 | 28 | ### YAML 29 | 30 | ```yaml 31 | views: 32 | - name: some_view 33 | owner: SYSADMIN 34 | secure: true 35 | as_: "SELECT * FROM some_table" 36 | ``` 37 | 38 | 39 | ## Fields 40 | 41 | * `name` (string, required) - The name of the view. 42 | * `owner` (string or [Role](role.md)) - The owner role of the view. Defaults to "SYSADMIN". 
43 | * `secure` (bool) - Specifies if the view is secure. 44 | * `volatile` (bool) - Specifies if the view is volatile. 45 | * `recursive` (bool) - Specifies if the view is recursive. 46 | * `columns` (list) - A list of dictionaries specifying column details. 47 | * `tags` (dict) - A dictionary of tags associated with the view. 48 | * `change_tracking` (bool) - Specifies if change tracking is enabled. 49 | * `copy_grants` (bool) - Specifies if grants should be copied from the base table. 50 | * `comment` (string) - A comment for the view. 51 | * `as_` (string) - The SELECT statement defining the view. 52 | 53 | 54 | -------------------------------------------------------------------------------- /docs/resources/view_stream.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: >- 3 | 4 | --- 5 | 6 | # ViewStream 7 | 8 | [Snowflake Documentation](https://docs.snowflake.com/en/sql-reference/sql/create-stream) 9 | 10 | Represents a stream on a view in Snowflake, allowing for real-time data processing and querying. 11 | This stream can be configured with various options such as time travel, append-only mode, and initial row visibility. 12 | 13 | 14 | ## Examples 15 | 16 | ### Python 17 | 18 | ```python 19 | view_stream = ViewStream( 20 | name="some_stream", 21 | on_view="some_view", 22 | owner="SYSADMIN", 23 | copy_grants=True, 24 | at={"TIMESTAMP": "2022-01-01 00:00:00"}, 25 | before={"STREAM": "some_other_stream"}, 26 | append_only=False, 27 | show_initial_rows=True, 28 | comment="This is a sample stream on a view." 29 | ) 30 | ``` 31 | 32 | 33 | ### YAML 34 | 35 | ```yaml 36 | streams: 37 | - name: some_stream 38 | on_view: some_view 39 | owner: SYSADMIN 40 | copy_grants: true 41 | at: 42 | TIMESTAMP: "2022-01-01 00:00:00" 43 | before: 44 | STREAM: some_other_stream 45 | append_only: false 46 | show_initial_rows: true 47 | comment: This is a sample stream on a view. 
48 | ``` 49 | 50 | 51 | ## Fields 52 | 53 | * `name` (string, required) - The name of the stream. 54 | * `on_view` (string, required) - The name of the view the stream is based on. 55 | * `owner` (string or [Role](role.md)) - The role that owns the stream. Defaults to 'SYSADMIN'. 56 | * `copy_grants` (bool) - Whether to copy grants from the view to the stream. 57 | * `at` (dict) - A dictionary specifying the point in time for the stream to start, using keys like TIMESTAMP, OFFSET, STATEMENT, or STREAM. 58 | * `before` (dict) - A dictionary specifying the point in time for the stream to start, similar to 'at' but defining a point before the specified time. 59 | * `append_only` (bool) - If set to True, the stream records only append operations. 60 | * `show_initial_rows` (bool) - If set to True, the stream includes the initial rows of the view at the time of stream creation. 61 | * `comment` (string) - An optional description for the stream. 62 | 63 | 64 | -------------------------------------------------------------------------------- /examples/account-parameters.yml: -------------------------------------------------------------------------------- 1 | name: account-parameters-titan-example 2 | run_mode: sync 3 | allowlist: 4 | - account parameter 5 | 6 | account_parameters: 7 | - name: ALLOW_CLIENT_MFA_CACHING 8 | value: true 9 | - name: ALLOW_ID_TOKEN 10 | value: true 11 | - name: TIMEZONE 12 | value: "America/New_York" -------------------------------------------------------------------------------- /examples/dicom-image-classification-to-detect-pneumonia.yml: -------------------------------------------------------------------------------- 1 | roles: 2 | - name: data_scientist 3 | comment: "Role for users who perform image classification tasks and model training." 4 | - name: app_developer 5 | comment: "Role for users who manage the application and UDF deployment." 
6 | 7 | role_grants: 8 | - role: data_scientist 9 | roles: 10 | - SYSADMIN 11 | 12 | databases: 13 | - name: IND_SOL_DICOM 14 | owner: data_scientist 15 | comment: "Database dedicated to storing DICOM images and related data for pneumonia detection." 16 | 17 | schemas: 18 | - name: DICOM 19 | database: IND_SOL_DICOM 20 | owner: data_scientist 21 | comment: "Schema for organizing tables and UDFs related to DICOM image processing." 22 | 23 | warehouses: 24 | - name: snowopt_wh 25 | warehouse_size: XSMALL 26 | auto_suspend: 30 27 | auto_resume: true 28 | initially_suspended: true 29 | comment: "Warehouse optimized for model training and heavy computations." 30 | - name: standard_wh 31 | warehouse_size: XSMALL 32 | auto_suspend: 30 33 | auto_resume: true 34 | initially_suspended: true 35 | comment: "General purpose warehouse for regular workloads." 36 | 37 | stages: 38 | - name: dicom_images_stage 39 | database: IND_SOL_DICOM 40 | schema: DICOM 41 | owner: data_scientist 42 | type: external 43 | url: "s3://your-bucket-name" 44 | credentials: 45 | aws_key_id: "your-aws-key-id" 46 | aws_secret_key: "your-aws-secret-key" 47 | comment: "External stage for storing and accessing DICOM images from S3." 
48 | 49 | grants: 50 | - to_role: data_scientist 51 | priv: all 52 | on_warehouse: snowopt_wh 53 | - to_role: data_scientist 54 | priv: all 55 | on_database: IND_SOL_DICOM 56 | - to_role: app_developer 57 | priv: usage 58 | on_warehouse: standard_wh 59 | - to_role: app_developer 60 | priv: usage 61 | on_database: IND_SOL_DICOM 62 | 63 | -------------------------------------------------------------------------------- /examples/for-each-example.yml: -------------------------------------------------------------------------------- 1 | vars: 2 | - name: schemas 3 | type: list 4 | default: 5 | - schema1 6 | - schema2 7 | 8 | roles: 9 | - for_each: var.schemas 10 | name: "AR_{{ each.value }}_ROLE" 11 | -------------------------------------------------------------------------------- /examples/snowflake-tutorials-create-your-first-iceberg-table.yml: -------------------------------------------------------------------------------- 1 | name: snowflake-tutorials-create-your-first-iceberg-table 2 | run_mode: create-or-update 3 | 4 | vars: 5 | - name: storage_role_arn 6 | type: string 7 | default: arn:aws:iam::123456789012:role/some_role_name 8 | sensitive: true 9 | - name: storage_base_url 10 | type: string 11 | default: s3://my-s3-bucket/my-s3-path 12 | - name: storage_aws_external_id 13 | type: string 14 | 15 | roles: 16 | - name: iceberg_tutorial_role 17 | 18 | role_grants: 19 | - role: iceberg_tutorial_role 20 | roles: 21 | - SYSADMIN 22 | 23 | databases: 24 | - name: iceberg_tutorial_db 25 | owner: iceberg_tutorial_role 26 | external_volume: iceberg_external_volume 27 | catalog: SNOWFLAKE 28 | 29 | warehouses: 30 | - name: iceberg_tutorial_wh 31 | owner: iceberg_tutorial_role 32 | warehouse_size: XSMALL 33 | auto_suspend: 30 34 | auto_resume: true 35 | initially_suspended: true 36 | 37 | grants: 38 | - GRANT CREATE DATABASE ON ACCOUNT TO ROLE iceberg_tutorial_role 39 | - GRANT CREATE WAREHOUSE ON ACCOUNT TO ROLE iceberg_tutorial_role 40 | - GRANT CREATE EXTERNAL VOLUME ON 
ACCOUNT TO ROLE iceberg_tutorial_role 41 | 42 | external_volumes: 43 | - name: iceberg_external_volume 44 | owner: iceberg_tutorial_role 45 | allow_writes: true 46 | storage_locations: 47 | - name: my-s3-us-west-2 48 | storage_provider: S3 49 | storage_base_url: "{{ var.storage_base_url }}" 50 | storage_aws_role_arn: "{{ var.storage_role_arn }}" 51 | storage_aws_external_id: "{{ var.storage_aws_external_id }}" 52 | 53 | iceberg_tables: 54 | - name: customer_iceberg 55 | database: iceberg_tutorial_db 56 | schema: public 57 | owner: iceberg_tutorial_role 58 | catalog: SNOWFLAKE 59 | external_volume: iceberg_external_volume 60 | base_location: customer_iceberg 61 | columns: 62 | - name: c_custkey 63 | data_type: INTEGER 64 | - name: c_name 65 | data_type: STRING 66 | - name: c_address 67 | data_type: STRING 68 | - name: c_nationkey 69 | data_type: INTEGER 70 | - name: c_phone 71 | data_type: STRING 72 | - name: c_acctbal 73 | data_type: INTEGER 74 | - name: c_mktsegment 75 | data_type: STRING 76 | - name: c_comment 77 | data_type: STRING -------------------------------------------------------------------------------- /examples/terraform-tags-example.yml: -------------------------------------------------------------------------------- 1 | databases: 2 | - name: "database" 3 | tags: 4 | cost_center: "finance" 5 | schemas: 6 | - name: "schema" 7 | 8 | tables: 9 | - name: "TABLE_NAME" 10 | database: "database" 11 | schema: "schema" 12 | comment: "Titan example table" 13 | columns: 14 | - name: "column1" 15 | data_type: "VARIANT" 16 | tags: 17 | cost_center: "engineering" 18 | - name: "column2" 19 | data_type: "VARCHAR(16)" 20 | tags: 21 | cost_center: "engineering" 22 | tags: 23 | cost_center: "engineering" 24 | 25 | tags: 26 | - name: "cost_center" 27 | allowed_values: ["finance", "engineering"] 28 | database: "database" 29 | schema: "public" -------------------------------------------------------------------------------- /images/github-explainer.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/images/github-explainer.png -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 120 3 | target-version = ['py39'] 4 | include = '\.pyi?$' 5 | extend-exclude = ''' 6 | # A regex preceded with ^/ will apply only to files and directories 7 | # in the root of the project. 8 | ^/foo.py # exclude a file named foo.py in the root of the project (in addition to the defaults) 9 | ''' 10 | 11 | [tool.pyright] 12 | pythonVersion = '3.9' 13 | venvPath = '' 14 | venv = '' 15 | 16 | [tool.ruff] 17 | line-length = 120 18 | 19 | [tool.pytest.ini_options] 20 | addopts = "-n 48" 21 | markers = [ 22 | "requires_snowflake: Mark a test as requiring a Snowflake connection.", 23 | "enterprise: Mark a test that only works on Enterprise Edition Snowflake.", 24 | "standard: Mark a test that works on Standard Edition Snowflake.", 25 | ] 26 | filterwarnings = [ 27 | "ignore:.*urllib3.contrib.pyopenssl.*:DeprecationWarning" 28 | ] 29 | 30 | [tool.codespell] 31 | ignore-words-list = [ 32 | "priv", 33 | "sproc", 34 | "snowpark", 35 | "pathspec", 36 | ] 37 | skip = [ 38 | "./build/", 39 | ] -------------------------------------------------------------------------------- /requirements.dev.txt: -------------------------------------------------------------------------------- 1 | .[dev] -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | . 
-------------------------------------------------------------------------------- /scripts/install: -------------------------------------------------------------------------------- 1 | WITH install AS PROCEDURE() 2 | RETURNS OBJECT NOT NULL 3 | LANGUAGE PYTHON 4 | RUNTIME_VERSION = '3.9' 5 | PACKAGES = ('snowflake-snowpark-python', 'inflection', 'pyparsing') 6 | IMPORTS = ('@titan_aws/releases/titan-0.1.6.zip') 7 | HANDLER = 'titan.spi.install' 8 | EXECUTE AS CALLER 9 | CALL install() 10 | ; -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import find_packages, setup 2 | 3 | 4 | setup( 5 | name="titan-core", 6 | # Package version is managed by the string inside version.md. By default, 7 | # setuptools doesn't copy this file into the build package. So we direct 8 | # setuptools to include it using the `include_package_data=True` option 9 | # as well as the MANIFEST.in file which has the `include version.md` directive. 
10 | version=open("version.md", encoding="utf-8").read().split(" ")[2], 11 | include_package_data=True, 12 | description="Titan Core: Snowflake infrastructure as code", 13 | long_description=open("README.md", encoding="utf-8").read(), 14 | long_description_content_type="text/markdown", 15 | url="https://github.com/Titan-Systems/titan", 16 | author="TJ Murphy", 17 | packages=find_packages(include=["titan", "titan.*"]), 18 | python_requires=">=3.9", 19 | project_urls={ 20 | "Homepage": "https://github.com/Titan-Systems/titan", 21 | }, 22 | entry_points={ 23 | "console_scripts": [ 24 | "titan=titan.cli:titan_cli", 25 | ], 26 | }, 27 | classifiers=[ 28 | "Development Status :: 4 - Beta", 29 | "License :: OSI Approved :: Apache Software License", 30 | "Intended Audience :: Developers", 31 | "Operating System :: OS Independent", 32 | "Programming Language :: Python :: 3 :: Only", 33 | "Programming Language :: SQL", 34 | "Topic :: Database", 35 | ], 36 | install_requires=[ 37 | "click==8.1.7", 38 | "inflection==0.5.1", 39 | "pyparsing==3.0.9", 40 | "pyyaml", 41 | "snowflake-connector-python==3.12.3", 42 | "snowflake-snowpark-python==1.24.0", 43 | "pyOpenSSL>=22.1.0", 44 | "jinja2", 45 | "pathspec", 46 | ], 47 | extras_require={ 48 | "dev": [ 49 | "black", 50 | "build", 51 | "codespell==2.2.6", 52 | "mypy", 53 | "pytest-cov", 54 | "pytest-profiling!=1.8.0", 55 | "pytest-xdist", 56 | "pytest>=6.0", 57 | "python-dotenv", 58 | "ruff", 59 | "tabulate", 60 | "twine!=5.1.0", 61 | "types-pytz", 62 | "types-pyyaml", 63 | ] 64 | }, 65 | ) 66 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/tests/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/json/account_parameter.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "INITIAL_REPLICATION_SIZE_LIMIT_IN_TB", 3 | "value": 11.0 4 | } -------------------------------------------------------------------------------- /tests/fixtures/json/aggregation_policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "my_policy", 3 | "body": "AGGREGATION_CONSTRAINT(MIN_GROUP_SIZE => 5)", 4 | "owner": "SYSADMIN" 5 | } -------------------------------------------------------------------------------- /tests/fixtures/json/alert.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "test_alert", 3 | "warehouse": "STATIC_WAREHOUSE", 4 | "schedule": "5 minutes", 5 | "condition": "SELECT 1", 6 | "then": "SELECT 2", 7 | "owner": "SYSADMIN", 8 | "comment": "This is a test alert" 9 | } -------------------------------------------------------------------------------- /tests/fixtures/json/api_integration.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOME_API_INTEGRATION", 3 | "owner": "ACCOUNTADMIN", 4 | "api_provider": "AWS_API_GATEWAY", 5 | "api_key": "api-987654321", 6 | "api_aws_role_arn": "arn:aws:iam::123456789012:role/my_cloud_account_role", 7 | "api_allowed_prefixes": [ 8 | "https://xyz.execute-api.us-west-2.amazonaws.com/production" 9 | ], 10 | "api_blocked_prefixes": [ 11 | "https://xyz.execute-api.us-west-2.amazonaws.com/development" 12 | ], 13 | "enabled": true, 14 | "comment": "This is a test API integration" 15 | } -------------------------------------------------------------------------------- /tests/fixtures/json/authentication_policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "restrict_client_types_policy", 3 | "client_types": [ 4 | "SNOWFLAKE_UI" 5 | ], 6 | "comment": null, 7 | "owner": "SECURITYADMIN", 8 | 
"mfa_enrollment": "OPTIONAL", 9 | "security_integrations": [ 10 | "ALL" 11 | ], 12 | "authentication_methods": [ 13 | "ALL" 14 | ], 15 | "mfa_authentication_methods": [ 16 | "PASSWORD" 17 | ] 18 | } -------------------------------------------------------------------------------- /tests/fixtures/json/azure_storage_integration.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "AZURE_INT", 3 | "type": "EXTERNAL_STAGE", 4 | "storage_provider": "AZURE", 5 | "owner": "ACCOUNTADMIN", 6 | "enabled": true, 7 | "azure_tenant_id": "a123b4c5-1234-123a-a12b-1a23b45678c9", 8 | "storage_allowed_locations": [ 9 | "azure://myaccount.blob.core.windows.net/mycontainer/path1/", 10 | "azure://myaccount.blob.core.windows.net/mycontainer/path2/" 11 | ], 12 | "storage_blocked_locations": [ 13 | "azure://myaccount.blob.core.windows.net/mycontainer/path3/", 14 | "azure://myaccount.blob.core.windows.net/mycontainer/path4/" 15 | ], 16 | "comment": "This is an external stage for Azure." 
17 | } -------------------------------------------------------------------------------- /tests/fixtures/json/column.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/tests/fixtures/json/column.json -------------------------------------------------------------------------------- /tests/fixtures/json/compute_pool.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOME_COMPUTE_POOL", 3 | "min_nodes": 1, 4 | "max_nodes": 1, 5 | "instance_family": "CPU_X64_XS", 6 | "auto_resume": true, 7 | "auto_suspend_secs": 3600, 8 | "comment": null, 9 | "initially_suspended": null, 10 | "owner": "SYSADMIN" 11 | } -------------------------------------------------------------------------------- /tests/fixtures/json/csv_file_format.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "CSV", 3 | "name": "MY_CSV_FORMAT", 4 | "owner": "SYSADMIN", 5 | "field_delimiter": "|", 6 | "skip_header": 1, 7 | "null_if": [ 8 | "NULL", 9 | "null" 10 | ], 11 | "empty_field_as_null": true, 12 | "compression": "GZIP", 13 | "record_delimiter": "\n", 14 | "file_extension": null, 15 | "parse_header": false, 16 | "skip_blank_lines": false, 17 | "date_format": "AUTO", 18 | "time_format": "AUTO", 19 | "timestamp_format": "AUTO", 20 | "binary_format": "HEX", 21 | "escape": null, 22 | "escape_unenclosed_field": "\\", 23 | "trim_space": false, 24 | "field_optionally_enclosed_by": null, 25 | "error_on_column_count_mismatch": true, 26 | "replace_invalid_characters": false, 27 | "skip_byte_order_mark": false, 28 | "encoding": "UTF8", 29 | "comment": null 30 | } -------------------------------------------------------------------------------- /tests/fixtures/json/database.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": 
"TEST_DATABASE", 3 | "transient": false, 4 | "owner": "SYSADMIN", 5 | "data_retention_time_in_days": 1, 6 | "max_data_extension_time_in_days": 14, 7 | "external_volume": null, 8 | "catalog": null, 9 | "default_ddl_collation": "en", 10 | "comment": "This is a test database" 11 | } -------------------------------------------------------------------------------- /tests/fixtures/json/database_role.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOMEDBROLE", 3 | "comment": null, 4 | "database": "STATIC_DATABASE", 5 | "owner": "USERADMIN" 6 | } -------------------------------------------------------------------------------- /tests/fixtures/json/database_role_grant.json: -------------------------------------------------------------------------------- 1 | { 2 | "database_role": "STATIC_DATABASE.STATIC_DATABASE_ROLE", 3 | "to_role": "STATIC_ROLE", 4 | "to_database_role": null 5 | } 6 | -------------------------------------------------------------------------------- /tests/fixtures/json/dynamic_table.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "some_dynamic_table", 3 | "columns": [ 4 | { 5 | "name": "id", 6 | "comment": "This is the first column" 7 | } 8 | ], 9 | "target_lag": "1 HOUR", 10 | "warehouse": "static_warehouse", 11 | "refresh_mode": "AUTO", 12 | "initialize": "ON_CREATE", 13 | "as_": "SELECT id FROM static_database.public.static_table", 14 | "comment": "This is a sample dynamic table", 15 | "owner": "SYSADMIN" 16 | } -------------------------------------------------------------------------------- /tests/fixtures/json/event_table.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "EXAMPLE_EVENT_TABLE", 3 | "cluster_by": null, 4 | "data_retention_time_in_days": 1, 5 | "max_data_extension_time_in_days": 1, 6 | "change_tracking": true, 7 | "default_ddl_collation": "en_US", 8 | "copy_grants": null, 
9 | "comment": "This is an example event table for logging user events.", 10 | "owner": "SYSADMIN" 11 | } -------------------------------------------------------------------------------- /tests/fixtures/json/external_access_integration.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOME_INT", 3 | "allowed_network_rules": [ 4 | "STATIC_DATABASE.PUBLIC.STATIC_NETWORK_RULE" 5 | ], 6 | "allowed_api_authentication_integrations": null, 7 | "allowed_authentication_secrets": null, 8 | "enabled": true, 9 | "comment": "Example external access integration for demonstration purposes.", 10 | "owner": "ACCOUNTADMIN" 11 | } -------------------------------------------------------------------------------- /tests/fixtures/json/external_function.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/tests/fixtures/json/external_function.json -------------------------------------------------------------------------------- /tests/fixtures/json/external_stage.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "EXTERNAL_STAGE_EXAMPLE", 3 | "url": "s3://titan-snowflake/", 4 | "owner": "SYSADMIN", 5 | "type": "EXTERNAL", 6 | "storage_integration": null, 7 | "credentials": null, 8 | "encryption": { 9 | "type": "AWS_SSE_KMS", 10 | "kms_key_id": "alias/MyAliasName" 11 | }, 12 | "directory": { 13 | "enable": true, 14 | "refresh_on_create": true 15 | }, 16 | "comment": "This is an example of an external stage" 17 | } -------------------------------------------------------------------------------- /tests/fixtures/json/external_table_stream.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOME_EXTERNAL_TABLE_STREAM", 3 | "on_external_table": "my_ext_table", 4 | "owner": "SYSADMIN", 5 | "copy_grants": 
null, 6 | "at": null, 7 | "before": null, 8 | "insert_only": true, 9 | "comment": null 10 | } -------------------------------------------------------------------------------- /tests/fixtures/json/external_volume.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "exvol", 3 | "storage_locations": [ 4 | { 5 | "name": "my-s3-us-west-2", 6 | "storage_provider": "S3", 7 | "storage_base_url": "s3://MY_EXAMPLE_BUCKET/", 8 | "storage_aws_role_arn": "arn:aws:iam::123456789012:role/myrole", 9 | "storage_aws_external_id": "iceberg_external_id", 10 | "encryption": { 11 | "type": "AWS_SSE_KMS", 12 | "kms_key_id": "1234abcd-12ab-34cd-56ef-1234567890ab" 13 | } 14 | } 15 | ], 16 | "owner": "ACCOUNTADMIN", 17 | "allow_writes": true, 18 | "comment": "This is a comment" 19 | } -------------------------------------------------------------------------------- /tests/fixtures/json/failover_group.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/tests/fixtures/json/failover_group.json -------------------------------------------------------------------------------- /tests/fixtures/json/file_format.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/tests/fixtures/json/file_format.json -------------------------------------------------------------------------------- /tests/fixtures/json/future_grant.json: -------------------------------------------------------------------------------- 1 | { 2 | "priv": "USAGE", 3 | "on_type": "SCHEMA", 4 | "in_type": "DATABASE", 5 | "in_name": "STATIC_DATABASE", 6 | "to": "STATIC_ROLE", 7 | "to_type": "ROLE", 8 | "grant_option": false 9 | } -------------------------------------------------------------------------------- 
/tests/fixtures/json/gcs_storage_integration.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "GCS_INT", 3 | "type": "EXTERNAL_STAGE", 4 | "storage_provider": "GCS", 5 | "owner": "ACCOUNTADMIN", 6 | "enabled": true, 7 | "storage_allowed_locations": [ 8 | "gcs://mybucket1/path1/", 9 | "gcs://mybucket2/path2/" 10 | ], 11 | "comment": "GCS external stage for mybucket1 and mybucket2", 12 | "storage_blocked_locations": [ 13 | "gcs://mybucket1/path1/secret/", 14 | "gcs://mybucket2/path2/secret/" 15 | ] 16 | } -------------------------------------------------------------------------------- /tests/fixtures/json/generic_secret.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "my_generic_secret", 3 | "secret_type": "GENERIC_STRING", 4 | "secret_string": "my_generic_secret_string", 5 | "comment": "Generic secret for various purposes", 6 | "owner": "SYSADMIN" 7 | } -------------------------------------------------------------------------------- /tests/fixtures/json/glue_catalog_integration.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "GLUECATALOGINT", 3 | "catalog_source": "GLUE", 4 | "catalog_namespace": "some-namespace", 5 | "table_format": "ICEBERG", 6 | "glue_aws_role_arn": "arn:aws:iam::123456789012:role/my-role", 7 | "glue_catalog_id": "123456789012", 8 | "glue_region": "us-east-1", 9 | "enabled": true, 10 | "owner": "ACCOUNTADMIN", 11 | "comment": "This is a test catalog integration" 12 | } -------------------------------------------------------------------------------- /tests/fixtures/json/grant.json: -------------------------------------------------------------------------------- 1 | { 2 | "priv": "USAGE", 3 | "on_type": "DATABASE", 4 | "on": "STATIC_DATABASE", 5 | "to": "STATIC_ROLE", 6 | "to_type": "ROLE", 7 | "owner": "SYSADMIN", 8 | "grant_option": false, 9 | "_privs": [ 10 | "USAGE" 11 | ] 12 | 
} -------------------------------------------------------------------------------- /tests/fixtures/json/image_repository.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOME_REPOSITORY", 3 | "owner": "SYSADMIN" 4 | } -------------------------------------------------------------------------------- /tests/fixtures/json/internal_stage.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "INTERNAL_STAGE_EXAMPLE", 3 | "owner": "SYSADMIN", 4 | "type": "INTERNAL", 5 | "encryption": { 6 | "type": "SNOWFLAKE_SSE" 7 | }, 8 | "directory": { 9 | "enable": true, 10 | "refresh_on_create": false 11 | }, 12 | "comment": "This is an example of an internal stage." 13 | } -------------------------------------------------------------------------------- /tests/fixtures/json/javascript_udf.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/tests/fixtures/json/javascript_udf.json -------------------------------------------------------------------------------- /tests/fixtures/json/json_file_format.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "my_json_format", 3 | "type": "JSON", 4 | "owner": "SYSADMIN", 5 | "comment": null, 6 | "compression": "AUTO", 7 | "date_format": "AUTO", 8 | "time_format": "AUTO", 9 | "timestamp_format": "AUTO", 10 | "binary_format": "HEX", 11 | "trim_space": false, 12 | "null_if": null, 13 | "file_extension": "json", 14 | "enable_octal": false, 15 | "allow_duplicate": false, 16 | "strip_outer_array": false, 17 | "strip_null_values": false, 18 | "replace_invalid_characters": false, 19 | "ignore_utf8_errors": false, 20 | "skip_byte_order_mark": true 21 | } -------------------------------------------------------------------------------- 
/tests/fixtures/json/masking_policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "some_masking_policy", 3 | "args": [ 4 | {"name": "val", "data_type": "VARCHAR"} 5 | ], 6 | "returns": "VARCHAR(16777216)", 7 | "body": "CASE WHEN current_role() IN ('ANALYST') THEN VAL ELSE '*********' END", 8 | "comment": "Masks email addresses", 9 | "exempt_other_policies": false, 10 | "owner": "SYSADMIN" 11 | } 12 | -------------------------------------------------------------------------------- /tests/fixtures/json/materialized_view.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOME_MV", 3 | "owner": "SYSADMIN", 4 | "comment": "Test view", 5 | "as_": "SELECT id FROM STATIC_DATABASE.public.static_table", 6 | "cluster_by": [ 7 | "id" 8 | ], 9 | "copy_grants": false, 10 | "secure": false, 11 | "columns": null 12 | } -------------------------------------------------------------------------------- /tests/fixtures/json/network_policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "some_network_policy", 3 | "allowed_network_rule_list": [ 4 | "static_database.public.static_network_rule_ingress_allow_all" 5 | ], 6 | "blocked_network_rule_list": [ 7 | "static_database.public.static_network_rule_ingress_suspicious_ip" 8 | ], 9 | "allowed_ip_list": [ 10 | "192.168.1.0/24" 11 | ], 12 | "blocked_ip_list": [ 13 | "192.168.1.99" 14 | ], 15 | "comment": "Comment for a network policy", 16 | "owner": "SECURITYADMIN" 17 | } -------------------------------------------------------------------------------- /tests/fixtures/json/network_rule.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "NETWORK_RULE_EXAMPLE", 3 | "value_list": [ 4 | "example.com:443" 5 | ], 6 | "mode": "EGRESS", 7 | "type": "HOST_PORT", 8 | "comment": "Network rule for testing", 9 | "owner": "SYSADMIN" 10 
| } -------------------------------------------------------------------------------- /tests/fixtures/json/notebook.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mynotebook", 3 | "from_": null, 4 | "main_file": null, 5 | "query_warehouse": "static_warehouse", 6 | "comment": "This is a test notebook", 7 | "default_version": null, 8 | "owner": "SYSADMIN" 9 | } -------------------------------------------------------------------------------- /tests/fixtures/json/notification_integration.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/tests/fixtures/json/notification_integration.json -------------------------------------------------------------------------------- /tests/fixtures/json/oauth_secret.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "new_oauth_secret", 3 | "api_authentication": "STATIC_SECURITY_INTEGRATION", 4 | "secret_type": "OAUTH2", 5 | "oauth_refresh_token": "new_refresh_token", 6 | "oauth_refresh_token_expiry_time": "2049-12-31 23:59:59", 7 | "comment": "New OAuth secret for testing", 8 | "owner": "SYSADMIN" 9 | } -------------------------------------------------------------------------------- /tests/fixtures/json/object_store_catalog_integration.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "MYCATALOGINT", 3 | "catalog_source": "OBJECT_STORE", 4 | "table_format": "ICEBERG", 5 | "enabled": true, 6 | "comment": "This is a test catalog integration", 7 | "owner": "ACCOUNTADMIN" 8 | } -------------------------------------------------------------------------------- /tests/fixtures/json/packages_policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "EXAMPLE_POLICY", 3 | "language": "PYTHON", 4 | 
"allowlist": [ 5 | "numpy", 6 | "pandas" 7 | ], 8 | "blocklist": [ 9 | "os", 10 | "sys" 11 | ], 12 | "additional_creation_blocklist": [ 13 | "exec", 14 | "eval" 15 | ], 16 | "comment": "This is an example packages policy.", 17 | "owner": "SYSADMIN" 18 | } -------------------------------------------------------------------------------- /tests/fixtures/json/parquet_file_format.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "my_parquet_format", 3 | "type": "PARQUET", 4 | "compression": "SNAPPY", 5 | "owner": "SYSADMIN", 6 | "binary_as_text": true, 7 | "comment": "A file format comment", 8 | "null_if": null, 9 | "replace_invalid_characters": false, 10 | "trim_space": false 11 | } -------------------------------------------------------------------------------- /tests/fixtures/json/password_policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "PASSWORD_POLICY_FIXTURE", 3 | "owner": "SYSADMIN", 4 | "password_min_length": 12, 5 | "password_max_length": 24, 6 | "password_min_upper_case_chars": 2, 7 | "password_min_lower_case_chars": 2, 8 | "password_min_numeric_chars": 2, 9 | "password_min_special_chars": 2, 10 | "password_min_age_days": 1, 11 | "password_max_age_days": 30, 12 | "password_max_retries": 3, 13 | "password_lockout_time_mins": 30, 14 | "password_history": 5, 15 | "comment": "production account password policy" 16 | } -------------------------------------------------------------------------------- /tests/fixtures/json/password_secret.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "my_password_secret", 3 | "secret_type": "PASSWORD", 4 | "username": "my_username", 5 | "password": "my_password", 6 | "comment": "Password secret for accessing external database", 7 | "owner": "SYSADMIN" 8 | } -------------------------------------------------------------------------------- 
/tests/fixtures/json/pipe.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOME_PIPE", 3 | "auto_ingest": false, 4 | "integration": null, 5 | "aws_sns_topic": null, 6 | "as_": "copy into STATIC_DATABASE.PUBLIC.STATIC_TABLE from @STATIC_DATABASE.PUBLIC.STATIC_STAGE", 7 | "comment": null, 8 | "error_integration": null, 9 | "owner": "SYSADMIN" 10 | } -------------------------------------------------------------------------------- /tests/fixtures/json/python_stored_procedure.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/tests/fixtures/json/python_stored_procedure.json -------------------------------------------------------------------------------- /tests/fixtures/json/python_udf.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "MY_PYTHON_UDF", 3 | "owner": "SYSADMIN", 4 | "args": [ 5 | { 6 | "name": "input_arg", 7 | "data_type": "VARIANT" 8 | } 9 | ], 10 | "returns": "NUMBER(38,0)", 11 | "runtime_version": "3.8", 12 | "packages": [ 13 | "snowflake-snowpark-python", 14 | "pyparsing" 15 | ], 16 | "handler": "main", 17 | "as_": "def main(input_arg): return 42", 18 | "language": "PYTHON", 19 | "comment": null, 20 | "copy_grants": false, 21 | "external_access_integrations": null, 22 | "imports": null, 23 | "null_handling": null, 24 | "secrets": null, 25 | "secure": null, 26 | "volatility": null 27 | } -------------------------------------------------------------------------------- /tests/fixtures/json/replication_group.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOME_REPLICATION_GROUP", 3 | "object_types": [ 4 | "DATABASES", 5 | "SHARES" 6 | ], 7 | "allowed_databases": [ 8 | "STATIC_DATABASE" 9 | ], 10 | "allowed_shares": [ 11 | "STATIC_SHARE" 12 | ], 13 | 
"allowed_integration_types": null, 14 | "allowed_accounts": [ 15 | "myorg.myaccount2" 16 | ], 17 | "ignore_edition_check": null, 18 | "replication_schedule": "10 MINUTE", 19 | "owner": "SYSADMIN" 20 | } -------------------------------------------------------------------------------- /tests/fixtures/json/resource_monitor.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "my_mon_2", 3 | "credit_quota": 5, 4 | "frequency": "DAILY", 5 | "start_timestamp": "IMMEDIATELY", 6 | "end_timestamp": "2049-12-31 23:59", 7 | "owner": "ACCOUNTADMIN", 8 | "notify_users": [ 9 | "STATIC_USER" 10 | ] 11 | } -------------------------------------------------------------------------------- /tests/fixtures/json/role.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "TEST_ROLE", 3 | "owner": "USERADMIN", 4 | "comment": null 5 | } -------------------------------------------------------------------------------- /tests/fixtures/json/role_grant.json: -------------------------------------------------------------------------------- 1 | { 2 | "role": "STATIC_ROLE", 3 | "to_role": "EMPTY", 4 | "to_user": null 5 | } -------------------------------------------------------------------------------- /tests/fixtures/json/s3_storage_integration.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "S3_INT", 3 | "type": "EXTERNAL_STAGE", 4 | "storage_provider": "S3", 5 | "storage_aws_role_arn": "arn:aws:iam::001234567890:role/myrole", 6 | "enabled": true, 7 | "storage_allowed_locations": [ 8 | "s3://mybucket1/path1/", 9 | "s3://mybucket2/path2/" 10 | ], 11 | "storage_blocked_locations": null, 12 | "comment": null, 13 | "owner": "ACCOUNTADMIN", 14 | "storage_aws_object_acl": "bucket-owner-full-control" 15 | } -------------------------------------------------------------------------------- /tests/fixtures/json/scanner_package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "CIS_BENCHMARKS", 3 | "enabled": true, 4 | "schedule": "0 0 0 * * UTC" 5 | } -------------------------------------------------------------------------------- /tests/fixtures/json/schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "TEST_SCHEMA", 3 | "transient": false, 4 | "managed_access": false, 5 | "data_retention_time_in_days": 1, 6 | "max_data_extension_time_in_days": 14, 7 | "default_ddl_collation": null, 8 | "owner": "SYSADMIN", 9 | "comment": "This is a schema for testing purposes" 10 | } -------------------------------------------------------------------------------- /tests/fixtures/json/sequence.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOMESEQ", 3 | "owner": "SYSADMIN", 4 | "start": 1, 5 | "increment": 2, 6 | "comment": "+3" 7 | } -------------------------------------------------------------------------------- /tests/fixtures/json/service.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOME_SERVICE", 3 | "compute_pool": "static_compute_pool", 4 | "specification": "spec:\n container:\n - name: container-name\n image: /some/image/path:latest\n env:\n PORT: 8000\n EXAMPLE_ENV_VARIABLE: my_value\n endpoint:\n - name: apiendpoint\n port: 8000\n public: true", 5 | "min_instances": 1, 6 | "max_instances": 1, 7 | "stage": null, 8 | "yaml_file_stage_path": null, 9 | "external_access_integrations": null, 10 | "auto_resume": true, 11 | "query_warehouse": null, 12 | "comment": null 13 | } -------------------------------------------------------------------------------- /tests/fixtures/json/session_policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "session_policy_example", 3 | "session_idle_timeout_mins": 30, 4 | 
"session_ui_idle_timeout_mins": 15, 5 | "comment": "This is a test session policy" 6 | } -------------------------------------------------------------------------------- /tests/fixtures/json/share.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOME_SHARE", 3 | "owner": "ACCOUNTADMIN", 4 | "comment": "A share for testing" 5 | } -------------------------------------------------------------------------------- /tests/fixtures/json/stage.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/tests/fixtures/json/stage.json -------------------------------------------------------------------------------- /tests/fixtures/json/stage_stream.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOME_STAGE_STREAM", 3 | "on_stage": "STATIC_DATABASE.PUBLIC.STATIC_STAGE", 4 | "owner": "SYSADMIN", 5 | "copy_grants": null, 6 | "comment": null 7 | } -------------------------------------------------------------------------------- /tests/fixtures/json/table.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "MYTABLE", 3 | "columns": [ 4 | { 5 | "name": "id", 6 | "data_type": "INT", 7 | "collate": null, 8 | "comment": null, 9 | "not_null": false, 10 | "constraint": null, 11 | "default": null, 12 | "tags": null 13 | }, 14 | { 15 | "name": "amount", 16 | "data_type": "NUMBER", 17 | "collate": null, 18 | "comment": null, 19 | "not_null": false, 20 | "constraint": null, 21 | "default": null, 22 | "tags": null 23 | } 24 | ], 25 | "constraints": null, 26 | "transient": false, 27 | "cluster_by": null, 28 | "enable_schema_evolution": false, 29 | "data_retention_time_in_days": null, 30 | "max_data_extension_time_in_days": null, 31 | "change_tracking": false, 32 | "default_ddl_collation": null, 33 | 
"copy_grants": null, 34 | "row_access_policy": null, 35 | "owner": "SYSADMIN", 36 | "comment": null 37 | } -------------------------------------------------------------------------------- /tests/fixtures/json/table_stream.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOME_TABLE_STREAM", 3 | "on_table": "STATIC_DATABASE.PUBLIC.STATIC_TABLE", 4 | "owner": "SYSADMIN", 5 | "copy_grants": null, 6 | "at": { 7 | "stream": "STATIC_DATABASE.PUBLIC.STATIC_STREAM" 8 | }, 9 | "before": null, 10 | "append_only": false, 11 | "show_initial_rows": null, 12 | "comment": null 13 | } -------------------------------------------------------------------------------- /tests/fixtures/json/tag.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "cost_center", 3 | "owner": "SYSADMIN", 4 | "comment": "cost_center tag", 5 | "allowed_values": [ 6 | "finance", 7 | "engineering", 8 | "sales" 9 | ] 10 | } -------------------------------------------------------------------------------- /tests/fixtures/json/task.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOMETASK", 3 | "schedule": "60 MINUTE", 4 | "state": "SUSPENDED", 5 | "as_": "SELECT 1", 6 | "after": null, 7 | "allow_overlapping_execution": false, 8 | "comment": null, 9 | "config": null, 10 | "copy_grants": null, 11 | "error_integration": null, 12 | "owner": "SYSADMIN", 13 | "suspend_task_after_num_failures": 5, 14 | "user_task_managed_initial_warehouse_size": "XSMALL", 15 | "user_task_timeout_ms": 3600000, 16 | "warehouse": null, 17 | "when": null 18 | } -------------------------------------------------------------------------------- /tests/fixtures/json/user.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "JILL", 3 | "owner": "USERADMIN", 4 | "password": "p4ssw0rd", 5 | "login_name": "jill", 6 | "display_name": 
"jill", 7 | "first_name": null, 8 | "middle_name": null, 9 | "last_name": null, 10 | "email": null, 11 | "must_change_password": false, 12 | "disabled": false, 13 | "days_to_expiry": null, 14 | "mins_to_unlock": null, 15 | "default_warehouse": "XSMALL_WH", 16 | "default_namespace": null, 17 | "default_role": "PUBLIC", 18 | "default_secondary_roles": null, 19 | "mins_to_bypass_mfa": null, 20 | "rsa_public_key": null, 21 | "rsa_public_key_2": null, 22 | "comment": null, 23 | "network_policy": null, 24 | "type": "NULL" 25 | } -------------------------------------------------------------------------------- /tests/fixtures/json/view.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "MY_VIEW", 3 | "owner": "SYSADMIN", 4 | "volatile": true, 5 | "as_": "SELECT id FROM STATIC_DATABASE.public.static_table", 6 | "change_tracking": false, 7 | "columns": [ 8 | { 9 | "name": "id", 10 | "data_type": null, 11 | "comment": "this is a column comment", 12 | "not_null": false 13 | } 14 | ], 15 | "comment": "This is a view", 16 | "copy_grants": false, 17 | "recursive": null, 18 | "secure": false 19 | } -------------------------------------------------------------------------------- /tests/fixtures/json/view_stream.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "SOME_VIEW_STREAM", 3 | "on_view": "STATIC_DATABASE.PUBLIC.STATIC_VIEW", 4 | "owner": "SYSADMIN", 5 | "copy_grants": null, 6 | "at": null, 7 | "before": null, 8 | "append_only": null, 9 | "show_initial_rows": null, 10 | "comment": null 11 | } -------------------------------------------------------------------------------- /tests/fixtures/json/warehouse.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "XSMALL_WH", 3 | "owner": "SYSADMIN", 4 | "warehouse_type": "STANDARD", 5 | "warehouse_size": "XSMALL", 6 | "max_cluster_count": 1, 7 | "min_cluster_count": 1, 8 | 
"scaling_policy": "STANDARD", 9 | "auto_suspend": 30, 10 | "auto_resume": false, 11 | "initially_suspended": true, 12 | "resource_monitor": null, 13 | "comment": "My XSMALL warehouse", 14 | "enable_query_acceleration": false, 15 | "query_acceleration_max_scale_factor": 8, 16 | "max_concurrency_level": 8, 17 | "statement_queued_timeout_in_seconds": 0, 18 | "statement_timeout_in_seconds": 172800 19 | } -------------------------------------------------------------------------------- /tests/fixtures/sql/account_parameter.sql: -------------------------------------------------------------------------------- 1 | ALTER ACCOUNT SET INITIAL_REPLICATION_SIZE_LIMIT_IN_TB = 11.0; -------------------------------------------------------------------------------- /tests/fixtures/sql/aggregation_policy.sql: -------------------------------------------------------------------------------- 1 | CREATE AGGREGATION POLICY my_policy AS () 2 | RETURNS AGGREGATION_CONSTRAINT -> 3 | AGGREGATION_CONSTRAINT(MIN_GROUP_SIZE => 5) 4 | ; 5 | 6 | CREATE AGGREGATION POLICY my_policy AS () 7 | RETURNS AGGREGATION_CONSTRAINT -> 8 | CASE 9 | WHEN CURRENT_ROLE() = 'ADMIN' 10 | THEN NO_AGGREGATION_CONSTRAINT() 11 | ELSE AGGREGATION_CONSTRAINT(MIN_GROUP_SIZE => 5) 12 | END; -------------------------------------------------------------------------------- /tests/fixtures/sql/alert.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE ALERT myalert 2 | WAREHOUSE = mywarehouse 3 | SCHEDULE = '1 minute' 4 | IF( EXISTS( 5 | SELECT gauge_value FROM gauge WHERE gauge_value>200)) 6 | THEN 7 | INSERT INTO gauge_value_exceeded_history VALUES (current_timestamp()); 8 | 9 | CREATE OR REPLACE ALERT alert_new_rows 10 | WAREHOUSE = my_warehouse 11 | SCHEDULE = '1 MINUTE' 12 | IF (EXISTS ( 13 | SELECT * 14 | FROM my_table 15 | WHERE row_timestamp BETWEEN SNOWFLAKE.ALERT.LAST_SUCCESSFUL_SCHEDULED_TIME() 16 | AND SNOWFLAKE.ALERT.SCHEDULED_TIME() 17 | )) 18 | THEN CALL 
SYSTEM$SEND_EMAIL(...) 19 | ; 20 | 21 | -------------------------------------------------------------------------------- /tests/fixtures/sql/api_integration.sql: -------------------------------------------------------------------------------- 1 | create or replace api integration demonstration_external_api_integration_01 2 | api_provider=aws_api_gateway 3 | api_aws_role_arn='arn:aws:iam::123456789012:role/my_cloud_account_role' 4 | api_allowed_prefixes=('https://xyz.execute-api.us-west-2.amazonaws.com/production') 5 | enabled=true; 6 | 7 | -------------------------------------------------------------------------------- /tests/fixtures/sql/authentication_policy.sql: -------------------------------------------------------------------------------- 1 | CREATE AUTHENTICATION POLICY restrict_client_types_policy 2 | CLIENT_TYPES = ('SNOWFLAKE_UI'); -------------------------------------------------------------------------------- /tests/fixtures/sql/azure_storage_integration.sql: -------------------------------------------------------------------------------- 1 | 2 | CREATE STORAGE INTEGRATION azure_int 3 | TYPE = EXTERNAL_STAGE 4 | STORAGE_PROVIDER = 'AZURE' 5 | ENABLED = TRUE 6 | AZURE_TENANT_ID = '' 7 | STORAGE_ALLOWED_LOCATIONS = ('azure://myaccount.blob.core.windows.net/mycontainer/path1/', 'azure://myaccount.blob.core.windows.net/mycontainer/path2/'); 8 | 9 | 10 | CREATE STORAGE INTEGRATION azure_int 11 | TYPE = EXTERNAL_STAGE 12 | STORAGE_PROVIDER = 'AZURE' 13 | ENABLED = TRUE 14 | AZURE_TENANT_ID = 'a123b4c5-1234-123a-a12b-1a23b45678c9' 15 | STORAGE_ALLOWED_LOCATIONS = ('*') 16 | STORAGE_BLOCKED_LOCATIONS = ('azure://myaccount.blob.core.windows.net/mycontainer/path3/', 'azure://myaccount.blob.core.windows.net/mycontainer/path4/'); 17 | 18 | -------------------------------------------------------------------------------- /tests/fixtures/sql/column.sql: -------------------------------------------------------------------------------- 1 | id INT; 2 | location GEOGRAPHY; 
3 | losing_bids ARRAY; 4 | very_long_string VARCHAR(65535); 5 | v VARCHAR COLLATE 'sp'; 6 | col_with_comment INT COMMENT 'This is a comment'; -------------------------------------------------------------------------------- /tests/fixtures/sql/compute_pool.sql: -------------------------------------------------------------------------------- 1 | CREATE COMPUTE POOL tutorial_compute_pool 2 | MIN_NODES = 1 3 | MAX_NODES = 1 4 | INSTANCE_FAMILY = CPU_X64_XS; 5 | 6 | CREATE COMPUTE POOL tutorial_compute_pool 7 | MIN_NODES = 1 8 | MAX_NODES = 1 9 | INSTANCE_FAMILY = CPU_X64_XS 10 | AUTO_RESUME = FALSE; -------------------------------------------------------------------------------- /tests/fixtures/sql/csv_file_format.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FILE FORMAT my_csv_format 2 | TYPE = CSV 3 | FIELD_DELIMITER = '|' 4 | SKIP_HEADER = 1 5 | NULL_IF = ('NULL', 'null') 6 | EMPTY_FIELD_AS_NULL = true 7 | COMPRESSION = gzip; -------------------------------------------------------------------------------- /tests/fixtures/sql/database.sql: -------------------------------------------------------------------------------- 1 | CREATE DATABASE mytestdb; 2 | 3 | CREATE DATABASE mytestdb2 DATA_RETENTION_TIME_IN_DAYS = 10; 4 | 5 | CREATE TRANSIENT DATABASE mytestdb3; -------------------------------------------------------------------------------- /tests/fixtures/sql/database_role.sql: -------------------------------------------------------------------------------- 1 | CREATE DATABASE ROLE d1.dr1; -------------------------------------------------------------------------------- /tests/fixtures/sql/dynamic_table.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE DYNAMIC TABLE product ( 2 | product_id INT, 3 | product_name VARCHAR 4 | ) 5 | TARGET_LAG = '20 minutes' 6 | WAREHOUSE = mywh 7 | REFRESH_MODE = AUTO 8 | INITIALIZE = ON_CREATE 9 | AS 10 | SELECT 
product_id, product_name FROM staging_table; 11 | 12 | 13 | CREATE OR REPLACE DYNAMIC TABLE names ( 14 | id INT, 15 | first_name VARCHAR, 16 | last_name VARCHAR 17 | ) 18 | TARGET_LAG = DOWNSTREAM 19 | WAREHOUSE = mywh 20 | REFRESH_MODE = INCREMENTAL 21 | INITIALIZE = ON_SCHEDULE 22 | AS 23 | SELECT var:id::int id, var:fname::string first_name, 24 | var:lname::string last_name FROM raw; -------------------------------------------------------------------------------- /tests/fixtures/sql/event_table.sql: -------------------------------------------------------------------------------- 1 | CREATE EVENT TABLE my_events; -------------------------------------------------------------------------------- /tests/fixtures/sql/external_access_integration.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE EXTERNAL ACCESS INTEGRATION SOME_EXTERNAL_ACCESS_INTEGRATION 2 | ALLOWED_NETWORK_RULES = (STATIC_DATABASE.PUBLIC.STATIC_NETWORK_RULE) 3 | COMMENT = 'Example external access integration for demonstration purposes' 4 | ENABLED = true; -------------------------------------------------------------------------------- /tests/fixtures/sql/external_function.sql: -------------------------------------------------------------------------------- 1 | create or replace EXTERNAL FUNCTION local_echo(string_col VARCHAR, somesuch INTEGER) 2 | returns variant 3 | api_integration = demonstration_external_api_integration_01 4 | HEADERS = ( 5 | 'volume-measure' = 'liters', 6 | 'distance-measure' = 'kilometers' 7 | ) 8 | request_translator = "DB"."SCHEMA".function 9 | as 'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo'; 10 | 11 | create or replace SECURE EXTERNAL FUNCTION local_echo2(string_col VARCHAR) 12 | returns variant 13 | api_integration = demonstration_external_api_integration_02 14 | as 'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo'; 15 | 
-------------------------------------------------------------------------------- /tests/fixtures/sql/external_stage.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE STAGE s3_external_stage 2 | url = 'https://s3.amazonaws.com/tripdata/'; 3 | -------------------------------------------------------------------------------- /tests/fixtures/sql/external_volume.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE EXTERNAL VOLUME exvol 2 | STORAGE_LOCATIONS = 3 | ( 4 | ( 5 | NAME = 'my-s3-us-west-2' 6 | STORAGE_PROVIDER = 'S3' 7 | STORAGE_BASE_URL = 's3://MY_EXAMPLE_BUCKET/' 8 | STORAGE_AWS_ROLE_ARN = 'arn:aws:iam::123456789012:role/myrole' 9 | ENCRYPTION=(TYPE='AWS_SSE_KMS' KMS_KEY_ID='1234abcd-12ab-34cd-56ef-1234567890ab') 10 | ), 11 | 12 | ( 13 | NAME = 'my-us-east-1' 14 | STORAGE_PROVIDER = 'GCS' 15 | STORAGE_BASE_URL = 'gcs://mybucket1/path1/' 16 | ENCRYPTION=(TYPE='GCS_SSE_KMS' KMS_KEY_ID = '1234abcd-12ab-34cd-56ef-1234567890ab') 17 | ) 18 | ) 19 | ; 20 | 21 | CREATE EXTERNAL VOLUME exvol 22 | STORAGE_LOCATIONS = 23 | ( 24 | ( 25 | NAME = 'my-us-east-1' 26 | STORAGE_PROVIDER = 'GCS' 27 | STORAGE_BASE_URL = 'gcs://mybucket1/path1/' 28 | ENCRYPTION=(TYPE='GCS_SSE_KMS' KMS_KEY_ID = '1234abcd-12ab-34cd-56ef-1234567890ab') 29 | ) 30 | ) 31 | ; -------------------------------------------------------------------------------- /tests/fixtures/sql/failover_group.sql: -------------------------------------------------------------------------------- 1 | CREATE FAILOVER GROUP myfg 2 | OBJECT_TYPES = USERS, ROLES, WAREHOUSES, RESOURCE MONITORS, DATABASES, SHARES, INTEGRATIONS, NETWORK POLICIES 3 | ALLOWED_DATABASES = db1 4 | ALLOWED_SHARES = s1 5 | ALLOWED_INTEGRATION_TYPES = SECURITY INTEGRATIONS 6 | ALLOWED_ACCOUNTS = myorg.myaccount1, myorg.myaccount2 7 | REPLICATION_SCHEDULE = '10 MINUTE'; 
-------------------------------------------------------------------------------- /tests/fixtures/sql/future_grant.sql: -------------------------------------------------------------------------------- 1 | -- Future Schema Privileges 2 | GRANT CREATE ROW ACCESS POLICY ON FUTURE SCHEMAS IN DATABASE somedb TO ROLE somerole; 3 | grant usage on future schemas in database mydb to role role1; 4 | 5 | -- Future Schema Objects Privileges 6 | GRANT INSERT ON FUTURE TABLES IN SCHEMA mydb.myschema TO ROLE somerole; -------------------------------------------------------------------------------- /tests/fixtures/sql/gcs_storage_integration.sql: -------------------------------------------------------------------------------- 1 | CREATE STORAGE INTEGRATION gcs_int 2 | TYPE = EXTERNAL_STAGE 3 | STORAGE_PROVIDER = 'GCS' 4 | ENABLED = TRUE 5 | STORAGE_ALLOWED_LOCATIONS = ('gcs://mybucket1/path1/', 'gcs://mybucket2/path2/'); 6 | 7 | CREATE STORAGE INTEGRATION gcs_int 8 | TYPE = EXTERNAL_STAGE 9 | STORAGE_PROVIDER = 'GCS' 10 | ENABLED = TRUE 11 | STORAGE_ALLOWED_LOCATIONS = ('*') 12 | STORAGE_BLOCKED_LOCATIONS = ('gcs://mybucket3/path3/', 'gcs://mybucket4/path4/'); 13 | -------------------------------------------------------------------------------- /tests/fixtures/sql/generic_secret.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE SECRET my_generic_secret 2 | TYPE = GENERIC_STRING 3 | SECRET_STRING = 'my_generic_secret_string' 4 | COMMENT = 'Generic secret for various purposes'; 5 | -------------------------------------------------------------------------------- /tests/fixtures/sql/glue_catalog_integration.sql: -------------------------------------------------------------------------------- 1 | CREATE CATALOG INTEGRATION glueCatalogInt 2 | CATALOG_SOURCE=GLUE 3 | CATALOG_NAMESPACE='some-namespace' 4 | TABLE_FORMAT=ICEBERG 5 | GLUE_AWS_ROLE_ARN='arn:aws:iam::123456789012:role/my-role' 6 | GLUE_CATALOG_ID='123456789012' 7 | 
GLUE_REGION='us-east-1' 8 | ENABLED=TRUE 9 | COMMENT='This is a test catalog integration'; -------------------------------------------------------------------------------- /tests/fixtures/sql/grant.sql: -------------------------------------------------------------------------------- 1 | -- Global Privileges 2 | GRANT IMPORT SHARE ON ACCOUNT TO ROLE somerole; 3 | GRANT ALL PRIVILEGES ON ACCOUNT TO ROLE somerole; 4 | 5 | -- Account Object Privileges 6 | GRANT OPERATE ON WAREHOUSE report_wh TO ROLE analyst; 7 | GRANT OPERATE ON WAREHOUSE report_wh TO ROLE analyst WITH GRANT OPTION; 8 | GRANT ALL ON REPLICATION GROUP some_group TO ROLE somerole; 9 | 10 | -- Schema Privileges 11 | GRANT MODIFY ON SCHEMA someschema TO ROLE somerole; 12 | GRANT ADD SEARCH OPTIMIZATION ON SCHEMA someschema TO ROLE somerole; 13 | GRANT CREATE SNOWFLAKE.ML.FORECAST ON SCHEMA someschema TO ROLE somerole; 14 | GRANT CREATE MATERIALIZED VIEW ON SCHEMA mydb.myschema TO ROLE myrole; 15 | 16 | 17 | -- Schema Object Privileges 18 | GRANT ALL PRIVILEGES ON FUNCTION mydb.myschema.add5(number) TO ROLE analyst; 19 | GRANT ALL PRIVILEGES ON FUNCTION mydb.myschema.add5(string) TO ROLE analyst; 20 | GRANT USAGE ON PROCEDURE mydb.myschema.myprocedure(number) TO ROLE analyst; 21 | 22 | 23 | -------------------------------------------------------------------------------- /tests/fixtures/sql/image_repository.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE IMAGE REPOSITORY tutorial_repository; -------------------------------------------------------------------------------- /tests/fixtures/sql/internal_stage.sql: -------------------------------------------------------------------------------- 1 | CREATE STAGE stage_with_encryption 2 | ENCRYPTION = (TYPE = 'SNOWFLAKE_FULL'); 3 | 4 | CREATE STAGE my_int_stage_1; 5 | 6 | CREATE STAGE my_int_stage_2 7 | ENCRYPTION = (TYPE = 'SNOWFLAKE_SSE'); 8 | 9 | CREATE STAGE stage_with_directory 10 | DIRECTORY = 
(ENABLE = TRUE) 11 | COMMENT = 'This is a stage with a directory'; -------------------------------------------------------------------------------- /tests/fixtures/sql/javascript_udf.sql: -------------------------------------------------------------------------------- 1 | -- Valid UDF. 'N' must be capitalized. 2 | CREATE OR REPLACE FUNCTION add5(n double) 3 | RETURNS double 4 | LANGUAGE JAVASCRIPT 5 | AS 'return N + 5;'; 6 | 7 | -- Valid UDF. Lowercase argument is double-quoted. 8 | CREATE OR REPLACE FUNCTION add5_quoted("n" double) 9 | VOLATILE 10 | RETURNS double 11 | LANGUAGE JAVASCRIPT 12 | AS 'return n + 5;'; 13 | 14 | -- Invalid UDF. Error returned at runtime because JavaScript identifier 'n' cannot be resolved. 15 | CREATE OR REPLACE FUNCTION add5_lowercase(n double) 16 | RETURNS double 17 | LANGUAGE JAVASCRIPT 18 | AS $$return n + 5;$$; 19 | 20 | -------------------------------------------------------------------------------- /tests/fixtures/sql/json_file_format.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FILE FORMAT my_json_format 2 | TYPE = JSON; -------------------------------------------------------------------------------- /tests/fixtures/sql/materialized_view.sql: -------------------------------------------------------------------------------- 1 | CREATE MATERIALIZED VIEW mymv 2 | COMMENT='Test view' 3 | AS 4 | SELECT col1, col2 FROM mytable; -------------------------------------------------------------------------------- /tests/fixtures/sql/network_policy.sql: -------------------------------------------------------------------------------- 1 | CREATE NETWORK POLICY mypolicy1 ALLOWED_IP_LIST=('192.168.1.0/24') 2 | BLOCKED_IP_LIST=('192.168.1.99'); 3 | 4 | CREATE NETWORK POLICY mypolicy2 ALLOWED_IP_LIST=('192.168.1.0','192.168.1.100'); -------------------------------------------------------------------------------- /tests/fixtures/sql/network_rule.sql: 
-------------------------------------------------------------------------------- 1 | CREATE NETWORK RULE corporate_network 2 | TYPE = AWSVPCEID 3 | VALUE_LIST = ('vpce-123abc3420c1931') 4 | MODE = INTERNAL_STAGE 5 | COMMENT = 'corporate privatelink endpoint'; 6 | 7 | CREATE NETWORK RULE cloud_network 8 | TYPE = IPV4 9 | VALUE_LIST = ('47.88.25.32/27') 10 | COMMENT ='cloud egress ip range'; 11 | 12 | CREATE NETWORK RULE external_access_rule 13 | TYPE = HOST_PORT 14 | MODE = EGRESS 15 | VALUE_LIST = ('example.com', 'company.com:443'); -------------------------------------------------------------------------------- /tests/fixtures/sql/notebook.sql: -------------------------------------------------------------------------------- 1 | CREATE NOTEBOOK mynotebook; 2 | 3 | CREATE NOTEBOOK mynotebook 4 | QUERY_WAREHOUSE = my_warehouse; 5 | 6 | CREATE NOTEBOOK mynotebook 7 | FROM '@my_db.my_schema.my_stage' 8 | MAIN_FILE = 'my_notebook_file.ipynb' 9 | QUERY_WAREHOUSE = my_warehouse; -------------------------------------------------------------------------------- /tests/fixtures/sql/notification_integration.sql: -------------------------------------------------------------------------------- 1 | CREATE NOTIFICATION INTEGRATION my_email_int 2 | TYPE=EMAIL 3 | ENABLED=TRUE 4 | ALLOWED_RECIPIENTS=('first.last@example.com','first2.last2@example.com'); 5 | 6 | -------------------------------------------------------------------------------- /tests/fixtures/sql/oauth_secret.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE SECRET my_oauth_secret_with_token 2 | TYPE = OAUTH2 3 | API_AUTHENTICATION = 'my_security_integration' 4 | OAUTH_REFRESH_TOKEN = 'my_refresh_token' 5 | OAUTH_REFRESH_TOKEN_EXPIRY_TIME = '2022-12-31 23:59:59' 6 | COMMENT = 'OAuth2 secret for accessing external API'; 7 | 8 | CREATE OR REPLACE SECRET my_oauth_secret_with_scopes 9 | TYPE = OAUTH2 10 | API_AUTHENTICATION = 'my_security_integration' 11 | 
OAUTH_SCOPES = ( 'scope1', 'scope2' ) 12 | COMMENT = 'OAuth2 secret for accessing external API'; -------------------------------------------------------------------------------- /tests/fixtures/sql/object_store_catalog_integration.sql: -------------------------------------------------------------------------------- 1 | CREATE CATALOG INTEGRATION myCatalogInt 2 | CATALOG_SOURCE=OBJECT_STORE 3 | TABLE_FORMAT=ICEBERG 4 | ENABLED=TRUE 5 | COMMENT='This is a test catalog integration'; -------------------------------------------------------------------------------- /tests/fixtures/sql/packages_policy.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PACKAGES POLICY example_policy 2 | LANGUAGE PYTHON 3 | ALLOWLIST = ('numpy', 'pandas') 4 | BLOCKLIST = ('os', 'sys') 5 | ADDITIONAL_CREATION_BLOCKLIST = ('exec', 'eval') 6 | COMMENT = 'This is an example packages policy.' 7 | -------------------------------------------------------------------------------- /tests/fixtures/sql/parquet_file_format.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FILE FORMAT my_parquet_format 2 | TYPE = PARQUET 3 | COMPRESSION = SNAPPY; -------------------------------------------------------------------------------- /tests/fixtures/sql/password_policy.sql: -------------------------------------------------------------------------------- 1 | CREATE PASSWORD POLICY PASSWORD_POLICY_PROD_1 2 | PASSWORD_MIN_LENGTH = 12 3 | PASSWORD_MAX_LENGTH = 24 4 | PASSWORD_MIN_UPPER_CASE_CHARS = 2 5 | PASSWORD_MIN_LOWER_CASE_CHARS = 2 6 | PASSWORD_MIN_NUMERIC_CHARS = 2 7 | PASSWORD_MIN_SPECIAL_CHARS = 2 8 | PASSWORD_MIN_AGE_DAYS = 1 9 | PASSWORD_MAX_AGE_DAYS = 30 10 | PASSWORD_MAX_RETRIES = 3 11 | PASSWORD_LOCKOUT_TIME_MINS = 30 12 | PASSWORD_HISTORY = 5 13 | COMMENT = 'production account password policy'; -------------------------------------------------------------------------------- 
/tests/fixtures/sql/password_secret.sql: -------------------------------------------------------------------------------- 1 | 2 | 3 | CREATE OR REPLACE SECRET my_password_secret 4 | TYPE = PASSWORD 5 | USERNAME = 'my_username' 6 | PASSWORD = 'my_password' 7 | COMMENT = 'Password secret for accessing external database'; 8 | 9 | -------------------------------------------------------------------------------- /tests/fixtures/sql/pipe.sql: -------------------------------------------------------------------------------- 1 | create pipe mypipe as copy into mytable from @mystage; 2 | 3 | create pipe mypipe2 as copy into mytable(C1, C2) from (select $5, $4 from @mystage); 4 | 5 | create pipe mypipe_s3 6 | auto_ingest = true 7 | aws_sns_topic = 'arn:aws:sns:us-west-2:001234567890:s3_mybucket' 8 | as 9 | copy into snowpipe_db.public.mytable 10 | from @snowpipe_db.public.mystage 11 | file_format = (type = 'JSON'); 12 | 13 | create pipe mypipe_gcs 14 | auto_ingest = true 15 | integration = 'MYINT' 16 | as 17 | copy into snowpipe_db.public.mytable 18 | from @snowpipe_db.public.mystage 19 | file_format = (type = 'JSON'); 20 | 21 | create pipe mypipe_azure 22 | auto_ingest = true 23 | integration = 'MYINT' 24 | as 25 | copy into snowpipe_db.public.mytable 26 | from @snowpipe_db.public.mystage 27 | file_format = (type = 'JSON'); -------------------------------------------------------------------------------- /tests/fixtures/sql/python_stored_procedure.sql: -------------------------------------------------------------------------------- 1 | create or replace procedure clean_table(table_name STRING) 2 | returns int 3 | language python 4 | runtime_version = '3.8' 5 | packages = ('snowflake-snowpark-python') 6 | handler = 'clean_table_handler' 7 | AS 8 | $$ 9 | import snowflake.snowpark 10 | 11 | def clean_table_handler(session: snowflake.snowpark.session.Session, 12 | table_name: str) -> int: 13 | table = session.table(table_name) 14 | result = 
table.delete(~table['fruit'].rlike('[a-z]+')) 15 | # equivalent to `DELETE FROM dirty_data WHERE fruit NOT RLIKE '[a-z]+';` 16 | 17 | return result.rows_deleted 18 | $$; 19 | -------------------------------------------------------------------------------- /tests/fixtures/sql/python_udf.sql: -------------------------------------------------------------------------------- 1 | create or replace function clean_table(table_name STRING) 2 | returns int 3 | language python 4 | runtime_version = '3.8' 5 | packages = ('snowflake-snowpark-python') 6 | handler = 'main' 7 | AS 8 | $$ 9 | def main(_): return 42 10 | $$; 11 | -------------------------------------------------------------------------------- /tests/fixtures/sql/replication_group.sql: -------------------------------------------------------------------------------- 1 | CREATE REPLICATION GROUP myrg 2 | OBJECT_TYPES = DATABASES, SHARES 3 | ALLOWED_DATABASES = db1 4 | ALLOWED_SHARES = s1 5 | ALLOWED_ACCOUNTS = myorg.myaccount2 6 | REPLICATION_SCHEDULE = '10 MINUTE'; -------------------------------------------------------------------------------- /tests/fixtures/sql/resource_monitor.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE RESOURCE MONITOR my_mon_1 2 | WITH 3 | credit_quota=5 4 | FREQUENCY = DAILY 5 | START_TIMESTAMP = '2020-01-01 00:00:00' 6 | NOTIFY_USERS = ( teej, jack, jill ) 7 | ; 8 | 9 | 10 | CREATE OR REPLACE RESOURCE MONITOR my_mon_2 11 | WITH 12 | credit_quota=5 13 | FREQUENCY = DAILY 14 | START_TIMESTAMP = IMMEDIATELY 15 | NOTIFY_USERS = ( teej, jack, jill ) 16 | ; -------------------------------------------------------------------------------- /tests/fixtures/sql/role.sql: -------------------------------------------------------------------------------- 1 | CREATE ROLE myrole; 2 | CREATE OR REPLACE ROLE IF NOT EXISTS LOADER 3 | TAG (purpose = 'pass butter') 4 | COMMENT = 'A role for loading data' 5 | ; 
-------------------------------------------------------------------------------- /tests/fixtures/sql/role_grant.sql: -------------------------------------------------------------------------------- 1 | GRANT ROLE STATIC_ROLE TO ROLE SYSADMIN; 2 | GRANT ROLE STATIC_ROLE TO USER STATIC_USER; 3 | -------------------------------------------------------------------------------- /tests/fixtures/sql/s3_storage_integration.sql: -------------------------------------------------------------------------------- 1 | CREATE STORAGE INTEGRATION s3_int 2 | TYPE = EXTERNAL_STAGE 3 | STORAGE_PROVIDER = 'S3' 4 | STORAGE_AWS_ROLE_ARN = 'arn:aws:iam::001234567890:role/myrole' 5 | ENABLED = TRUE 6 | STORAGE_ALLOWED_LOCATIONS = ('s3://mybucket1/path1/', 's3://mybucket2/path2/'); 7 | 8 | CREATE STORAGE INTEGRATION s3_int 9 | TYPE = EXTERNAL_STAGE 10 | STORAGE_PROVIDER = 'S3' 11 | STORAGE_AWS_ROLE_ARN = 'arn:aws:iam::001234567890:role/myrole' 12 | ENABLED = TRUE 13 | STORAGE_ALLOWED_LOCATIONS = ('*') 14 | STORAGE_BLOCKED_LOCATIONS = ('s3://mybucket3/path3/', 's3://mybucket4/path4/'); -------------------------------------------------------------------------------- /tests/fixtures/sql/schema.sql: -------------------------------------------------------------------------------- 1 | CREATE SCHEMA myschema; 2 | 3 | CREATE TRANSIENT SCHEMA tschema; 4 | 5 | CREATE SCHEMA mschema WITH MANAGED ACCESS; 6 | 7 | -------------------------------------------------------------------------------- /tests/fixtures/sql/security_integration.sql: -------------------------------------------------------------------------------- 1 | CREATE SECURITY INTEGRATION td_oauth_int1 2 | TYPE = oauth 3 | ENABLED = true 4 | OAUTH_CLIENT = tableau_desktop; 5 | 6 | CREATE SECURITY INTEGRATION td_oauth_int2 7 | TYPE = oauth 8 | ENABLED = true 9 | OAUTH_CLIENT = tableau_desktop 10 | OAUTH_REFRESH_TOKEN_VALIDITY = 36000 11 | BLOCKED_ROLES_LIST = ('SYSADMIN'); 12 | 13 | CREATE SECURITY INTEGRATION ts_oauth_int1 14 | TYPE = oauth 15 
| ENABLED = true 16 | OAUTH_CLIENT = tableau_server; 17 | 18 | CREATE SECURITY INTEGRATION ts_oauth_int2 19 | TYPE = oauth 20 | ENABLED = true 21 | OAUTH_CLIENT = tableau_server 22 | OAUTH_REFRESH_TOKEN_VALIDITY = 86400 23 | BLOCKED_ROLES_LIST = ('SYSADMIN'); 24 | 25 | CREATE SECURITY INTEGRATION oauth_kp_int 26 | TYPE = oauth 27 | ENABLED = true 28 | OAUTH_CLIENT = custom 29 | OAUTH_CLIENT_TYPE = 'CONFIDENTIAL' 30 | OAUTH_REDIRECT_URI = 'https://localhost.com' 31 | OAUTH_ISSUE_REFRESH_TOKENS = TRUE 32 | OAUTH_REFRESH_TOKEN_VALIDITY = 86400 33 | PRE_AUTHORIZED_ROLES_LIST = ('MYROLE') 34 | BLOCKED_ROLES_LIST = ('SYSADMIN'); 35 | 36 | CREATE SECURITY INTEGRATION IF NOT EXISTS "Application Authentication Test" 37 | TYPE=oauth 38 | OAUTH_CLIENT=snowservices_ingress 39 | ENABLED=true; -------------------------------------------------------------------------------- /tests/fixtures/sql/sequence.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE SEQUENCE seq_01 START = 1 INCREMENT = 1; 2 | CREATE OR REPLACE SEQUENCE seq90; 3 | CREATE OR REPLACE SEQUENCE seq_5 START = 1 INCREMENT = 5; 4 | CREATE SEQUENCE fully_loaded WITH 5 | START WITH 111 6 | INCREMENT BY 999 7 | COMMENT = 'comment'; -------------------------------------------------------------------------------- /tests/fixtures/sql/service.sql: -------------------------------------------------------------------------------- 1 | -- CREATE SERVICE echo_service 2 | -- IN COMPUTE POOL tutorial_compute_pool 3 | -- FROM @tutorial_stage 4 | -- SPECIFICATION_FILE='echo_spec.yaml' 5 | -- MIN_INSTANCES=2 6 | -- MAX_INSTANCES=2 7 | -- ; 8 | 9 | CREATE SERVICE titan_service_test 10 | IN COMPUTE POOL some_compute_pool 11 | FROM SPECIFICATION $$ 12 | spec: 13 | container: 14 | - name: container_name 15 | image: /some/image/path:latest 16 | env: 17 | PORT: 8000 18 | EXAMPLE_ENV_VARIABLE: my_value 19 | endpoint: 20 | - name: apiendpoint 21 | port: 8000 22 | public: true 23 | $$ 24 
| MIN_INSTANCES=1 25 | MAX_INSTANCES=1 26 | ; -------------------------------------------------------------------------------- /tests/fixtures/sql/session_policy.sql: -------------------------------------------------------------------------------- 1 | CREATE SESSION POLICY session_policy_prod_1 2 | SESSION_IDLE_TIMEOUT_MINS = 60 3 | SESSION_UI_IDLE_TIMEOUT_MINS = 30 4 | COMMENT = 'session policy for use in the prod_1 environment' 5 | ; -------------------------------------------------------------------------------- /tests/fixtures/sql/share.sql: -------------------------------------------------------------------------------- 1 | CREATE SHARE SOME_SHARE COMMENT = 'A share for testing'; -------------------------------------------------------------------------------- /tests/fixtures/sql/snowflake_iceberg_table.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/tests/fixtures/sql/snowflake_iceberg_table.sql -------------------------------------------------------------------------------- /tests/fixtures/sql/stage_stream.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM dirtable_mystage_s ON STAGE mystage; -------------------------------------------------------------------------------- /tests/fixtures/sql/table.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE mytable (id INT PRIMARY KEY, amount NUMBER); 2 | CREATE TABLE example (col1 number comment 'a column comment') COMMENT='a table comment'; 3 | 4 | -- Name includes schema 5 | CREATE TABLE someschema.sometable (id int); 6 | 7 | 8 | -- Name includes db and schema 9 | -- CREATE TABLE somedb.someschema.sometable (id int); 10 | 11 | CREATE TABLE someschema.sometable (modified TIMESTAMP_NTZ(9) NOT NULL); -------------------------------------------------------------------------------- 
/tests/fixtures/sql/table_stream.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM mystream ON TABLE mytable; 2 | 3 | CREATE STREAM mystream ON TABLE mytable AT(STREAM => 'oldstream'); 4 | 5 | CREATE OR REPLACE STREAM mystream ON TABLE mytable AT(STREAM => 'mystream'); 6 | 7 | CREATE STREAM mystream ON TABLE mytable BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726'); -------------------------------------------------------------------------------- /tests/fixtures/sql/tag.sql: -------------------------------------------------------------------------------- 1 | CREATE TAG cost_center COMMENT = 'cost_center tag'; 2 | 3 | CREATE OR REPLACE TAG fruit_type ALLOWED_VALUES 'apple', 'banana', 'watermelon'; -------------------------------------------------------------------------------- /tests/fixtures/sql/task.sql: -------------------------------------------------------------------------------- 1 | /* TIMESTAMP_INPUT_FORMAT = 'YYYY-MM-DD HH24' -- This is a session parameter, not supported yet */ 2 | 3 | CREATE TASK t1 4 | SCHEDULE = '60 MINUTE' 5 | USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE = 'XSMALL' 6 | AS 7 | INSERT INTO mytable(ts) VALUES(CURRENT_TIMESTAMP); 8 | 9 | 10 | CREATE TASK mytask_minute 11 | WAREHOUSE = mywh 12 | SCHEDULE = '5 MINUTE' 13 | AS 14 | INSERT INTO mytable(ts) VALUES(CURRENT_TIMESTAMP); 15 | 16 | CREATE TASK mytask1 17 | WAREHOUSE = mywh 18 | SCHEDULE = '5 minute' 19 | WHEN 20 | SYSTEM$STREAM_HAS_DATA('MYSTREAM') 21 | AS 22 | INSERT INTO mytable1(id,name) SELECT id, name FROM mystream WHERE METADATA$ACTION = 'INSERT'; 23 | 24 | CREATE TASK task5 25 | AFTER task2, task3, task4 26 | AS 27 | INSERT INTO t1(ts) VALUES(CURRENT_TIMESTAMP); 28 | 29 | 30 | CREATE TASK t1 31 | USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE = 'XSMALL' 32 | SCHEDULE = '2 minute' 33 | AS 34 | EXECUTE IMMEDIATE 35 | $$ 36 | DECLARE 37 | radius_of_circle float; 38 | area_of_circle float; 39 | BEGIN 40 | radius_of_circle := 3; 
41 | area_of_circle := pi() * radius_of_circle * radius_of_circle; 42 | return area_of_circle; 43 | END; 44 | $$; -------------------------------------------------------------------------------- /tests/fixtures/sql/user.sql: -------------------------------------------------------------------------------- 1 | CREATE USER IF NOT EXISTS jill 2 | PASSWORD = 'p4ssw0rd' 3 | DEFAULT_ROLE = PUBLIC 4 | DEFAULT_WAREHOUSE = XSMALL_WH 5 | MUST_CHANGE_PASSWORD = FALSE 6 | ; 7 | 8 | CREATE USER user1 PASSWORD='abc123' DEFAULT_ROLE = myrole DEFAULT_SECONDARY_ROLES = ('ALL') MUST_CHANGE_PASSWORD = TRUE; -------------------------------------------------------------------------------- /tests/fixtures/sql/view.sql: -------------------------------------------------------------------------------- 1 | CREATE VIEW myview 2 | COMMENT='Test view' 3 | AS 4 | SELECT col1, col2 FROM mytable; 5 | 6 | CREATE OR REPLACE SECURE VIEW myview 7 | COMMENT='Test secure view' 8 | AS 9 | SELECT col1, col2 FROM mytable; 10 | 11 | CREATE VIEW employee_hierarchy 12 | (title COMMENT 'employee title', employee_ID, manager_ID, "MGR_EMP_ID (SHOULD BE SAME)", "MGR TITLE") 13 | AS 14 | SELECT * FROM employees; 15 | 16 | -------------------------------------------------------------------------------- /tests/fixtures/sql/view_stream.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM mystream ON VIEW myview; -------------------------------------------------------------------------------- /tests/fixtures/sql/warehouse.sql: -------------------------------------------------------------------------------- 1 | 2 | CREATE WAREHOUSE IF NOT EXISTS XSMALL_WH 3 | WITH 4 | WAREHOUSE_SIZE = 'XSMALL' 5 | WAREHOUSE_TYPE = 'STANDARD' 6 | AUTO_SUSPEND = 60 7 | AUTO_RESUME = FALSE 8 | initially_suspended = true 9 | RESOURCE_MONITOR = my_mon 10 | COMMENT = 'My XSMALL warehouse' 11 | ; 12 | 13 | 14 | 15 | -- CREATE WAREHOUSE IF NOT EXISTS XSMALL_WH2 AUTO_SUSPEND = NULL; 16 | 17 | 
CREATE WAREHOUSE lowercase_wh 18 | warehouse_size = x6large 19 | warehouse_type = snowpark-optimized 20 | scaling_policy = economy 21 | initially_suspended = true 22 | ; 23 | 24 | 25 | -------------------------------------------------------------------------------- /tests/integration/data_provider/test_fetch.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | 5 | from tests.helpers import safe_fetch 6 | from titan import data_provider 7 | from titan.enums import AccountEdition, ResourceType 8 | from titan.identifiers import FQN, URN 9 | from titan.resource_name import ResourceName 10 | 11 | pytestmark = pytest.mark.requires_snowflake 12 | 13 | TEST_ROLE = os.environ.get("TEST_SNOWFLAKE_ROLE") 14 | TEST_USER = os.environ.get("TEST_SNOWFLAKE_USER") 15 | 16 | 17 | @pytest.mark.skip("very slow") 18 | def test_fetch_over_1000_objects(cursor, test_db): 19 | for i in range(1005): 20 | cursor.execute(f"CREATE SCHEMA {test_db}_schema_{i}") 21 | 22 | schema = safe_fetch( 23 | cursor, 24 | URN( 25 | ResourceType.SCHEMA, 26 | fqn=FQN(database=ResourceName(test_db), name=ResourceName(f"{test_db}_schema_1004")), 27 | account_locator="", 28 | ), 29 | ) 30 | assert schema is not None 31 | assert schema["name"] == f"{test_db}_SCHEMA_1004" 32 | 33 | 34 | def test_fetch_quoted_identifier(cursor, test_db): 35 | cursor.execute(f'CREATE SCHEMA {test_db}."multiCaseString"') 36 | schema = safe_fetch( 37 | cursor, 38 | URN(ResourceType.SCHEMA, fqn=FQN(database=ResourceName(test_db), name=ResourceName('"multiCaseString"'))), 39 | ) 40 | assert schema is not None 41 | assert schema["name"] == '"multiCaseString"' 42 | -------------------------------------------------------------------------------- /tests/integration/data_provider/test_fetch_owner.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | 5 | from tests.helpers import safe_fetch 6 | from 
titan import resources as res 7 | 8 | pytestmark = pytest.mark.requires_snowflake 9 | 10 | TEST_ROLE = os.environ.get("TEST_SNOWFLAKE_ROLE") 11 | TEST_USER = os.environ.get("TEST_SNOWFLAKE_USER") 12 | 13 | 14 | def test_fetch_owner(cursor, suffix, test_db, marked_for_cleanup): 15 | database_role = res.DatabaseRole( 16 | name=f"TEST_FETCH_OWNER_DATABASE_ROLE_{suffix}", 17 | database=test_db, 18 | ) 19 | schema = res.Schema( 20 | name="SOME_SCHEMA", 21 | database=test_db, 22 | ) 23 | cursor.execute(database_role.create_sql()) 24 | cursor.execute(schema.create_sql()) 25 | marked_for_cleanup.append(database_role) 26 | marked_for_cleanup.append(schema) 27 | 28 | result = safe_fetch(cursor, schema.urn) 29 | assert result is not None 30 | assert result["owner"] == TEST_ROLE 31 | 32 | cursor.execute(f"GRANT OWNERSHIP ON SCHEMA {test_db}.SOME_SCHEMA TO DATABASE ROLE {database_role.name}") 33 | 34 | result = safe_fetch(cursor, schema.urn) 35 | assert result is not None 36 | assert result["owner"] == str(database_role.urn.fqn) 37 | 38 | cursor.execute(f"GRANT OWNERSHIP ON SCHEMA {test_db}.SOME_SCHEMA TO ROLE {TEST_ROLE}") 39 | -------------------------------------------------------------------------------- /tests/integration/test_examples.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import yaml 3 | 4 | from tests.helpers import dump_resource_change, get_examples_yml 5 | from titan.blueprint import Blueprint 6 | from titan.enums import ResourceType 7 | from titan.gitops import collect_blueprint_config 8 | 9 | EXAMPLES_YML = list(get_examples_yml()) 10 | VARS = { 11 | "for-each-example": { 12 | "schemas": [ 13 | "schema1", 14 | "schema2", 15 | "schema3", 16 | ] 17 | }, 18 | } 19 | 20 | 21 | @pytest.fixture( 22 | params=EXAMPLES_YML, 23 | ids=[example_name for example_name, _ in EXAMPLES_YML], 24 | scope="function", 25 | ) 26 | def example(request): 27 | example_name, example_content = request.param 28 | yield 
example_name, yaml.safe_load(example_content) 29 | 30 | 31 | @pytest.mark.enterprise 32 | @pytest.mark.requires_snowflake 33 | def test_example(example, cursor, marked_for_cleanup, blueprint_vars): 34 | example_name, example_content = example 35 | blueprint_vars = VARS.get(example_name, blueprint_vars) 36 | 37 | if example_name == "dbt-with-schema-access-role-tree": 38 | pytest.skip("Skipping until issues are resolved") 39 | 40 | cursor.execute("USE WAREHOUSE CI") 41 | blueprint_config = collect_blueprint_config(example_content.copy(), {"vars": blueprint_vars}) 42 | assert blueprint_config.resources is not None 43 | for resource in blueprint_config.resources: 44 | marked_for_cleanup.append(resource) 45 | blueprint = Blueprint.from_config(blueprint_config) 46 | plan = blueprint.plan(cursor.connection) 47 | cmds = blueprint.apply(cursor.connection, plan) 48 | assert cmds 49 | 50 | blueprint_config = collect_blueprint_config(example_content.copy(), {"vars": blueprint_vars}) 51 | blueprint = Blueprint.from_config(blueprint_config) 52 | plan = blueprint.plan(cursor.connection) 53 | unexpected_drift = [change for change in plan if not change_is_expected(change)] 54 | if len(unexpected_drift) > 0: 55 | debug = "\n".join([dump_resource_change(change) for change in unexpected_drift]) 56 | assert False, f"Unexpected drift:\n{debug}" 57 | 58 | 59 | def change_is_expected(change): 60 | return change.urn.resource_type == ResourceType.GRANT and change.after.get("priv", "") == "ALL" 61 | -------------------------------------------------------------------------------- /tests/integration/test_export.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from titan.identifiers import URN, parse_FQN 4 | from titan.operations.export import export_resources, _format_resource_config 5 | from titan.enums import ResourceType 6 | from titan.data_provider import fetch_resource 7 | 8 | pytestmark = pytest.mark.requires_snowflake 9 | 10 
| 11 | def test_export_all(cursor): 12 | assert export_resources(session=cursor.connection) 13 | 14 | 15 | def test_export_schema(cursor): 16 | urn = URN(ResourceType.SCHEMA, parse_FQN("STATIC_DATABASE.STATIC_SCHEMA", is_db_scoped=True)) 17 | resource = fetch_resource(cursor, urn) 18 | assert resource 19 | resource_cfg = _format_resource_config(urn, resource, ResourceType.SCHEMA) 20 | assert resource_cfg 21 | assert "database" in resource_cfg 22 | assert resource_cfg["database"] == "STATIC_DATABASE" 23 | -------------------------------------------------------------------------------- /tests/integration/test_update.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | 5 | from tests.helpers import safe_fetch 6 | from titan import lifecycle 7 | from titan import resources as res 8 | from titan.blueprint import Blueprint 9 | 10 | pytestmark = pytest.mark.requires_snowflake 11 | 12 | TEST_ROLE = os.environ.get("TEST_SNOWFLAKE_ROLE") 13 | 14 | 15 | def test_update_schema(cursor, test_db, marked_for_cleanup): 16 | sch = res.Schema(name="TEST_UPDATE_SCHEMA", database=test_db, max_data_extension_time_in_days=10) 17 | cursor.execute(sch.create_sql()) 18 | marked_for_cleanup.append(sch) 19 | result = safe_fetch(cursor, sch.urn) 20 | assert result is not None 21 | assert result["max_data_extension_time_in_days"] == 10 22 | cursor.execute(lifecycle.update_resource(sch.urn, {"max_data_extension_time_in_days": 9}, res.Schema.props)) 23 | result = safe_fetch(cursor, sch.urn) 24 | assert result is not None 25 | assert result["max_data_extension_time_in_days"] == 9 26 | 27 | 28 | def test_update_array_props(cursor, test_db, suffix, marked_for_cleanup): 29 | network_rule_data = { 30 | "name": f"network_rule_to_update_{suffix}", 31 | "type": "IPV4", 32 | "value_list": ["192.168.1.1"], 33 | "mode": "INGRESS", 34 | "comment": "Example network rule", 35 | "database": test_db, 36 | "schema": "PUBLIC", 37 | "owner": 
TEST_ROLE, 38 | } 39 | network_rule = res.NetworkRule(**network_rule_data) 40 | marked_for_cleanup.append(network_rule) 41 | cursor.execute(network_rule.create_sql()) 42 | result = safe_fetch(cursor, network_rule.urn) 43 | assert result is not None 44 | assert result["value_list"] == ["192.168.1.1"] 45 | 46 | network_rule_data["value_list"] = ["192.168.1.1", "192.168.1.2"] 47 | network_rule = res.NetworkRule(**network_rule_data) 48 | bp = Blueprint() 49 | bp.add(network_rule) 50 | plan = bp.plan(cursor.connection) 51 | assert len(plan) == 1 52 | bp.apply(cursor.connection, plan) 53 | result = safe_fetch(cursor, network_rule.urn) 54 | assert result is not None 55 | assert result["value_list"] == ["192.168.1.1", "192.168.1.2"] 56 | -------------------------------------------------------------------------------- /tests/test_adapters.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from titan.adapters import permifrost 4 | from titan.enums import ResourceType 5 | from titan.privs import DatabasePriv, WarehousePriv 6 | from titan.resources import Grant, RoleGrant 7 | from titan.resources.resource import ResourcePointer 8 | 9 | 10 | @pytest.mark.skip("skipping due to pending deprecation") 11 | @pytest.mark.requires_snowflake 12 | def test_permifrost(cursor): 13 | resources = permifrost.read_permifrost_config(cursor.connection, "tests/fixtures/adapters/permifrost.yml") 14 | assert ResourcePointer(name="loading", resource_type=ResourceType.WAREHOUSE) in resources 15 | assert Grant(priv=WarehousePriv.OPERATE, on_warehouse="loading", to="accountadmin") in resources 16 | assert RoleGrant(role="engineer", to_role="sysadmin") in resources 17 | assert ResourcePointer(name="raw", resource_type=ResourceType.DATABASE) in resources 18 | assert Grant(priv=DatabasePriv.USAGE, on_database="raw", to="sysadmin") in resources 19 | assert ResourcePointer(name="raw", resource_type=ResourceType.DATABASE) in resources 20 | assert 
RoleGrant(role="sysadmin", to_user="eburke") in resources 21 | assert RoleGrant(role="eburke", to_user="eburke") in resources 22 | -------------------------------------------------------------------------------- /tests/test_container_service.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from titan.blueprint import Blueprint 4 | from titan.resources import ( 5 | ComputePool, 6 | Database, 7 | ImageRepository, 8 | Role, 9 | Schema, 10 | SnowflakePartnerOAuthSecurityIntegration, 11 | Warehouse, 12 | ) 13 | 14 | 15 | def test_container_service(): 16 | "Test a container service in Titan end-to-end" 17 | 18 | bp = Blueprint() 19 | 20 | db = Database(name="container_test_db") 21 | sch = Schema(name="container_test_schema") 22 | db.add(sch) 23 | 24 | admin_role = Role(name="container_test_admin_role") 25 | wh = Warehouse(name="container_test_wh", auto_suspend=60, auto_resume=True) 26 | 27 | compute_pool = ComputePool( 28 | name="titan_app_compute_pool_test", min_nodes=1, max_nodes=1, instance_family="CPU_X64_XS" 29 | ) 30 | 31 | image_repository = ImageRepository(name="container_test_image_repo") 32 | 33 | bp.add(compute_pool, image_repository) 34 | 35 | # # Grant permissions 36 | # bp.grant_all_on_database("titan_db_test", "container_test_admin_role") 37 | # bp.grant_all_on_schema("titan_db_test.titan_app_test", "container_test_admin_role") 38 | # bp.grant_select_on_all_tables("titan_db_test.titan_app_test", "container_test_admin_role") 39 | # bp.grant_select_on_future_tables("titan_db_test.titan_app_test", "container_test_admin_role") 40 | 41 | # bp.grant_all_on_warehouse("container_test_wh", "container_test_admin_role") 42 | 43 | # bp.grant_usage_on_compute_pool("titan_app_compute_pool_test", "container_test_admin_role") 44 | # bp.grant_monitor_on_compute_pool("titan_app_compute_pool_test", "container_test_admin_role") 45 | 46 | # bp.grant_ownership_on_integration("Application Authentication Test", 
"container_test_admin_role") 47 | 48 | # bp.grant_bind_service_endpoint_on_account("container_test_admin_role") 49 | 50 | bp.add(db, admin_role, wh) 51 | -------------------------------------------------------------------------------- /tests/test_from_sql.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from titan import resources as res 4 | from titan.enums import ResourceType 5 | from titan.resources.resource import ResourcePointer 6 | 7 | 8 | def test_from_sql_fqn_parsing(): 9 | grant = res.Grant.from_sql('GRANT USAGE ON SCHEMA "My_databasE".my_schema TO ROLE my_role') 10 | assert isinstance(grant.to, ResourcePointer) 11 | assert grant.to.resource_type == ResourceType.ROLE 12 | assert grant.to.name == "my_role" 13 | assert grant.on == '"My_databasE".MY_SCHEMA' 14 | schema_ref = None 15 | for ref in grant.refs: 16 | if ref.resource_type == ResourceType.SCHEMA and ref.name == "my_schema": 17 | schema_ref = ref 18 | assert schema_ref 19 | assert schema_ref.container.name == '"My_databasE"' 20 | -------------------------------------------------------------------------------- /tests/test_parse_collection.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from titan.parse import format_collection_string, parse_collection_string 4 | 5 | 6 | # Test cases for parse_collection_string 7 | def test_parse_database_level(): 8 | assert parse_collection_string("SOME_DATABASE.") == { 9 | "in_name": "SOME_DATABASE", 10 | "in_type": "database", 11 | "on_type": "TABLE", 12 | } 13 | 14 | 15 | def test_parse_schema_level(): 16 | assert parse_collection_string("SOME_DATABASE.SOME_SCHEMA.") == { 17 | "in_name": "SOME_DATABASE.SOME_SCHEMA", 18 | "in_type": "schema", 19 | "on_type": "VIEW", 20 | } 21 | 22 | 23 | def test_parse_invalid_format(): 24 | with pytest.raises(ValueError): 25 | parse_collection_string("SOME_DATABASE") 26 | 27 | 28 | def 
test_parse_incorrect_brackets(): 29 | with pytest.raises(ValueError): 30 | parse_collection_string("SOME_DATABASE.<TABLE") 31 | 32 | 33 | # Test cases for format_collection_string 34 | def test_format_database_level(): 35 | collection_dict = {"in_name": "SOME_DATABASE", "in_type": "database", "on_type": "TABLE"} 36 | assert format_collection_string(collection_dict) == "SOME_DATABASE.<TABLE>" 37 | 38 | 39 | def test_format_schema_level(): 40 | collection_dict = {"in_name": "SOME_DATABASE.SOME_SCHEMA", "in_type": "schema", "on_type": "VIEW"} 41 | assert format_collection_string(collection_dict) == "SOME_DATABASE.SOME_SCHEMA.<VIEW>" 42 | -------------------------------------------------------------------------------- /tests/test_privs.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from titan.privs import PRIVS_FOR_RESOURCE_TYPE 4 | from titan.enums import ResourceType 5 | 6 | 7 | @pytest.mark.skip(reason="Needs to be adapted for pseudo-resources like external volume storage location") 8 | def test_resource_privs_is_complete(): 9 | for resource_type in ResourceType: 10 | assert resource_type in PRIVS_FOR_RESOURCE_TYPE, f"{resource_type} is missing from PRIVS_FOR_RESOURCE_TYPE" 11 | -------------------------------------------------------------------------------- /tests/test_resource_pointer.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import titan.resources as res 4 | 5 | from titan.enums import ResourceType 6 | from titan.resources.resource import ResourcePointer 7 | 8 | 9 | def test_fqn_construction(): 10 | ptr = ResourcePointer(name="my_db.my_schema.my_table", resource_type=ResourceType.TABLE) 11 | assert ptr.name == "my_table" 12 | assert ptr.container.name == "my_schema" 13 | assert ptr.container.container.name == "my_db" 14 | 15 | 16 | def test_resource_pointer_type_checking(): 17 | invalid_pointer = ResourcePointer(name="my_network_rule", resource_type=ResourceType.DATABASE) 18 | with pytest.raises(TypeError): 19 | res.NetworkPolicy(name="my_network_policy", allowed_network_rule_list=[invalid_pointer]) 20 | 21 | with pytest.raises(TypeError): 22 | res.NetworkPolicy(name="my_network_policy",
allowed_network_rule_list=[111]) 23 | -------------------------------------------------------------------------------- /tests/test_resource_refs.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from tests.helpers import get_json_fixtures 4 | from titan import resources as res 5 | from titan.enums import ResourceType 6 | from titan.resources.resource import convert_role_ref 7 | from titan.resources.resource import ResourcePointer 8 | 9 | JSON_FIXTURES = list(get_json_fixtures()) 10 | 11 | 12 | @pytest.fixture( 13 | params=JSON_FIXTURES, 14 | ids=[resource_cls.__name__ for resource_cls, _ in JSON_FIXTURES], 15 | scope="function", 16 | ) 17 | def json_fixture(request): 18 | resource_cls, data = request.param 19 | yield resource_cls, data 20 | 21 | 22 | def test_resource_requires(json_fixture): 23 | role = res.Role(name="dummy") 24 | resource_cls, data = json_fixture 25 | resource = resource_cls(**data) 26 | assert role.requires(resource) is None 27 | assert resource in role.refs 28 | 29 | 30 | def test_convert_role_ref(): 31 | role_ref = convert_role_ref("dummy") 32 | assert isinstance(role_ref, ResourcePointer) 33 | assert role_ref.resource_type == ResourceType.ROLE 34 | assert role_ref.name == "dummy" 35 | 36 | database_role_ref = convert_role_ref("dummy.database_role") 37 | assert isinstance(database_role_ref, ResourcePointer) 38 | assert database_role_ref.resource_type == ResourceType.DATABASE_ROLE 39 | assert database_role_ref.name == "database_role" 40 | assert database_role_ref.container.name == "dummy" 41 | 42 | role = res.Role(name="dummy") 43 | assert convert_role_ref(role) == role 44 | database_role = res.DatabaseRole(name="dummy", database="dummy_db") 45 | assert convert_role_ref(database_role) == database_role 46 | role_ptr = ResourcePointer(name="dummy", resource_type=ResourceType.ROLE) 47 | assert convert_role_ref(role_ptr) == role_ptr 48 | database_role_ptr = ResourcePointer(name="dummy", 
resource_type=ResourceType.DATABASE_ROLE) 49 | assert convert_role_ref(database_role_ptr) == database_role_ptr 50 | 51 | with pytest.raises(TypeError): 52 | convert_role_ref(None) 53 | 54 | with pytest.raises(TypeError): 55 | convert_role_ref(111) 56 | 57 | with pytest.raises(TypeError): 58 | convert_role_ref(ResourcePointer(name="dummy", resource_type=ResourceType.DATABASE)) 59 | -------------------------------------------------------------------------------- /tests/test_resource_rendering.py: -------------------------------------------------------------------------------- 1 | import titan.resources as res 2 | 3 | 4 | def test_resource_pointer_rendering(): 5 | db = res.Database(name="DB") 6 | schema = res.Schema(name="SCH") 7 | network_rule = res.NetworkRule( 8 | name="TITAN_TEST_NETWORK_RULE", 9 | type="IPV4", 10 | value_list=["85.83.225.229"], 11 | mode="INGRESS", 12 | database=db, 13 | schema=schema, 14 | ) 15 | 16 | network_policy = res.NetworkPolicy( 17 | name="TITAN_TEST_NETWORK_POLICY", 18 | allowed_network_rule_list=[network_rule], 19 | blocked_network_rule_list=None, 20 | allowed_ip_list=None, 21 | blocked_ip_list=None, 22 | database=db, 23 | schema=schema, 24 | ) 25 | rendered = network_policy.create_sql() 26 | assert ( 27 | rendered 28 | == "CREATE NETWORK POLICY TITAN_TEST_NETWORK_POLICY ALLOWED_NETWORK_RULE_LIST = (DB.SCH.TITAN_TEST_NETWORK_RULE)" 29 | ) 30 | -------------------------------------------------------------------------------- /tests/test_vars.py: -------------------------------------------------------------------------------- 1 | from titan import resources as res 2 | from titan import var 3 | from titan.var import VarString 4 | 5 | 6 | def test_blueprint_vars_comparison_with_system_names(): 7 | database = res.Database(name=var.database_name) 8 | assert isinstance(database.name, VarString) 9 | 10 | schema = res.Schema(name=var.schema_name) 11 | assert isinstance(schema.name, VarString) 12 | 13 | 14 | def test_vars_in_owner(): 15 | schema 
= res.Schema(name="schema", owner="role_{{ var.role_name }}") 16 | assert isinstance(schema._data.owner, VarString) 17 | 18 | 19 | def test_vars_database_role(): 20 | role = res.DatabaseRole(name="role_{{ var.role_name }}", database="db_{{ var.db_name }}") 21 | assert isinstance(role._data.name, VarString) 22 | assert isinstance(role._data.database, VarString) 23 | -------------------------------------------------------------------------------- /titan/__init__.py: -------------------------------------------------------------------------------- 1 | import logging.config 2 | 3 | # __version__ = open("version.md", encoding="utf-8").read().split(" ")[2] 4 | 5 | from .blueprint import Blueprint 6 | from .resources import * # noqa: F403 7 | 8 | logger = logging.getLogger("titan") 9 | 10 | 11 | __all__ = [ 12 | "Blueprint", 13 | ] 14 | 15 | LOGO = r""" 16 | __ _ __ 17 | / /_(_) /____ ___ 18 | / __/ / __/ _ `/ _ \ 19 | \__/_/\__/\_,_/_//_/ 20 | 21 | 22 | """.strip( 23 | "\n" 24 | ) 25 | -------------------------------------------------------------------------------- /titan/__main__.py: -------------------------------------------------------------------------------- 1 | from titan.cli import titan_cli 2 | 3 | 4 | if __name__ == "__main__": 5 | titan_cli() 6 | -------------------------------------------------------------------------------- /titan/adapters/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/titan/adapters/__init__.py -------------------------------------------------------------------------------- /titan/adapters/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/titan/adapters/py.typed -------------------------------------------------------------------------------- /titan/api.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/titan/api.py -------------------------------------------------------------------------------- /titan/builder.py: -------------------------------------------------------------------------------- 1 | def tidy_sql(*parts): 2 | if isinstance(parts[0], list): 3 | parts = parts[0] 4 | return " ".join([str(part) for part in parts if part != "" and part is not None]) 5 | -------------------------------------------------------------------------------- /titan/builtins.py: -------------------------------------------------------------------------------- 1 | SYSTEM_DATABASES = [ 2 | "SNOWFLAKE", 3 | "WORKSHEETS_APP", 4 | ] 5 | 6 | SYSTEM_SCHEMAS = [ 7 | "PUBLIC", 8 | "INFORMATION_SCHEMA", 9 | ] 10 | 11 | SYSTEM_ROLES = [ 12 | "ACCOUNTADMIN", 13 | "ORGADMIN", 14 | "PUBLIC", 15 | "SECURITYADMIN", 16 | "SYSADMIN", 17 | "USERADMIN", 18 | ] 19 | 20 | SYSTEM_USERS = [ 21 | "SNOWFLAKE", 22 | ] 23 | 24 | SYSTEM_SECURITY_INTEGRATIONS = [ 25 | "APPLICA", 26 | ] 27 | -------------------------------------------------------------------------------- /titan/data_types.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Union 2 | 3 | from .enums import DataType 4 | 5 | NUMBER_TYPES = ("NUMBER", "DECIMAL", "DEC", "NUMERIC", "INT", "INTEGER", "BIGINT", "SMALLINT", "TINYINT", "BYTEINT") 6 | FLOAT_TYPES = ("FLOAT", "FLOAT4", "FLOAT8", "REAL", "DOUBLE", "DOUBLE PRECISION", "REAL") 7 | VARCHAR_TYPES = ("VARCHAR", "STRING", "TEXT", "NVARCHAR", "NVARCHAR2", "CHAR VARYING", "NCHAR VARYING") 8 | 9 | 10 | def convert_to_canonical_data_type(data_type: Union[str, DataType, None]) -> Optional[str]: 11 | if data_type is None: 12 | return None 13 | if isinstance(data_type, DataType): 14 | data_type = str(data_type) 15 | data_type = data_type.upper() 16 | if data_type in 
NUMBER_TYPES: 17 | return "NUMBER(38,0)" 18 | if data_type in FLOAT_TYPES: 19 | return "FLOAT" 20 | if data_type in ("BOOLEAN", "BOOL"): 21 | return "BOOLEAN" 22 | if data_type in VARCHAR_TYPES: 23 | return "VARCHAR(16777216)" 24 | if data_type in ("CHAR", "CHARACTER", "NCHAR"): 25 | return "VARCHAR(1)" 26 | if data_type in ("BINARY", "VARBINARY"): 27 | return "BINARY(8388608)" 28 | if data_type in ("DATETIME", "TIMESTAMP", "TIMESTAMP_NTZ"): 29 | return "TIMESTAMP_NTZ(9)" 30 | if data_type in ("TIMESTAMP_LTZ", "TIMESTAMP_TZ"): 31 | return "TIMESTAMP_LTZ(9)" 32 | if data_type in ("TIME"): 33 | return "TIME(9)" 34 | return data_type 35 | 36 | 37 | def convert_to_simple_data_type(data_type: str) -> str: 38 | if data_type in NUMBER_TYPES: 39 | return "NUMBER" 40 | if data_type in FLOAT_TYPES: 41 | return "FLOAT" 42 | if data_type in ("BOOLEAN", "BOOL"): 43 | return "BOOLEAN" 44 | if data_type in VARCHAR_TYPES: 45 | return "VARCHAR" 46 | return data_type 47 | -------------------------------------------------------------------------------- /titan/exceptions.py: -------------------------------------------------------------------------------- 1 | class MissingVarException(Exception): 2 | pass 3 | 4 | 5 | class DuplicateResourceException(Exception): 6 | pass 7 | 8 | 9 | class MissingResourceException(Exception): 10 | pass 11 | 12 | 13 | class MissingPrivilegeException(Exception): 14 | pass 15 | 16 | 17 | class MarkedForReplacementException(Exception): 18 | pass 19 | 20 | 21 | class NonConformingPlanException(Exception): 22 | pass 23 | 24 | 25 | class ResourceInsertionException(Exception): 26 | pass 27 | 28 | 29 | class OrphanResourceException(Exception): 30 | pass 31 | 32 | 33 | class InvalidOwnerException(Exception): 34 | pass 35 | 36 | 37 | class InvalidResourceException(Exception): 38 | pass 39 | 40 | 41 | class WrongContainerException(Exception): 42 | pass 43 | 44 | 45 | class WrongEditionException(Exception): 46 | pass 47 | 48 | 49 | class 
ResourceHasContainerException(Exception): 50 | pass 51 | 52 | 53 | class NotADAGException(Exception): 54 | pass 55 | -------------------------------------------------------------------------------- /titan/operations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/titan/operations/__init__.py -------------------------------------------------------------------------------- /titan/operations/blueprint.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from titan.blueprint import Blueprint 4 | from titan.blueprint import plan_from_dict 5 | from titan.blueprint_config import BlueprintConfig 6 | 7 | from titan.gitops import collect_blueprint_config 8 | from titan.operations.connector import connect 9 | 10 | 11 | def blueprint_plan(yaml_config: dict, cli_config: dict[str, Any]): 12 | blueprint_config = collect_blueprint_config(yaml_config, cli_config) 13 | blueprint = Blueprint.from_config(blueprint_config) 14 | session = connect() 15 | plan_obj = blueprint.plan(session) 16 | return plan_obj 17 | 18 | 19 | def blueprint_apply(yaml_config: dict, cli_config: dict): 20 | blueprint_config = collect_blueprint_config(yaml_config, cli_config) 21 | blueprint = Blueprint.from_config(blueprint_config) 22 | session = connect() 23 | blueprint.apply(session) 24 | 25 | 26 | def blueprint_apply_plan(plan_dict: dict, cli_config: dict): 27 | blueprint_config = BlueprintConfig(**cli_config) 28 | blueprint = Blueprint.from_config(blueprint_config) 29 | plan = plan_from_dict(plan_dict) 30 | session = connect() 31 | blueprint.apply(session, plan) 32 | -------------------------------------------------------------------------------- /titan/parse_primitives.py: -------------------------------------------------------------------------------- 1 | import pyparsing as pp 2 | 3 | Identifier = 
pp.Word(pp.alphanums + "_", pp.alphanums + "_$") | pp.dbl_quoted_string 4 | FullyQualifiedIdentifier = ( 5 | pp.delimited_list(Identifier, delim=".", min=4, max=4) 6 | ^ pp.delimited_list(Identifier, delim=".", min=3, max=3) 7 | ^ pp.delimited_list(Identifier, delim=".", min=2, max=2) 8 | ^ Identifier 9 | ) 10 | -------------------------------------------------------------------------------- /titan/policy.py: -------------------------------------------------------------------------------- 1 | """ 2 | Policies are used to enforce constraints over a Titan project. 3 | 4 | All Titan projects use the Titan Standard Policy by default 5 | """ 6 | 7 | from enum import Enum 8 | from typing import Callable 9 | 10 | 11 | class EnforcementLevel(Enum): 12 | ADVISORY = "ADVISORY" 13 | MANDATORY = "MANDATORY" 14 | 15 | 16 | class Policy: 17 | def __init__(self, name: str, description: str, enforcement_level: str, validate: Callable): 18 | self.name = name 19 | self.description = description 20 | self.enforcement_level = enforcement_level 21 | self.validate = validate 22 | 23 | 24 | class OwnershipPolicy(Policy): 25 | pass 26 | 27 | 28 | class PolicyPack: 29 | def __init__(self, name: str, policies): 30 | self.name = name 31 | self.policies = policies 32 | -------------------------------------------------------------------------------- /titan/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/titan/py.typed -------------------------------------------------------------------------------- /titan/resource_tags.py: -------------------------------------------------------------------------------- 1 | from typing import Mapping 2 | 3 | 4 | class ResourceTags(Mapping): 5 | def __init__(self, tags: dict[str, str]): 6 | self.tags: dict[str, str] = {} 7 | if isinstance(tags, ResourceTags): 8 | tags = tags.to_dict() 9 | for key, value in tags.items(): 10 | 
self[key] = value 11 | 12 | def __setitem__(self, key, value): 13 | if not isinstance(key, str) or not isinstance(value, str): 14 | raise ValueError("Keys and values must be strings.") 15 | if len(key) > 256: 16 | raise ValueError("Keys cannot be longer than 256 characters.") 17 | if len(self.tags) >= 50: 18 | raise ValueError("Cannot have more than 50 key-value pairs.") 19 | self.tags[key] = value 20 | 21 | def __getitem__(self, key): 22 | return self.tags[key] 23 | 24 | def __hash__(self): 25 | return hash(frozenset(self.tags.items())) 26 | 27 | def __len__(self): 28 | return len(self.tags) 29 | 30 | def __iter__(self): 31 | return self.tags.__iter__() 32 | 33 | def to_dict(self): 34 | return self.tags.copy() 35 | 36 | def tag_names(self): 37 | return self.tags.keys() 38 | 39 | def items(self): 40 | return self.tags.items() 41 | 42 | def keys(self): 43 | return self.tags.keys() 44 | 45 | def values(self): 46 | return self.tags.values() 47 | -------------------------------------------------------------------------------- /titan/resources/account.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | from ..enums import AccountEdition, ResourceType 4 | from ..props import Props 5 | from ..resource_name import ResourceName 6 | from ..scope import OrganizationScope 7 | from .resource import NamedResource, Resource, ResourceContainer, ResourceSpec 8 | 9 | 10 | @dataclass(unsafe_hash=True) 11 | class _Account(ResourceSpec): 12 | name: ResourceName 13 | locator: str 14 | edition: AccountEdition = None 15 | region: str = None 16 | comment: str = None 17 | 18 | 19 | class Account(NamedResource, Resource, ResourceContainer): 20 | """ 21 | CREATE ACCOUNT 22 | ADMIN_NAME = 23 | { ADMIN_PASSWORD = '' | ADMIN_RSA_PUBLIC_KEY = } 24 | [ FIRST_NAME = ] 25 | [ LAST_NAME = ] 26 | EMAIL = '' 27 | [ MUST_CHANGE_PASSWORD = { TRUE | FALSE } ] 28 | EDITION = { STANDARD | ENTERPRISE | BUSINESS_CRITICAL } 29 | [ 
REGION_GROUP = ] 30 | [ REGION = ] 31 | [ COMMENT = '' ] 32 | """ 33 | 34 | resource_type = ResourceType.ACCOUNT 35 | props = Props() 36 | scope = OrganizationScope() 37 | spec = _Account 38 | 39 | def __init__( 40 | self, 41 | name: str, 42 | locator: str, 43 | edition: AccountEdition = None, 44 | comment: str = None, 45 | **kwargs, 46 | ): 47 | super().__init__(name="ACCOUNT", **kwargs) 48 | self._data: _Account = _Account( 49 | name=self._name, 50 | locator=locator, 51 | edition=edition, 52 | comment=comment, 53 | ) 54 | -------------------------------------------------------------------------------- /titan/resources/account_parameter.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Any 3 | 4 | from ..enums import ResourceType 5 | from ..parse import parse_alter_account_parameter 6 | from ..props import Props 7 | from ..resource_name import ResourceName 8 | from ..scope import AccountScope 9 | from .resource import NamedResource, Resource, ResourceSpec 10 | 11 | 12 | @dataclass(unsafe_hash=True) 13 | class _AccountParameter(ResourceSpec): 14 | name: ResourceName 15 | value: Any 16 | 17 | 18 | class AccountParameter(NamedResource, Resource): 19 | """ 20 | Description: 21 | An account parameter in Snowflake that allows you to set or alter account-level parameters. 22 | 23 | Snowflake Docs: 24 | https://docs.snowflake.com/en/sql-reference/sql/alter-account 25 | 26 | Fields: 27 | name (string, required): The name of the account parameter. 28 | value (Any, required): The value to set for the account parameter. 
29 | 30 | Python: 31 | 32 | ```python 33 | account_parameter = AccountParameter( 34 | name="some_parameter", 35 | value="some_value", 36 | ) 37 | ``` 38 | 39 | Yaml: 40 | 41 | ```yaml 42 | account_parameters: 43 | - name: some_parameter 44 | value: some_value 45 | ``` 46 | """ 47 | 48 | resource_type = ResourceType.ACCOUNT_PARAMETER 49 | props = Props() 50 | scope = AccountScope() 51 | spec = _AccountParameter 52 | 53 | def __init__(self, name: str, value: Any, **kwargs): 54 | super().__init__(name=name, **kwargs) 55 | self._data: _AccountParameter = _AccountParameter( 56 | name=self._name, 57 | value=value, 58 | ) 59 | 60 | @classmethod 61 | def from_sql(cls, sql): 62 | props = parse_alter_account_parameter(sql) 63 | return cls(**props) 64 | -------------------------------------------------------------------------------- /titan/resources/image_repository.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | from ..enums import AccountEdition, ResourceType 4 | from ..props import Props 5 | from ..resource_name import ResourceName 6 | from ..role_ref import RoleRef 7 | from ..scope import SchemaScope 8 | from .resource import NamedResource, Resource, ResourceSpec 9 | 10 | 11 | @dataclass(unsafe_hash=True) 12 | class _ImageRepository(ResourceSpec): 13 | name: ResourceName 14 | owner: RoleRef = "SYSADMIN" 15 | 16 | 17 | class ImageRepository(NamedResource, Resource): 18 | """ 19 | Description: 20 | An image repository in Snowflake is a storage unit within a schema that allows for the management of OCIv2-compliant container images. 21 | 22 | Snowflake Docs: 23 | https://docs.snowflake.com/en/sql-reference/sql/create-image-repository 24 | 25 | Fields: 26 | name (string, required): The unique identifier for the image repository within the schema. 27 | owner (string or Role): The owner role of the image repository. Defaults to "SYSADMIN". 
28 | 29 | Python: 30 | 31 | ```python 32 | image_repository = ImageRepository( 33 | name="some_image_repository", 34 | ) 35 | ``` 36 | 37 | Yaml: 38 | 39 | ```yaml 40 | image_repositories: 41 | - name: some_image_repository 42 | ``` 43 | """ 44 | 45 | edition = {AccountEdition.ENTERPRISE, AccountEdition.BUSINESS_CRITICAL} 46 | resource_type = ResourceType.IMAGE_REPOSITORY 47 | props = Props() 48 | scope = SchemaScope() 49 | spec = _ImageRepository 50 | 51 | def __init__( 52 | self, 53 | name: str, 54 | owner: str = "SYSADMIN", 55 | **kwargs, 56 | ): 57 | super().__init__(name, **kwargs) 58 | self._data: _ImageRepository = _ImageRepository( 59 | name=self._name, 60 | owner=owner, 61 | ) 62 | -------------------------------------------------------------------------------- /titan/resources/masking_policy.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | from titan.enums import AccountEdition, ResourceType 4 | from titan.props import Props, StringProp, BoolProp, ArgsProp, ReturnsProp, QueryProp 5 | from titan.scope import SchemaScope 6 | from titan.resource_name import ResourceName 7 | from titan.resources.resource import Arg, NamedResource, Resource, ResourceSpec 8 | from titan.role_ref import RoleRef 9 | from titan.data_types import convert_to_canonical_data_type 10 | 11 | 12 | @dataclass(unsafe_hash=True) 13 | class _MaskingPolicy(ResourceSpec): 14 | name: ResourceName 15 | args: list[Arg] 16 | returns: str 17 | body: str 18 | comment: str = None 19 | exempt_other_policies: bool = False 20 | owner: RoleRef = "SYSADMIN" 21 | 22 | def __post_init__(self): 23 | super().__post_init__() 24 | if len(self.args) == 0: 25 | raise ValueError("At least one argument is required") 26 | self.returns = convert_to_canonical_data_type(self.returns) 27 | 28 | 29 | class MaskingPolicy(NamedResource, Resource): 30 | edition = {AccountEdition.ENTERPRISE, AccountEdition.BUSINESS_CRITICAL} 31 | resource_type 
= ResourceType.MASKING_POLICY 32 | props = Props( 33 | args=ArgsProp(), 34 | returns=ReturnsProp("returns", eq=False), 35 | body=QueryProp("->"), 36 | comment=StringProp("comment"), 37 | exempt_other_policies=BoolProp("exempt_other_policies"), 38 | ) 39 | scope = SchemaScope() 40 | spec = _MaskingPolicy 41 | 42 | def __init__( 43 | self, 44 | name: str, 45 | args: list[dict], 46 | returns: str, 47 | body: str, 48 | comment: str = None, 49 | exempt_other_policies: bool = False, 50 | owner: str = "SYSADMIN", 51 | **kwargs, 52 | ): 53 | super().__init__(name, **kwargs) 54 | self._data: _MaskingPolicy = _MaskingPolicy( 55 | name=self._name, 56 | args=args, 57 | returns=returns, 58 | body=body, 59 | comment=comment, 60 | exempt_other_policies=exempt_other_policies, 61 | owner=owner, 62 | ) 63 | -------------------------------------------------------------------------------- /titan/resources/notebook.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, field 2 | 3 | from ..enums import ResourceType 4 | from ..props import ( 5 | IdentifierProp, 6 | Props, 7 | StringProp, 8 | TagsProp, 9 | ) 10 | from ..resource_name import ResourceName 11 | from ..scope import SchemaScope 12 | from .resource import NamedResource, Resource, ResourceSpec 13 | from ..role_ref import RoleRef 14 | from .warehouse import Warehouse 15 | 16 | # TODO: I can't get version to work at all with Snowflake, I suspect it's buggy. 
17 | 18 | 19 | @dataclass(unsafe_hash=True) 20 | class _Notebook(ResourceSpec): 21 | name: ResourceName 22 | # version: str = None 23 | from_: str = field(default=None, metadata={"fetchable": False}) 24 | main_file: str = None 25 | comment: str = None 26 | default_version: str = None 27 | query_warehouse: Warehouse = None 28 | owner: RoleRef = "SYSADMIN" 29 | 30 | 31 | class Notebook(NamedResource, Resource): 32 | resource_type = ResourceType.NOTEBOOK 33 | props = Props( 34 | # version=StringProp("version", eq=False), 35 | from_=StringProp("from", eq=False), 36 | main_file=StringProp("main_file"), 37 | comment=StringProp("comment"), 38 | default_version=StringProp("default_version"), 39 | query_warehouse=IdentifierProp("query_warehouse"), 40 | tags=TagsProp(), 41 | ) 42 | scope = SchemaScope() 43 | spec = _Notebook 44 | 45 | def __init__( 46 | self, 47 | name: str, 48 | # version: str = None, 49 | from_: str = None, 50 | main_file: str = None, 51 | comment: str = None, 52 | default_version: str = None, 53 | query_warehouse: str = None, 54 | owner: str = "SYSADMIN", 55 | **kwargs, 56 | ): 57 | super().__init__(name, **kwargs) 58 | self._data: _Notebook = _Notebook( 59 | name=self._name, 60 | # version=version, 61 | from_=from_, 62 | main_file=main_file, 63 | comment=comment, 64 | default_version=default_version, 65 | query_warehouse=query_warehouse, 66 | owner=owner, 67 | ) 68 | -------------------------------------------------------------------------------- /titan/resources/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/titan/resources/py.typed -------------------------------------------------------------------------------- /titan/resources/scanner_package.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | from ..enums import ResourceType 4 | from 
..props import ( 5 | Props, 6 | ) 7 | from ..resource_name import ResourceName 8 | from ..scope import AccountScope 9 | from .resource import NamedResource, Resource, ResourceSpec 10 | 11 | 12 | @dataclass(unsafe_hash=True) 13 | class _ScannerPackage(ResourceSpec): 14 | name: ResourceName 15 | enabled: bool = True 16 | schedule: str = "0 0 * * * UTC" 17 | 18 | def __post_init__(self): 19 | super().__post_init__() 20 | if self.name == "SECURITY_ESSENTIALS": 21 | raise ValueError("SECURITY_ESSENTIALS is a system scanner package and cannot be used") 22 | 23 | 24 | class ScannerPackage(NamedResource, Resource): 25 | 26 | resource_type = ResourceType.SCANNER_PACKAGE 27 | props = Props() 28 | scope = AccountScope() 29 | spec = _ScannerPackage 30 | 31 | def __init__( 32 | self, 33 | name: str, 34 | enabled: bool = True, 35 | schedule: str = "0 0 * * * UTC", 36 | **kwargs, 37 | ): 38 | super().__init__(name, **kwargs) 39 | self._data: _ScannerPackage = _ScannerPackage( 40 | name=self._name, 41 | enabled=enabled, 42 | schedule=schedule, 43 | ) 44 | -------------------------------------------------------------------------------- /titan/resources/share.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | from ..enums import ResourceType 4 | from ..props import ( 5 | Props, 6 | StringProp, 7 | ) 8 | from ..resource_name import ResourceName 9 | from ..scope import AccountScope 10 | from .resource import NamedResource, Resource, ResourceSpec 11 | from .role import Role 12 | 13 | 14 | @dataclass(unsafe_hash=True) 15 | class _Share(ResourceSpec): 16 | name: ResourceName 17 | owner: Role = "ACCOUNTADMIN" 18 | comment: str = None 19 | 20 | 21 | class Share(NamedResource, Resource): 22 | """ 23 | Description: 24 | Represents a share resource in Snowflake, which allows sharing data across Snowflake accounts. 
25 | 26 | Snowflake Docs: 27 | https://docs.snowflake.com/en/sql-reference/sql/create-share 28 | 29 | Fields: 30 | name (string, required): The name of the share. 31 | owner (string or Role): The owner of the share. Defaults to "ACCOUNTADMIN". 32 | comment (string): A comment about the share. 33 | 34 | Python: 35 | 36 | ```python 37 | share = Share( 38 | name="some_share", 39 | comment="This is a snowflake share." 40 | ) 41 | ``` 42 | 43 | Yaml: 44 | 45 | ```yaml 46 | shares: 47 | - name: some_share 48 | comment: This is a snowflake share. 49 | ``` 50 | """ 51 | 52 | resource_type = ResourceType.SHARE 53 | props = Props( 54 | comment=StringProp("comment"), 55 | ) 56 | scope = AccountScope() 57 | spec = _Share 58 | 59 | def __init__( 60 | self, 61 | name: str, 62 | owner: str = "ACCOUNTADMIN", 63 | comment: str = None, 64 | **kwargs, 65 | ): 66 | super().__init__(name, **kwargs) 67 | self._data = _Share( 68 | name=self._name, 69 | owner=owner, 70 | comment=comment, 71 | ) 72 | -------------------------------------------------------------------------------- /titan/resources/shared_database.py: -------------------------------------------------------------------------------- 1 | # from dataclasses import dataclass 2 | 3 | # from .resource import Resource, ResourceSpec 4 | # from ..enums import ResourceType 5 | # from ..scope import AccountScope 6 | # from ..props import Props, IdentifierProp 7 | 8 | 9 | # @dataclass(unsafe_hash=True) 10 | # class _SharedDatabase(ResourceSpec): 11 | # name: str 12 | # from_share: str 13 | # owner: str = "ACCOUNTADMIN" 14 | 15 | 16 | # class SharedDatabase(Resource): 17 | # """ 18 | # CREATE DATABASE FROM SHARE . 
19 | # """ 20 | 21 | # resource_type = ResourceType.DATABASE 22 | # props = Props( 23 | # from_share=IdentifierProp("from share", eq=False), 24 | # ) 25 | # scope = AccountScope() 26 | # spec = _SharedDatabase 27 | 28 | # def __init__( 29 | # self, 30 | # name: str, 31 | # from_share: str, 32 | # owner: str = "ACCOUNTADMIN", 33 | # **kwargs, 34 | # ): 35 | # super().__init__(**kwargs) 36 | # self._data = _SharedDatabase( 37 | # name=name, 38 | # from_share=from_share, 39 | # owner=owner, 40 | # ) 41 | -------------------------------------------------------------------------------- /titan/role_ref.py: -------------------------------------------------------------------------------- 1 | from typing import TYPE_CHECKING, Union 2 | 3 | from .var import VarString 4 | 5 | if TYPE_CHECKING: 6 | from titan.resources.role import DatabaseRole, Role 7 | 8 | RoleRef = Union["Role", "DatabaseRole", VarString, str] 9 | -------------------------------------------------------------------------------- /titan/scope.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | 3 | from .identifiers import FQN 4 | from .resource_name import ResourceName 5 | 6 | 7 | class ResourceScope(ABC): 8 | def fully_qualified_name(self, container, resource_name: ResourceName) -> FQN: 9 | raise NotImplementedError 10 | 11 | 12 | class OrganizationScope(ResourceScope): 13 | def fully_qualified_name(self, _, resource_name: ResourceName) -> FQN: 14 | return FQN(name=resource_name) 15 | 16 | 17 | class AccountScope(ResourceScope): 18 | def fully_qualified_name(self, _, resource_name: ResourceName) -> FQN: 19 | return FQN(name=resource_name) 20 | 21 | 22 | class DatabaseScope(ResourceScope): 23 | def fully_qualified_name(self, database, resource_name: ResourceName) -> FQN: 24 | db = database.name if database else None 25 | return FQN(name=resource_name, database=db) 26 | 27 | 28 | class SchemaScope(ResourceScope): 29 | def fully_qualified_name(self, 
schema, resource_name: ResourceName) -> FQN: 30 | db, sch = None, None 31 | if schema: 32 | db = schema.container.name if schema.container else None 33 | sch = schema.name if schema else None 34 | return FQN(name=resource_name, database=db, schema=sch) 35 | 36 | 37 | class TableScope(ResourceScope): 38 | def fully_qualified_name(self, _, resource_name: ResourceName): 39 | raise NotImplementedError 40 | # return FQN( 41 | # name=resource_name.upper(), 42 | # database=self.database_name, 43 | # schema=self.schema_name, 44 | # table=self.table_name, 45 | # ) 46 | 47 | 48 | class AnonymousScope(ResourceScope): 49 | def fully_qualified_name(self, _, resource_name: ResourceName) -> FQN: 50 | return FQN(name=resource_name) 51 | 52 | 53 | def resource_can_be_contained_in(resource, container): 54 | container_type = container.__class__.__name__ 55 | if container_type == "ResourcePointer": 56 | container_type = container.resource_type.value.title() 57 | if ( 58 | (isinstance(resource.scope, AccountScope) and container_type == "Account") 59 | or (isinstance(resource.scope, DatabaseScope) and container_type == "Database") 60 | or (isinstance(resource.scope, SchemaScope) and container_type == "Schema") 61 | ): 62 | return True 63 | return False 64 | -------------------------------------------------------------------------------- /titan/sql.py: -------------------------------------------------------------------------------- 1 | _refs = [] 2 | track_ref = _refs.append 3 | 4 | 5 | def capture_refs(): 6 | global _refs 7 | refs = list(_refs) 8 | _refs.clear() 9 | return refs 10 | 11 | 12 | def raise_if_hanging_refs(): 13 | global _refs 14 | if _refs: 15 | raise Exception(f"Hanging refs: {_refs}") 16 | 17 | 18 | class SQL: 19 | def __init__(self, sql: str): 20 | self.refs = capture_refs() 21 | self.sql = sql 22 | -------------------------------------------------------------------------------- /titan/titan.code-workspace: 
-------------------------------------------------------------------------------- 1 | { 2 | "folders": [ 3 | { 4 | "path": ".." 5 | } 6 | ], 7 | "settings": { 8 | "files.exclude": { 9 | "**/.venv": true, 10 | "**/build": true 11 | }, 12 | "search.exclude": { 13 | "**/.venv": true, 14 | "**/build": true 15 | }, 16 | "python.analysis.exclude": [ 17 | "**/.venv", 18 | "**/build" 19 | ], 20 | "cSpell.enabled": true 21 | } 22 | } -------------------------------------------------------------------------------- /titan/var.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | import jinja2.exceptions 4 | from jinja2 import Environment, StrictUndefined 5 | 6 | from .exceptions import MissingVarException 7 | 8 | GLOBAL_JINJA_ENV = Environment(undefined=StrictUndefined) 9 | 10 | 11 | class VarString: 12 | def __init__(self, string: str): 13 | self.string = string 14 | 15 | def to_string(self, vars: dict): 16 | try: 17 | return GLOBAL_JINJA_ENV.from_string(self.string).render(var=vars) 18 | except jinja2.exceptions.UndefinedError: 19 | raise MissingVarException(f"Missing var: {self.string}") 20 | 21 | def __eq__(self, other: Any): 22 | return False 23 | 24 | def __repr__(self): 25 | return f"VarString({self.string})" 26 | 27 | 28 | class VarStub(dict): 29 | def __missing__(self, key) -> str: 30 | # Return the string "{{ var.key }}" if the key is not found 31 | return f"{{{{ var.{key} }}}}" 32 | 33 | 34 | def __getattr__(name) -> VarString: 35 | # This function will be called when an attribute is not found in the module 36 | # You can implement your logic here to return dynamic properties 37 | return VarString("{{var." 
+ name + "}}") 38 | 39 | 40 | def string_contains_var(string: str) -> bool: 41 | return "{{" in string and "}}" in string 42 | 43 | 44 | def process_for_each(resource_value: str, each_value: str) -> str: 45 | vars = VarStub() 46 | return GLOBAL_JINJA_ENV.from_string(resource_value).render(var=vars, each={"value": each_value}) 47 | -------------------------------------------------------------------------------- /tools/benchmark_export.py: -------------------------------------------------------------------------------- 1 | from titan.operations.export import export_resources 2 | 3 | 4 | def main(): 5 | export_resources() 6 | 7 | 8 | if __name__ == "__main__": 9 | main() 10 | -------------------------------------------------------------------------------- /tools/detect_privs.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import snowflake.connector 4 | 5 | from titan.data_provider import _show_grants_to_role 6 | from titan.builtins import SYSTEM_ROLES 7 | from titan.privs import SchemaPriv 8 | 9 | connection_params = { 10 | "account": os.environ["SNOWFLAKE_ACCOUNT"], 11 | "user": os.environ["SNOWFLAKE_USER"], 12 | "password": os.environ["SNOWFLAKE_PASSWORD"], 13 | "role": os.environ["SNOWFLAKE_ROLE"], 14 | } 15 | 16 | 17 | def main(): 18 | conn = snowflake.connector.connect(**connection_params) 19 | 20 | grant_owner_pairs = [] 21 | for role in SYSTEM_ROLES: 22 | grants = _show_grants_to_role(conn, role) 23 | if grants is None: 24 | continue 25 | for grant in grants: 26 | if grant["grant_option"] == "true" and grant["granted_on"] == "ACCOUNT" and grant["granted_by"] == "": 27 | grant_owner_pairs.append((role, grant["privilege"])) 28 | grant_owner_pairs.sort() 29 | print("GLOBAL_PRIV_DEFAULT_OWNERS = {") 30 | for role, priv in grant_owner_pairs: 31 | priv_enum = priv.replace(" ", "_").upper() 32 | print(f' AccountPriv.{priv_enum}: "{role}",') 33 | print("}") 34 | 35 | current_schema_privs = [e.value for e in 
def main(resource_name):
    """Scaffold the files for a new resource: a stub source module under
    titan/resources plus matching SQL and JSON test fixtures.

    NOTE(review): parent directories are assumed to already exist — open()
    will fail otherwise.
    """
    scaffold = [
        (
            os.path.join(REPO_ROOT, "titan", "resources", f"{resource_name}.py"),
            "# This is the resource file for " + resource_name,
        ),
        (
            os.path.join(REPO_ROOT, "tests", "fixtures", "sql", f"{resource_name}.sql"),
            "-- SQL fixture for " + resource_name,
        ),
        (
            os.path.join(REPO_ROOT, "tests", "fixtures", "json", f"{resource_name}.json"),
            "",
        ),
    ]
    for path, contents in scaffold:
        with open(path, "w") as f:
            f.write(contents)
def get_connection(env_vars):
    """Open a Snowflake connection as ACCOUNTADMIN using credentials from env_vars.

    `env_vars` must supply SNOWFLAKE_ACCOUNT, SNOWFLAKE_USER and
    SNOWFLAKE_PASSWORD; the role is always forced to ACCOUNTADMIN.
    """
    connection_params = {
        "account": env_vars["SNOWFLAKE_ACCOUNT"],
        "user": env_vars["SNOWFLAKE_USER"],
        "password": env_vars["SNOWFLAKE_PASSWORD"],
        "role": "ACCOUNTADMIN",
    }
    return snowflake.connector.connect(**connection_params)
AGGREGATION POLICY ON ACCOUNT TO ROLE EVERY_PRIVILEGE 7 | - GRANT APPLY PROJECTION POLICY ON ACCOUNT TO ROLE EVERY_PRIVILEGE 8 | - GRANT APPLY ROW ACCESS POLICY ON ACCOUNT TO ROLE EVERY_PRIVILEGE 9 | - GRANT APPLY TAG ON ACCOUNT TO ROLE EVERY_PRIVILEGE 10 | 11 | schemas: 12 | - name: tagged_schema 13 | database: static_database 14 | tags: 15 | static_database.public.static_tag: STATIC_TAG_VALUE 16 | 17 | tags: 18 | - name: static_tag 19 | database: static_database 20 | schema: PUBLIC 21 | comment: This is a static tag 22 | allowed_values: 23 | - STATIC_TAG_VALUE 24 | -------------------------------------------------------------------------------- /tools/test_account_configs/compute_pools.yml: -------------------------------------------------------------------------------- 1 | allowlist: 2 | - "compute pool" 3 | 4 | compute_pools: 5 | - name: static_compute_pool 6 | min_nodes: 1 7 | max_nodes: 1 8 | initially_suspended: true 9 | auto_resume: false 10 | auto_suspend_secs: 60 11 | instance_family: CPU_X64_XS -------------------------------------------------------------------------------- /tools/test_account_configs/enterprise.yml: -------------------------------------------------------------------------------- 1 | allowlist: 2 | - "tag" 3 | - "tag reference" 4 | - "masking policy" 5 | 6 | grants: 7 | - GRANT APPLY AGGREGATION POLICY ON ACCOUNT TO ROLE EVERY_PRIVILEGE 8 | - GRANT APPLY PROJECTION POLICY ON ACCOUNT TO ROLE EVERY_PRIVILEGE 9 | - GRANT APPLY ROW ACCESS POLICY ON ACCOUNT TO ROLE EVERY_PRIVILEGE 10 | - GRANT APPLY TAG ON ACCOUNT TO ROLE EVERY_PRIVILEGE 11 | 12 | schemas: 13 | - name: tagged_schema 14 | database: static_database 15 | tags: 16 | static_database.public.static_tag: STATIC_TAG_VALUE 17 | 18 | tags: 19 | - name: static_tag 20 | database: static_database 21 | schema: PUBLIC 22 | comment: This is a static tag 23 | allowed_values: 24 | - STATIC_TAG_VALUE 25 | 26 | masking_policies: 27 | - name: static_masking_policy 28 | database: static_database 29 
| schema: public 30 | args: 31 | - name: email 32 | data_type: VARCHAR 33 | returns: VARCHAR(16777216) 34 | body: "CASE WHEN current_role() IN ('ANALYST') THEN email ELSE '*********' END" 35 | comment: Masks email addresses 36 | exempt_other_policies: false 37 | owner: SYSADMIN 38 | -------------------------------------------------------------------------------- /tools/test_account_configs/gcp.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Titan-Systems/titan/a2679a578038dce65bed4af355ac5bf692d815ca/tools/test_account_configs/gcp.yml -------------------------------------------------------------------------------- /version.md: -------------------------------------------------------------------------------- 1 | # version 0.11.4 2 | --------------------------------------------------------------------------------