├── .github └── workflows │ ├── build.yml │ ├── deploy.yml │ └── snyk.yml ├── .gitignore ├── CHANGELOG ├── LICENSE ├── Makefile ├── README.md ├── VERSION ├── config ├── .gitignore ├── bigquery.yml.sample ├── postgres.yml.sample ├── redshift.yml.sample └── snowflake.yml.sample ├── dist └── .gitignore ├── go.mod ├── go.sum ├── integration ├── docker-compose.yml ├── resources │ ├── bad-mixed.yml │ ├── good-postgres-truncated.yml │ ├── good-postgres-with-template.yml │ ├── good-postgres.yml │ └── postgres-sql │ │ ├── bad │ │ └── 1.sql │ │ └── good │ │ ├── 1.sql │ │ ├── 2a.sql │ │ ├── 2b.sql │ │ ├── 3.sql │ │ ├── assert.sql │ │ └── output.sql ├── run_tests.sh ├── setup_consul.sh └── setup_postgres.sql └── sql_runner ├── aws_utils.go ├── aws_utils_test.go ├── bigquery_target.go ├── consul_provider.go ├── consul_utils.go ├── consul_utils_test.go ├── db.go ├── file_utils.go ├── file_utils_test.go ├── lock_file.go ├── lock_file_test.go ├── main.go ├── main_test.go ├── options.go ├── playbook.go ├── playbook_test.go ├── postgres_target.go ├── postgres_target_test.go ├── provider.go ├── review.go ├── run.go ├── run_test.go ├── scanner.go ├── snowflake_target.go ├── snowflake_target_test.go ├── sql_provider.go ├── template.go ├── yaml_provider.go ├── yaml_utils.go └── yaml_utils_test.go /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v4 16 | 17 | - name: Setup Go 18 | uses: actions/setup-go@v5 19 | with: 20 | go-version: '1.24' 21 | 22 | - name: Launch the docker-compose stack 23 | run: make setup-up 24 | 25 | - name: Build 26 | run: make 27 | 28 | - name: Test and send coverage 29 | env: 30 | COVERALLS_TOKEN: ${{ secrets.GITHUB_TOKEN }} 31 | run: | 32 | make 33 | make goveralls 34 | DISTRO=linux make integration 
35 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: Deploy 2 | 3 | on: 4 | push: 5 | tags: 6 | - '*.*.*' 7 | 8 | jobs: 9 | deploy: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v4 15 | 16 | - name: Setup Go 17 | uses: actions/setup-go@v5 18 | with: 19 | go-version: '1.24' 20 | 21 | - name: Launch the docker-compose stack 22 | run: make setup-up 23 | 24 | - name: Build 25 | run: make 26 | 27 | - name: Test and send coverage 28 | env: 29 | COVERALLS_TOKEN: ${{ secrets.GITHUB_TOKEN }} 30 | run: | 31 | make 32 | make goveralls 33 | DISTRO=linux make integration 34 | 35 | - name: Get tag and sql-runner version information 36 | id: version 37 | run: | 38 | echo "TAG_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT 39 | echo "SQL_RUNNER_VERSION=$(cat VERSION)" >> $GITHUB_OUTPUT 40 | 41 | - name: Fail if version mismatch 42 | if: ${{ steps.version.outputs.TAG_VERSION != steps.version.outputs.SQL_RUNNER_VERSION }} 43 | run: | 44 | echo "Tag version (${{ steps.version.outputs.TAG_VERSION }}) doesn't match version in project (${{ steps.version.outputs.SQL_RUNNER_VERSION }})" 45 | exit 1 46 | 47 | - name: Make release assets 48 | run: make release 49 | 50 | - name: Get date for release name 51 | id: date 52 | run: echo "RELEASE_DATE=$(date +'%Y-%m-%d')" >> $GITHUB_ENV 53 | 54 | - name: Release 55 | uses: ncipollo/release-action@v1 56 | with: 57 | token: ${{ secrets.GITHUB_TOKEN }} 58 | tag: ${{ steps.version.outputs.TAG_VERSION }} 59 | name: Version ${{ steps.version.outputs.TAG_VERSION }} (${{ env.RELEASE_DATE }}) 60 | draft: false 61 | prerelease: ${{ contains(steps.version.outputs.TAG_VERSION, '-rc') }} 62 | 63 | - name: Upload release assets 64 | uses: alexellis/upload-assets@0.4.0 65 | env: 66 | GITHUB_TOKEN: ${{ github.token }} 67 | with: 68 | asset_paths: 
'["./build/dist/*"]' 69 | -------------------------------------------------------------------------------- /.github/workflows/snyk.yml: -------------------------------------------------------------------------------- 1 | 2 | name: Snyk 3 | 4 | on: 5 | push: 6 | branches: [ master ] 7 | 8 | jobs: 9 | security: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - uses: actions/checkout@v4 14 | 15 | - name: Run Snyk to check for vulnerabilities 16 | uses: snyk/actions/golang@master 17 | with: 18 | command: monitor 19 | args: --prune-repeated-subdependencies --project-name=sql-runner 20 | env: 21 | SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Golang 2 | vendor/ 3 | 4 | # IntelliJ 5 | .idea 6 | 7 | # macOS 8 | .DS_Store 9 | 10 | # Project 11 | build/ 12 | dist/ 13 | .release.yml.tmp 14 | config.yml 15 | -------------------------------------------------------------------------------- /CHANGELOG: -------------------------------------------------------------------------------- 1 | Version 0.11.0 (2025-05-13) 2 | --------------------------- 3 | Add support for PrivateKey authentication with the Snowflake Target (#216) 4 | Format code and update COPYRIGHT 5 | Updated out-of-date workflow build files 6 | Updated docker compose for integration tests 7 | Updated Snowflake Driver + Go version 8 | 9 | Version 0.10.1 (2022-11-17) 10 | --------------------------- 11 | Update GitHub Actions workflows (#213) 12 | Fix matching to gosnowflake ErrUnknownError (#212) 13 | Update Snowflake driver to v1.6.13 (#211) 14 | 15 | Version 0.10.0 (2022-06-28) 16 | --------------------------- 17 | Fix out of range panic on invalid runQuery argument (#210) 18 | Exit on flags parsing error (#208) 19 | Fail fast on invalid playbook (#207) 20 | Upgrade yaml library (#206) 21 | Fix go get deprecation warning (#205) 22 | Add 
ability to set QUERY_TAG session parameter in Snowflake (#201) 23 | Update Snowflake Driver (#196) 24 | Fix linter issues (#204) 25 | Update dependencies (#195) 26 | Upgrade go-pg/pg to v10 (#203) 27 | Prevent exiting from goroutine (#202) 28 | 29 | Version 0.9.8 (2022-04-22) 30 | -------------------------- 31 | Add Application to JDBC driver configuration for identifying Snowplow jobs (#198) 32 | 33 | Version 0.9.7 (2022-01-25) 34 | -------------------------- 35 | Handle Snowflake error `-00001:` (#191) 36 | 37 | Version 0.9.6 (2022-01-18) 38 | -------------------------- 39 | Bump dependencies (#187) 40 | Update copyright for 2022 (#189) 41 | Update go version to 1.17 (#190) 42 | 43 | Version 0.9.5 (2021-04-30) 44 | -------------------------- 45 | Upgrade dependencies (#183) 46 | 47 | Version 0.9.4 (2021-02-15) 48 | -------------------------- 49 | Remove Bintray (#179) 50 | 51 | Version 0.9.3 (2021-01-25) 52 | -------------------------- 53 | Migrate to Github actions for CI/CD (#158) 54 | Update Copyright to 2015-2021 (#177) 55 | Snowflake: convert query output to string before printing (#166) 56 | Commented lines execute as empty SQL statements in Snowflake (#119) 57 | Add Snowflake reference to application description in help message (#132) 58 | 59 | Version 0.9.2 (2020-12-15) 60 | -------------------------- 61 | BigQuery: Run multiple-step queries as single script (#169) 62 | BigQuery: Explicitly set Client.Location from the region playbook variable (#173) 63 | Bump Travis Golang version to 1.15 (#174) 64 | 65 | Version 0.9.1 (2020-11-17) 66 | -------------------------- 67 | Add ability to template YAML playbook with custom functions (#167) 68 | 69 | Version 0.9.0 (2020-07-17) 70 | -------------------------- 71 | Replace Vagrant with Docker based development experience (#150) 72 | Update project to use go mod instead of dep (#151) 73 | Update Copyright to 2015-2020 (#152) 74 | Add Snyk Integration (#154) 75 | 76 | Version 0.8.0 (2018-11-08) 77 | 
-------------------------- 78 | Add support for BigQuery (#92) 79 | 80 | Version 0.7.1 (2018-10-15) 81 | -------------------------- 82 | Fix non-default Snowflake region from being invalidated (#134) 83 | 84 | Version 0.7.0 (2018-09-10) 85 | -------------------------- 86 | Update Vagrant configuration for dep and new playbooks (#131) 87 | Add -showQueryOutput flag to show output from SQL queries (#107) 88 | Add -consulOnlyForLock flag to read local playbook but use Consul locking (#96) 89 | Attempt connection to targets on -dryRun (#51) 90 | Elide default Snowflake region in Target config (#121) 91 | Add 'check SQL query' mode (#124) 92 | Exit with dedicated return code if no queries to run (#105) 93 | Replace godep with dep (#127) 94 | Add Goreport badge (#95) 95 | Remove Wine from required playbooks (#129) 96 | Fix v0.6.0 release date in CHANGELOG (#126) 97 | Add the possibility to template playbooks (#103) 98 | Allow passing multiple key value pairs to the -var flag (#120) 99 | Add a random number templating function (#109) 100 | Bump pg version to 6 (#133) 101 | 102 | Version 0.6.0 (2018-01-01) 103 | -------------------------- 104 | Bump Go version to 1.9.2 (#112) 105 | Remove Go versions 1.6 and 1.7 from .travis.yml (#110) 106 | Add support for Snowflake database (#114) 107 | Add database-specific config.yml samples (#118) 108 | Update README markdown in accordance with CommonMark (#104) 109 | 110 | Version 0.5.2 (2017-01-13) 111 | -------------------------- 112 | Update Copyright to 2015-2017 (#83) 113 | Bump Consul version to 0.7.2 (#82) 114 | Bump GoLang version to 1.7.4 (#84) 115 | 116 | Version 0.5.1 (2016-12-19) 117 | -------------------------- 118 | Update travis to deploy on Go 1.7 (#76) 119 | 120 | Version 0.5.0 (2016-12-12) 121 | -------------------------- 122 | Add ability to set a lockfile for a run (#54) 123 | Add command to check lockfile (#67) 124 | Add command to delete lockfile (#66) 125 | Add integration test suite (#71) 126 | Add 
tcpKeepAlive=true for long-running queries via NAT (#57) 127 | Add test suite (#63) 128 | Refactor: Playbook and SQL loading interface (#62) 129 | Refactor code structure (#72) 130 | Add ability to run a single query, -runQuery (#55) 131 | Change default for template from false to true (#53) 132 | Update copyright year to 2015-2016 (#69) 133 | Update README to direct users to wiki (#65) 134 | Update Postgres library to pg.v5 (#73) 135 | Fix SQL Runner download link pointing to old version (#58) 136 | Stop testing travis on tip (#68) 137 | Add Bintray credentials to .travis.yml (#70) 138 | Add CI/CD (#64) 139 | 140 | Version 0.4.0 (2015-12-03) 141 | -------------------------- 142 | Bump gopkg.in/pg.v2 to v3 (#48) 143 | Initialize the variables map inside the playbook properly, thanks @andrioni! (#47) 144 | Add -dryRun mode (#45) 145 | Add ability for SQL files to be read from Consul (#44) 146 | Add ability to read playbook from Consul (#43) 147 | Add an environment variable function (#30) 148 | Moved SQL file reading to startup (#22) 149 | 150 | Version 0.3.0 (2015-11-05) 151 | -------------------------- 152 | Hardened push script (#8) 153 | Upgraded vagrant push to upload OS X and Windows binaries (#31) 154 | Added option to start SQL Runner from a given step (#32) 155 | Updated vagrant up to use Golang 1.5 and Wine (#34) 156 | Added support to passing variables as command-line options (#35) 157 | Added AWS credentials support (#39) 158 | Added new template functions into README (#41) 159 | 160 | Version 0.2.0 (2015-09-13) 161 | -------------------------- 162 | Fixed typo of "queries executed againt targets" (#20) 163 | Added support for SSL connection to Postgres/Redshift database, thanks @dennisatspaceape! 
(#24) 164 | Updated vagrant up to work with latest Peru version (#25) 165 | Replaced bitbucket.org/kardianos/osext with github.com/kardianos/osext (#33) 166 | 167 | Version 0.1.0 (2015-01-17) 168 | -------------------------- 169 | Initial release 170 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2015-2022 Snowplow Analytics Ltd. 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all format lint tidy setup-reset setup-up setup-down test goveralls integration release release-dry clean 2 | 3 | # ----------------------------------------------------------------------------- 4 | # CONSTANTS 5 | # ----------------------------------------------------------------------------- 6 | 7 | src_dir = sql_runner 8 | 9 | build_dir = build 10 | 11 | coverage_dir = $(build_dir)/coverage 12 | coverage_out = $(coverage_dir)/coverage.out 13 | coverage_html = $(coverage_dir)/coverage.html 14 | 15 | output_dir = $(build_dir)/output 16 | 17 | linux_dir = $(output_dir)/linux 18 | darwin_dir = $(output_dir)/darwin 19 | windows_dir = $(output_dir)/windows 20 | 21 | bin_name = sql-runner 22 | bin_linux = $(linux_dir)/$(bin_name) 23 | bin_darwin = $(darwin_dir)/$(bin_name) 24 | bin_windows = $(windows_dir)/$(bin_name) 25 | 26 | version = $(shell cat VERSION) 27 | 28 | artifacts_dir = $(build_dir)/dist 29 | 30 | zip_suffix = amd64.zip 31 | 
zip_prefix = sql_runner_$(subst -,_,$(version)) 32 | 33 | zip_linux = $(zip_prefix)_linux_$(zip_suffix) 34 | zip_darwin = $(zip_prefix)_darwin_$(zip_suffix) 35 | zip_windows = $(zip_prefix)_windows_$(zip_suffix) 36 | 37 | 38 | # ----------------------------------------------------------------------------- 39 | # BUILDING 40 | # ----------------------------------------------------------------------------- 41 | 42 | all: 43 | GO111MODULE=on go install github.com/mitchellh/gox@latest 44 | GO111MODULE=on CGO_ENABLED=0 gox -osarch=linux/amd64 -output=$(bin_linux) ./$(src_dir) 45 | GO111MODULE=on CGO_ENABLED=0 gox -osarch=darwin/amd64 -output=$(bin_darwin) ./$(src_dir) 46 | GO111MODULE=on CGO_ENABLED=0 gox -osarch=windows/amd64 -output=$(bin_windows) ./$(src_dir) 47 | 48 | # ----------------------------------------------------------------------------- 49 | # FORMATTING 50 | # ----------------------------------------------------------------------------- 51 | 52 | format: 53 | GO111MODULE=on go fmt ./$(src_dir) 54 | GO111MODULE=on gofmt -s -w ./$(src_dir) 55 | 56 | lint: 57 | GO111MODULE=on go install golang.org/x/lint/golint@latest 58 | GO111MODULE=on golint ./$(src_dir) 59 | 60 | tidy: 61 | GO111MODULE=on go mod tidy 62 | 63 | # ----------------------------------------------------------------------------- 64 | # TESTING 65 | # ----------------------------------------------------------------------------- 66 | 67 | setup-reset: setup-down setup-up 68 | 69 | setup-up: 70 | docker compose -f ./integration/docker-compose.yml up -d 71 | sleep 2 72 | ./integration/setup_consul.sh 73 | 74 | setup-down: 75 | docker compose -f ./integration/docker-compose.yml down 76 | 77 | test-setup: 78 | mkdir -p $(coverage_dir) 79 | GO111MODULE=on go install golang.org/x/tools/cmd/cover@latest 80 | 81 | test: test-setup 82 | GO111MODULE=on go test ./$(src_dir) -tags test -v -covermode=count -coverprofile=$(coverage_out) 83 | GO111MODULE=on go tool cover -html=$(coverage_out) -o 
$(coverage_html) 84 | 85 | goveralls: test 86 | GO111MODULE=on go install github.com/mattn/goveralls@latest 87 | goveralls -coverprofile=$(coverage_out) -service=github 88 | 89 | integration: 90 | ifndef DISTRO 91 | $(error DISTRO is undefined - this should be set to 'linux' or 'darwin'!) 92 | endif 93 | ./integration/run_tests.sh 94 | 95 | # ----------------------------------------------------------------------------- 96 | # RELEASE 97 | # ----------------------------------------------------------------------------- 98 | 99 | release: all 100 | mkdir -p $(artifacts_dir) 101 | (cd $(linux_dir) && zip staging.zip $(bin_name)) 102 | (cd $(darwin_dir) && zip staging.zip $(bin_name)) 103 | (cd $(windows_dir) && zip staging.zip $(bin_name).exe) 104 | mv $(linux_dir)/staging.zip $(artifacts_dir)/$(zip_linux) 105 | mv $(darwin_dir)/staging.zip $(artifacts_dir)/$(zip_darwin) 106 | mv $(windows_dir)/staging.zip $(artifacts_dir)/$(zip_windows) 107 | 108 | release-dry: release 109 | 110 | # ----------------------------------------------------------------------------- 111 | # CLEANUP 112 | # ----------------------------------------------------------------------------- 113 | 114 | clean: 115 | rm -rf $(build_dir) 116 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SQL Runner 2 | 3 | [![Build Status][gh-actions-image]][gh-actions] [![Coveralls][coveralls-image]][coveralls] [![Go Report Card][goreport-image]][goreport] [![Release][release-image]][releases] [![License][license-image]][license] 4 | 5 | ## Overview 6 | 7 | Run playbooks of SQL scripts in series and parallel on Snowflake DB, Amazon Redshift and PostgreSQL. 8 | 9 | Used with **[Snowplow][snowplow]** for scheduled SQL-based transformations of event stream data. 
10 | 11 | | **[Setup Guide][setup-guide]** | **[User Guide][user-guide]** | 12 | |:--------------------------------------:|:-----------------------------------------:| 13 | | [![i1][setup-image]][setup-guide] | [![i2][user-image]][user-guide] | 14 | 15 | ## Quick start 16 | 17 | Assuming [go][go-url], [docker][docker-url] and [docker-compose][docker-compose-url] are installed: 18 | 19 | ```bash 20 | host> git clone https://github.com/snowplow/sql-runner 21 | host> cd sql-runner 22 | host> make setup-up # Launches Consul + Postgres for testing 23 | host> make # Builds sql-runner binaries 24 | host> make test # Runs unit tests 25 | 26 | # DISTRO specifies which binary you want to run integration tests with 27 | host> DISTRO=darwin make integration 28 | ``` 29 | 30 | _Note_: You will need to ensure that `~/go/bin` is on your PATH for `gox` to work - the underlying tool that we use for building the binaries. 31 | 32 | When you are done with testing simply execute `make setup-down` to terminate the docker-compose stack. 33 | 34 | To reset the testing resources execute `make setup-reset` which will rebuild the docker containers. This can be useful if the state of these systems gets out of sync with what the tests expect. 35 | 36 | To remove all build files: 37 | 38 | ```bash 39 | guest> make clean 40 | ``` 41 | 42 | To format the golang code in the source directory: 43 | 44 | ```bash 45 | guest> make format 46 | ``` 47 | 48 | **Note:** Always run `make format` before submitting any code. 49 | 50 | **Note:** The `make test` command also generates a code coverage file which can be found at `build/coverage/coverage.html`. 51 | 52 | ## How to use? 
53 | 54 | First either compile the binary from source using the above `make` command or download the published Binary directly from the GitHub release: 55 | 56 | * [Darwin (macOS)](https://github.com/snowplow/sql-runner/releases/download/0.11.0/sql_runner_0.11.0_darwin_amd64.zip) 57 | * [Linux](https://github.com/snowplow/sql-runner/releases/download/0.11.0/sql_runner_0.11.0_linux_amd64.zip) 58 | * [Windows](https://github.com/snowplow/sql-runner/releases/download/0.11.0/sql_runner_0.11.0_windows_amd64.zip) 59 | 60 | ### CLI Output 61 | 62 | ```bash 63 | sql-runner version: 0.11.0 64 | Run playbooks of SQL scripts in series and parallel on Redshift and Postgres 65 | Usage: 66 | -checkLock string 67 | Checks whether the lockfile already exists 68 | -consul string 69 | The address of a consul server with playbooks and SQL files stored in KV pairs 70 | -consulOnlyForLock 71 | Will read playbooks locally, but use Consul for locking. 72 | -deleteLock string 73 | Will attempt to delete a lockfile if it exists 74 | -dryRun 75 | Runs through a playbook without executing any of the SQL 76 | -fillTemplates 77 | Will print all queries after templates are filled 78 | -fromStep string 79 | Starts from a given step defined in your playbook 80 | -help 81 | Shows this message 82 | -lock string 83 | Optional argument which checks and sets a lockfile to ensure this run is a singleton. Deletes lock on run completing successfully 84 | -playbook string 85 | Playbook of SQL scripts to execute 86 | -runQuery string 87 | Will run a single query in the playbook 88 | -showQueryOutput 89 | Will print all output from queries 90 | -softLock string 91 | Optional argument, like '-lock' but the lockfile will be deleted even if the run fails 92 | -sqlroot string 93 | Absolute path to SQL scripts. 
Use PLAYBOOK, BINARY and PLAYBOOK_CHILD for those respective paths (default "PLAYBOOK") 94 | -var value 95 | Variables to be passed to the playbook, in the key=value format 96 | -version 97 | Shows the program version 98 | ``` 99 | 100 | ## Copyright and license 101 | 102 | SQL Runner is copyright 2015-2022 Snowplow Analytics Ltd. 103 | 104 | Licensed under the **[Apache License, Version 2.0][license]** (the "License"); 105 | you may not use this software except in compliance with the License. 106 | 107 | Unless required by applicable law or agreed to in writing, software 108 | distributed under the License is distributed on an "AS IS" BASIS, 109 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 110 | See the License for the specific language governing permissions and 111 | limitations under the License. 112 | 113 | [go-url]: https://golang.org/doc/install 114 | [docker-url]: https://docs.docker.com/get-docker/ 115 | [docker-compose-url]: https://docs.docker.com/compose/install/ 116 | 117 | [gh-actions]: https://github.com/snowplow/sql-runner/actions 118 | [gh-actions-image]: https://github.com/snowplow/sql-runner/workflows/Build/badge.svg?branch=master 119 | 120 | [release-image]: https://img.shields.io/github/v/release/snowplow/sql-runner 121 | [releases]: https://github.com/snowplow/sql-runner/releases 122 | 123 | [license-image]: http://img.shields.io/badge/license-Apache--2-blue.svg?style=flat 124 | [license]: http://www.apache.org/licenses/LICENSE-2.0 125 | 126 | [coveralls-image]: https://coveralls.io/repos/github/snowplow/sql-runner/badge.svg?branch=master 127 | [coveralls]: https://coveralls.io/github/snowplow/sql-runner?branch=master 128 | 129 | [goreport]: https://goreportcard.com/report/github.com/snowplow/sql-runner 130 | [goreport-image]: https://goreportcard.com/badge/github.com/snowplow/sql-runner 131 | 132 | [snowplow]: https://github.com/snowplow/snowplow 133 | 134 | [setup-guide]: 
https://docs.snowplow.io/docs/modeling-your-data/modeling-your-data-with-sql-runner/ 135 | [user-guide]: https://docs.snowplow.io/docs/modeling-your-data/modeling-your-data-with-sql-runner/#user-guide 136 | 137 | [setup-image]: https://d3i6fms1cm1j0i.cloudfront.net/github/images/setup.png 138 | [user-image]: https://d3i6fms1cm1j0i.cloudfront.net/github/images/techdocs.png 139 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 0.11.0 -------------------------------------------------------------------------------- /config/.gitignore: -------------------------------------------------------------------------------- 1 | *.yml 2 | -------------------------------------------------------------------------------- /config/bigquery.yml.sample: -------------------------------------------------------------------------------- 1 | targets: 2 | - name: "My BigQuery database" 3 | type: bigquery 4 | project: ADD HERE # Project ID as shown in the GCP console's front page 5 | # Service account credentials should also be implicitly provided: https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually 6 | variables: 7 | foo: bar 8 | steps: 9 | - name: ADD HERE 10 | queries: 11 | - name: ADD HERE 12 | file: ADD REL/ABS PATH 13 | template: true 14 | - name: ADD HERE 15 | queries: 16 | - name: ADD HERE 17 | file: ADD REL/ABS PATH 18 | template: true 19 | - name: ADD HERE 20 | file: ADD REL/ABS PATH 21 | template: true 22 | - name: ADD HERE 23 | queries: 24 | - name: ADD HERE 25 | file: ADD REL/ABS PATH 26 | template: true 27 | -------------------------------------------------------------------------------- /config/postgres.yml.sample: -------------------------------------------------------------------------------- 1 | targets: 2 | - name: "My Postgres database" 3 | type: postgres 4 | host: ADD HERE 5 | 
database: ADD HERE # Name of database 6 | port: 5432 # Default Postgres port 7 | username: ADD HERE 8 | password: ADD HERE 9 | ssl: false # SSL disabled by default 10 | variables: 11 | foo: bar 12 | steps: 13 | - name: ADD HERE 14 | queries: 15 | - name: ADD HERE 16 | file: ADD REL/ABS PATH 17 | template: true 18 | - name: ADD HERE 19 | queries: 20 | - name: ADD HERE 21 | file: ADD REL/ABS PATH 22 | template: true 23 | - name: ADD HERE 24 | file: ADD REL/ABS PATH 25 | template: true 26 | - name: ADD HERE 27 | queries: 28 | - name: ADD HERE 29 | file: ADD REL/ABS PATH 30 | template: true 31 | -------------------------------------------------------------------------------- /config/redshift.yml.sample: -------------------------------------------------------------------------------- 1 | targets: 2 | - name: "My Redshift database" 3 | type: redshift 4 | host: ADD HERE # The endpoint as shown in the Redshift console 5 | database: ADD HERE # Name of database 6 | port: 5439 # Default Redshift port 7 | username: ADD HERE 8 | password: ADD HERE 9 | ssl: false # SSL disabled by default 10 | variables: 11 | foo: bar 12 | steps: 13 | - name: ADD HERE 14 | queries: 15 | - name: ADD HERE 16 | file: ADD REL/ABS PATH 17 | template: true 18 | - name: ADD HERE 19 | queries: 20 | - name: ADD HERE 21 | file: ADD REL/ABS PATH 22 | template: true 23 | - name: ADD HERE 24 | file: ADD REL/ABS PATH 25 | template: true 26 | - name: ADD HERE 27 | queries: 28 | - name: ADD HERE 29 | file: ADD REL/ABS PATH 30 | template: true 31 | -------------------------------------------------------------------------------- /config/snowflake.yml.sample: -------------------------------------------------------------------------------- 1 | targets: 2 | - name: "My Snowflake database" 3 | type: snowflake 4 | account: ADD HERE # Your Snowflake account name 5 | region: ADD HERE # Leave blank for default us-east-1 6 | database: ADD HERE # Name of database 7 | warehouse: ADD HERE # Name of warehouse to run the 
queries 8 | username: ADD HERE 9 | password: ADD HERE 10 | host: # Leave blank 11 | port: # Leave blank 12 | ssl: true # Snowflake connection is always secured by TLS 13 | variables: 14 | foo: bar 15 | steps: 16 | - name: ADD HERE 17 | queries: 18 | - name: ADD HERE 19 | file: ADD REL/ABS PATH 20 | template: true 21 | - name: ADD HERE 22 | queries: 23 | - name: ADD HERE 24 | file: ADD REL/ABS PATH 25 | template: true 26 | - name: ADD HERE 27 | file: ADD REL/ABS PATH 28 | template: true 29 | - name: ADD HERE 30 | queries: 31 | - name: ADD HERE 32 | file: ADD REL/ABS PATH 33 | template: true 34 | -------------------------------------------------------------------------------- /dist/.gitignore: -------------------------------------------------------------------------------- 1 | # Nothing in dist gets saved to version control 2 | * 3 | !.gitignore 4 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/snowplow/sql-runner 2 | 3 | go 1.24.0 4 | 5 | toolchain go1.24.3 6 | 7 | require ( 8 | cloud.google.com/go/bigquery v1.32.0 9 | github.com/aws/aws-sdk-go v1.44.27 10 | github.com/davecgh/go-spew v1.1.1 11 | github.com/go-pg/pg/v10 v10.10.6 12 | github.com/goccy/go-yaml v1.11.0 13 | github.com/hashicorp/consul/api v1.13.0 14 | github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 15 | github.com/olekukonko/tablewriter v0.0.5 16 | github.com/pkg/errors v0.9.1 17 | github.com/snowflakedb/gosnowflake v1.13.3 18 | github.com/stretchr/testify v1.9.0 19 | golang.org/x/net v0.34.0 20 | google.golang.org/api v0.82.0 21 | ) 22 | 23 | require ( 24 | cloud.google.com/go v0.102.0 // indirect 25 | cloud.google.com/go/compute v1.6.1 // indirect 26 | cloud.google.com/go/iam v0.3.0 // indirect 27 | github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect 28 | github.com/99designs/keyring v1.2.2 // indirect 29 | 
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 // indirect 30 | github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect 31 | github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 // indirect 32 | github.com/BurntSushi/toml v1.4.0 // indirect 33 | github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect 34 | github.com/apache/arrow-go/v18 v18.0.0 // indirect 35 | github.com/armon/go-metrics v0.4.0 // indirect 36 | github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect 37 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect 38 | github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect 39 | github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 // indirect 40 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect 41 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect 42 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect 43 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect 44 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect 45 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect 46 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect 47 | github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 // indirect 48 | github.com/aws/smithy-go v1.20.2 // indirect 49 | github.com/danieljoos/wincred v1.2.2 // indirect 50 | github.com/dvsekhvalnov/jose2go v1.6.0 // indirect 51 | github.com/fatih/color v1.15.0 // indirect 52 | github.com/gabriel-vasile/mimetype v1.4.7 // indirect 53 | github.com/go-pg/zerochecker v0.2.0 // indirect 54 | github.com/goccy/go-json v0.10.4 // indirect 55 | github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect 56 | github.com/golang-jwt/jwt/v5 v5.2.2 // indirect 57 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 58 | github.com/golang/protobuf v1.5.2 // indirect 59 | 
github.com/google/flatbuffers v24.12.23+incompatible // indirect 60 | github.com/googleapis/gax-go/v2 v2.4.0 // indirect 61 | github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect 62 | github.com/hashicorp/go-cleanhttp v0.5.2 // indirect 63 | github.com/hashicorp/go-hclog v1.2.0 // indirect 64 | github.com/hashicorp/go-immutable-radix v1.3.1 // indirect 65 | github.com/hashicorp/go-rootcerts v1.0.2 // indirect 66 | github.com/hashicorp/golang-lru v0.5.4 // indirect 67 | github.com/hashicorp/serf v0.9.8 // indirect 68 | github.com/jinzhu/inflection v1.0.0 // indirect 69 | github.com/jmespath/go-jmespath v0.4.0 // indirect 70 | github.com/klauspost/compress v1.17.11 // indirect 71 | github.com/klauspost/cpuid/v2 v2.2.9 // indirect 72 | github.com/mattn/go-colorable v0.1.13 // indirect 73 | github.com/mattn/go-isatty v0.0.19 // indirect 74 | github.com/mattn/go-runewidth v0.0.13 // indirect 75 | github.com/mitchellh/go-homedir v1.1.0 // indirect 76 | github.com/mitchellh/mapstructure v1.5.0 // indirect 77 | github.com/mtibben/percent v0.2.1 // indirect 78 | github.com/onsi/ginkgo v1.16.1 // indirect 79 | github.com/onsi/gomega v1.11.0 // indirect 80 | github.com/pierrec/lz4/v4 v4.1.22 // indirect 81 | github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect 82 | github.com/pmezard/go-difflib v1.0.0 // indirect 83 | github.com/rivo/uniseg v0.2.0 // indirect 84 | github.com/sirupsen/logrus v1.9.3 // indirect 85 | github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect 86 | github.com/vmihailenco/bufpool v0.1.11 // indirect 87 | github.com/vmihailenco/msgpack/v5 v5.3.4 // indirect 88 | github.com/vmihailenco/tagparser v0.1.2 // indirect 89 | github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect 90 | github.com/zeebo/xxh3 v1.0.2 // indirect 91 | go.opencensus.io v0.23.0 // indirect 92 | golang.org/x/crypto v0.32.0 // indirect 93 | golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect 94 | golang.org/x/mod 
v0.22.0 // indirect 95 | golang.org/x/oauth2 v0.26.0 // indirect 96 | golang.org/x/sync v0.10.0 // indirect 97 | golang.org/x/sys v0.29.0 // indirect 98 | golang.org/x/term v0.28.0 // indirect 99 | golang.org/x/text v0.21.0 // indirect 100 | golang.org/x/tools v0.29.0 // indirect 101 | golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect 102 | google.golang.org/appengine v1.6.7 // indirect 103 | google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 // indirect 104 | google.golang.org/grpc v1.67.1 // indirect 105 | google.golang.org/protobuf v1.35.1 // indirect 106 | gopkg.in/yaml.v3 v3.0.1 // indirect 107 | mellium.im/sasl v0.2.1 // indirect 108 | ) 109 | -------------------------------------------------------------------------------- /integration/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | postgres: 3 | image: postgres:9.4 4 | container_name: postgres-sql-runner 5 | restart: always 6 | volumes: 7 | - ./setup_postgres.sql:/docker-entrypoint-initdb.d/init.sql 8 | ports: 9 | - "5434:5432" 10 | logging: 11 | options: 12 | max-size: "1M" 13 | max-file: "10" 14 | environment: 15 | POSTGRES_HOST_AUTH_METHOD: trust 16 | 17 | consul: 18 | image: consul:1.4.4 19 | container_name: consul-sql-runner 20 | restart: always 21 | ports: 22 | - "8502:8500" 23 | - "8602:8600/udp" 24 | logging: 25 | options: 26 | max-size: "1M" 27 | max-file: "10" 28 | environment: 29 | CONSUL_BIND_INTERFACE: eth0 30 | -------------------------------------------------------------------------------- /integration/resources/bad-mixed.yml: -------------------------------------------------------------------------------- 1 | targets: 2 | - name: "My Postgres database 1" 3 | type: postgres 4 | host: localhost 5 | database: sql_runner_tests_1 6 | port: 5434 7 | username: postgres 8 | password: 9 | ssl: false # SSL disabled by default 10 | - name: "My Hana database 1" 11 | type: hana 12 | host: localhost 13 | 
database: sql_runner_tests_2 14 | port: 34015 15 | username: hana 16 | password: 17 | ssl: false # SSL disabled by default 18 | variables: 19 | steps: 20 | - name: Failing 21 | queries: 22 | - name: Corrupted script 23 | file: postgres-sql/bad/1.sql 24 | -------------------------------------------------------------------------------- /integration/resources/good-postgres-truncated.yml: -------------------------------------------------------------------------------- 1 | # Note Ruby-flavored :s at start of YAML properties. Not required 2 | :targets: 3 | - :name: "My Postgres database 1" 4 | :type: postgres 5 | :host: localhost 6 | :database: sql_runner_tests_1 7 | :port: 5434 8 | :username: snowplow 9 | :password: snowplow 10 | :ssl: false # SSL disabled by default 11 | - :name: "My Postgres database 2" 12 | :type: postgres 13 | :host: localhost 14 | :database: sql_runner_tests_2 15 | :port: 5434 16 | :username: snowplow 17 | :password: snowplow 18 | :ssl: false # SSL disabled by default 19 | :variables: 20 | :test_schema: sql_runner_tests 21 | :timeFormat: "2006_01_02" 22 | :steps: -------------------------------------------------------------------------------- /integration/resources/good-postgres-with-template.yml: -------------------------------------------------------------------------------- 1 | :targets: 2 | - :name: "My Postgres database 1" 3 | :type: postgres 4 | :host: {{.host}} 5 | :database: sql_runner_tests_1 6 | :port: 5434 7 | :username: {{.username}} 8 | :password: {{.password}} 9 | :ssl: false # SSL disabled by default 10 | :variables: 11 | :test_schema: sql_runner_tests 12 | :timeFormat: "2006_01_02" 13 | :steps: 14 | - :name: Create schema and table 15 | :queries: 16 | - :name: Create schema and table 17 | :file: postgres-sql/good/1.sql 18 | :template: true 19 | -------------------------------------------------------------------------------- /integration/resources/good-postgres.yml: 
-------------------------------------------------------------------------------- 1 | # Note Ruby-flavored :s at start of YAML properties. Not required 2 | :targets: 3 | - :name: "My Postgres database 1" 4 | :type: postgres 5 | :host: localhost 6 | :database: sql_runner_tests_1 7 | :port: 5434 8 | :username: snowplow 9 | :password: snowplow 10 | :ssl: false # SSL disabled by default 11 | - :name: "My Postgres database 2" 12 | :type: postgres 13 | :host: localhost 14 | :database: sql_runner_tests_2 15 | :port: 5434 16 | :username: snowplow 17 | :password: snowplow 18 | :ssl: false # SSL disabled by default 19 | :variables: 20 | :test_schema: sql_runner_tests 21 | :timeFormat: "2006_01_02" 22 | :steps: 23 | - :name: Failing 24 | :queries: 25 | - :name: Corrupted script 26 | :file: postgres-sql/bad/1.sql 27 | - :name: Create schema and table 28 | :queries: 29 | - :name: Create schema and table 30 | :file: postgres-sql/good/1.sql 31 | :template: true 32 | - :name: Parallel load 33 | :queries: 34 | - :name: Parallel load 1 35 | :file: postgres-sql/good/2a.sql 36 | :template: true 37 | - :name: Parallel load 2 38 | :file: postgres-sql/good/2b.sql 39 | :template: true 40 | - :name: Create view 41 | :queries: 42 | - :name: Create view 43 | :file: postgres-sql/good/3.sql 44 | :template: true 45 | - :name: Assertions 46 | :queries: 47 | - :name: Assertions 48 | :file: postgres-sql/good/assert.sql 49 | :template: true 50 | - :name: Output 51 | :queries: 52 | - :name: Output 53 | :file: postgres-sql/good/output.sql 54 | :template: true -------------------------------------------------------------------------------- /integration/resources/postgres-sql/bad/1.sql: -------------------------------------------------------------------------------- 1 | -- Test file: 1.sql 2 | 3 | CREATE TABLE 4 | -------------------------------------------------------------------------------- /integration/resources/postgres-sql/good/1.sql: 
-------------------------------------------------------------------------------- 1 | -- Test file: 1.sql 2 | 3 | DROP SCHEMA IF EXISTS {{.test_schema}} CASCADE; 4 | 5 | CREATE SCHEMA {{.test_schema}}; 6 | 7 | CREATE TABLE {{.test_schema}}.table1 ( 8 | age int, 9 | firstName varchar(255), 10 | city varchar(255), 11 | country varchar(255) 12 | ); 13 | -------------------------------------------------------------------------------- /integration/resources/postgres-sql/good/2a.sql: -------------------------------------------------------------------------------- 1 | -- Test file: 2a.sql 2 | 3 | INSERT INTO {{.test_schema}}.table1 VALUES 4 | (18, 'john', 'new york', 'us'), 5 | (20, 'ben', 'london', 'uk'); -------------------------------------------------------------------------------- /integration/resources/postgres-sql/good/2b.sql: -------------------------------------------------------------------------------- 1 | -- Test file: 2b.sql 2 | 3 | INSERT INTO {{.test_schema}}.table1 VALUES (32); 4 | -------------------------------------------------------------------------------- /integration/resources/postgres-sql/good/3.sql: -------------------------------------------------------------------------------- 1 | -- Test file: 3.sql 2 | 3 | CREATE VIEW {{.test_schema}}.view_{{nowWithFormat .timeFormat}} AS 4 | SELECT AVG(age)::int AS average_age FROM {{.test_schema}}.table1; 5 | -------------------------------------------------------------------------------- /integration/resources/postgres-sql/good/assert.sql: -------------------------------------------------------------------------------- 1 | -- Test file: assert.sql 2 | 3 | CREATE OR REPLACE PROCEDURAL LANGUAGE plpgsql; 4 | 5 | CREATE OR REPLACE FUNCTION {{.test_schema}}.assert_average_age() RETURNS VOID AS $$ 6 | DECLARE 7 | expected_average_age CONSTANT integer := 23; 8 | BEGIN 9 | {{/* Update view names to today's date or Travis will error */}} 10 | IF (SELECT average_age <> expected_average_age FROM 
{{.test_schema}}.view_{{.test_date}}) THEN 11 | RAISE EXCEPTION 'Average_age % does not match expected age %', 12 | (SELECT average_age FROM {{.test_schema}}.view_{{.test_date}}), 13 | expected_average_age; 14 | END IF; 15 | END; 16 | $$ LANGUAGE plpgsql; 17 | 18 | SELECT {{.test_schema}}.assert_average_age(); 19 | -------------------------------------------------------------------------------- /integration/resources/postgres-sql/good/output.sql: -------------------------------------------------------------------------------- 1 | -- Test file: output.sql 2 | 3 | SELECT * FROM {{.test_schema}}.table1; 4 | -------------------------------------------------------------------------------- /integration/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 4 | # 5 | # This program is licensed to you under the Apache License Version 2.0, 6 | # and you may not use this file except in compliance with the Apache License Version 2.0. 7 | # You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the Apache License Version 2.0 is distributed on an 11 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
13 | 14 | set -e 15 | 16 | # ----------------------------------------------------------------------------- 17 | # CONSTANTS 18 | # ----------------------------------------------------------------------------- 19 | 20 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 21 | 22 | root=${DIR}/../ 23 | bin_path=${DIR}/../build/output/${DISTRO}/sql-runner 24 | consul_server_uri=localhost:8502 25 | root_key=${DIR}/resources 26 | assert_counter=0 27 | 28 | # ----------------------------------------------------------------------------- 29 | # FUNCTIONS & PROCEDURES 30 | # ----------------------------------------------------------------------------- 31 | 32 | # Similar to Perl die 33 | function die() { 34 | echo "$@" 1>&2 ; exit 1; 35 | } 36 | 37 | # Is passed an exit code and a command and 38 | # will then assert that the exit code matches. 39 | # 40 | # Parameters: 41 | # 1. exit_code 42 | # 2. command 43 | function assert_ExitCodeForCommand() { 44 | [ "$#" -eq 2 ] || die "2 arguments required, $# provided" 45 | local __exit_code="$1" 46 | local __command="$2" 47 | let "assert_counter+=1" 48 | 49 | printf "RUNNING: Assertion ${assert_counter}:\n - ${__command}\n\n" 50 | 51 | set +e 52 | eval ${__command} 53 | retval=`echo $?` 54 | set -e 55 | 56 | if [ ${retval} -eq ${__exit_code} ] ; then 57 | printf "\nSUCCESS: Test finished with exit code ${__exit_code}\n\n" 58 | else 59 | printf "\nFAIL: Expected exit code ${__exit_code} got ${retval}\n\n" 60 | exit 1 61 | fi 62 | } 63 | 64 | # ----------------------------------------------------------------------------- 65 | # TEST EXECUTION 66 | # ----------------------------------------------------------------------------- 67 | 68 | cd ${root} 69 | 70 | printf "==========================================================\n" 71 | printf " RUNNING INTEGRATION TESTS\n" 72 | printf "==========================================================\n\n" 73 | 74 | # Test: Invalid playbook should return exit code 7 75 | 
assert_ExitCodeForCommand "7" "${bin_path} -playbook ${root_key}/bad-mixed.yml" 76 | 77 | # Test: Valid playbook with invalid query should return exit code 6 78 | assert_ExitCodeForCommand "6" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"`" 79 | 80 | # Test: Valid playbook which attempts to lock but fails should return exit code 1 81 | assert_ExitCodeForCommand "1" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -lock ${root}/dist/locks/integration/1" 82 | assert_ExitCodeForCommand "1" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -lock /locks/integration/1 -consul ${consul_server_uri}" 83 | assert_ExitCodeForCommand "1" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -softLock ${root}/dist/locks/integration/1" 84 | assert_ExitCodeForCommand "1" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -softLock /locks/integration/1 -consul ${consul_server_uri}" 85 | 86 | # Test: Checking for a lock that does not exist should return exit code 0 87 | assert_ExitCodeForCommand "0" "${bin_path} -checkLock ${root}/dist/locks/integration/1" 88 | assert_ExitCodeForCommand "0" "${bin_path} -checkLock locks/integration/1 -consul ${consul_server_uri}" 89 | 90 | # Test: Deleting a lock which does not exist should return exit code 1 91 | assert_ExitCodeForCommand "1" "${bin_path} -deleteLock ${root}/dist/locks/integration/1" 92 | assert_ExitCodeForCommand "1" "${bin_path} -deleteLock locks/integration/1 -consul ${consul_server_uri}" 93 | 94 | # Test: Valid playbook which creates a hard-lock and then fails SHOULD leave the lock around afterwards 95 | assert_ExitCodeForCommand "6" "${bin_path} -playbook ${root_key}/good-postgres.yml -var 
test_date=`date "+%Y_%m_%d"` -lock locks/integration/1 -consul ${consul_server_uri}" 96 | assert_ExitCodeForCommand "3" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -lock locks/integration/1 -consul ${consul_server_uri}" 97 | assert_ExitCodeForCommand "3" "${bin_path} -checkLock locks/integration/1 -consul ${consul_server_uri}" 98 | assert_ExitCodeForCommand "0" "${bin_path} -deleteLock locks/integration/1 -consul ${consul_server_uri}" 99 | assert_ExitCodeForCommand "6" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -lock ${root}/dist/integration-lock" 100 | assert_ExitCodeForCommand "3" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -lock ${root}/dist/integration-lock" 101 | assert_ExitCodeForCommand "3" "${bin_path} -checkLock ${root}/dist/integration-lock" 102 | assert_ExitCodeForCommand "0" "${bin_path} -deleteLock ${root}/dist/integration-lock" 103 | 104 | # Test: Valid playbook which creates a soft-lock and then fails SHOULD NOT leave the lock around afterwards 105 | assert_ExitCodeForCommand "6" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -softLock locks/integration/1 -consul ${consul_server_uri}" 106 | assert_ExitCodeForCommand "0" "${bin_path} -checkLock locks/integration/1 -consul ${consul_server_uri}" 107 | assert_ExitCodeForCommand "1" "${bin_path} -deleteLock locks/integration/1 -consul ${consul_server_uri}" 108 | assert_ExitCodeForCommand "6" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -softLock ${root}/dist/integration-lock" 109 | assert_ExitCodeForCommand "0" "${bin_path} -checkLock ${root}/dist/integration-lock" 110 | assert_ExitCodeForCommand "1" "${bin_path} -deleteLock ${root}/dist/integration-lock" 111 | 112 | # Test: Valid playbook which creates a hard/soft-lock and then succeeds SHOULD NOT leave the lock around afterwards 113 | 
assert_ExitCodeForCommand "0" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -lock locks/integration/1 -consul ${consul_server_uri}" 114 | assert_ExitCodeForCommand "0" "${bin_path} -checkLock locks/integration/1 -consul ${consul_server_uri}" 115 | assert_ExitCodeForCommand "0" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -lock ${root}/dist/integration-lock" 116 | assert_ExitCodeForCommand "0" "${bin_path} -checkLock ${root}/dist/integration-lock" 117 | assert_ExitCodeForCommand "0" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -softLock locks/integration/1 -consul ${consul_server_uri}" 118 | assert_ExitCodeForCommand "0" "${bin_path} -checkLock locks/integration/1 -consul ${consul_server_uri}" 119 | assert_ExitCodeForCommand "0" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -softLock ${root}/dist/integration-lock" 120 | assert_ExitCodeForCommand "0" "${bin_path} -checkLock ${root}/dist/integration-lock" 121 | 122 | # Test: Invalid playbook which creates a hard/soft-lock but is run using -dryRun should return exit code 0 123 | assert_ExitCodeForCommand "5" "${bin_path} -playbook ${root_key}/bad-mixed.yml -lock ${root}/dist/integration-lock -dryRun" 124 | assert_ExitCodeForCommand "0" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -lock ${root}/dist/integration-lock -dryRun" 125 | 126 | # Test: Valid playbook outputs proper results from playbooks using -showQueryOutput 127 | assert_ExitCodeForCommand "6" "${bin_path} -showQueryOutput -playbook ${root_key}/good-postgres.yml" 128 | 129 | # Test: Valid playbook which uses playbook template variables 130 | assert_ExitCodeForCommand "6" "${bin_path} -playbook 
${root_key}/good-postgres-with-template.yml -var password=,host=localhost" 131 | assert_ExitCodeForCommand "6" "${bin_path} -playbook ${root_key}/good-postgres-with-template.yml" 132 | assert_ExitCodeForCommand "0" "${bin_path} -playbook ${root_key}/good-postgres-with-template.yml -var username=postgres,password=,host=localhost" 133 | 134 | # Test: Truncated steps field in playbook should return exit code 1 135 | assert_ExitCodeForCommand "1" "${bin_path} -playbook ${root_key}/good-postgres-truncated.yml -lock ${root}/dist/integration-lock" 136 | 137 | # Test: fillTemplate option should return exit code 8 138 | assert_ExitCodeForCommand "8" "${bin_path} -fillTemplates -playbook ${root_key}/good-postgres-with-template.yml -var username=postgres,password=,host=localhost" 139 | 140 | printf "==========================================================\n" 141 | printf " INTEGRATION TESTS SUCCESSFUL\n" 142 | printf "==========================================================\n" 143 | -------------------------------------------------------------------------------- /integration/setup_consul.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 4 | # 5 | # This program is licensed to you under the Apache License Version 2.0, 6 | # and you may not use this file except in compliance with the Apache License Version 2.0. 7 | # You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the Apache License Version 2.0 is distributed on an 11 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
13 | 14 | set -e 15 | 16 | # ----------------------------------------------------------------------------- 17 | # CONSTANTS 18 | # ----------------------------------------------------------------------------- 19 | 20 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 21 | CONSUL_SERVER_URL=http://localhost:8502 22 | ROOT_KEY=${DIR}/resources 23 | 24 | # ----------------------------------------------------------------------------- 25 | # EXECUTION 26 | # ----------------------------------------------------------------------------- 27 | 28 | declare -a consul_keys=( 29 | "${ROOT_KEY}/good-postgres.yml" 30 | "${ROOT_KEY}/postgres-sql/bad/1.sql" 31 | "${ROOT_KEY}/postgres-sql/good/1.sql" 32 | "${ROOT_KEY}/postgres-sql/good/2a.sql" 33 | "${ROOT_KEY}/postgres-sql/good/2b.sql" 34 | "${ROOT_KEY}/postgres-sql/good/3.sql" 35 | "${ROOT_KEY}/postgres-sql/good/assert.sql" 36 | "${ROOT_KEY}/postgres-sql/good/output.sql" 37 | ) 38 | 39 | echo " --- Stubbing Consul key values for integration tests --- " 40 | 41 | for i in "${!consul_keys[@]}" 42 | do 43 | : 44 | key=${consul_keys[$i]} 45 | value=`cat ${key}` 46 | res=`curl -s -XPUT -d "${value}" ${CONSUL_SERVER_URL}/v1/kv${key}` 47 | echo "PUT result for key ${key}: ${res}" 48 | done 49 | 50 | echo " --- Done! --- " 51 | -------------------------------------------------------------------------------- /integration/setup_postgres.sql: -------------------------------------------------------------------------------- 1 | CREATE USER snowplow WITH PASSWORD 'snowplow'; 2 | ALTER ROLE snowplow WITH superuser; 3 | CREATE DATABASE sql_runner_tests_1 OWNER snowplow; 4 | CREATE DATABASE sql_runner_tests_2 OWNER snowplow; 5 | -------------------------------------------------------------------------------- /sql_runner/aws_utils.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 
2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 11 | package main 12 | 13 | import ( 14 | "fmt" 15 | 16 | "github.com/aws/aws-sdk-go/aws/credentials" 17 | "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" 18 | ) 19 | 20 | func awsCredentials(creds *credentials.Credentials) (string, error) { 21 | value, err := creds.Get() 22 | accessKeyID := value.AccessKeyID 23 | secretAccessKey := value.SecretAccessKey 24 | return fmt.Sprintf("CREDENTIALS 'aws_access_key_id=%s;aws_secret_access_key=%s'", accessKeyID, secretAccessKey), err 25 | } 26 | 27 | func awsEnvCredentials() (string, error) { 28 | creds := credentials.NewEnvCredentials() 29 | return awsCredentials(creds) 30 | } 31 | 32 | func awsProfileCredentials(profile string) (string, error) { 33 | creds := credentials.NewSharedCredentials("", profile) 34 | return awsCredentials(creds) 35 | } 36 | 37 | func awsEC2RoleCredentials() (string, error) { 38 | creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) 39 | return awsCredentials(creds) 40 | } 41 | 42 | func awsChainCredentials(profile string) (string, error) { 43 | creds := credentials.NewChainCredentials( 44 | []credentials.Provider{ 45 | &credentials.EnvProvider{}, 46 | &credentials.SharedCredentialsProvider{Filename: "", Profile: profile}, 47 | &ec2rolecreds.EC2RoleProvider{}, 48 | }) 49 | return awsCredentials(creds) 50 | } 51 | 
-------------------------------------------------------------------------------- /sql_runner/aws_utils_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 11 | package main 12 | 13 | import ( 14 | "os" 15 | "testing" 16 | 17 | "github.com/stretchr/testify/assert" 18 | ) 19 | 20 | func TestAwsEnvCredentials(t *testing.T) { 21 | assert := assert.New(t) 22 | 23 | str, err := awsEnvCredentials() 24 | assert.NotNil(err) 25 | assert.NotNil(str) 26 | assert.Equal("EnvAccessKeyNotFound: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", err.Error()) 27 | assert.Equal("CREDENTIALS 'aws_access_key_id=;aws_secret_access_key='", str) 28 | 29 | os.Setenv("AWS_ACCESS_KEY_ID", "some-aws-key") 30 | os.Setenv("AWS_SECRET_ACCESS_KEY", "some-aws-secret") 31 | 32 | str, err = awsEnvCredentials() 33 | assert.NotNil(str) 34 | assert.Nil(err) 35 | assert.Equal("CREDENTIALS 'aws_access_key_id=some-aws-key;aws_secret_access_key=some-aws-secret'", str) 36 | } 37 | 38 | func TestAwsProfileCredentials(t *testing.T) { 39 | assert := assert.New(t) 40 | 41 | str, err := awsProfileCredentials("fake-profile") 42 | assert.NotNil(err) 43 | assert.NotNil(str) 44 | assert.Equal("CREDENTIALS 
'aws_access_key_id=;aws_secret_access_key='", str) 45 | 46 | /** 47 | str, err = awsProfileCredentials("default") 48 | assert.NotNil(str) 49 | assert.Nil(err) 50 | assert.Equal("CREDENTIALS 'aws_access_key_id=some-aws-key;aws_secret_access_key=some-aws-secret'", str) 51 | */ 52 | } 53 | 54 | func TestAwsChainCredentials(t *testing.T) { 55 | assert := assert.New(t) 56 | 57 | os.Setenv("AWS_ACCESS_KEY_ID", "some-aws-key") 58 | os.Setenv("AWS_SECRET_ACCESS_KEY", "some-aws-secret") 59 | 60 | str, err := awsChainCredentials("fake-profile") 61 | assert.NotNil(str) 62 | assert.Nil(err) 63 | assert.Equal("CREDENTIALS 'aws_access_key_id=some-aws-key;aws_secret_access_key=some-aws-secret'", str) 64 | 65 | str, err = awsChainCredentials("default") 66 | assert.NotNil(str) 67 | assert.Nil(err) 68 | assert.Equal("CREDENTIALS 'aws_access_key_id=some-aws-key;aws_secret_access_key=some-aws-secret'", str) 69 | } 70 | -------------------------------------------------------------------------------- /sql_runner/bigquery_target.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
11 | package main 12 | 13 | import ( 14 | "fmt" 15 | "log" 16 | "os" 17 | "strings" 18 | 19 | bq "cloud.google.com/go/bigquery" 20 | "github.com/olekukonko/tablewriter" 21 | "golang.org/x/net/context" 22 | "google.golang.org/api/iterator" 23 | ) 24 | 25 | // BigQueryTarget represents BigQuery as a target. 26 | type BigQueryTarget struct { 27 | Target 28 | Client *bq.Client 29 | } 30 | 31 | // IsConnectable tests connection to determine whether the BigQuery target is 32 | // connectable. 33 | func (bqt BigQueryTarget) IsConnectable() bool { 34 | var err error = nil 35 | ctx := context.Background() 36 | 37 | client := bqt.Client 38 | query := client.Query("SELECT 1") // empty query to test connection 39 | 40 | it, err := query.Read(ctx) 41 | if err != nil { 42 | log.Printf("ERROR: Failed to perform test query: %v", err) 43 | return false 44 | } 45 | 46 | var row []bq.Value 47 | err = it.Next(&row) 48 | if err != nil { 49 | log.Printf("ERROR: Failed to read test query results: %v", err) 50 | return false 51 | } 52 | 53 | return fmt.Sprint(row) == "[1]" 54 | } 55 | 56 | // NewBigQueryTarget returns a ptr to a BigQueryTarget. 57 | func NewBigQueryTarget(target Target) (*BigQueryTarget, error) { 58 | projectID := target.Project 59 | ctx := context.Background() 60 | 61 | client, err := bq.NewClient(ctx, projectID) 62 | if err != nil { 63 | return nil, err 64 | } 65 | 66 | client.Location = target.Region 67 | 68 | return &BigQueryTarget{target, client}, nil 69 | } 70 | 71 | // GetTarget returns the Target field of BigQueryTarget. 72 | func (bqt BigQueryTarget) GetTarget() Target { 73 | return bqt.Target 74 | } 75 | 76 | // RunQuery runs a query against the target. 
func (bqt BigQueryTarget) RunQuery(query ReadyQuery, dryRun bool, showQueryOutput bool) QueryStatus {
	var affected int64 = 0
	var err error = nil
	var schema bq.Schema = nil
	ctx := context.Background()

	// Dry-run mode only checks connectivity; no SQL is executed and the
	// returned status always carries a zero row count and a nil error.
	if dryRun {
		if bqt.IsConnectable() {
			log.Printf("SUCCESS: Able to connect to target database, %s.", bqt.Project)
		} else {
			log.Printf("ERROR: Cannot connect to target database, %s.", bqt.Project)
		}
		return QueryStatus{query, query.Path, 0, nil}
	}

	script := query.Script

	// Empty or whitespace-only scripts are skipped and reported as success.
	if len(strings.TrimSpace(script)) > 0 {
		// If showing query output, perform a dry run to get column metadata
		if showQueryOutput {
			dq := bqt.Client.Query(script)
			dq.DryRun = true
			dqJob, err := dq.Run(ctx)
			if err != nil {
				log.Printf("ERROR: Failed to dry run job: %s.", err)
				return QueryStatus{query, query.Path, int(affected), err}
			}

			schema = dqJob.LastStatus().Statistics.Details.(*bq.QueryStatistics).Schema
		}

		q := bqt.Client.Query(script)

		// NOTE(review): `job, err :=` shadows the function-level err inside
		// this block; since every error path returns early, the final return
		// below always carries a nil error. Harmless, but worth tidying.
		job, err := q.Run(ctx)
		if err != nil {
			log.Printf("ERROR: Failed to run job: %s.", err)
			return QueryStatus{query, query.Path, int(affected), err}
		}

		it, err := job.Read(ctx)
		if err != nil {
			log.Printf("ERROR: Failed to read job results: %s.", err)
			return QueryStatus{query, query.Path, int(affected), err}
		}

		status, err := job.Status(ctx)
		if err != nil {
			log.Printf("ERROR: Failed to read job results: %s.", err)
			return QueryStatus{query, query.Path, int(affected), err}
		}
		if err := status.Err(); err != nil {
			log.Printf("ERROR: Error running job: %s.", err)
			return QueryStatus{query, query.Path, int(affected), err}
		}

		if showQueryOutput {
			err = printBqTable(it, schema)
			if err != nil {
				log.Printf("ERROR: Failed to print output: %s.", err)
				return QueryStatus{query, query.Path, int(affected), err}
			}
		} else {
			// DML row counts are only accumulated when output printing is off.
			queryStats := job.LastStatus().Statistics.Details.(*bq.QueryStatistics)
			aff := queryStats.NumDMLAffectedRows
			affected += aff
		}
	}

	return QueryStatus{query, query.Path, int(affected), err}
}

// printBqTable renders the iterator's rows as an ASCII table on stdout,
// taking column headers from the dry-run schema. Returns any iterator error;
// prints nothing (not even headers) when the result set is empty.
func printBqTable(rows *bq.RowIterator, schema bq.Schema) error {
	outputBuffer := make([][]string, 0, 10)

	for {
		var row []bq.Value
		err := rows.Next(&row)
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		outputBuffer = append(outputBuffer, bqStringify(row))
	}

	if len(outputBuffer) > 0 {
		log.Printf("QUERY OUTPUT:\n")
		table := tablewriter.NewWriter(os.Stdout)

		// Get columns from table schema
		columns := make([]string, len(schema))
		for i, field := range schema {
			columns[i] = field.Name
		}
		table.SetHeader(columns)

		table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
		table.SetCenterSeparator("|")

		for _, row := range outputBuffer {
			table.Append(row)
		}

		table.Render() // Send output
	}
	return nil
}

// bqStringify converts one result row to strings via fmt.Sprint.
func bqStringify(row []bq.Value) []string {
	var line []string
	for _, element := range row {
		line = append(line, fmt.Sprint(element))
	}
	return line
}
--------------------------------------------------------------------------------
/sql_runner/consul_provider.go:
--------------------------------------------------------------------------------
// Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved.
//
// This program is licensed to you under the Apache License Version 2.0,
// and you may not use this file except in compliance with the Apache License Version 2.0.
5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 11 | package main 12 | 13 | // ConsulPlaybookProvider represents consul information as playbook provider. 14 | type ConsulPlaybookProvider struct { 15 | consulAddress string 16 | consulKey string 17 | variables map[string]string 18 | } 19 | 20 | // NewConsulPlaybookProvider returns a ptr to ConsulPlaybookProvider. 21 | func NewConsulPlaybookProvider(consulAddress, consulKey string, variables map[string]string) *ConsulPlaybookProvider { 22 | return &ConsulPlaybookProvider{ 23 | consulAddress: consulAddress, 24 | consulKey: consulKey, 25 | variables: variables, 26 | } 27 | } 28 | 29 | // GetPlaybook returns a ptr to a playbook from consul. 30 | func (p ConsulPlaybookProvider) GetPlaybook() (*Playbook, error) { 31 | lines, err := GetBytesFromConsul(p.consulAddress, p.consulKey) 32 | if err != nil { 33 | return nil, err 34 | } 35 | 36 | return parsePlaybookYaml(lines, p.variables) 37 | } 38 | -------------------------------------------------------------------------------- /sql_runner/consul_utils.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 
6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 11 | package main 12 | 13 | import ( 14 | "fmt" 15 | 16 | "github.com/hashicorp/consul/api" 17 | ) 18 | 19 | // GetConsulClient returns a consul client given an address. 20 | func GetConsulClient(address string) (*api.Client, error) { 21 | // Add address to config 22 | conf := api.DefaultConfig() 23 | conf.Address = address 24 | 25 | // Connect to consul 26 | return api.NewClient(conf) 27 | } 28 | 29 | // GetBytesFromConsul attempts to return the bytes 30 | // of a key stored in a Consul server 31 | func GetBytesFromConsul(address string, key string) ([]byte, error) { 32 | client, _ := GetConsulClient(address) 33 | kv := client.KV() 34 | 35 | // Get the KV Pair from consul 36 | pair, _, err := kv.Get(key, nil) 37 | if err != nil { 38 | return nil, err 39 | } 40 | 41 | if pair != nil { 42 | return pair.Value, nil 43 | } 44 | return nil, fmt.Errorf("The key '%s' returned a nil value from the consul server", key) 45 | } 46 | 47 | // GetStringValueFromConsul attempts to return 48 | // the string value of a key stored in a Consul server 49 | func GetStringValueFromConsul(address string, key string) (string, error) { 50 | bytes, err := GetBytesFromConsul(address, key) 51 | 52 | if err != nil { 53 | return "", err 54 | } 55 | return string(bytes), nil 56 | } 57 | 58 | // PutBytesToConsul attempts to push a new 59 | // KV pair to a Consul Server 60 | func PutBytesToConsul(address string, key string, value []byte) error { 61 | client, _ := GetConsulClient(address) 62 | kv := client.KV() 63 | 64 | // Put a new KV pair to consul 65 | p := &api.KVPair{Key: key, Value: value} 66 | _, err := kv.Put(p, nil) 67 | 
return err 68 | } 69 | 70 | // PutStringValueToConsul attempts to push a new 71 | // KV pair to a Consul Server 72 | func PutStringValueToConsul(address string, key string, value string) error { 73 | return PutBytesToConsul(address, key, []byte(value)) 74 | } 75 | 76 | // DeleteValueFromConsul attempts to delete a 77 | // KV pair from a Consul Server 78 | func DeleteValueFromConsul(address string, key string) error { 79 | client, _ := GetConsulClient(address) 80 | kv := client.KV() 81 | 82 | // Delete the KV pair 83 | _, err := kv.Delete(key, nil) 84 | return err 85 | } 86 | -------------------------------------------------------------------------------- /sql_runner/consul_utils_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
11 | package main 12 | 13 | import ( 14 | "testing" 15 | 16 | "github.com/stretchr/testify/assert" 17 | ) 18 | 19 | func TestPutGetDelStringValueFromConsul_Failure(t *testing.T) { 20 | assert := assert.New(t) 21 | 22 | err := PutStringValueToConsul("localhost", "somekey", "somevalue") 23 | assert.NotNil(err) 24 | 25 | str, err := GetStringValueFromConsul("localhost", "somekey") 26 | assert.Equal("", str) 27 | assert.NotNil(err) 28 | 29 | err = DeleteValueFromConsul("localhost", "somekey") 30 | assert.NotNil(err) 31 | } 32 | 33 | func TestPutGetDelStringValueFromConsul_Success(t *testing.T) { 34 | assert := assert.New(t) 35 | 36 | err := PutStringValueToConsul("localhost:8502", "somekey", "somevalue") 37 | assert.Nil(err) 38 | 39 | str, err := GetStringValueFromConsul("localhost:8502", "somekey") 40 | assert.Nil(err) 41 | assert.NotNil(str) 42 | assert.Equal("somevalue", str) 43 | 44 | err = DeleteValueFromConsul("localhost:8502", "somekey") 45 | assert.Nil(err) 46 | } 47 | -------------------------------------------------------------------------------- /sql_runner/db.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
11 | package main 12 | 13 | import ( 14 | "bytes" 15 | "text/template" 16 | ) 17 | 18 | // Db is a generalized interface to a database client. 19 | type Db interface { 20 | RunQuery(ReadyQuery, bool, bool) QueryStatus 21 | GetTarget() Target 22 | IsConnectable() bool 23 | } 24 | 25 | // Reads the script and fills in the template 26 | func prepareQuery(queryPath string, sp SQLProvider, template bool, variables map[string]interface{}) (string, error) { 27 | 28 | var script string 29 | var err error 30 | 31 | script, err = sp.GetSQL(queryPath) 32 | 33 | if err != nil { 34 | return "", err 35 | } 36 | 37 | if template { 38 | script, err = fillTemplate(script, variables) // Yech, mutate 39 | if err != nil { 40 | return "", err 41 | } 42 | } 43 | return script, nil 44 | } 45 | 46 | // Fills in a script which is a template 47 | func fillTemplate(script string, variables map[string]interface{}) (string, error) { 48 | t, err := template.New("playbook").Funcs(TemplFuncs).Parse(script) 49 | if err != nil { 50 | return "", err 51 | } 52 | 53 | var filled bytes.Buffer 54 | if err := t.Execute(&filled, variables); err != nil { 55 | return "", err 56 | } 57 | return filled.String(), nil 58 | } 59 | -------------------------------------------------------------------------------- /sql_runner/file_utils.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 
6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 11 | package main 12 | 13 | import ( 14 | "io/ioutil" 15 | "os" 16 | ) 17 | 18 | // loadLocalFile reads a whole file into memory 19 | // and returns a slice of its lines. 20 | func loadLocalFile(path string) ([]byte, error) { 21 | file, err := os.Open(path) 22 | if err != nil { 23 | return nil, err 24 | } 25 | defer file.Close() 26 | 27 | return ioutil.ReadAll(file) 28 | } 29 | -------------------------------------------------------------------------------- /sql_runner/file_utils_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
11 | package main 12 | 13 | import ( 14 | "testing" 15 | 16 | "github.com/stretchr/testify/assert" 17 | ) 18 | 19 | func TestLoadLocalFile(t *testing.T) { 20 | assert := assert.New(t) 21 | 22 | bytes, err := loadLocalFile("/this/path/does/not/exist") 23 | assert.Nil(bytes) 24 | assert.NotNil(err) 25 | assert.Equal("open /this/path/does/not/exist: no such file or directory", err.Error()) 26 | 27 | bytes, err = loadLocalFile("../VERSION") 28 | assert.NotNil(bytes) 29 | assert.Nil(err) 30 | assert.Equal(cliVersion, string(bytes)) 31 | } 32 | -------------------------------------------------------------------------------- /sql_runner/lock_file.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 11 | package main 12 | 13 | import ( 14 | "fmt" 15 | "log" 16 | "os" 17 | "path/filepath" 18 | "time" 19 | ) 20 | 21 | // LockFile holds information for a lock file. 22 | type LockFile struct { 23 | Path string 24 | SoftLock bool 25 | ConsulAddress string 26 | locked bool 27 | } 28 | 29 | // InitLockFile creates a LockFile object 30 | // which is used to ensures jobs can not run 31 | // at the same time. 
32 | func InitLockFile(path string, softLock bool, consulAddress string) (LockFile, error) { 33 | lockFile := LockFile{ 34 | Path: path, 35 | SoftLock: softLock, 36 | ConsulAddress: consulAddress, 37 | locked: false, 38 | } 39 | 40 | if lockFile.LockExists() { 41 | return lockFile, fmt.Errorf("%s found on start, previous run failed or is ongoing. Cannot start", path) 42 | } 43 | return lockFile, nil 44 | } 45 | 46 | // Lock creates a new lock file or kv entry 47 | func (lf *LockFile) Lock() error { 48 | if lf.locked == true { 49 | return fmt.Errorf("cannot Lock: LockFile is already locked") 50 | } 51 | 52 | value := time.Now().UTC().Format("2006-01-02T15:04:05-0700") 53 | 54 | log.Printf("Checking and setting the lockfile at this key '%s'", lf.Path) 55 | 56 | if lf.ConsulAddress == "" { 57 | // Check if dir exists 58 | dirStr := filepath.Dir(lf.Path) 59 | if _, err := os.Stat(dirStr); os.IsNotExist(err) { 60 | return fmt.Errorf("directory for key does not exist") 61 | } 62 | 63 | // Create the file 64 | f, err := os.OpenFile(lf.Path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) 65 | defer f.Close() 66 | if err != nil { 67 | return err 68 | } 69 | 70 | // Write a line to it 71 | _, err = f.WriteString(value) 72 | if err != nil { 73 | return err 74 | } 75 | 76 | lf.locked = true 77 | return nil 78 | } 79 | 80 | // Create the KV pair 81 | err := PutStringValueToConsul(lf.ConsulAddress, lf.Path, value) 82 | if err != nil { 83 | return err 84 | } 85 | 86 | lf.locked = true 87 | return nil 88 | } 89 | 90 | // Unlock deletes the lock or kv entry 91 | func (lf *LockFile) Unlock() error { 92 | 93 | log.Printf("Deleting lockfile at this key '%s'", lf.Path) 94 | 95 | if lf.ConsulAddress == "" { 96 | // Delete the file 97 | err := os.Remove(lf.Path) 98 | if err != nil { 99 | return err 100 | } 101 | 102 | lf.locked = false 103 | return nil 104 | } 105 | 106 | // Delete the KV pair 107 | err := DeleteValueFromConsul(lf.ConsulAddress, lf.Path) 108 | if err != nil { 109 | return 
err 110 | } 111 | 112 | lf.locked = false 113 | return nil 114 | } 115 | 116 | // LockExists checks if the lock file 117 | // exists already 118 | func (lf *LockFile) LockExists() bool { 119 | if lf.ConsulAddress == "" { 120 | if _, err := os.Stat(lf.Path); os.IsNotExist(err) { 121 | return false 122 | } 123 | return true 124 | } 125 | 126 | value, err := GetStringValueFromConsul(lf.ConsulAddress, lf.Path) 127 | if err != nil && value == "" { 128 | return false 129 | } 130 | return true 131 | } 132 | -------------------------------------------------------------------------------- /sql_runner/lock_file_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
11 | package main 12 | 13 | import ( 14 | "testing" 15 | 16 | "github.com/stretchr/testify/assert" 17 | ) 18 | 19 | // TestInitLockFile_Local tests setting up a lockfile 20 | // on the local file system 21 | func TestInitLockFile_Local(t *testing.T) { 22 | assert := assert.New(t) 23 | 24 | lockFile, err := InitLockFile("../dist/lock.lockfile", false, "") 25 | 26 | assert.Nil(err) 27 | assert.Equal("../dist/lock.lockfile", lockFile.Path) 28 | assert.False(lockFile.SoftLock) 29 | assert.Equal("", lockFile.ConsulAddress) 30 | assert.False(lockFile.LockExists()) 31 | } 32 | 33 | // TestLockUnlockFile_Local asserts that we can 34 | // lock and unlock using a local file server 35 | func TestLockUnlockFile_Local(t *testing.T) { 36 | assert := assert.New(t) 37 | 38 | lockFile, err := InitLockFile("../dist/lock.lockfile", false, "") 39 | assert.Nil(err) 40 | assert.False(lockFile.LockExists()) 41 | 42 | err = lockFile.Lock() 43 | assert.Nil(err) 44 | assert.True(lockFile.LockExists()) 45 | 46 | err = lockFile.Lock() 47 | assert.NotNil(err) 48 | assert.Equal("cannot Lock: LockFile is already locked", err.Error()) 49 | assert.True(lockFile.LockExists()) 50 | 51 | _, err2 := InitLockFile("../dist/lock.lockfile", false, "") 52 | assert.NotNil(err2) 53 | assert.Equal("../dist/lock.lockfile found on start, previous run failed or is ongoing. 
Cannot start", err2.Error()) 54 | 55 | err = lockFile.Unlock() 56 | assert.Nil(err) 57 | assert.False(lockFile.LockExists()) 58 | 59 | err = lockFile.Unlock() 60 | assert.NotNil(err) 61 | assert.Equal("remove ../dist/lock.lockfile: no such file or directory", err.Error()) 62 | assert.False(lockFile.LockExists()) 63 | } 64 | 65 | // TestInitLockFile_LocalFailure tests setting up a lockfile 66 | // on the local file system that does not exist 67 | func TestInitLockFile_LocalFailure(t *testing.T) { 68 | assert := assert.New(t) 69 | 70 | lockFile, err := InitLockFile("dist/lock.lockfile", false, "") 71 | 72 | assert.Nil(err) 73 | assert.Equal("dist/lock.lockfile", lockFile.Path) 74 | assert.False(lockFile.SoftLock) 75 | assert.Equal("", lockFile.ConsulAddress) 76 | assert.False(lockFile.LockExists()) 77 | } 78 | 79 | // TestLockUnlockFile_LocalFailure asserts that we can 80 | // lock and unlock using a local file server that does not exist 81 | func TestLockUnlockFile_LocalFailure(t *testing.T) { 82 | assert := assert.New(t) 83 | 84 | lockFile, err := InitLockFile("dist/lock.lockfile", false, "") 85 | assert.Nil(err) 86 | assert.False(lockFile.LockExists()) 87 | 88 | err = lockFile.Lock() 89 | assert.NotNil(err) 90 | assert.Equal("directory for key does not exist", err.Error()) 91 | assert.False(lockFile.LockExists()) 92 | } 93 | 94 | // TestInitLockFile_Consul tests setting up a lockfile 95 | // on a remote consul server 96 | func TestInitLockFile_Consul(t *testing.T) { 97 | assert := assert.New(t) 98 | 99 | lockFile, err := InitLockFile("dist/lock.lockfile", false, "localhost:8502") 100 | 101 | assert.Nil(err) 102 | assert.Equal("dist/lock.lockfile", lockFile.Path) 103 | assert.False(lockFile.SoftLock) 104 | assert.Equal("localhost:8502", lockFile.ConsulAddress) 105 | assert.False(lockFile.LockExists()) 106 | } 107 | 108 | // TestLockUnlockFile_Consul asserts that we can 109 | // lock and unlock using a consul server 110 | func TestLockUnlockFile_Consul(t *testing.T) 
{ 111 | assert := assert.New(t) 112 | 113 | lockFile, err := InitLockFile("dist/lock.lockfile", false, "localhost:8502") 114 | assert.Nil(err) 115 | assert.False(lockFile.LockExists()) 116 | 117 | err = lockFile.Lock() 118 | assert.Nil(err) 119 | assert.True(lockFile.LockExists()) 120 | 121 | err = lockFile.Lock() 122 | assert.NotNil(err) 123 | assert.Equal("cannot Lock: LockFile is already locked", err.Error()) 124 | assert.True(lockFile.LockExists()) 125 | 126 | _, err2 := InitLockFile("dist/lock.lockfile", false, "localhost:8502") 127 | assert.NotNil(err2) 128 | assert.Equal("dist/lock.lockfile found on start, previous run failed or is ongoing. Cannot start", err2.Error()) 129 | 130 | err = lockFile.Unlock() 131 | assert.Nil(err) 132 | assert.False(lockFile.LockExists()) 133 | 134 | err = lockFile.Unlock() 135 | assert.Nil(err) 136 | } 137 | 138 | // TestInitLockFile_ConsulFailure tests setting up a lockfile 139 | // on a remote consul server that does not exist 140 | func TestInitLockFile_ConsulFailure(t *testing.T) { 141 | assert := assert.New(t) 142 | 143 | lockFile, err := InitLockFile("dist/lock.lockfile", false, "localhost") 144 | 145 | assert.Nil(err) 146 | assert.Equal("dist/lock.lockfile", lockFile.Path) 147 | assert.False(lockFile.SoftLock) 148 | assert.Equal("localhost", lockFile.ConsulAddress) 149 | assert.False(lockFile.LockExists()) 150 | } 151 | 152 | // TestLockUnlockFile_ConsulFailure asserts that we can 153 | // lock and unlock using a consul server that does not exist 154 | func TestLockUnlockFile_ConsulFailure(t *testing.T) { 155 | assert := assert.New(t) 156 | 157 | lockFile, err := InitLockFile("dist/lock.lockfile", false, "localhost") 158 | assert.Nil(err) 159 | assert.False(lockFile.LockExists()) 160 | 161 | err = lockFile.Lock() 162 | assert.NotNil(err) 163 | assert.False(lockFile.LockExists()) 164 | 165 | err = lockFile.Lock() 166 | assert.NotNil(err) 167 | assert.False(lockFile.LockExists()) 168 | 169 | _, err2 := 
InitLockFile("dist/lock.lockfile", false, "localhost") 170 | assert.Nil(err2) 171 | assert.False(lockFile.LockExists()) 172 | 173 | err = lockFile.Unlock() 174 | assert.NotNil(err) 175 | assert.False(lockFile.LockExists()) 176 | 177 | err = lockFile.Unlock() 178 | assert.NotNil(err) 179 | assert.False(lockFile.LockExists()) 180 | } 181 | -------------------------------------------------------------------------------- /sql_runner/main.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
11 | package main 12 | 13 | import ( 14 | "errors" 15 | "fmt" 16 | "log" 17 | "os" 18 | "path/filepath" 19 | "strings" 20 | 21 | "github.com/kardianos/osext" 22 | ) 23 | 24 | const ( 25 | cliName = "sql-runner" 26 | cliDescription = `Run playbooks of SQL scripts in series and parallel on Redshift, Postgres, BigQuery and Snowflake` 27 | cliVersion = "0.11.0" 28 | 29 | sqlrootBinary = "BINARY" 30 | sqlrootPlaybook = "PLAYBOOK" 31 | sqlrootPlaybookChild = "PLAYBOOK_CHILD" 32 | ) 33 | 34 | // main is the entry point for the application 35 | func main() { 36 | 37 | options := processFlags() 38 | 39 | lockFile, lockErr := LockFileFromOptions(options) 40 | if lockErr != nil { 41 | log.Printf("Error: %s", lockErr.Error()) 42 | os.Exit(3) 43 | } 44 | 45 | pbp, pbpErr := PlaybookProviderFromOptions(options) 46 | if pbpErr != nil { 47 | log.Fatalf("Could not determine playbook source: %s", pbpErr.Error()) 48 | } 49 | 50 | pb, err := pbp.GetPlaybook() 51 | if err != nil { 52 | log.Fatalf("Error getting playbook: %s", err.Error()) 53 | } 54 | 55 | if err := pb.Validate(); err != nil { 56 | log.Fatalf("Invalid playbook: %s", err.Error()) 57 | } 58 | 59 | pb.MergeCLIVariables(options.variables) 60 | 61 | sp, spErr := SQLProviderFromOptions(options) 62 | if spErr != nil { 63 | log.Fatalf("Could not determine sql source: %s", spErr.Error()) 64 | } 65 | 66 | // Lock it up... 
67 | if lockFile != nil { 68 | lockErr2 := lockFile.Lock() 69 | if lockErr2 != nil { 70 | log.Fatalf("Error making lock: %s", lockErr2.Error()) 71 | } 72 | } 73 | 74 | statuses := Run(*pb, sp, options.fromStep, options.runQuery, options.dryRun, options.fillTemplates, options.showQueryOutput) 75 | code, message := review(statuses) 76 | 77 | // Unlock on success and soft-lock 78 | if lockFile != nil { 79 | if code == 0 || code == 8 || lockFile.SoftLock { 80 | lockFile.Unlock() 81 | } 82 | } 83 | 84 | log.Print(message) 85 | os.Exit(code) 86 | } 87 | 88 | // processFlags parses the arguments provided to 89 | // the main function. 90 | func processFlags() Options { 91 | 92 | var options Options = NewOptions() 93 | var fs = options.GetFlagSet() 94 | fs.Parse(os.Args[1:]) 95 | 96 | if options.version { 97 | fmt.Printf("%s version: %s\n", cliName, cliVersion) 98 | os.Exit(0) 99 | } 100 | 101 | if len(os.Args[1:]) == 0 || options.help { 102 | fmt.Printf("%s version: %s\n", cliName, cliVersion) 103 | fmt.Println(cliDescription) 104 | fmt.Println("Usage:") 105 | fs.PrintDefaults() 106 | os.Exit(0) 107 | } 108 | 109 | if options.checkLock != "" { 110 | lockFile, lockErr := LockFileFromOptions(options) 111 | if lockErr != nil { 112 | log.Printf("Error: %s found, previous run failed or is ongoing", lockFile.Path) 113 | os.Exit(3) 114 | } else { 115 | log.Printf("Success: %s does not exist", lockFile.Path) 116 | os.Exit(0) 117 | } 118 | } 119 | 120 | if options.deleteLock != "" { 121 | lockFile, lockErr := LockFileFromOptions(options) 122 | if lockErr != nil { 123 | unlockErr := lockFile.Unlock() 124 | if unlockErr != nil { 125 | log.Printf("Error: %s found but could not delete: %s", lockFile.Path, unlockErr.Error()) 126 | os.Exit(1) 127 | } else { 128 | log.Printf("Success: %s found and deleted", lockFile.Path) 129 | os.Exit(0) 130 | } 131 | } else { 132 | log.Printf("Error: %s does not exist, nothing to delete", lockFile.Path) 133 | os.Exit(1) 134 | } 135 | } 136 | 137 | if 
options.playbook == "" { 138 | fmt.Println("required flag not defined: -playbook") 139 | os.Exit(2) 140 | } 141 | 142 | sr, err := resolveSQLRoot(options.sqlroot, options.playbook, options.consul, options.consulOnlyForLock) 143 | if err != nil { 144 | fmt.Printf("Error resolving -sqlroot: %s\n%s\n", options.sqlroot, err) 145 | os.Exit(2) 146 | } 147 | options.sqlroot = sr // Yech, mutate in place 148 | 149 | return options 150 | } 151 | 152 | // --- Options resolvers 153 | 154 | // PlaybookProviderFromOptions returns a provider of the Playbook 155 | // based on flags passed in 156 | func PlaybookProviderFromOptions(options Options) (PlaybookProvider, error) { 157 | if options.consulOnlyForLock { 158 | return NewYAMLFilePlaybookProvider(options.playbook, options.variables), nil 159 | } else if options.consul != "" { 160 | return NewConsulPlaybookProvider(options.consul, options.playbook, options.variables), nil 161 | } else if options.playbook != "" { 162 | return NewYAMLFilePlaybookProvider(options.playbook, options.variables), nil 163 | } else { 164 | return nil, errors.New("Cannot determine provider for playbook") 165 | } 166 | } 167 | 168 | // SQLProviderFromOptions returns a provider of SQL files 169 | // based on flags passed in 170 | func SQLProviderFromOptions(options Options) (SQLProvider, error) { 171 | if options.consulOnlyForLock { 172 | return NewFileSQLProvider(options.sqlroot), nil 173 | } else if options.consul != "" { 174 | return NewConsulSQLProvider(options.consul, options.sqlroot), nil 175 | } else if options.playbook != "" { 176 | return NewFileSQLProvider(options.sqlroot), nil 177 | } else { 178 | return nil, errors.New("Cannot determine provider for sql") 179 | } 180 | } 181 | 182 | // LockFileFromOptions will check if a LockFile already 183 | // exists and will then either: 184 | // 1. Raise an error 185 | // 2. 
Set a new lock 186 | func LockFileFromOptions(options Options) (*LockFile, error) { 187 | 188 | // Do nothing if dry-run 189 | if options.dryRun == true { 190 | return nil, nil 191 | } 192 | 193 | var lockPath string 194 | var isSoftLock bool 195 | 196 | if options.lock != "" { 197 | lockPath = options.lock 198 | isSoftLock = false 199 | } else if options.softLock != "" { 200 | lockPath = options.softLock 201 | isSoftLock = true 202 | } else if options.checkLock != "" { 203 | lockPath = options.checkLock 204 | isSoftLock = false 205 | } else if options.deleteLock != "" { 206 | lockPath = options.deleteLock 207 | isSoftLock = false 208 | } else { 209 | // no-op 210 | return nil, nil 211 | } 212 | 213 | lockFile, err := InitLockFile(lockPath, isSoftLock, options.consul) 214 | 215 | return &lockFile, err 216 | } 217 | 218 | // --- SQLRoot resolvers 219 | 220 | // resolveSQLRoot returns the path to our SQL scripts 221 | func resolveSQLRoot(sqlroot string, playbookPath string, consulAddress string, consulOnlyForLock bool) (string, error) { 222 | consulErr1 := fmt.Errorf("Cannot use %s option with -consul argument", sqlroot) 223 | consulErr2 := fmt.Errorf("Cannot use %s option without -consul argument", sqlroot) 224 | consulErr3 := fmt.Errorf("Cannot use %s option with -consulOnlyForLock argument", sqlroot) 225 | 226 | if consulOnlyForLock { 227 | switch sqlroot { 228 | case sqlrootBinary: 229 | return osext.ExecutableFolder() 230 | case sqlrootPlaybook: 231 | return filepath.Abs(filepath.Dir(playbookPath)) 232 | case sqlrootPlaybookChild: 233 | return "", consulErr3 234 | default: 235 | return sqlroot, nil 236 | } 237 | } 238 | 239 | switch sqlroot { 240 | case sqlrootBinary: 241 | if consulAddress != "" { 242 | return "", consulErr1 243 | } 244 | return osext.ExecutableFolder() 245 | case sqlrootPlaybook: 246 | if consulAddress != "" { 247 | return getAbsConsulPath(playbookPath), nil 248 | } 249 | return filepath.Abs(filepath.Dir(playbookPath)) 250 | case 
sqlrootPlaybookChild: 251 | if consulAddress != "" { 252 | return playbookPath, nil 253 | } 254 | return "", consulErr2 255 | default: 256 | return sqlroot, nil 257 | } 258 | } 259 | 260 | // getAbsConsulPath returns an absolute path for Consul 261 | // one directory up 262 | func getAbsConsulPath(path string) string { 263 | strSpl := strings.Split(path, "/") 264 | trimSpl := strSpl[:len(strSpl)-1] 265 | return strings.Join(trimSpl, "/") 266 | } 267 | -------------------------------------------------------------------------------- /sql_runner/main_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
11 | package main 12 | 13 | import ( 14 | "strings" 15 | "testing" 16 | 17 | "github.com/stretchr/testify/assert" 18 | ) 19 | 20 | func TestLockFileFromOptions(t *testing.T) { 21 | assert := assert.New(t) 22 | 23 | options := Options{ 24 | dryRun: true, 25 | } 26 | lockFile, err := LockFileFromOptions(options) 27 | assert.Nil(lockFile) 28 | assert.Nil(err) 29 | 30 | options = Options{ 31 | dryRun: false, 32 | } 33 | lockFile, err = LockFileFromOptions(options) 34 | assert.Nil(lockFile) 35 | assert.Nil(err) 36 | 37 | options = Options{ 38 | dryRun: false, 39 | lock: "../dist/lock.lockfile", 40 | } 41 | lockFile, err = LockFileFromOptions(options) 42 | assert.Nil(err) 43 | assert.False(lockFile.SoftLock) 44 | assert.Equal("../dist/lock.lockfile", lockFile.Path) 45 | 46 | options = Options{ 47 | dryRun: false, 48 | softLock: "../dist/lock.lockfile", 49 | } 50 | lockFile, err = LockFileFromOptions(options) 51 | assert.Nil(err) 52 | assert.True(lockFile.SoftLock) 53 | assert.Equal("../dist/lock.lockfile", lockFile.Path) 54 | 55 | options = Options{ 56 | dryRun: false, 57 | checkLock: "../dist/lock.lockfile", 58 | } 59 | lockFile, err = LockFileFromOptions(options) 60 | assert.Nil(err) 61 | assert.False(lockFile.SoftLock) 62 | assert.Equal("../dist/lock.lockfile", lockFile.Path) 63 | 64 | options = Options{ 65 | dryRun: false, 66 | deleteLock: "../dist/lock.lockfile", 67 | } 68 | lockFile, err = LockFileFromOptions(options) 69 | assert.Nil(err) 70 | assert.False(lockFile.SoftLock) 71 | assert.Equal("../dist/lock.lockfile", lockFile.Path) 72 | } 73 | 74 | func TestResolveSqlRoot(t *testing.T) { 75 | assert := assert.New(t) 76 | 77 | str, err := resolveSQLRoot(sqlrootBinary, "../integration/resources/good-postgres.yml", "", false) 78 | assert.NotNil(str) 79 | assert.Nil(err) 80 | str, err = resolveSQLRoot(sqlrootBinary, "../integration/resources/good-postgres.yml", "localhost:8502", false) 81 | assert.NotNil(str) 82 | assert.NotNil(err) 83 | assert.Equal("", str) 84 | 
assert.Equal("Cannot use BINARY option with -consul argument", err.Error()) 85 | 86 | str, err = resolveSQLRoot(sqlrootPlaybook, "../integration/resources/good-postgres.yml", "", false) 87 | assert.NotNil(str) 88 | assert.Nil(err) 89 | assert.True(strings.HasSuffix(str, "/integration/resources")) 90 | str, err = resolveSQLRoot(sqlrootPlaybook, "../integration/resources/good-postgres.yml", "localhost:8502", false) 91 | assert.NotNil(str) 92 | assert.Nil(err) 93 | assert.True(strings.HasSuffix(str, "/integration/resources")) 94 | 95 | str, err = resolveSQLRoot(sqlrootPlaybookChild, "../integration/resources/good-postgres.yml", "", false) 96 | assert.NotNil(str) 97 | assert.NotNil(err) 98 | assert.Equal("", str) 99 | assert.Equal("Cannot use PLAYBOOK_CHILD option without -consul argument", err.Error()) 100 | str, err = resolveSQLRoot(sqlrootPlaybookChild, "../integration/resources/good-postgres.yml", "localhost:8502", false) 101 | assert.NotNil(str) 102 | assert.Nil(err) 103 | assert.Equal("../integration/resources/good-postgres.yml", str) 104 | 105 | str, err = resolveSQLRoot("random", "../integration/resources/good-postgres.yml", "localhost:8502", false) 106 | assert.NotNil(str) 107 | assert.Nil(err) 108 | assert.Equal("random", str) 109 | } 110 | -------------------------------------------------------------------------------- /sql_runner/options.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 
6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 11 | package main 12 | 13 | import ( 14 | "errors" 15 | "flag" 16 | "fmt" 17 | "strings" 18 | ) 19 | 20 | // CLIVariables represents the cli variables map. 21 | type CLIVariables map[string]string 22 | 23 | // Implement the Value interface 24 | func (i *CLIVariables) String() string { 25 | return fmt.Sprintf("%s", *i) 26 | } 27 | 28 | // Set adds a kv pair given as string to CLIVariables. 29 | func (i *CLIVariables) Set(value string) error { 30 | var split = strings.Split(value, ",") 31 | 32 | for value := range split { 33 | kv := strings.SplitN(split[value], "=", 2) 34 | 35 | if len(kv) != 2 { 36 | return errors.New("invalid size for key, value, key value should be in the key=value format") 37 | } 38 | 39 | (*i)[kv[0]] = kv[1] 40 | } 41 | return nil 42 | } 43 | 44 | // Options represents the SQL-Runner options. 45 | type Options struct { 46 | help bool 47 | version bool 48 | playbook string 49 | sqlroot string 50 | fromStep string 51 | dryRun bool 52 | consul string 53 | lock string 54 | softLock string 55 | checkLock string 56 | deleteLock string 57 | runQuery string 58 | variables CLIVariables 59 | fillTemplates bool 60 | consulOnlyForLock bool 61 | showQueryOutput bool 62 | } 63 | 64 | // NewOptions returns Options. 65 | func NewOptions() Options { 66 | return Options{variables: make(map[string]string)} 67 | } 68 | 69 | // GetFlagSet returns a ptr to the FlagSet. 
70 | func (o *Options) GetFlagSet() *flag.FlagSet { 71 | var fs = flag.NewFlagSet("Options", flag.ExitOnError) 72 | 73 | fs.BoolVar(&(o.help), "help", false, "Shows this message") 74 | fs.BoolVar(&(o.version), "version", false, "Shows the program version") 75 | fs.StringVar(&(o.playbook), "playbook", "", "Playbook of SQL scripts to execute") 76 | fs.StringVar(&(o.sqlroot), "sqlroot", sqlrootPlaybook, fmt.Sprintf("Absolute path to SQL scripts. Use %s, %s and %s for those respective paths", sqlrootPlaybook, sqlrootBinary, sqlrootPlaybookChild)) 77 | fs.Var(&(o.variables), "var", "Variables to be passed to the playbook, in the key=value format") 78 | fs.StringVar(&(o.fromStep), "fromStep", "", "Starts from a given step defined in your playbook") 79 | fs.BoolVar(&(o.dryRun), "dryRun", false, "Runs through a playbook without executing any of the SQL") 80 | fs.StringVar(&(o.consul), "consul", "", "The address of a consul server with playbooks and SQL files stored in KV pairs") 81 | fs.StringVar(&(o.lock), "lock", "", "Optional argument which checks and sets a lockfile to ensure this run is a singleton. 
Deletes lock on run completing successfully") 82 | fs.StringVar(&(o.softLock), "softLock", "", "Optional argument, like '-lock' but the lockfile will be deleted even if the run fails") 83 | fs.StringVar(&(o.checkLock), "checkLock", "", "Checks whether the lockfile already exists") 84 | fs.StringVar(&(o.deleteLock), "deleteLock", "", "Will attempt to delete a lockfile if it exists") 85 | fs.StringVar(&(o.runQuery), "runQuery", "", "Will run a single query in the playbook") 86 | fs.BoolVar(&(o.fillTemplates), "fillTemplates", false, "Will print all queries after templates are filled") 87 | fs.BoolVar(&(o.consulOnlyForLock), "consulOnlyForLock", false, "Will read playbooks locally, but use Consul for locking.") 88 | fs.BoolVar(&(o.showQueryOutput), "showQueryOutput", false, "Will print all output from queries") 89 | // TODO: add format flag if/when we support TOML 90 | 91 | return fs 92 | } 93 | -------------------------------------------------------------------------------- /sql_runner/playbook.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
11 | package main 12 | 13 | import ( 14 | "fmt" 15 | ) 16 | 17 | // Playbook maps exactly onto our YAML format 18 | type Playbook struct { 19 | Targets []Target 20 | Variables map[string]interface{} 21 | Steps []Step 22 | } 23 | 24 | // Target represents the playbook target. 25 | type Target struct { 26 | Name, Type, Host, Database, Port, Username, 27 | Password, Region, Account, Warehouse, Project string 28 | QueryTag string `yaml:"query_tag"` 29 | Ssl bool 30 | PrivateKeyPath string `yaml:"private_key_path"` 31 | PrivateKeyPassphrase string `yaml:"private_key_passphrase"` 32 | } 33 | 34 | // Step represents a playbook step. 35 | type Step struct { 36 | Name string 37 | Queries []Query 38 | } 39 | 40 | // Query represents a playbook query. 41 | type Query struct { 42 | Name, File string 43 | Template bool 44 | } 45 | 46 | // NewPlaybook initializes properly the Playbook. 47 | func NewPlaybook() Playbook { 48 | return Playbook{Variables: make(map[string]interface{})} 49 | } 50 | 51 | // MergeCLIVariables merges CLIVariables to playbook variables. 52 | func (p Playbook) MergeCLIVariables(variables map[string]string) Playbook { 53 | // TODO: Ideally this would return a new copy of the playbook to avoid 54 | // mutable state. 55 | for k, v := range variables { 56 | p.Variables[k] = v 57 | } 58 | return p 59 | } 60 | 61 | // Validate provides a way to fail fast if playbook is invalid. 62 | func (p Playbook) Validate() error { 63 | if p.Targets == nil || len(p.Targets) == 0 { 64 | return fmt.Errorf("no targets") 65 | } 66 | 67 | if p.Steps == nil || len(p.Steps) == 0 { 68 | return fmt.Errorf("no steps") 69 | } 70 | 71 | return nil 72 | } 73 | -------------------------------------------------------------------------------- /sql_runner/playbook_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 
2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 11 | package main 12 | 13 | import ( 14 | "reflect" 15 | "testing" 16 | 17 | "github.com/davecgh/go-spew/spew" 18 | "github.com/stretchr/testify/assert" 19 | ) 20 | 21 | func TestMergeCLIVariables(t *testing.T) { 22 | testCases := []struct { 23 | Name string 24 | Vars map[string]string 25 | Expected Playbook 26 | }{ 27 | { 28 | Name: "empty_map", 29 | Vars: make(map[string]string), 30 | Expected: NewPlaybook(), 31 | }, 32 | { 33 | Name: "happy_path", 34 | Vars: map[string]string{ 35 | "a": "A", 36 | "b": "B", 37 | }, 38 | Expected: Playbook{ 39 | Variables: map[string]interface{}{ 40 | "a": "A", 41 | "b": "B", 42 | }, 43 | }, 44 | }, 45 | } 46 | 47 | for _, tt := range testCases { 48 | t.Run(tt.Name, func(t *testing.T) { 49 | pb := NewPlaybook() 50 | pb.MergeCLIVariables(tt.Vars) 51 | 52 | if !reflect.DeepEqual(pb, tt.Expected) { 53 | t.Errorf("GOT:\n%s\nEXPECTED:\n%s", 54 | spew.Sdump(pb), 55 | spew.Sdump(tt.Expected)) 56 | } 57 | }) 58 | } 59 | } 60 | 61 | func TestValidate(t *testing.T) { 62 | testCases := []struct { 63 | Name string 64 | Play Playbook 65 | IsValid bool 66 | ErrString string 67 | }{ 68 | { 69 | Name: "nil_targets", 70 | Play: Playbook{ 71 | Targets: nil, 72 | Steps: make([]Step, 1), 73 | }, 74 | IsValid: false, 75 | ErrString: "no targets", 76 | }, 77 | { 78 | Name: "zero_targets", 79 | Play: 
Playbook{ 80 | Targets: make([]Target, 0), 81 | Steps: make([]Step, 1), 82 | }, 83 | IsValid: false, 84 | ErrString: "no targets", 85 | }, 86 | { 87 | Name: "nil_steps", 88 | Play: Playbook{ 89 | Targets: make([]Target, 1), 90 | Steps: nil, 91 | }, 92 | IsValid: false, 93 | ErrString: "no steps", 94 | }, 95 | { 96 | Name: "zero_steps", 97 | Play: Playbook{ 98 | Targets: make([]Target, 1), 99 | Steps: make([]Step, 0), 100 | }, 101 | IsValid: false, 102 | ErrString: "no steps", 103 | }, 104 | { 105 | Name: "happy_path", 106 | Play: Playbook{ 107 | Targets: make([]Target, 1), 108 | Steps: make([]Step, 1), 109 | }, 110 | IsValid: true, 111 | ErrString: "", 112 | }, 113 | } 114 | 115 | for _, tt := range testCases { 116 | t.Run(tt.Name, func(t *testing.T) { 117 | assert := assert.New(t) 118 | 119 | err := tt.Play.Validate() 120 | 121 | if tt.IsValid { 122 | assert.Nil(err) 123 | } else { 124 | if err == nil { 125 | t.Fatal("got error, expected nil") 126 | } 127 | assert.Equal(tt.ErrString, err.Error()) 128 | } 129 | }) 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /sql_runner/postgres_target.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
11 | package main 12 | 13 | import ( 14 | "context" 15 | "crypto/tls" 16 | "errors" 17 | "fmt" 18 | "log" 19 | "net" 20 | "os" 21 | "time" 22 | 23 | "github.com/go-pg/pg/v10" 24 | "github.com/go-pg/pg/v10/orm" 25 | "github.com/olekukonko/tablewriter" 26 | ) 27 | 28 | // For Redshift queries 29 | const ( 30 | dialTimeout = 10 * time.Second 31 | readTimeout = 8 * time.Hour // TODO: make this user configurable 32 | ) 33 | 34 | // PostgresTarget represents a Postgres as target. 35 | type PostgresTarget struct { 36 | Target 37 | Client *pg.DB 38 | } 39 | 40 | // IsConnectable tests connection to determine whether the Postgres target is 41 | // connectable. 42 | func (pt PostgresTarget) IsConnectable() bool { 43 | client := pt.Client 44 | err := client.Ping(context.Background()) 45 | 46 | return err == nil 47 | } 48 | 49 | // NewPostgresTarget returns a ptr to a PostgresTarget. 50 | func NewPostgresTarget(target Target) (*PostgresTarget, error) { 51 | var tlsConfig *tls.Config 52 | if target.Ssl == true { 53 | tlsConfig = &tls.Config{ 54 | InsecureSkipVerify: true, 55 | } 56 | } 57 | 58 | if target.Host == "" || target.Port == "" || target.Username == "" || target.Database == "" { 59 | return nil, fmt.Errorf("missing target connection parameters") 60 | } 61 | 62 | db := pg.Connect(&pg.Options{ 63 | Addr: fmt.Sprintf("%s:%s", target.Host, target.Port), 64 | User: target.Username, 65 | Password: target.Password, 66 | Database: target.Database, 67 | TLSConfig: tlsConfig, 68 | DialTimeout: dialTimeout, 69 | ReadTimeout: readTimeout, 70 | Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) { 71 | cn, err := net.DialTimeout(network, addr, dialTimeout) 72 | if err != nil { 73 | return nil, err 74 | } 75 | return cn, cn.(*net.TCPConn).SetKeepAlive(true) 76 | }, 77 | }) 78 | 79 | return &PostgresTarget{target, db}, nil 80 | } 81 | 82 | // GetTarget returns the Target field of PostgresTarget. 
83 | func (pt PostgresTarget) GetTarget() Target { 84 | return pt.Target 85 | } 86 | 87 | // RunQuery runs a query against the target. 88 | func (pt PostgresTarget) RunQuery(query ReadyQuery, dryRun bool, showQueryOutput bool) QueryStatus { 89 | var err error = nil 90 | var res orm.Result 91 | if dryRun { 92 | options := pt.Client.Options() 93 | address := options.Addr 94 | if pt.IsConnectable() { 95 | log.Printf("SUCCESS: Able to connect to target database, %s\n.", address) 96 | } else { 97 | log.Printf("ERROR: Cannot connect to target database, %s\n.", address) 98 | } 99 | return QueryStatus{query, query.Path, 0, nil} 100 | } 101 | 102 | affected := 0 103 | if showQueryOutput { 104 | var results Results 105 | res, err = pt.Client.Query(&results, query.Script) 106 | if err == nil { 107 | affected = res.RowsAffected() 108 | } else { 109 | log.Printf("ERROR: %s.", err) 110 | return QueryStatus{query, query.Path, int(affected), err} 111 | } 112 | 113 | err = printTable(&results) 114 | if err != nil { 115 | log.Printf("ERROR: %s.", err) 116 | return QueryStatus{query, query.Path, int(affected), err} 117 | } 118 | } else { 119 | res, err = pt.Client.Exec(query.Script) 120 | if err == nil { 121 | affected = res.RowsAffected() 122 | } 123 | } 124 | 125 | return QueryStatus{query, query.Path, affected, err} 126 | } 127 | 128 | func printTable(results *Results) error { 129 | columns := make([]string, len(results.columns)) 130 | for k := range results.columns { 131 | columns[k] = results.columns[k] 132 | } 133 | 134 | if results.elements == 1 { 135 | if results.results[0][0] == "" { 136 | return nil // blank output, edge case for asserts 137 | } 138 | } else if results.elements == 0 { 139 | return nil // break for no output 140 | } 141 | 142 | log.Printf("QUERY OUTPUT:\n") 143 | table := tablewriter.NewWriter(os.Stdout) 144 | table.SetHeader(columns) 145 | table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false}) 146 | 
table.SetCenterSeparator("|") 147 | 148 | if len(results.columns) == 0 { 149 | return errors.New("Unable to read columns") 150 | } 151 | 152 | for _, row := range results.results { 153 | table.Append(row) 154 | } 155 | 156 | table.Render() // Send output 157 | return nil 158 | } 159 | -------------------------------------------------------------------------------- /sql_runner/postgres_target_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
11 | package main 12 | 13 | import ( 14 | "testing" 15 | 16 | "github.com/stretchr/testify/assert" 17 | ) 18 | 19 | func TestNewPostgresTarget_Error(t *testing.T) { 20 | expectedErr := "missing target connection parameters" 21 | testCases := []struct { 22 | Name string 23 | Input Target 24 | }{ 25 | { 26 | Name: "missing_host", 27 | Input: Target{ 28 | Port: "5432", 29 | Username: "postgres", 30 | Database: "postgres", 31 | }, 32 | }, 33 | { 34 | Name: "missing_port", 35 | Input: Target{ 36 | Host: "5432", 37 | Username: "postgres", 38 | Database: "postgres", 39 | }, 40 | }, 41 | { 42 | Name: "missing_user", 43 | Input: Target{ 44 | Host: "localhost", 45 | Port: "5432", 46 | Database: "postgres", 47 | }, 48 | }, 49 | { 50 | Name: "missing_database", 51 | Input: Target{ 52 | Host: "localhost", 53 | Port: "5432", 54 | Username: "postgres", 55 | }, 56 | }, 57 | } 58 | 59 | for _, tt := range testCases { 60 | t.Run(tt.Name, func(t *testing.T) { 61 | assert := assert.New(t) 62 | 63 | result, err := NewPostgresTarget(tt.Input) 64 | assert.Nil(result) 65 | if err == nil { 66 | t.Fatalf("expected error, got nil") 67 | } 68 | 69 | assert.Equal(expectedErr, err.Error()) 70 | 71 | }) 72 | } 73 | } 74 | 75 | func TestNewPostgresTarget(t *testing.T) { 76 | testCases := []struct { 77 | Name string 78 | Input Target 79 | }{ 80 | { 81 | Name: "happy_path", 82 | Input: Target{ 83 | Host: "localhost", 84 | Port: "5432", 85 | Username: "pguser", 86 | Database: "postgres", 87 | }, 88 | }, 89 | } 90 | 91 | for _, tt := range testCases { 92 | t.Run(tt.Name, func(t *testing.T) { 93 | assert := assert.New(t) 94 | 95 | result, err := NewPostgresTarget(tt.Input) 96 | assert.Nil(err) 97 | if result == nil { 98 | t.Fatal("unexpected nil result") 99 | } 100 | 101 | assert.Equal(result.Target, tt.Input) 102 | }) 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /sql_runner/provider.go: 
--------------------------------------------------------------------------------
// Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved.
//
// This program is licensed to you under the Apache License Version 2.0,
// and you may not use this file except in compliance with the Apache License Version 2.0.
// You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the Apache License Version 2.0 is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
package main

// PlaybookProvider is an interface for different providers.
// Implementations load a Playbook from some backing store
// (e.g. a local YAML file or a Consul KV pair; see
// PlaybookProviderFromOptions for how one is selected).
type PlaybookProvider interface {
	GetPlaybook() (*Playbook, error)
}

--------------------------------------------------------------------------------
/sql_runner/review.go:
--------------------------------------------------------------------------------
// Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved.
//
// This program is licensed to you under the Apache License Version 2.0,
// and you may not use this file except in compliance with the Apache License Version 2.0.
// You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the Apache License Version 2.0 is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
11 | package main 12 | 13 | import ( 14 | "bytes" 15 | "fmt" 16 | "text/template" 17 | ) 18 | 19 | var ( 20 | failureTemplate *template.Template 21 | ) 22 | 23 | func init() { 24 | failureTemplate = template.Must(template.New("failure").Parse(` 25 | TARGET INITIALIZATION FAILURES:{{range $status := .}}{{if $status.Errors}} 26 | * {{$status.Name}}{{range $error := $status.Errors}}, ERRORS: 27 | - {{$error}}{{end}}{{end}}{{end}} 28 | QUERY FAILURES:{{range $status := .}}{{range $step := $status.Steps}}{{range $query := $step.Queries}}{{if $query.Error}} 29 | * Query {{$query.Query.Name}} {{$query.Path}} (in step {{$step.Name}} @ target {{$status.Name}}), ERROR: 30 | - {{$query.Error}}{{end}}{{end}}{{end}}{{end}} 31 | `)) 32 | } 33 | 34 | func review(statuses []TargetStatus) (int, string) { 35 | exitCode, queryCount := getExitCodeAndQueryCount(statuses) 36 | 37 | if exitCode == 0 { 38 | return exitCode, getSuccessMessage(queryCount, len(statuses)) 39 | } else if exitCode == 8 { 40 | var message bytes.Buffer 41 | message.WriteString("WARNING: No queries to run\n") 42 | return exitCode, message.String() 43 | } else { 44 | return exitCode, getFailureMessage(statuses) 45 | } 46 | } 47 | 48 | // Don't use a template here as executing it could fail 49 | func getSuccessMessage(queryCount int, targetCount int) string { 50 | return fmt.Sprintf("SUCCESS: %d queries executed against %d targets", queryCount, targetCount) 51 | } 52 | 53 | // TODO: maybe would be cleaner to bubble up error from this function 54 | func getFailureMessage(statuses []TargetStatus) string { 55 | 56 | var message bytes.Buffer 57 | if err := failureTemplate.Execute(&message, statuses); err != nil { 58 | return fmt.Sprintf("ERROR: executing failure message template itself failed: %s", err.Error()) 59 | } 60 | 61 | return message.String() 62 | } 63 | 64 | // getExitCodeAndQueryCount processes statuses and returns: 65 | // - 0 for no errors 66 | // - 5 for target initialization errors 67 | // - 6 for query 
errors 68 | // - 7 for both types of error 69 | // Also return the total count of query statuses we have 70 | func getExitCodeAndQueryCount(statuses []TargetStatus) (int, int) { 71 | 72 | initErrors := false 73 | queryErrors := false 74 | queryCount := 0 75 | 76 | for _, targetStatus := range statuses { 77 | if targetStatus.Errors != nil { 78 | initErrors = true 79 | } 80 | CheckQueries: 81 | for _, stepStatus := range targetStatus.Steps { 82 | for _, queryStatus := range stepStatus.Queries { 83 | if queryStatus.Error != nil { 84 | queryErrors = true 85 | queryCount = 0 // Reset 86 | break CheckQueries 87 | } 88 | queryCount++ 89 | } 90 | } 91 | } 92 | 93 | var exitCode int 94 | switch { 95 | case initErrors && queryErrors: 96 | exitCode = 7 97 | case initErrors: 98 | exitCode = 5 99 | case queryErrors: 100 | exitCode = 6 101 | case queryCount == 0: 102 | exitCode = 8 103 | default: 104 | exitCode = 0 105 | } 106 | return exitCode, queryCount 107 | } 108 | -------------------------------------------------------------------------------- /sql_runner/run.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
package main

import (
	"bytes"
	"fmt"
	"log"
	"strings"
)

// Database type identifiers accepted in a playbook target's type field
// (matched case-insensitively in routeAndRun).
const (
	redshiftType   = "redshift"
	postgresType   = "postgres"
	postgresqlType = "postgresql"
	snowflakeType  = "snowflake"
	bigqueryType   = "bigquery"

	// Error message fragments used when composing TargetStatus failures.
	errorUnsupportedDbType = "Database type is unsupported"
	errorFromStepNotFound  = "The fromStep argument did not match any available steps"
	errorQueryFailedInit   = "An error occurred loading the SQL file"
	errorRunQueryNotFound  = "The runQuery argument did not match any available queries"
	errorRunQueryArgument  = "Argument for -runQuery should be in format 'step::query'"
	errorNewTargetFailure  = "Failed to create target"
)

// TargetStatus reports on any errors from running the
// playbook against a singular target.
type TargetStatus struct {
	Name   string
	Errors []error // For any errors not related to a specific step
	Steps  []StepStatus
}

// StepStatus reports on any errors from running a step.
type StepStatus struct {
	Name    string
	Index   int // 1-based position of the step in the run (set by runSteps)
	Queries []QueryStatus
}

// QueryStatus reports on any error from a query.
type QueryStatus struct {
	Query    ReadyQuery
	Path     string // resolved path of the SQL file
	Affected int    // rows affected, as reported by the database driver
	Error    error
}

// ReadyStep contains a step that is ready for execution.
type ReadyStep struct {
	Name    string
	Queries []ReadyQuery
}

// ReadyQuery contains a query that is ready for execution.
type ReadyQuery struct {
	Script string // the SQL text to execute (templates already filled)
	Name   string
	Path   string // resolved path the script was loaded from
}

// Run runs a playbook of SQL scripts.
72 | // 73 | // Handles dispatch to the appropriate 74 | // database engine 75 | func Run(pb Playbook, sp SQLProvider, fromStep string, runQuery string, dryRun bool, fillTemplates bool, showQueryOutput bool) []TargetStatus { 76 | 77 | var steps []Step 78 | var trimErr []TargetStatus 79 | 80 | if runQuery != "" { 81 | steps, trimErr = trimToQuery(pb.Steps, runQuery, pb.Targets) 82 | } else { 83 | steps, trimErr = trimSteps(pb.Steps, fromStep, pb.Targets) 84 | } 85 | if trimErr != nil { 86 | return trimErr 87 | } 88 | 89 | // Prepare all SQL queries 90 | readySteps, readyErr := loadSteps(steps, sp, pb.Variables, pb.Targets) 91 | if readyErr != nil { 92 | return readyErr 93 | } 94 | 95 | if fillTemplates { 96 | for _, steps := range readySteps { 97 | for _, query := range steps.Queries { 98 | var message bytes.Buffer 99 | message.WriteString(fmt.Sprintf("Step name: %s\n", steps.Name)) 100 | message.WriteString(fmt.Sprintf("Query name: %s\n", query.Name)) 101 | message.WriteString(fmt.Sprintf("Query path: %s\n", query.Path)) 102 | message.WriteString(query.Script) 103 | log.Print(message.String()) 104 | } 105 | } 106 | allStatuses := make([]TargetStatus, 0) 107 | return allStatuses 108 | } 109 | 110 | targetChan := make(chan TargetStatus, len(pb.Targets)) 111 | 112 | // Route each target to the right db client and run 113 | for _, tgt := range pb.Targets { 114 | routeAndRun(tgt, readySteps, targetChan, dryRun, showQueryOutput) 115 | } 116 | 117 | // Compose statuses from each target run 118 | // Duplicated in runSteps, because NOGENERICS 119 | allStatuses := make([]TargetStatus, 0) 120 | for i := 0; i < len(pb.Targets); i++ { 121 | select { 122 | case status := <-targetChan: 123 | allStatuses = append(allStatuses, status) 124 | } 125 | } 126 | 127 | return allStatuses 128 | } 129 | 130 | // --- Pre-run processors 131 | 132 | // Trims down to an indivdual query 133 | func trimToQuery(steps []Step, runQuery string, targets []Target) ([]Step, []TargetStatus) { 134 | 
runQueryParts := strings.Split(runQuery, "::") 135 | if len(runQueryParts) != 2 { 136 | err := fmt.Errorf(errorRunQueryArgument) 137 | return nil, makeTargetStatuses(err, targets) 138 | } 139 | 140 | var stepName, queryName string = runQueryParts[0], runQueryParts[1] 141 | if stepName == "" || queryName == "" { 142 | err := fmt.Errorf(errorRunQueryArgument) 143 | return nil, makeTargetStatuses(err, targets) 144 | } 145 | 146 | steps, trimErr := trimSteps(steps, stepName, targets) 147 | if trimErr != nil { 148 | return nil, trimErr 149 | } 150 | 151 | step := steps[0] // safe 152 | queries := []Query{} 153 | for _, query := range step.Queries { 154 | if query.Name == queryName { 155 | queries = append(queries, query) 156 | break 157 | } 158 | } 159 | 160 | if len(queries) == 0 { 161 | err := fmt.Errorf("%s: '%s'", errorRunQueryNotFound, queryName) 162 | return nil, makeTargetStatuses(err, targets) 163 | } 164 | step.Queries = queries 165 | 166 | return []Step{step}, nil 167 | } 168 | 169 | // Trims skippable steps 170 | func trimSteps(steps []Step, fromStep string, targets []Target) ([]Step, []TargetStatus) { 171 | stepIndex := 0 172 | if fromStep != "" { 173 | exists := false 174 | for i := 0; i < len(steps); i++ { 175 | if steps[i].Name == fromStep { 176 | exists = true 177 | stepIndex = i 178 | break 179 | } 180 | } 181 | if !exists { 182 | err := fmt.Errorf("%s: %s", errorFromStepNotFound, fromStep) 183 | return nil, makeTargetStatuses(err, targets) 184 | } 185 | } 186 | return steps[stepIndex:], nil 187 | } 188 | 189 | // Helper to create the corresponding []TargetStatus given an error. 
190 | func makeTargetStatuses(err error, targets []Target) []TargetStatus { 191 | allStatuses := make([]TargetStatus, 0, len(targets)) 192 | for _, tgt := range targets { 193 | errs := []error{err} 194 | status := TargetStatus{ 195 | Name: tgt.Name, 196 | Errors: errs, 197 | Steps: nil, 198 | } 199 | allStatuses = append(allStatuses, status) 200 | } 201 | 202 | return allStatuses 203 | } 204 | 205 | // Loads all SQL files for all Steps in the playbook ahead of time 206 | // Fails as soon as a bad query is found 207 | func loadSteps(steps []Step, sp SQLProvider, variables map[string]interface{}, targets []Target) ([]ReadyStep, []TargetStatus) { 208 | sCount := len(steps) 209 | readySteps := make([]ReadyStep, sCount) 210 | 211 | for i := 0; i < sCount; i++ { 212 | step := steps[i] 213 | qCount := len(step.Queries) 214 | readyQueries := make([]ReadyQuery, qCount) 215 | 216 | for j := 0; j < qCount; j++ { 217 | query := step.Queries[j] 218 | queryText, err := prepareQuery(query.File, sp, query.Template, variables) 219 | queryPath := sp.ResolveKey(query.File) 220 | 221 | if err != nil { 222 | allStatuses := make([]TargetStatus, 0) 223 | for _, tgt := range targets { 224 | status := loadQueryFailed(tgt.Name, queryPath, err) 225 | allStatuses = append(allStatuses, status) 226 | } 227 | return nil, allStatuses 228 | } 229 | readyQueries[j] = ReadyQuery{Script: queryText, Name: query.Name, Path: queryPath} 230 | } 231 | readySteps[i] = ReadyStep{Name: step.Name, Queries: readyQueries} 232 | } 233 | return readySteps, nil 234 | } 235 | 236 | // Helper for a load query failed error 237 | func loadQueryFailed(targetName string, queryPath string, err error) TargetStatus { 238 | errs := []error{fmt.Errorf("%s: %s: %s", errorQueryFailedInit, queryPath, err)} 239 | return TargetStatus{ 240 | Name: targetName, 241 | Errors: errs, 242 | Steps: nil, 243 | } 244 | } 245 | 246 | // --- Running 247 | 248 | // Route to correct database client and run 249 | func routeAndRun(target Target, 
readySteps []ReadyStep, targetChan chan TargetStatus, dryRun bool, showQueryOutput bool) { 250 | switch strings.ToLower(target.Type) { 251 | case redshiftType, postgresType, postgresqlType: 252 | go func(tgt Target) { 253 | pg, err := NewPostgresTarget(tgt) 254 | if err != nil { 255 | targetChan <- newTargetFailure(tgt, err) 256 | return 257 | } 258 | targetChan <- runSteps(pg, readySteps, dryRun, showQueryOutput) 259 | }(target) 260 | case snowflakeType: 261 | go func(tgt Target) { 262 | snfl, err := NewSnowflakeTarget(tgt) 263 | if err != nil { 264 | targetChan <- newTargetFailure(tgt, err) 265 | return 266 | } 267 | targetChan <- runSteps(snfl, readySteps, dryRun, showQueryOutput) 268 | }(target) 269 | case bigqueryType: 270 | go func(tgt Target) { 271 | bq, err := NewBigQueryTarget(tgt) 272 | if err != nil { 273 | targetChan <- newTargetFailure(tgt, err) 274 | return 275 | } 276 | targetChan <- runSteps(bq, readySteps, dryRun, showQueryOutput) 277 | }(target) 278 | default: 279 | targetChan <- unsupportedDbType(target.Name, target.Type) 280 | } 281 | } 282 | 283 | // Helper for an unrecognized database type 284 | func unsupportedDbType(targetName string, targetType string) TargetStatus { 285 | errs := []error{fmt.Errorf("%s: %s", errorUnsupportedDbType, targetType)} 286 | return TargetStatus{ 287 | Name: targetName, 288 | Errors: errs, 289 | Steps: nil, 290 | } 291 | } 292 | 293 | // Helper to create TargetStatus after an error on New*Target 294 | func newTargetFailure(target Target, err error) TargetStatus { 295 | errs := []error{fmt.Errorf("%s: %s: %s", errorNewTargetFailure, target.Type, err.Error())} 296 | return TargetStatus{ 297 | Name: target.Name, 298 | Errors: errs, 299 | Steps: nil, 300 | } 301 | } 302 | 303 | // Handles the sequential flow of steps (some of 304 | // which may involve multiple queries in parallel). 305 | // 306 | // runSteps fails fast - we stop executing SQL on 307 | // this target when a step fails. 
308 | func runSteps(database Db, steps []ReadyStep, dryRun bool, showQueryOutput bool) TargetStatus { 309 | 310 | allStatuses := make([]StepStatus, len(steps)) 311 | 312 | FailFast: 313 | for i, stp := range steps { 314 | stpIndex := i + 1 315 | status := runQueries(database, stpIndex, stp.Name, stp.Queries, dryRun, showQueryOutput) 316 | allStatuses = append(allStatuses, status) 317 | 318 | for _, qry := range status.Queries { 319 | if qry.Error != nil { 320 | break FailFast 321 | } 322 | } 323 | } 324 | return TargetStatus{ 325 | Name: database.GetTarget().Name, 326 | Errors: nil, 327 | Steps: allStatuses, 328 | } 329 | } 330 | 331 | // Handles running N queries in parallel. 332 | // 333 | // runQueries composes failures across the queries 334 | // for a given step: if one query fails, the others 335 | // will still complete. 336 | func runQueries(database Db, stepIndex int, stepName string, queries []ReadyQuery, dryRun bool, showQueryOutput bool) StepStatus { 337 | 338 | queryChan := make(chan QueryStatus, len(queries)) 339 | dbName := database.GetTarget().Name 340 | 341 | // Route each target to the right db client and run 342 | for _, query := range queries { 343 | go func(qry ReadyQuery) { 344 | log.Printf("EXECUTING %s (in step %s @ %s): %s", qry.Name, stepName, dbName, qry.Path) 345 | queryChan <- database.RunQuery(qry, dryRun, showQueryOutput) 346 | }(query) 347 | } 348 | 349 | // Collect statuses from each target run 350 | allStatuses := make([]QueryStatus, 0) 351 | for i := 0; i < len(queries); i++ { 352 | select { 353 | case status := <-queryChan: 354 | if status.Error != nil { 355 | log.Printf("FAILURE: %s (step %s @ target %s), ERROR: %s\n", status.Query.Name, stepName, dbName, status.Error.Error()) 356 | } else { 357 | log.Printf("SUCCESS: %s (step %s @ target %s), ROWS AFFECTED: %d\n", status.Query.Name, stepName, dbName, status.Affected) 358 | } 359 | allStatuses = append(allStatuses, status) 360 | } 361 | } 362 | 363 | return StepStatus{ 364 | 
Name: stepName, 365 | Index: stepIndex, 366 | Queries: allStatuses, 367 | } 368 | } 369 | -------------------------------------------------------------------------------- /sql_runner/run_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
11 | package main 12 | 13 | import ( 14 | "reflect" 15 | "strings" 16 | "testing" 17 | 18 | "github.com/davecgh/go-spew/spew" 19 | "github.com/stretchr/testify/assert" 20 | ) 21 | 22 | func TestTrimToQuery_Valid(t *testing.T) { 23 | testTargets := []Target{{Name: "test"}} 24 | testSteps := []Step{ 25 | { 26 | Name: "preFoo", 27 | Queries: []Query{{Name: "bar"}}, 28 | }, 29 | { 30 | Name: "foo", 31 | Queries: []Query{{Name: "bar"}}, 32 | }, 33 | { 34 | Name: "postFoo", 35 | Queries: []Query{{Name: "bar"}}, 36 | }, 37 | } 38 | 39 | testCases := []struct { 40 | RunQueryArg string 41 | ExpectedIdx int 42 | }{ 43 | { 44 | RunQueryArg: "preFoo::bar", 45 | ExpectedIdx: 0, 46 | }, 47 | { 48 | RunQueryArg: "foo::bar", 49 | ExpectedIdx: 1, 50 | }, 51 | { 52 | RunQueryArg: "postFoo::bar", 53 | ExpectedIdx: 2, 54 | }, 55 | } 56 | 57 | for _, tt := range testCases { 58 | t.Run(tt.RunQueryArg, func(t *testing.T) { 59 | assert := assert.New(t) 60 | steps, statuses := trimToQuery( 61 | testSteps, 62 | tt.RunQueryArg, 63 | testTargets) 64 | 65 | assert.Nil(statuses) 66 | 67 | if tt.ExpectedIdx >= len(testSteps) { 68 | t.Fatal("expected index out of testSteps range") 69 | } 70 | 71 | i := tt.ExpectedIdx 72 | expectedSteps := testSteps[i : i+1] 73 | if !reflect.DeepEqual(steps, testSteps[i:i+1]) { 74 | t.Errorf("\nGOT:\n%s\nEXPECTED:\n%s", 75 | spew.Sdump(steps), 76 | spew.Sdump(expectedSteps)) 77 | } 78 | }) 79 | } 80 | } 81 | 82 | func TestTrimToQuery_Errors(t *testing.T) { 83 | testTargets := []Target{ 84 | {Name: "a"}, 85 | {Name: "b"}, 86 | } 87 | testSteps := []Step{ 88 | { 89 | Name: "foo", 90 | Queries: []Query{{Name: "bar"}}, 91 | }, 92 | } 93 | 94 | testCases := []struct { 95 | Scenario string 96 | RunQueryArg string 97 | ErrorString string 98 | ErrorExact bool 99 | }{ 100 | { 101 | Scenario: "missing_delimiter", 102 | RunQueryArg: "foobar", 103 | ErrorString: errorRunQueryArgument, 104 | ErrorExact: true, 105 | }, 106 | { 107 | Scenario: 
"missing_delimiter_existing_step_issue210", 108 | RunQueryArg: "foo", 109 | ErrorString: errorRunQueryArgument, 110 | ErrorExact: true, 111 | }, 112 | { 113 | Scenario: "missing_step", 114 | RunQueryArg: "::bar", 115 | ErrorString: errorRunQueryArgument, 116 | ErrorExact: true, 117 | }, 118 | { 119 | Scenario: "missing_query", 120 | RunQueryArg: "foo::", 121 | ErrorString: errorRunQueryArgument, 122 | ErrorExact: true, 123 | }, 124 | { 125 | Scenario: "empty_string", 126 | RunQueryArg: "", 127 | ErrorString: errorRunQueryArgument, 128 | ErrorExact: true, 129 | }, 130 | { 131 | Scenario: "step_not_found", 132 | RunQueryArg: "baz::bar", 133 | ErrorString: errorFromStepNotFound, 134 | ErrorExact: false, 135 | }, 136 | { 137 | Scenario: "query_not_found", 138 | RunQueryArg: "foo::foo", 139 | ErrorString: errorRunQueryNotFound, 140 | ErrorExact: false, 141 | }, 142 | } 143 | 144 | for _, tt := range testCases { 145 | t.Run(tt.Scenario, func(t *testing.T) { 146 | assert := assert.New(t) 147 | steps, statuses := trimToQuery( 148 | testSteps, 149 | tt.RunQueryArg, 150 | testTargets) 151 | 152 | assert.Nil(steps) 153 | if statuses == nil { 154 | t.Fatal("unexpected nil statuses") 155 | } 156 | 157 | if len(statuses) != len(testTargets) { 158 | t.Fatalf("wrong length of []TargetStatus returned: got %v, expected %v", len(statuses), len(testTargets)) 159 | } 160 | 161 | for i, status := range statuses { 162 | assert.Equal(status.Name, testTargets[i].Name) 163 | 164 | if status.Errors == nil { 165 | t.Fatalf("unexpected nil errors in status - got status:\n%s\n", spew.Sdump(status)) 166 | } 167 | 168 | for i, ee := range status.Errors { 169 | if !checkError(ee, tt.ErrorString, tt.ErrorExact) { 170 | t.Errorf("error mismatch (at Errors index: %v) in status:\n%s\nEXPECTED (exact: %v) error string: %q\n", 171 | i, 172 | spew.Sdump(status), 173 | tt.ErrorExact, 174 | tt.ErrorString) 175 | } 176 | } 177 | } 178 | }) 179 | } 180 | } 181 | 182 | // Helper 183 | func checkError(err 
error, errString string, exact bool) bool { 184 | if err == nil { 185 | return false 186 | } 187 | 188 | if !exact { 189 | return strings.Contains(err.Error(), errString) 190 | } 191 | 192 | return err.Error() == errString 193 | } 194 | -------------------------------------------------------------------------------- /sql_runner/scanner.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 11 | package main 12 | 13 | import ( 14 | "github.com/go-pg/pg/v10/orm" 15 | "github.com/go-pg/pg/v10/types" 16 | ) 17 | 18 | // Results information. 19 | type Results struct { 20 | results [][]string 21 | columns []string 22 | elements int 23 | rows int 24 | } 25 | 26 | var _ orm.HooklessModel = (*Results)(nil) 27 | 28 | // Init initializes Results. 29 | func (results *Results) Init() error { 30 | results.elements = 0 31 | results.rows = 0 32 | 33 | if s := results; len(s.results) >= 0 { 34 | results.results = (s.results)[:0] 35 | } 36 | 37 | if s := results; len(s.columns) >= 0 { 38 | results.columns = (s.columns)[:0] 39 | } 40 | return nil 41 | } 42 | 43 | // NextColumnScanner returns a ColumnScanner that is used to scan columns. 
44 | func (results *Results) NextColumnScanner() orm.ColumnScanner { 45 | return results 46 | } 47 | 48 | // AddColumnScanner adds the ColumnScanner to the model. 49 | func (Results) AddColumnScanner(_ orm.ColumnScanner) error { 50 | return nil 51 | } 52 | 53 | // ScanColumn implements ColumnScanner interface. 54 | func (results *Results) ScanColumn(col types.ColumnInfo, rd types.Reader, n int) error { 55 | tmp, err := rd.ReadFullTemp() 56 | if err != nil { 57 | return err 58 | } 59 | 60 | curRow := len(results.results) - 1 61 | 62 | if col.Index == 0 { 63 | results.results = append(results.results, []string{}) 64 | curRow = len(results.results) - 1 65 | results.rows++ 66 | } 67 | 68 | if curRow == 0 { 69 | results.columns = append(results.columns, col.Name) 70 | } 71 | 72 | results.elements++ 73 | results.results[curRow] = append(results.results[curRow], string(tmp)) 74 | return nil 75 | } 76 | -------------------------------------------------------------------------------- /sql_runner/snowflake_target.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
package main

import (
	"context"
	"crypto/rsa"
	"crypto/x509"
	"database/sql"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"strings"
	"time"

	"github.com/olekukonko/tablewriter"
	"github.com/pkg/errors"
	sf "github.com/snowflakedb/gosnowflake"
)

// Specific for Snowflake db
const (
	// snowplowAppName is the default Application name reported to Snowflake;
	// it can be overridden via SNOWPLOW_SQL_RUNNER_SNOWFLAKE_APP_NAME
	// (see NewSnowflakeTarget).
	snowplowAppName = `Snowplow_OSS`
	loginTimeout    = 5 * time.Second // by default is 60
	// multiStmtName is the column name the driver reports for multi-statement
	// results; printSfTable uses it to detect (and refuse) such result sets.
	multiStmtName = "multiple statement execution" // https://github.com/snowflakedb/gosnowflake/blob/e909f00ff624a7e60d4f91718f6adc92cbd0d80f/connection.go#L57-L61
)

// SnowflakeTarget represents Snowflake as target.
type SnowflakeTarget struct {
	Target
	Client *sql.DB // open database handle
	Dsn    string  // DSN kept so pollForQueryStatus can open a second connection
}

// IsConnectable tests connection to determine whether the Snowflake target is
// connectable.
func (sft SnowflakeTarget) IsConnectable() bool {
	client := sft.Client
	err := client.Ping()
	return err == nil
}

// parsePrivateKey reads and parses a private key file, optionally decrypting it with a passphrase.
53 | func parsePrivateKey(path string, passphrase string) (*rsa.PrivateKey, error) { 54 | privateKeyBytes, err := os.ReadFile(path) 55 | if err != nil { 56 | return nil, fmt.Errorf("failed to read private key file: %w", err) 57 | } 58 | 59 | block, _ := pem.Decode(privateKeyBytes) 60 | if block == nil { 61 | return nil, fmt.Errorf("failed to decode PEM block from private key file") 62 | } 63 | 64 | var keyBytes []byte 65 | if passphrase != "" { 66 | keyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase)) 67 | if err != nil { 68 | return nil, fmt.Errorf("failed to decrypt private key: %w", err) 69 | } 70 | } else { 71 | keyBytes = block.Bytes 72 | } 73 | 74 | key, err := x509.ParsePKCS8PrivateKey(keyBytes) 75 | if err != nil { 76 | return nil, fmt.Errorf("failed to parse private key: %w", err) 77 | } 78 | 79 | privateKey, ok := key.(*rsa.PrivateKey) 80 | if !ok { 81 | return nil, fmt.Errorf("private key is not an RSA key") 82 | } 83 | 84 | return privateKey, nil 85 | } 86 | 87 | // NewSnowflakeTarget returns a ptr to a SnowflakeTarget. 
88 | func NewSnowflakeTarget(target Target) (*SnowflakeTarget, error) { 89 | params := make(map[string]*string) 90 | if target.QueryTag != "" { 91 | params["QUERY_TAG"] = &target.QueryTag 92 | } 93 | 94 | config := &sf.Config{ 95 | Region: target.Region, 96 | Account: target.Account, 97 | User: target.Username, 98 | Database: target.Database, 99 | Warehouse: target.Warehouse, 100 | LoginTimeout: loginTimeout, 101 | Params: params, 102 | } 103 | 104 | // Set authentication method based on available credentials 105 | if target.PrivateKeyPath != "" { 106 | config.Authenticator = sf.AuthTypeJwt 107 | privateKey, err := parsePrivateKey(target.PrivateKeyPath, target.PrivateKeyPassphrase) 108 | if err != nil { 109 | return nil, err 110 | } 111 | config.PrivateKey = privateKey 112 | } else { 113 | config.Password = target.Password 114 | } 115 | 116 | if envAppName := os.Getenv(`SNOWPLOW_SQL_RUNNER_SNOWFLAKE_APP_NAME`); envAppName != `` { 117 | config.Application = `Snowplow_` + envAppName 118 | } else { 119 | config.Application = snowplowAppName 120 | } 121 | 122 | configStr, err := sf.DSN(config) 123 | if err != nil { 124 | return nil, err 125 | } 126 | 127 | db, err := sql.Open("snowflake", configStr) 128 | if err != nil { 129 | return nil, err 130 | } 131 | 132 | return &SnowflakeTarget{target, db, configStr}, nil 133 | } 134 | 135 | // GetTarget returns the Target field of SnowflakeTarget. 
136 | func (sft SnowflakeTarget) GetTarget() Target { 137 | return sft.Target 138 | } 139 | 140 | // RunQuery runs a query against the target 141 | func (sft SnowflakeTarget) RunQuery(query ReadyQuery, dryRun bool, showQueryOutput bool) QueryStatus { 142 | var affected int64 = 0 143 | var err error 144 | 145 | if dryRun { 146 | if sft.IsConnectable() { 147 | log.Printf("SUCCESS: Able to connect to target database, %s\n.", sft.Account) 148 | } else { 149 | log.Printf("ERROR: Cannot connect to target database, %s\n.", sft.Account) 150 | } 151 | 152 | return QueryStatus{query, query.Path, 0, nil} 153 | } 154 | 155 | // Enable grabbing the queryID 156 | queryIDChannel := make(chan string, 1) 157 | ctxWithQueryIDChan := sf.WithQueryIDChan(context.Background(), queryIDChannel) 158 | 159 | // Kick off a goroutine to grab the queryID when we get it from the driver (there should be one queryID per script) 160 | goroutineQIDChannel := make(chan string) 161 | go getQueryID(goroutineQIDChannel, queryIDChannel) 162 | 163 | // 0 allows arbitrary number of statements 164 | ctx, err := sf.WithMultiStatement(ctxWithQueryIDChan, 0) 165 | if err != nil { 166 | log.Printf("ERROR: Could not initialise query script.") 167 | return QueryStatus{query, query.Path, 0, err} 168 | } 169 | script := query.Script 170 | 171 | if len(strings.TrimSpace(script)) > 0 { 172 | if showQueryOutput { 173 | rows, err := sft.Client.QueryContext(ctx, script) 174 | if err != nil { 175 | return QueryStatus{query, query.Path, int(affected), err} 176 | } 177 | defer rows.Close() 178 | 179 | err = printSfTable(rows) 180 | if err != nil { 181 | log.Printf("ERROR: %s.", err) 182 | return QueryStatus{query, query.Path, int(affected), err} 183 | } 184 | 185 | for rows.NextResultSet() { 186 | err = printSfTable(rows) 187 | if err != nil { 188 | log.Printf("ERROR: %s.", err) 189 | return QueryStatus{query, query.Path, int(affected), err} 190 | } 191 | } 192 | } else { 193 | res, err := sft.Client.ExecContext(ctx, 
script) 194 | if err != nil { 195 | // We read queryID here 196 | queryID := <-goroutineQIDChannel 197 | if isSnowflakeUnknownError(err) { 198 | log.Println("INFO: Encountered -1 status. Polling for query result with queryID: ", queryID) 199 | pollResult := pollForQueryStatus(sft, queryID) 200 | return QueryStatus{query, query.Path, int(affected), pollResult} 201 | } 202 | 203 | return QueryStatus{query, query.Path, int(affected), errors.Wrap(err, fmt.Sprintf("QueryID: %s", queryID))} 204 | } 205 | aff, _ := res.RowsAffected() 206 | affected += aff 207 | } 208 | } 209 | 210 | return QueryStatus{query, query.Path, int(affected), err} 211 | } 212 | 213 | func printSfTable(rows *sql.Rows) error { 214 | outputBuffer := make([][]string, 0, 10) 215 | cols, err := rows.Columns() 216 | if err != nil { 217 | return errors.New("Unable to read columns") 218 | } 219 | 220 | // check to prevent rows.Next() on multi-statement 221 | // see also: https://github.com/snowflakedb/gosnowflake/issues/365 222 | for _, c := range cols { 223 | if c == multiStmtName { 224 | return errors.New("Unable to showQueryOutput for multi-statement queries") 225 | } 226 | } 227 | 228 | vals := make([]interface{}, len(cols)) 229 | rawResult := make([][]byte, len(cols)) 230 | for i := range rawResult { 231 | vals[i] = &rawResult[i] 232 | } 233 | 234 | for rows.Next() { 235 | err = rows.Scan(vals...) 
236 | if err != nil { 237 | return errors.New("Unable to read row") 238 | } 239 | 240 | if len(vals) > 0 { 241 | outputBuffer = append(outputBuffer, stringify(rawResult)) 242 | } 243 | } 244 | 245 | if len(outputBuffer) > 0 { 246 | log.Printf("QUERY OUTPUT:\n") 247 | table := tablewriter.NewWriter(os.Stdout) 248 | table.SetHeader(cols) 249 | table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false}) 250 | table.SetCenterSeparator("|") 251 | 252 | for _, row := range outputBuffer { 253 | table.Append(row) 254 | } 255 | 256 | table.Render() // Send output 257 | } 258 | return nil 259 | } 260 | 261 | func stringify(row [][]byte) []string { 262 | var line []string 263 | for _, element := range row { 264 | line = append(line, fmt.Sprint(string(element))) 265 | } 266 | return line 267 | } 268 | 269 | // getQueryID reads from queryIDch and writes to goroutineCh. 270 | // If goroutineCh is unbuffered (as is being used above), it blocks. 271 | func getQueryID(goroutineCh chan string, queryIDch chan string) { 272 | queryID := <-queryIDch 273 | goroutineCh <- queryID 274 | } 275 | 276 | // Blocking function to poll for the true status of a query which didn't return a result. 277 | func pollForQueryStatus(sft SnowflakeTarget, queryID string) error { 278 | // Get the snoflake driver and open a connection 279 | sfd := sft.Client.Driver() 280 | conn, err := sfd.Open(sft.Dsn) 281 | if err != nil { 282 | return errors.Wrap(err, "Failed to open connection to poll for query result.") 283 | } 284 | // Poll Snowflake for actual query status 285 | for { 286 | qStatus, err := conn.(sf.SnowflakeConnection).GetQueryStatus(context.Background(), queryID) 287 | 288 | switch { 289 | case err != nil && isSnowflakeQueryRunningError(err): 290 | break 291 | case err != nil: 292 | // Any other error is genuine, return the error. 293 | return err 294 | case qStatus != nil && qStatus.ErrorCode == "": 295 | // A non-nil qStatus means the query completed. 
If the ErrorCode field is empty string, we have no error. 296 | return nil 297 | case qStatus != nil: 298 | // If qStatus is non-nil but has a non-zero error code, return the relevant info as an error. 299 | return errors.New(qStatus.ErrorMessage) 300 | default: 301 | break 302 | } 303 | // Give it a minute before polling again. 304 | time.Sleep(60 * time.Second) 305 | } 306 | } 307 | 308 | // isSnowflakeErrorCode returns whether its error argument is sf.SnowflakeError 309 | // with Number field equal to given code. 310 | func isSnowflakeErrorCode(e error, code int) bool { 311 | if e == nil { 312 | return false 313 | } 314 | 315 | var sfErr *sf.SnowflakeError 316 | if errors.As(e, &sfErr) { 317 | return sfErr.Number == code 318 | } 319 | 320 | return false 321 | } 322 | 323 | // isSnowflakeUnknownError returns whether an error is sf.ErrUnknownError 324 | // Based on: https://github.com/snowflakedb/gosnowflake/blob/5da2ab2463b2c7e544722bc58defdb23397287d6/errors.go#L312 325 | func isSnowflakeUnknownError(e error) bool { 326 | return isSnowflakeErrorCode(e, -1) 327 | } 328 | 329 | // isSnowflakeQueryRunningError returns if a Snowflake Error has 279301 code. 330 | // The driver returns an error with this code when the query is still running. 331 | func isSnowflakeQueryRunningError(e error) bool { 332 | return isSnowflakeErrorCode(e, sf.ErrQueryIsRunning) 333 | } 334 | -------------------------------------------------------------------------------- /sql_runner/snowflake_target_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 
6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 11 | package main 12 | 13 | import ( 14 | "crypto/rand" 15 | "crypto/rsa" 16 | "crypto/x509" 17 | "encoding/pem" 18 | "os" 19 | "path/filepath" 20 | "testing" 21 | 22 | "github.com/stretchr/testify/assert" 23 | ) 24 | 25 | func TestParsePrivateKey(t *testing.T) { 26 | tmpDir, err := os.MkdirTemp("", "snowflake-test") 27 | if err != nil { 28 | t.Fatalf("Failed to create temp dir: %v", err) 29 | } 30 | defer os.RemoveAll(tmpDir) 31 | 32 | privateKey, err := rsa.GenerateKey(rand.Reader, 2048) 33 | if err != nil { 34 | t.Fatalf("Failed to generate test key: %v", err) 35 | } 36 | 37 | keyBytes, err := x509.MarshalPKCS8PrivateKey(privateKey) 38 | if err != nil { 39 | t.Fatalf("Failed to marshal private key: %v", err) 40 | } 41 | 42 | testCases := []struct { 43 | name string 44 | setup func() string 45 | passphrase string 46 | expectError bool 47 | }{ 48 | { 49 | name: "valid unencrypted key", 50 | setup: func() string { 51 | path := filepath.Join(tmpDir, "unencrypted.pem") 52 | block := &pem.Block{ 53 | Type: "PRIVATE KEY", 54 | Bytes: keyBytes, 55 | } 56 | if err := os.WriteFile(path, pem.EncodeToMemory(block), 0600); err != nil { 57 | t.Fatalf("Failed to write test file: %v", err) 58 | } 59 | return path 60 | }, 61 | passphrase: "", 62 | expectError: false, 63 | }, 64 | { 65 | name: "valid encrypted key", 66 | setup: func() string { 67 | path := filepath.Join(tmpDir, "encrypted.pem") 68 | block, err := x509.EncryptPEMBlock(rand.Reader, "PRIVATE KEY", keyBytes, []byte("testpass"), x509.PEMCipherAES256) 69 | if err != nil { 70 | t.Fatalf("Failed to encrypt key: %v", err) 71 | } 72 | if err := 
os.WriteFile(path, pem.EncodeToMemory(block), 0600); err != nil { 73 | t.Fatalf("Failed to write test file: %v", err) 74 | } 75 | return path 76 | }, 77 | passphrase: "testpass", 78 | expectError: false, 79 | }, 80 | { 81 | name: "wrong passphrase", 82 | setup: func() string { 83 | path := filepath.Join(tmpDir, "wrong-pass.pem") 84 | block, err := x509.EncryptPEMBlock(rand.Reader, "PRIVATE KEY", keyBytes, []byte("testpass"), x509.PEMCipherAES256) 85 | if err != nil { 86 | t.Fatalf("Failed to encrypt key: %v", err) 87 | } 88 | if err := os.WriteFile(path, pem.EncodeToMemory(block), 0600); err != nil { 89 | t.Fatalf("Failed to write test file: %v", err) 90 | } 91 | return path 92 | }, 93 | passphrase: "wrongpass", 94 | expectError: true, 95 | }, 96 | { 97 | name: "non-existent file", 98 | setup: func() string { 99 | return filepath.Join(tmpDir, "nonexistent.pem") 100 | }, 101 | passphrase: "", 102 | expectError: true, 103 | }, 104 | { 105 | name: "invalid PEM data", 106 | setup: func() string { 107 | path := filepath.Join(tmpDir, "invalid.pem") 108 | if err := os.WriteFile(path, []byte("not a pem file"), 0600); err != nil { 109 | t.Fatalf("Failed to write test file: %v", err) 110 | } 111 | return path 112 | }, 113 | passphrase: "", 114 | expectError: true, 115 | }, 116 | } 117 | 118 | for _, tc := range testCases { 119 | t.Run(tc.name, func(t *testing.T) { 120 | path := tc.setup() 121 | key, err := parsePrivateKey(path, tc.passphrase) 122 | 123 | if tc.expectError { 124 | assert.Error(t, err) 125 | assert.Nil(t, key) 126 | } else { 127 | assert.NoError(t, err) 128 | assert.NotNil(t, key) 129 | assert.IsType(t, &rsa.PrivateKey{}, key) 130 | } 131 | }) 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /sql_runner/sql_provider.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 
2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 11 | package main 12 | 13 | import ( 14 | "io/ioutil" 15 | "path" 16 | ) 17 | 18 | // SQLProvider is the interface that wraps the ResolveKey and GetSQL methods. 19 | type SQLProvider interface { 20 | ResolveKey(key string) string 21 | GetSQL(key string) (string, error) 22 | } 23 | 24 | // FileSQLProvider represents a file as a SQL provider. 25 | type FileSQLProvider struct { 26 | rootPath string 27 | } 28 | 29 | // NewFileSQLProvider returns a ptr to a FileSQLProvider. 30 | func NewFileSQLProvider(rootPath string) *FileSQLProvider { 31 | return &FileSQLProvider{ 32 | rootPath: rootPath, 33 | } 34 | } 35 | 36 | // GetSQL implements SQLProvider. 37 | func (p FileSQLProvider) GetSQL(scriptPath string) (string, error) { 38 | return readScript(p.ResolveKey(scriptPath)) 39 | } 40 | 41 | // ResolveKey implements SQLProvider. 42 | func (p FileSQLProvider) ResolveKey(scriptPath string) string { 43 | return path.Join(p.rootPath, scriptPath) 44 | } 45 | 46 | // Reads the file ready for executing 47 | func readScript(file string) (string, error) { 48 | scriptBytes, err := ioutil.ReadFile(file) 49 | if err != nil { 50 | return "", err 51 | } 52 | return string(scriptBytes), nil 53 | } 54 | 55 | // ConsulSQLProvider represents Consul as a SQL provider. 
56 | type ConsulSQLProvider struct { 57 | consulAddress string 58 | keyPrefix string 59 | } 60 | 61 | // NewConsulSQLProvider returns a pts to a ConsulSQLProvider. 62 | func NewConsulSQLProvider(consulAddress, keyPrefix string) *ConsulSQLProvider { 63 | return &ConsulSQLProvider{ 64 | consulAddress: consulAddress, 65 | keyPrefix: keyPrefix, 66 | } 67 | } 68 | 69 | // GetSQL implements SQLProcider. 70 | func (p ConsulSQLProvider) GetSQL(key string) (string, error) { 71 | return GetStringValueFromConsul(p.consulAddress, p.ResolveKey(key)) 72 | } 73 | 74 | // ResolveKey implements SQLProvider. 75 | func (p ConsulSQLProvider) ResolveKey(key string) string { 76 | return path.Join(p.keyPrefix, key) 77 | } 78 | -------------------------------------------------------------------------------- /sql_runner/template.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 11 | package main 12 | 13 | import ( 14 | "math/rand" 15 | "os" 16 | "strconv" 17 | "text/template" 18 | "time" 19 | ) 20 | 21 | var ( 22 | // TemplFuncs is the supported template functions map. 
23 | TemplFuncs = template.FuncMap{ 24 | "nowWithFormat": func(format string) string { 25 | return time.Now().Format(format) 26 | }, 27 | "systemEnv": func(env string) string { 28 | return os.Getenv(env) 29 | }, 30 | "randomInt": func() (string, error) { 31 | r := rand.NewSource(time.Now().UnixNano()) 32 | return strconv.FormatInt(r.Int63(), 10), nil 33 | }, 34 | "awsChainCredentials": awsChainCredentials, 35 | "awsEC2RoleCredentials": awsEC2RoleCredentials, 36 | "awsEnvCredentials": awsEnvCredentials, 37 | "awsProfileCredentials": awsProfileCredentials, 38 | } 39 | ) 40 | -------------------------------------------------------------------------------- /sql_runner/yaml_provider.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 11 | package main 12 | 13 | // YAMLFilePlaybookProvider represents YAML as playbook provider. 14 | type YAMLFilePlaybookProvider struct { 15 | playbookPath string 16 | variables map[string]string 17 | } 18 | 19 | // NewYAMLFilePlaybookProvider returns a ptr to YAMLFilePlaybookProvider. 
20 | func NewYAMLFilePlaybookProvider(playbookPath string, variables map[string]string) *YAMLFilePlaybookProvider { 21 | return &YAMLFilePlaybookProvider{ 22 | playbookPath: playbookPath, 23 | variables: variables, 24 | } 25 | } 26 | 27 | // GetPlaybook returns a ptr to a yaml playbook. 28 | func (p YAMLFilePlaybookProvider) GetPlaybook() (*Playbook, error) { 29 | lines, err := loadLocalFile(p.playbookPath) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | return parsePlaybookYaml(lines, p.variables) 35 | } 36 | -------------------------------------------------------------------------------- /sql_runner/yaml_utils.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
11 | package main 12 | 13 | import ( 14 | "bytes" 15 | "fmt" 16 | "regexp" 17 | "strings" 18 | "text/template" 19 | 20 | "github.com/goccy/go-yaml" 21 | ) 22 | 23 | var ( 24 | // Remove the prepended :s 25 | rubyYamlRegex = regexp.MustCompile("^(\\s*-?\\s*):?(.*)$") 26 | ) 27 | 28 | // Parses a playbook.yml to return the targets 29 | // to execute against and the steps to execute 30 | func parsePlaybookYaml(playbookBytes []byte, variables map[string]string) (*Playbook, error) { 31 | // Define and initialize the Playbook struct 32 | var playbook Playbook = NewPlaybook() 33 | 34 | // Clean up the YAML 35 | cleaned := cleanYaml(playbookBytes) 36 | 37 | // Run the yaml through the template engine 38 | str, err := fillPlaybookTemplate(string(cleaned[:]), variables) 39 | if err != nil { 40 | return nil, fmt.Errorf("error filling playbook template") 41 | } 42 | 43 | // Unmarshal the yaml into the playbook 44 | if err = yaml.Unmarshal([]byte(str), &playbook); err != nil { 45 | return nil, fmt.Errorf("error unmarshalling playbook yaml") 46 | } 47 | 48 | return &playbook, nil 49 | } 50 | 51 | // Because our StorageLoader's YAML file has elements with 52 | // : prepended (bad decision to make things easier from 53 | // our Ruby code). 
54 | func cleanYaml(rawYaml []byte) []byte { 55 | var lines []string 56 | var buffer bytes.Buffer 57 | 58 | lines = strings.Split(string(rawYaml), "\n") 59 | 60 | for _, line := range lines { 61 | buffer.WriteString(rubyYamlRegex.ReplaceAllString(line, "${1}${2}\n")) 62 | } 63 | return buffer.Bytes() 64 | } 65 | 66 | func fillPlaybookTemplate(playbookStr string, variables map[string]string) (string, error) { 67 | t, err := template.New("playbook").Funcs(TemplFuncs).Parse(playbookStr) 68 | if err != nil { 69 | return "", err 70 | } 71 | 72 | var filled bytes.Buffer 73 | if err := t.Execute(&filled, variables); err != nil { 74 | return "", err 75 | } 76 | 77 | return filled.String(), err 78 | } 79 | -------------------------------------------------------------------------------- /sql_runner/yaml_utils_test.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2025 Snowplow Analytics Ltd. All rights reserved. 2 | // 3 | // This program is licensed to you under the Apache License Version 2.0, 4 | // and you may not use this file except in compliance with the Apache License Version 2.0. 5 | // You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. 6 | // 7 | // Unless required by applicable law or agreed to in writing, 8 | // software distributed under the Apache License Version 2.0 is distributed on an 9 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | // See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
11 | package main 12 | 13 | import ( 14 | "reflect" 15 | "testing" 16 | 17 | "github.com/davecgh/go-spew/spew" 18 | "github.com/stretchr/testify/assert" 19 | ) 20 | 21 | func TestParsePlaybookYaml(t *testing.T) { 22 | assert := assert.New(t) 23 | 24 | playbook, err := parsePlaybookYaml(nil, nil) 25 | assert.Nil(err) 26 | assert.NotNil(playbook) 27 | assert.Equal(0, len(playbook.Targets)) 28 | assert.Equal(0, len(playbook.Steps)) 29 | 30 | playbookBytes, err1 := loadLocalFile("../integration/resources/good-postgres.yml") 31 | assert.Nil(err1) 32 | assert.NotNil(playbookBytes) 33 | 34 | playbook, err = parsePlaybookYaml(playbookBytes, nil) 35 | assert.Nil(err) 36 | assert.NotNil(playbook) 37 | assert.Equal(2, len(playbook.Targets)) 38 | assert.Equal(6, len(playbook.Steps)) 39 | } 40 | 41 | func TestCleanYaml(t *testing.T) { 42 | assert := assert.New(t) 43 | 44 | rawYaml := []byte(":hello: world\n:world: hello") 45 | cleanYamlStr := string(cleanYaml(rawYaml)) 46 | assert.Equal("hello: world\nworld: hello\n", cleanYamlStr) 47 | 48 | rawYaml = []byte(":hello:\n :world: hello") 49 | cleanYamlStr = string(cleanYaml(rawYaml)) 50 | assert.Equal("hello:\n world: hello\n", cleanYamlStr) 51 | 52 | cleanYamlStr = string(cleanYaml(nil)) 53 | assert.Equal("\n", cleanYamlStr) 54 | } 55 | 56 | func TestTemplateYaml(t *testing.T) { 57 | assert := assert.New(t) 58 | 59 | playbookBytes, err1 := loadLocalFile("../integration/resources/good-postgres-with-template.yml") 60 | assert.Nil(err1) 61 | 62 | var m map[string]string = make(map[string]string) 63 | 64 | m["password"] = "qwerty123" 65 | m["username"] = "animoto" 66 | m["host"] = "theinternetz" 67 | 68 | playbook, err := parsePlaybookYaml(playbookBytes, CLIVariables(m)) 69 | 70 | assert.Nil(err) 71 | 72 | assert.Equal("qwerty123", playbook.Targets[0].Password) 73 | assert.Equal("animoto", playbook.Targets[0].Username) 74 | assert.Equal("theinternetz", playbook.Targets[0].Host) 75 | } 76 | 77 | func TestParse_QueryFlag(t *testing.T) 
{ 78 | testCases := []struct { 79 | Name string 80 | Playbook string 81 | Expected *Playbook 82 | }{ 83 | { 84 | Name: "simple", 85 | Playbook: ` 86 | :targets: 87 | - :type: snowflake 88 | :query_tag: "snowplow" 89 | `, 90 | Expected: &Playbook{ 91 | Targets: []Target{ 92 | { 93 | Type: "snowflake", 94 | QueryTag: "snowplow", 95 | }, 96 | }, 97 | Variables: make(map[string]interface{}), 98 | Steps: nil, 99 | }, 100 | }, 101 | { 102 | Name: "with_escaped_quotes", 103 | Playbook: ` 104 | :targets: 105 | - :type: snowflake 106 | :query_tag: "{module: \"base\", steps: \"main\"}" 107 | `, 108 | Expected: &Playbook{ 109 | Targets: []Target{ 110 | { 111 | Type: "snowflake", 112 | QueryTag: `{module: "base", steps: "main"}`, 113 | }, 114 | }, 115 | Variables: make(map[string]interface{}), 116 | Steps: nil, 117 | }, 118 | }, 119 | } 120 | 121 | noVars := make(map[string]string) 122 | 123 | for _, tt := range testCases { 124 | t.Run(tt.Name, func(t *testing.T) { 125 | assert := assert.New(t) 126 | 127 | result, err := parsePlaybookYaml([]byte(tt.Playbook), noVars) 128 | assert.Nil(err) 129 | if !reflect.DeepEqual(result, tt.Expected) { 130 | t.Fatalf("\nGOT:\n%s\nEXPECTED:\n%s\n", 131 | spew.Sdump(result), 132 | spew.Sdump(tt.Expected)) 133 | } 134 | }) 135 | } 136 | 137 | } 138 | 139 | func TestParsePlaybookYaml_compatibility(t *testing.T) { 140 | testCases := []struct { 141 | Name string 142 | Playbook string 143 | Expected *Playbook 144 | }{ 145 | { 146 | Name: "time", 147 | Playbook: ` 148 | :targets: 149 | - :name: test 150 | :database: 2022-01-01 151 | :variables: 152 | :model_version: snowflake/web/1.0.1 153 | :start_date: 2022-01-01 154 | `, 155 | Expected: &Playbook{ 156 | Targets: []Target{ 157 | { 158 | Name: "test", 159 | Database: "2022-01-01", 160 | }, 161 | }, 162 | Variables: map[string]interface{}{ 163 | "model_version": "snowflake/web/1.0.1", 164 | "start_date": "2022-01-01", 165 | }, 166 | Steps: nil, 167 | }, 168 | }, 169 | { 170 | Name: "int", 171 | 
Playbook: ` 172 | :targets: 173 | - :name: test 174 | :database: 7 175 | :variables: 176 | :update_cadence_days: 7 177 | :lookback_window_hours: -3 178 | :some_float: 3.14 179 | `, 180 | Expected: &Playbook{ 181 | Targets: []Target{ 182 | { 183 | Name: "test", 184 | Database: string("7"), 185 | }, 186 | }, 187 | Variables: map[string]interface{}{ 188 | "update_cadence_days": uint64(7), 189 | "lookback_window_hours": int64(-3), 190 | "some_float": float64(3.14), 191 | }, 192 | Steps: nil, 193 | }, 194 | }, 195 | { 196 | Name: "bool", 197 | Playbook: ` 198 | :targets: 199 | - :name: true 200 | :database: test 201 | :ssl: true 202 | :variables: 203 | :stage_next: true 204 | :steps: 205 | - :name: 01-stored-procedures 206 | :queries: 207 | - :name: 01-stored-procedures 208 | :file: standard/00-setup/01-main/01-stored-procedures.sql 209 | :template: true 210 | `, 211 | Expected: &Playbook{ 212 | Targets: []Target{ 213 | { 214 | Name: string("true"), 215 | Database: "test", 216 | Ssl: bool(true), 217 | }, 218 | }, 219 | Variables: map[string]interface{}{ 220 | "stage_next": bool(true), 221 | }, 222 | Steps: []Step{ 223 | { 224 | Name: "01-stored-procedures", 225 | Queries: []Query{ 226 | { 227 | Name: "01-stored-procedures", 228 | File: "standard/00-setup/01-main/01-stored-procedures.sql", 229 | Template: bool(true), 230 | }, 231 | }, 232 | }, 233 | }, 234 | }, 235 | }, 236 | } 237 | 238 | noVars := make(map[string]string) 239 | 240 | for _, tt := range testCases { 241 | t.Run(tt.Name, func(t *testing.T) { 242 | assert := assert.New(t) 243 | 244 | result, err := parsePlaybookYaml([]byte(tt.Playbook), noVars) 245 | assert.Nil(err) 246 | if !reflect.DeepEqual(result, tt.Expected) { 247 | t.Fatalf("\nGOT:\n%s\nEXPECTED:\n%s\n", 248 | spew.Sdump(result), 249 | spew.Sdump(tt.Expected)) 250 | } 251 | }) 252 | } 253 | } 254 | --------------------------------------------------------------------------------