├── .github ├── dependabot.yml ├── license-header.txt ├── settings.yml └── workflows │ ├── benchmark.yaml │ ├── build.yml │ ├── docs_preview.yaml │ └── publish_benchmarks.yaml ├── .gitignore ├── .golangci.yml ├── .goreleaser.yaml ├── .vscode └── settings.json ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── brand-kit ├── README.md ├── banner │ ├── pgroll-banner-github@2x.png │ ├── pgroll-banner.svg │ └── pgroll-banner@2x.png └── logo │ ├── symbol │ ├── pgroll-base-symbol.svg │ ├── pgroll-black-symbol.svg │ └── pgroll-white-symbol.svg │ └── wordmark │ ├── pgroll-base-wordmark.svg │ ├── pgroll-black-wordmark.svg │ └── pgroll-white-wordmark.svg ├── cli-definition.json ├── cmd ├── analyze.go ├── baseline.go ├── complete.go ├── convert.go ├── create.go ├── errors.go ├── flags │ └── flags.go ├── init.go ├── latest.go ├── migrate.go ├── pull.go ├── rollback.go ├── root.go ├── start.go ├── status.go └── validate.go ├── dev ├── benchmark-results │ ├── build.go │ ├── build_test.go │ └── testdata │ │ └── benchmark-results.json ├── doc.go ├── go.mod └── go.sum ├── docker-compose.yml ├── docs ├── cli │ ├── README.md │ ├── baseline.mdx │ ├── complete.mdx │ ├── convert.mdx │ ├── create.mdx │ ├── init.mdx │ ├── latest.mdx │ ├── migrate.mdx │ ├── pull.mdx │ ├── rollback.mdx │ ├── start.mdx │ ├── status.mdx │ └── validate.mdx ├── concepts.md ├── config.json ├── getting-started.md ├── guides │ ├── README.md │ ├── clientapps.mdx │ ├── orms.mdx │ └── updown.mdx ├── img │ ├── migration-schemas@2x.png │ └── schema-changes-flow@2x.png ├── install-pgroll.md ├── operations │ ├── README.md │ ├── add_column.mdx │ ├── alter_column │ │ ├── README.md │ │ ├── add_check_constraint.mdx │ │ ├── add_foreign_key.mdx │ │ ├── add_not_null_constraint.mdx │ │ ├── add_unique_constraint.mdx │ │ ├── change_comment.mdx │ │ ├── change_default.mdx │ │ ├── change_type.mdx │ │ └── drop_not_null_constraint.mdx │ ├── create_constraint.mdx │ ├── create_index.mdx │ ├── create_table.mdx │ ├── drop_column.mdx │ ├── drop_constraint.mdx │ ├── drop_index.mdx │ ├── drop_multi_column_constraint.mdx │ ├── drop_table.mdx │ ├── raw_sql.mdx │ ├── rename_column.mdx │ ├── rename_constraint.mdx │ ├── rename_table.mdx │ └── set_replica_identity.mdx ├── tutorial.md └── why-use-pgroll.md ├── examples ├── .ledger ├── 01_create_tables.yaml ├── 02_create_another_table.yaml ├── 03_add_column.yaml ├── 04_rename_table.yaml ├── 05_sql.yaml ├── 06_add_column_to_sql_table.yaml ├── 07_drop_table.yaml ├── 08_create_fruits_table.yaml ├── 09_drop_column.yaml ├── 10_create_index.yaml ├── 11_drop_index.yaml ├── 12_create_employees_table.yaml ├── 13_rename_column.yaml ├── 14_add_reviews_table.yaml ├── 15_set_column_unique.yaml ├── 16_set_nullable.yaml ├── 17_add_rating_column.yaml ├── 18_change_column_type.yaml ├── 19_create_orders_table.yaml ├── 20_create_posts_table.yaml ├── 21_add_foreign_key_constraint.yaml ├── 22_add_check_constraint.yaml ├── 23_drop_check_constraint.yaml ├── 24_drop_foreign_key_constraint.yaml ├── 25_add_table_with_check_constraint.yaml ├── 26_add_column_with_check_constraint.yaml ├── 27_drop_unique_constraint.yaml ├── 28_different_defaults.yaml ├── 29_set_replica_identity.yaml ├── 30_add_column_simple_up.yaml ├── 31_unset_not_null.yaml ├── 32_sql_on_complete.yaml ├── 33_rename_check_constraint.yaml ├── 34_create_events_table.yaml ├── 35_alter_column_multiple.yaml ├── 36_set_comment_to_null.yaml ├── 37_create_partial_index.yaml ├── 38_create_hash_index_with_fillfactor.yaml ├── 39_add_column_with_multiple_pk_in_table.yaml ├── 
40_create_enum_type.yaml ├── 41_add_enum_column.yaml ├── 42_create_unique_index.yaml ├── 43_create_tickets_table.yaml ├── 44_add_table_unique_constraint.yaml ├── 45_add_table_check_constraint.yaml ├── 46_alter_column_drop_default.yaml ├── 47_add_table_foreign_key_constraint.yaml ├── 48_drop_tickets_check.yaml ├── 49_unset_not_null_on_indexed_column.yaml ├── 50_create_table_with_table_constraint.yaml ├── 51_create_table_with_table_foreign_key_constraint.yaml ├── 52_create_table_with_exclusion_constraint.yaml ├── 53_add_column_with_volatile_default.yaml ├── 54_create_index_with_opclass.yaml └── 55_add_primary_key_constraint_to_table.yaml ├── go.mod ├── go.sum ├── internal ├── benchmarks │ └── benchmarks_test.go ├── defaults │ ├── fastpath.go │ └── fastpath_test.go ├── jsonschema │ ├── jsonschema_test.go │ └── testdata │ │ ├── add-column-1.txtar │ │ ├── alter-column-1.txtar │ │ ├── alter-column-2.txtar │ │ ├── alter-column-3.txtar │ │ ├── create-constraint-1-invalid-check.txtar │ │ ├── create-constraint-2-invalid-no-inherit.txtar │ │ ├── create-invalid-index.txtar │ │ ├── create-table-1.txtar │ │ ├── create-table-10-invalid-primary-key-constraints-extra-check.txtar │ │ ├── create-table-10-invalid-unique-missing-columns.txtar │ │ ├── create-table-11-invalid-primary-key-constraints-missing-columns.txtar │ │ ├── create-table-12-valid-primary-key-constraint.txtar │ │ ├── create-table-13-invalid-fk-missing-references.txtar │ │ ├── create-table-14-invalid-fk-missing-referenced-table.txtar │ │ ├── create-table-15-invalid-fk-missing-columns.txtar │ │ ├── create-table-16-invalid-exclusion-missing-exclusion.txtar │ │ ├── create-table-17-invalid-exclusion-columns-set.txtar │ │ ├── create-table-2-check-constraints.txtar │ │ ├── create-table-3-invalid-check-constraints.txtar │ │ ├── create-table-3-unique-constraint.txtar │ │ ├── create-table-4-invalid-check-options-deferrable.txtar │ │ ├── create-table-5-invalid-check-options-initially-defer.txtar │ │ ├── create-table-6-invalid-check-options-nulls-not-distinct.txtar │ │ ├── create-table-7-invalid-check-options-forbidden-index-params.txtar │ │ ├── create-table-8-invalid-unique-forbidden-check-param.txtar │ │ ├── create-table-9-invalid-unique-forbidden-no-inherit.txtar │ │ ├── rename-constraint-1.txtar │ │ ├── sql-1.txtar │ │ ├── sql-2.txtar │ │ ├── sql-3.txtar │ │ └── sql-4.txtar └── testutils │ ├── db.go │ ├── error_codes.go │ └── util.go ├── main.go ├── pkg ├── backfill │ ├── backfill.go │ ├── config.go │ ├── errors.go │ └── templates │ │ ├── build.go │ │ ├── build_test.go │ │ └── sql.go ├── db │ ├── db.go │ ├── db_test.go │ └── fake.go ├── migrations │ ├── check.go │ ├── check_test.go │ ├── column.go │ ├── constraints.go │ ├── constraints_test.go │ ├── dbactions.go │ ├── duplicate.go │ ├── duplicate_test.go │ ├── errors.go │ ├── fk_reference.go │ ├── fk_reference_test.go │ ├── logger.go │ ├── migrations.go │ ├── migrations_test.go │ ├── op_add_column.go │ ├── op_add_column_test.go │ ├── op_alter_column.go │ ├── op_alter_column_test.go │ ├── op_change_type.go │ ├── op_change_type_test.go │ ├── op_common.go │ ├── op_common_test.go │ ├── op_create_constraint.go │ ├── op_create_constraint_test.go │ ├── op_create_index.go │ ├── op_create_index_test.go │ ├── op_create_table.go │ ├── op_create_table_test.go │ ├── op_drop_column.go │ ├── op_drop_column_test.go │ ├── op_drop_constraint.go │ ├── op_drop_constraint_test.go │ ├── op_drop_index.go │ ├── op_drop_index_test.go │ ├── op_drop_multicolumn_constraint.go │ ├── op_drop_multicolumn_constraint_test.go │ ├── 
op_drop_not_null.go │ ├── op_drop_not_null_test.go │ ├── op_drop_table.go │ ├── op_drop_table_test.go │ ├── op_raw_sql.go │ ├── op_raw_sql_test.go │ ├── op_rename_column.go │ ├── op_rename_column_test.go │ ├── op_rename_constraint.go │ ├── op_rename_constraint_test.go │ ├── op_rename_table.go │ ├── op_rename_table_test.go │ ├── op_set_check.go │ ├── op_set_check_test.go │ ├── op_set_comment.go │ ├── op_set_comment_test.go │ ├── op_set_default.go │ ├── op_set_default_test.go │ ├── op_set_fk.go │ ├── op_set_fk_test.go │ ├── op_set_notnull.go │ ├── op_set_notnull_test.go │ ├── op_set_replica_identity.go │ ├── op_set_replica_identity_test.go │ ├── op_set_unique.go │ ├── op_set_unique_test.go │ ├── pterm_create.go │ ├── rename.go │ ├── templates │ │ ├── function.go │ │ └── trigger.go │ ├── trigger.go │ ├── trigger_test.go │ ├── types.go │ ├── unique.go │ └── writer.go ├── roll │ ├── baseline.go │ ├── baseline_test.go │ ├── execute.go │ ├── execute_test.go │ ├── latest.go │ ├── latest_test.go │ ├── missing.go │ ├── missing_test.go │ ├── options.go │ ├── roll.go │ ├── unapplied.go │ └── unapplied_test.go ├── schema │ └── schema.go ├── sql2pgroll │ ├── alter_table.go │ ├── alter_table_test.go │ ├── convert.go │ ├── convert_test.go │ ├── create_index.go │ ├── create_index_test.go │ ├── create_table.go │ ├── create_table_test.go │ ├── drop.go │ ├── drop_test.go │ ├── expect │ │ ├── add_column.go │ │ ├── add_foreign_key.go │ │ ├── alter_column.go │ │ ├── create_constraint.go │ │ ├── create_index.go │ │ ├── create_table.go │ │ ├── drop_column.go │ │ ├── drop_constraint.go │ │ ├── drop_index.go │ │ ├── drop_table.go │ │ ├── raw_sql.go │ │ ├── rename_column.go │ │ ├── rename_constraint.go │ │ └── rename_table.go │ ├── rename.go │ └── rename_test.go └── state │ ├── errors.go │ ├── history.go │ ├── history_test.go │ ├── init.sql │ ├── state.go │ ├── state_test.go │ └── status.go ├── schema.json └── tools └── build-cli-definition.go /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Please see the documentation for all configuration options: 2 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 3 | version: 2 4 | updates: 5 | - package-ecosystem: "gomod" 6 | directory: "/" 7 | schedule: 8 | interval: "weekly" 9 | -------------------------------------------------------------------------------- /.github/license-header.txt: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | -------------------------------------------------------------------------------- /.github/workflows/benchmark.yaml: -------------------------------------------------------------------------------- 1 | name: Benchmark 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: 6 | - main 7 | permissions: 8 | id-token: write # For getting AWS permissions 9 | contents: read 10 | packages: read 11 | jobs: 12 | benchmark: 13 | name: 'benchmark (pg: ${{ matrix.pgVersion }})' 14 | runs-on: ubuntu-24.04 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | pgVersion: ['14.8', '15.3', '16.4', '17.0' ,'latest'] 19 | steps: 20 | - uses: actions/checkout@v4 21 | 22 | - name: Set up Go 23 | uses: actions/setup-go@v5 24 | with: 25 | go-version-file: 'go.mod' 26 | 27 | - name: Run benchmarks 28 | run: make bench 29 | env: 30 | POSTGRES_VERSION: ${{ matrix.pgVersion }} 31 | 32 | - name: Upload results 33 | uses: actions/upload-artifact@v4 34 | 
with: 35 | name: benchmark_result_${{ matrix.pgVersion }}.json 36 | path: internal/benchmarks/benchmark_result_${{ matrix.pgVersion }}.json 37 | 38 | gather: 39 | name: 'Gather results' 40 | runs-on: ubuntu-24.04 41 | needs: [benchmark] 42 | 43 | steps: 44 | - uses: actions/download-artifact@v4 45 | with: 46 | path: ./results/ 47 | merge-multiple: true 48 | 49 | - name: Configure AWS Credentials 50 | uses: aws-actions/configure-aws-credentials@v2 51 | with: 52 | role-to-assume: arn:aws:iam::493985724844:role/pgroll-benchmark-results-access 53 | aws-region: us-east-1 54 | mask-aws-account-id: 'no' 55 | 56 | - name: Download current results from S3 57 | run: aws s3 cp s3://pgroll-benchmark-results/benchmark-results.json ./benchmark-results.json 58 | 59 | - name: Append new results 60 | run: cat results/*.json >> benchmark-results.json 61 | 62 | - name: Upload combined results 63 | run: aws s3 cp ./benchmark-results.json s3://pgroll-benchmark-results/benchmark-results.json 64 | -------------------------------------------------------------------------------- /.github/workflows/docs_preview.yaml: -------------------------------------------------------------------------------- 1 | name: Docs Preview 2 | on: 3 | pull_request_target: # Like pull_request but runs the 'base' workflow, safer and works with forks. 4 | types: [opened, reopened, synchronize] 5 | jobs: 6 | docs-preview: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - name: Create Docs Preview GitHub Deployment 10 | id: create_deployment 11 | uses: actions/github-script@v6 12 | with: 13 | script: | 14 | const { owner: repoOwner, repo } = context.repo; 15 | const pr = context.payload.pull_request; 16 | const prOwner = pr.head.repo.full_name === context.repo.full_name ? repoOwner : pr.head.repo.owner.login; 17 | const branch = pr.head.ref; 18 | const previewUrl = `https://pgroll.com/next/preview-path?path=/docs-preview/${prOwner}:${branch}/` 19 | 20 | const deployment = await github.rest.repos.createDeployment({ 21 | owner: repoOwner, 22 | repo, 23 | ref: pr.head.sha, 24 | auto_merge: false, 25 | required_contexts: [], // Don't require checks to pass 26 | environment: 'Docs Preview', 27 | description: 'Creating a preview', 28 | }); 29 | if (deployment.status !== 201) { 30 | throw new Error('Failed to create deployment'); 31 | } 32 | await github.rest.repos.createDeploymentStatus({ 33 | deployment_id: deployment.data.id, 34 | owner: repoOwner, 35 | repo, 36 | state: 'success', 37 | description: 'Created a preview', 38 | environment_url: previewUrl 39 | }); 40 | 41 | -------------------------------------------------------------------------------- /.github/workflows/publish_benchmarks.yaml: -------------------------------------------------------------------------------- 1 | name: Publish Benchmark 2 | 3 | on: 4 | workflow_run: 5 | workflows: [ "Benchmark" ] 6 | branches: [ main ] 7 | types: 8 | - completed 9 | workflow_dispatch: 10 | 11 | permissions: 12 | id-token: write # For getting AWS permissions 13 | contents: write 14 | packages: read 15 | pages: write 16 | 17 | # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. 18 | # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 
19 | concurrency: 20 | group: "pages" 21 | cancel-in-progress: false 22 | 23 | jobs: 24 | publish: 25 | name: Publish benchmarks 26 | runs-on: ubuntu-24.04 27 | steps: 28 | - name: Configure AWS Credentials 29 | uses: aws-actions/configure-aws-credentials@v2 30 | with: 31 | role-to-assume: arn:aws:iam::493985724844:role/pgroll-benchmark-results-access 32 | aws-region: us-east-1 33 | mask-aws-account-id: 'no' 34 | 35 | - name: Checkout 36 | uses: actions/checkout@v4 37 | 38 | - name: Set up Go 39 | uses: actions/setup-go@v5 40 | with: 41 | go-version-file: 'dev/go.mod' 42 | 43 | - name: Setup Pages 44 | uses: actions/configure-pages@v5 45 | 46 | - name: Download results and build html 47 | working-directory: ./dev 48 | run: | 49 | aws s3 cp s3://pgroll-benchmark-results/benchmark-results.json $HOME/benchmark-results.json 50 | go run benchmark-results/build.go $HOME/benchmark-results.json /home/runner/work/pgroll/pgroll/benchmarks.html 51 | 52 | # This will pick up the benchmarks.html file generated in the previous step and will also 53 | # publish the README at index.html 54 | - name: Build with Jekyll 55 | uses: actions/jekyll-build-pages@v1 56 | with: 57 | source: ./ 58 | destination: ./static 59 | 60 | - name: Upload artifact 61 | uses: actions/upload-pages-artifact@v3 62 | with: 63 | path: ./static 64 | 65 | - name: Deploy to GitHub Pages 66 | id: deployment 67 | uses: actions/deploy-pages@v4 68 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | bin/license-header-checker 2 | 3 | # misc 4 | .DS_Store 5 | .env 6 | pgroll 7 | 8 | dist/ 9 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | goVersion: &goVersion "1.24" 2 | 3 | run: 4 | go: *goVersion 5 | timeout: 5m 6 | 7 | linters: 8 | disable-all: true 9 | # Enable specific linter 10 | # https://golangci-lint.run/usage/linters/#enabled-by-default-linters 11 | enable: 12 | - errorlint 13 | - copyloopvar 14 | - forcetypeassert 15 | - goconst 16 | - gocritic 17 | - gofumpt 18 | - gosec 19 | - gosimple 20 | - govet 21 | - ineffassign 22 | - makezero 23 | - misspell 24 | - nakedret 25 | - nolintlint 26 | - prealloc 27 | - staticcheck 28 | - stylecheck 29 | - unused 30 | 31 | linters-settings: 32 | errorlint: 33 | errorf: true 34 | 35 | gomodguard: 36 | blocked: 37 | # List of blocked modules. 38 | modules: 39 | # Blocked module. 40 | - github.com/pkg/errors: 41 | # Recommended modules that should be used instead.
(Optional) 42 | recommendations: 43 | - errors 44 | - fmt 45 | reason: "This package is deprecated, use `fmt.Errorf` with `%w` instead" 46 | 47 | goconst: 48 | ignore-tests: true 49 | numbers: true 50 | 51 | gocritic: 52 | disabled-checks: 53 | - exitAfterDefer 54 | - ifElseChain 55 | - commentFormatting 56 | 57 | gofumpt: 58 | module-path: "pgroll" 59 | extra-rules: false 60 | 61 | staticcheck: 62 | checks: ["all"] 63 | 64 | stylecheck: 65 | checks: ["all", "-ST1000", "-ST1005"] 66 | 67 | unused: 68 | go: *goVersion 69 | 70 | makezero: 71 | always: false 72 | 73 | gosec: 74 | exclude-generated: true 75 | severity: low 76 | includes: [] 77 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "gopls": { 3 | "formatting.gofumpt": true, 4 | "formatting.local": "pgroll" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # This Dockerfile is used by GoReleaser in the `release` job. 3 | # See: 4 | # https://goreleaser.com/customization/docker 5 | # 6 | FROM scratch 7 | COPY pgroll /usr/bin/pgroll 8 | ENTRYPOINT [ "/usr/bin/pgroll" ] 9 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: pgroll clean generate format lint ledger examples test bench install-license-checker license 2 | 3 | pgroll: 4 | go build 5 | 6 | clean: 7 | go clean 8 | 9 | format: 10 | # Format JSON schema 11 | docker run --rm -v $$PWD/schema.json:/mnt/schema.json node:alpine npx prettier /mnt/schema.json --parser json --tab-width 2 --single-quote --trailing-comma all --no-semi --arrow-parens always --print-width 120 --write 12 | # Format embedded SQL 13 | docker run --rm -v $$PWD/pkg/state/init.sql:/data/init.sql backplane/pgformatter --inplace /data/init.sql 14 | # Run gofumpt 15 | gofumpt -w . 16 | 17 | generate: 18 | # Generate the types from the JSON schema 19 | docker run --rm -v $$PWD/schema.json:/mnt/schema.json omissis/go-jsonschema:0.17.0 --only-models -p migrations --tags json /mnt/schema.json > pkg/migrations/types.go 20 | # Add the license header to the generated type file 21 | echo "// SPDX-License-Identifier: Apache-2.0" | cat - pkg/migrations/types.go > pkg/migrations/types.go.tmp 22 | mv pkg/migrations/types.go.tmp pkg/migrations/types.go 23 | # Generate the cli-definition.json file 24 | go run tools/build-cli-definition.go 25 | 26 | lint: 27 | golangci-lint --config=.golangci.yml run 28 | 29 | ledger: 30 | cd examples && ls > .ledger 31 | 32 | examples: ledger 33 | @go build 34 | @./pgroll init 35 | @./pgroll migrate examples --complete 36 | @go clean 37 | 38 | test: 39 | go test ./... 40 | 41 | bench: 42 | go test ./internal/benchmarks -v -benchtime=1x -bench . 43 | 44 | install-license-checker: 45 | if [ ! -f ./bin/license-header-checker ]; then curl -s https://raw.githubusercontent.com/lluissm/license-header-checker/master/install.sh | bash; fi 46 | 47 | license: install-license-checker 48 | ./bin/license-header-checker -a -r .github/license-header.txt . go 49 | -------------------------------------------------------------------------------- /brand-kit/README.md: -------------------------------------------------------------------------------- 1 | ## Logos 2 | 3 | Our logo combines a symbol and wordmark.
Use the base logo and wordmark whenever possible. For dark backgrounds, use the inverted (white) logo, and for light backgrounds, use the black logo. Maintain the logo's aspect ratio, provide clear space, and avoid unauthorized modifications. In contexts with other logos, opt for the monochrome versions to ensure a cohesive and community-oriented brand identity. 4 | 5 | | Symbol | Wordmark | 6 | |-------------------------------------------------------|-------------------------------------------------------------| 7 | | ![SymbolBase](logo/symbol/pgroll-base-symbol.svg) | ![WordmarkBase](logo/wordmark/pgroll-base-wordmark.svg) | 8 | | ![SymbolWhite](logo/symbol/pgroll-white-symbol.svg) | ![WordmarkWhite](logo/wordmark/pgroll-white-wordmark.svg) | 9 | | ![SymbolBlack](logo/symbol/pgroll-black-symbol.svg) | ![WordmarkBlack](logo/wordmark/pgroll-black-wordmark.svg) | 10 | 11 | ## Banner 12 | 13 | The project banner is a key branding element that can be prominently featured at the top of project documentation, websites, social media profiles, and even on swag stickers. It serves as a visual representation of our project's identity and can be used to create a strong connection with our brand. Ensure that the banner is displayed at its original size to maintain clarity and visibility across various applications. 14 | 15 | ![Banner](banner/pgroll-banner.svg) -------------------------------------------------------------------------------- /brand-kit/banner/pgroll-banner-github@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xataio/pgroll/f8bf96f308e0da6b1cecbbc370f59d08e5d5ce0f/brand-kit/banner/pgroll-banner-github@2x.png -------------------------------------------------------------------------------- /brand-kit/banner/pgroll-banner@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xataio/pgroll/f8bf96f308e0da6b1cecbbc370f59d08e5d5ce0f/brand-kit/banner/pgroll-banner@2x.png -------------------------------------------------------------------------------- /cmd/analyze.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package cmd 4 | 5 | import ( 6 | "encoding/json" 7 | "fmt" 8 | 9 | "github.com/spf13/cobra" 10 | 11 | "github.com/xataio/pgroll/cmd/flags" 12 | "github.com/xataio/pgroll/pkg/state" 13 | ) 14 | 15 | var analyzeCmd = &cobra.Command{ 16 | Use: "analyze", 17 | Short: "Analyze the SQL schema of the target database", 18 | Hidden: true, 19 | Args: cobra.NoArgs, 20 | RunE: func(cmd *cobra.Command, _ []string) error { 21 | ctx := cmd.Context() 22 | state, err := state.New(ctx, flags.PostgresURL(), flags.StateSchema()) 23 | if err != nil { 24 | return err 25 | } 26 | defer state.Close() 27 | 28 | // Ensure that pgroll is initialized 29 | if err := EnsureInitialized(ctx, state); err != nil { 30 | return err 31 | } 32 | 33 | schema, err := state.ReadSchema(ctx, flags.Schema()) 34 | if err != nil { 35 | return err 36 | } 37 | 38 | schemaJSON, err := json.MarshalIndent(schema, "", " ") 39 | if err != nil { 40 | return err 41 | } 42 | 43 | fmt.Println(string(schemaJSON)) 44 | return nil 45 | }, 46 | } 47 | -------------------------------------------------------------------------------- /cmd/baseline.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package cmd 4 | 5 | import ( 6 | 
"encoding/json" 7 | "errors" 8 | "fmt" 9 | "os" 10 | 11 | "github.com/pterm/pterm" 12 | "github.com/spf13/cobra" 13 | "github.com/xataio/pgroll/pkg/migrations" 14 | ) 15 | 16 | func baselineCmd() *cobra.Command { 17 | var useJSON bool 18 | 19 | baselineCmd := &cobra.Command{ 20 | Use: "baseline ", 21 | Short: "Create a baseline migration for an existing database schema", 22 | Args: cobra.ExactArgs(2), 23 | ValidArgs: []string{"version", "directory"}, 24 | RunE: func(cmd *cobra.Command, args []string) error { 25 | version := args[0] 26 | targetDir := args[1] 27 | 28 | ctx := cmd.Context() 29 | 30 | // Create a roll instance 31 | m, err := NewRollWithInitCheck(ctx) 32 | if err != nil { 33 | return err 34 | } 35 | defer m.Close() 36 | 37 | // Ensure that the target directory exists 38 | if err := ensureDirectoryExists(targetDir); err != nil { 39 | return err 40 | } 41 | 42 | // Prompt for confirmation 43 | fmt.Println("Creating a baseline migration will restart the migration history.") 44 | ok, _ := pterm.DefaultInteractiveConfirm.Show() 45 | if !ok { 46 | return nil 47 | } 48 | 49 | // Create a placeholder baseline migration 50 | ops := migrations.Operations{&migrations.OpRawSQL{Up: ""}} 51 | opsJSON, err := json.Marshal(ops) 52 | if err != nil { 53 | return fmt.Errorf("failed to marshal operations: %w", err) 54 | } 55 | mig := &migrations.RawMigration{ 56 | Name: version, 57 | Operations: opsJSON, 58 | } 59 | 60 | // Write the placeholder migration to disk 61 | filePath, err := writeMigrationToFile(mig, targetDir, "", useJSON) 62 | if err != nil { 63 | return fmt.Errorf("failed to write placeholder baseline migration: %w", err) 64 | } 65 | 66 | sp, _ := pterm.DefaultSpinner.WithText(fmt.Sprintf("Creating baseline migration %q...", version)).Start() 67 | 68 | // Create the baseline in the target database 69 | err = m.CreateBaseline(ctx, version) 70 | if err != nil { 71 | sp.Fail(fmt.Sprintf("Failed to create baseline: %s", err)) 72 | err = errors.Join(err, os.Remove(filePath)) 73 | return err 74 | } 75 | 76 | sp.Success(fmt.Sprintf("Baseline created successfully. 
Placeholder migration %q written", filePath)) 77 | return nil 78 | }, 79 | } 80 | 81 | baselineCmd.Flags().BoolVarP(&useJSON, "json", "j", false, "output in JSON format instead of YAML") 82 | 83 | return baselineCmd 84 | } 85 | -------------------------------------------------------------------------------- /cmd/complete.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package cmd 4 | 5 | import ( 6 | "fmt" 7 | 8 | "github.com/pterm/pterm" 9 | "github.com/spf13/cobra" 10 | ) 11 | 12 | var completeCmd = &cobra.Command{ 13 | Use: "complete ", 14 | Short: "Complete an ongoing migration with the operations present in the given file", 15 | RunE: func(cmd *cobra.Command, args []string) error { 16 | // Create a roll instance and check if pgroll is initialized 17 | m, err := NewRollWithInitCheck(cmd.Context()) 18 | if err != nil { 19 | return err 20 | } 21 | defer m.Close() 22 | 23 | sp, _ := pterm.DefaultSpinner.WithText("Completing migration...").Start() 24 | err = m.Complete(cmd.Context()) 25 | if err != nil { 26 | sp.Fail(fmt.Sprintf("Failed to complete migration: %s", err)) 27 | return err 28 | } 29 | 30 | sp.Success("Migration successful!") 31 | return nil 32 | }, 33 | } 34 | -------------------------------------------------------------------------------- /cmd/convert.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package cmd 4 | 5 | import ( 6 | "bytes" 7 | "fmt" 8 | "io" 9 | "os" 10 | 11 | "github.com/spf13/cobra" 12 | "github.com/xataio/pgroll/pkg/migrations" 13 | "github.com/xataio/pgroll/pkg/sql2pgroll" 14 | ) 15 | 16 | func convertCmd() *cobra.Command { 17 | var useJSON bool 18 | 19 | convertCmd := &cobra.Command{ 20 | Use: "convert ", 21 | Short: "Convert SQL statements to a pgroll migration", 22 | Long: "Convert SQL statements to a pgroll migration. 
The command can read SQL statements from stdin or a file", 23 | Args: cobra.MaximumNArgs(1), 24 | ValidArgs: []string{"migration-file"}, 25 | RunE: func(cmd *cobra.Command, args []string) error { 26 | reader, err := openSQLReader(args) 27 | if err != nil { 28 | return fmt.Errorf("open SQL migration: %w", err) 29 | } 30 | defer reader.Close() 31 | 32 | migration, err := sqlStatementsToMigration(reader) 33 | if err != nil { 34 | return err 35 | } 36 | err = migrations.NewWriter(os.Stdout, migrations.NewMigrationFormat(useJSON)).Write(&migration) 37 | if err != nil { 38 | return fmt.Errorf("failed to write migration to stdout: %w", err) 39 | } 40 | return nil 41 | }, 42 | } 43 | 44 | convertCmd.Flags().BoolVarP(&useJSON, "json", "j", false, "Output migration file in JSON format instead of YAML") 45 | 46 | return convertCmd 47 | } 48 | 49 | func openSQLReader(args []string) (io.ReadCloser, error) { 50 | if len(args) == 0 { 51 | return os.Stdin, nil 52 | } 53 | return os.Open(args[0]) 54 | } 55 | 56 | func sqlStatementsToMigration(reader io.Reader) (migrations.Migration, error) { 57 | var buf bytes.Buffer 58 | _, err := io.Copy(&buf, reader) 59 | if err != nil { 60 | return migrations.Migration{}, err 61 | } 62 | ops, err := sql2pgroll.Convert(buf.String()) 63 | if err != nil { 64 | return migrations.Migration{}, err 65 | } 66 | return migrations.Migration{ 67 | Operations: ops, 68 | }, nil 69 | } 70 | -------------------------------------------------------------------------------- /cmd/create.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package cmd 4 | 5 | import ( 6 | "fmt" 7 | "os" 8 | 9 | "github.com/pterm/pterm" 10 | "github.com/spf13/cobra" 11 | "github.com/xataio/pgroll/pkg/migrations" 12 | ) 13 | 14 | func createCmd() *cobra.Command { 15 | var isEmpty bool 16 | var useJSON bool 17 | var name string 18 | 19 | createCmd := &cobra.Command{ 20 | Use: "create", 21 | Short: "Create a new migration interactively", 22 | RunE: func(cmd *cobra.Command, args []string) error { 23 | if name == "" { 24 | name, _ = pterm.DefaultInteractiveTextInput. 25 | WithDefaultText("Set the name of your migration"). 26 | Show() 27 | } 28 | 29 | mig := &migrations.Migration{} 30 | addMoreOperations := !isEmpty 31 | 32 | for addMoreOperations { 33 | selectedOption, _ := pterm.DefaultInteractiveSelect. 34 | WithDefaultText("Select operation"). 35 | WithOptions(migrations.AllNonDeprecatedOperations). 36 | Show() 37 | 38 | op, _ := migrations.OperationFromName(migrations.OpName(selectedOption)) 39 | mig.Operations = append(mig.Operations, op) 40 | if operation, ok := op.(migrations.Createable); ok { 41 | operation.Create() 42 | } 43 | addMoreOperations, _ = pterm.DefaultInteractiveConfirm. 44 | WithDefaultText("Add more operations"). 
45 | Show() 46 | } 47 | 48 | format := migrations.NewMigrationFormat(useJSON) 49 | migrationFileName := fmt.Sprintf("%s.%s", name, format.Extension()) 50 | file, err := os.Create(migrationFileName) 51 | if err != nil { 52 | return fmt.Errorf("failed to create migration file: %w", err) 53 | } 54 | defer file.Close() 55 | 56 | err = migrations.NewWriter(file, format).Write(mig) 57 | if err != nil { 58 | return fmt.Errorf("failed to write migration to file: %w", err) 59 | } 60 | 61 | pterm.Success.Println("Migration written to " + migrationFileName) 62 | 63 | return nil 64 | }, 65 | } 66 | createCmd.Flags().BoolVarP(&isEmpty, "empty", "e", false, "Create empty migration file") 67 | createCmd.Flags().BoolVarP(&useJSON, "json", "j", false, "Output migration file in JSON format instead of YAML") 68 | createCmd.Flags().StringVarP(&name, "name", "n", "", "Migration name") 69 | 70 | return createCmd 71 | } 72 | -------------------------------------------------------------------------------- /cmd/errors.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package cmd 4 | 5 | import "errors" 6 | 7 | var errPGRollNotInitialized = errors.New("pgroll is not initialized, run 'pgroll init' to initialize") 8 | -------------------------------------------------------------------------------- /cmd/flags/flags.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package flags 4 | 5 | import ( 6 | "github.com/spf13/viper" 7 | ) 8 | 9 | func PostgresURL() string { 10 | return viper.GetString("PG_URL") 11 | } 12 | 13 | func Schema() string { 14 | return viper.GetString("SCHEMA") 15 | } 16 | 17 | func StateSchema() string { 18 | return viper.GetString("STATE_SCHEMA") 19 | } 20 | 21 | func LockTimeout() int { 22 | return viper.GetInt("LOCK_TIMEOUT") 23 | } 24 | 25 | func SkipValidation() bool { return viper.GetBool("SKIP_VALIDATION") } 26 | 27 | func Role() string { 28 | return viper.GetString("ROLE") 29 | } 30 | 31 | func Verbose() bool { return viper.GetBool("VERBOSE") } 32 | -------------------------------------------------------------------------------- /cmd/init.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package cmd 4 | 5 | import ( 6 | "fmt" 7 | 8 | "github.com/pterm/pterm" 9 | "github.com/spf13/cobra" 10 | ) 11 | 12 | var initCmd = &cobra.Command{ 13 | Use: "init ", 14 | Short: "Initialize pgroll in the target database", 15 | RunE: func(cmd *cobra.Command, args []string) error { 16 | m, err := NewRoll(cmd.Context()) 17 | if err != nil { 18 | return err 19 | } 20 | defer m.Close() 21 | 22 | sp, _ := pterm.DefaultSpinner.WithText("Initializing pgroll...").Start() 23 | err = m.Init(cmd.Context()) 24 | if err != nil { 25 | sp.Fail(fmt.Sprintf("Failed to initialize pgroll: %s", err)) 26 | return err 27 | } 28 | 29 | sp.Success("Initialization complete") 30 | return nil 31 | }, 32 | } 33 | -------------------------------------------------------------------------------- /cmd/rollback.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package cmd 4 | 5 | import ( 6 | "fmt" 7 | 8 | "github.com/pterm/pterm" 9 | "github.com/spf13/cobra" 10 | ) 11 | 12 | var rollbackCmd = &cobra.Command{ 13 | Use: "rollback", 14 | Short: "Roll back an ongoing migration", 15 | RunE: func(cmd 
*cobra.Command, args []string) error { 16 | // Create a roll instance and check if pgroll is initialized 17 | m, err := NewRollWithInitCheck(cmd.Context()) 18 | if err != nil { 19 | return err 20 | } 21 | defer m.Close() 22 | 23 | sp, _ := pterm.DefaultSpinner.WithText("Rolling back migration...").Start() 24 | err = m.Rollback(cmd.Context()) 25 | if err != nil { 26 | sp.Fail(fmt.Sprintf("Failed to roll back migration: %s", err)) 27 | return err 28 | } 29 | 30 | sp.Success("Migration rolled back. Changes made since the last version have been reverted") 31 | return nil 32 | }, 33 | } 34 | -------------------------------------------------------------------------------- /cmd/status.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package cmd 4 | 5 | import ( 6 | "encoding/json" 7 | "fmt" 8 | 9 | "github.com/xataio/pgroll/cmd/flags" 10 | "github.com/xataio/pgroll/pkg/state" 11 | 12 | "github.com/spf13/cobra" 13 | ) 14 | 15 | var statusCmd = &cobra.Command{ 16 | Use: "status", 17 | Short: "Show pgroll status", 18 | RunE: func(cmd *cobra.Command, _ []string) error { 19 | ctx := cmd.Context() 20 | 21 | state, err := state.New(ctx, flags.PostgresURL(), flags.StateSchema()) 22 | if err != nil { 23 | return err 24 | } 25 | defer state.Close() 26 | 27 | // Ensure that pgroll is initialized 28 | if err := EnsureInitialized(ctx, state); err != nil { 29 | return err 30 | } 31 | 32 | status, err := state.Status(ctx, flags.Schema()) 33 | if err != nil { 34 | return err 35 | } 36 | 37 | statusJSON, err := json.MarshalIndent(status, "", " ") 38 | if err != nil { 39 | return err 40 | } 41 | 42 | fmt.Println(string(statusJSON)) 43 | return nil 44 | }, 45 | } 46 | -------------------------------------------------------------------------------- /cmd/validate.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package cmd 4 | 5 | import ( 6 | "os" 7 | "path/filepath" 8 | 9 | "github.com/spf13/cobra" 10 | "github.com/xataio/pgroll/pkg/migrations" 11 | ) 12 | 13 | var validateCmd = &cobra.Command{ 14 | Use: "validate ", 15 | Short: "Validate a migration file", 16 | Example: "validate migrations/03_my_migration.yaml", 17 | Args: cobra.ExactArgs(1), 18 | ValidArgs: []string{"file"}, 19 | RunE: func(cmd *cobra.Command, args []string) error { 20 | ctx := cmd.Context() 21 | fileName := args[0] 22 | 23 | m, err := NewRollWithInitCheck(ctx) 24 | if err != nil { 25 | return err 26 | } 27 | defer m.Close() 28 | 29 | migration, err := migrations.ReadMigration(os.DirFS(filepath.Dir(fileName)), filepath.Base(fileName)) 30 | if err != nil { 31 | return err 32 | } 33 | err = m.Validate(ctx, migration) 34 | if err != nil { 35 | return err 36 | } 37 | return nil 38 | }, 39 | } 40 | -------------------------------------------------------------------------------- /dev/benchmark-results/build_test.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package main 4 | 5 | import ( 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | // TestBuildChartsRegression is a simple regression test 12 | func TestBuildChartsRegression(t *testing.T) { 13 | reports, err := loadData("testdata/benchmark-results.json") 14 | assert.NoError(t, err) 15 | assert.Len(t, reports, 5) 16 | 17 | generated := generateCharts(reports) 18 | // 5 versions * 3 benchmarks 19 | assert.Len(t, 
generated, 15) 20 | } 21 | 22 | func TestTrimName(t *testing.T) { 23 | assert.Equal(t, "Test1", trimName("BenchmarkTest1/1000")) 24 | assert.Equal(t, "Test1/Case2", trimName("BenchmarkTest1/Case2/1000")) 25 | assert.Equal(t, "Test1", trimName("BenchmarkTest1")) 26 | } 27 | -------------------------------------------------------------------------------- /dev/doc.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | // Package dev contains code that is only intended for internal usage and allows us to use dependencies that 4 | // we don't want to expose to users of pgroll as a library. 5 | package dev 6 | -------------------------------------------------------------------------------- /dev/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/xataio/pgroll/dev 2 | 3 | go 1.24 4 | 5 | require ( 6 | github.com/go-echarts/go-echarts/v2 v2.4.5 7 | github.com/spf13/cobra v1.8.1 8 | github.com/stretchr/testify v1.9.0 9 | ) 10 | 11 | require ( 12 | github.com/davecgh/go-spew v1.1.1 // indirect 13 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 14 | github.com/kr/text v0.2.0 // indirect 15 | github.com/pmezard/go-difflib v1.0.0 // indirect 16 | github.com/spf13/pflag v1.0.5 // indirect 17 | gopkg.in/yaml.v3 v3.0.1 // indirect 18 | ) 19 | -------------------------------------------------------------------------------- /dev/go.sum: -------------------------------------------------------------------------------- 1 | github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 2 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 3 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 4 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 5 | github.com/go-echarts/go-echarts/v2 v2.4.5 h1:gwDqxdi5x329sg+g2ws2OklreJ1K34FCimraInurzwk= 6 | github.com/go-echarts/go-echarts/v2 v2.4.5/go.mod h1:56YlvzhW/a+du15f3S2qUGNDfKnFOeJSThBIrVFHDtI= 7 | github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= 8 | github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 9 | github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= 10 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 11 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 12 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 13 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 14 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 15 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 16 | github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= 17 | github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= 18 | github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= 19 | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 20 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 21 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 22 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 23 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= 24 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 25 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 26 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 27 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | db: 3 | image: 'postgres:17' 4 | user: postgres 5 | restart: always 6 | environment: 7 | - POSTGRES_USER=postgres 8 | - POSTGRES_PASSWORD=postgres 9 | ports: 10 | - '5432:5432' 11 | volumes: 12 | - /var/lib/postgresql/data 13 | healthcheck: 14 | test: "pg_isready -h localhost" 15 | start_period: 30s 16 | start_interval: 100ms 17 | interval: 24h 18 | timeout: 5s 19 | -------------------------------------------------------------------------------- /docs/cli/README.md: -------------------------------------------------------------------------------- 1 | # Command line reference 2 | 3 | The `pgroll` CLI has the following top-level flags: 4 | 5 | - `--postgres-url`: The URL of the Postgres instance against which migrations will be run. 6 | - `--schema`: The Postgres schema in which migrations will be run (default `"public"`). 7 | - `--pgroll-schema`: The Postgres schema in which `pgroll` will store its internal state (default: `"pgroll"`). One `--pgroll-schema` may be used safely with multiple `--schema`s. 8 | - `--lock-timeout`: The Postgres `lock_timeout` value to use for all `pgroll` DDL operations, specified in milliseconds (default `500`). 9 | - `--role`: The Postgres role to use for all `pgroll` DDL operations (default: `""`, which doesn't set any role). 10 | 11 | Each of these flags can also be set via an environment variable: 12 | 13 | - `PGROLL_PG_URL` 14 | - `PGROLL_SCHEMA` 15 | - `PGROLL_STATE_SCHEMA` 16 | - `PGROLL_LOCK_TIMEOUT` 17 | - `PGROLL_ROLE` 18 | 19 | If a flag is set via both an environment variable and a CLI flag, the CLI flag takes precedence. 20 | -------------------------------------------------------------------------------- /docs/cli/baseline.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Baseline 3 | description: Create a baseline migration for an existing database schema 4 | --- 5 | 6 | ## Command 7 | 8 | ``` 9 | $ pgroll baseline <version> <target directory> 10 | ``` 11 | 12 | This command creates a baseline migration for an existing database schema. It captures the current schema state without applying any changes, providing a starting point for future migrations. 13 | 14 | Use `pgroll baseline` when: 15 | - Starting to use pgroll with an existing database that already has a schema 16 | - You want to consolidate a long migration history into a clean starting point 17 | 18 | The command requires two arguments: 19 | 1. `version` - The version name for the baseline (e.g., "01_initial_schema") 20 | 2. `target directory` - The directory where the placeholder migration file will be written 21 | 22 | The optional `--json` flag can be used to write the placeholder migration file in JSON format instead of YAML. 23 | 24 | ### How it works 25 | 26 | When the `pgroll baseline` command is run, it: 27 | 1. Captures the current database schema state in pgroll's internal tracking 28 | 2.
Creates an empty placeholder migration file in the target directory 29 | 3. Records the baseline in `pgroll`'s internal state 30 | 31 | **Important**: After running the command, you should manually complete the placeholder migration file: 32 | 1. Use a tool like `pg_dump` to extract the DDL statements for your schema 33 | 2. Copy those statements (CREATE TABLE, CREATE INDEX, etc.) into the placeholder migration file's raw SQL section 34 | 3. This completed migration file can then be used to reconstruct the schema in other environments 35 | 36 | Future migrations will build upon this baseline. 37 | 38 | 39 | Creating a baseline will restart your migration history. The command will prompt for confirmation before proceeding. 40 | 41 | 42 | ### Effects on migration history 43 | 44 | Creating a baseline: 45 | - Creates a "reset point" in your migration history 46 | - Previous migrations become part of the baseline and are no longer individually visible 47 | - When using commands like `pull` and `migrate`, only migrations after the most recent baseline are considered 48 | 49 | ### Examples 50 | 51 | #### Create a baseline with default YAML format 52 | 53 | ``` 54 | pgroll baseline 01_initial_schema ./migrations 55 | ``` 56 | 57 | #### Create a baseline with JSON format 58 | 59 | ``` 60 | pgroll baseline 01_initial_schema ./migrations --json 61 | ``` 62 | -------------------------------------------------------------------------------- /docs/cli/complete.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Complete 3 | description: Complete a pgroll migration, removing the previous schema and leaving only the latest schema. 4 | --- 5 | 6 | ## Command 7 | 8 | ``` 9 | $ pgroll complete 10 | ``` 11 | 12 | This completes the most recently started migration. 13 | 14 | Running `pgroll complete` when there is no migration in progress is a no-op. 15 | 16 | Completing a `pgroll` migration removes the previous schema version from the database (e.g. `public_02_create_table`), leaving only the latest version of the schema (e.g. `public_03_add_column`). At this point, any temporary columns and triggers created on the affected tables in the `public` schema will also be cleaned up, leaving the table schema in its final state. Note that the real schema (e.g. `public`) should never be used directly by the client as that is not safe; instead, clients should use the schemas with versioned views (e.g. `public_03_add_column`). 17 | 18 | 19 | Before running `pgroll complete` ensure that all applications that depend on 20 | the old version of the database schema are no longer live. Prematurely running 21 | `pgroll complete` can cause downtime of old application instances that depend 22 | on the old schema. 23 | 24 | -------------------------------------------------------------------------------- /docs/cli/convert.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Convert 3 | description: Convert SQL migration files to a pgroll migration 4 | --- 5 | 6 | ## Command 7 | 8 | ``` 9 | $ pgroll convert /path/to/migration.sql 10 | ``` 11 | 12 | This reads the SQL statements from `/path/to/migration.sql` and translates them into a `pgroll` migration. The migration files are written to stdout in YAML format. 13 | 14 | The optional `--json` flag can be used to write migration files in JSON. 
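For example, converting the statement `CREATE TABLE my_table(name text);` produces a migration along the following lines. This is an illustrative sketch only: the exact fields emitted depend on the statement and on your `pgroll` version.

```
operations:
  - create_table:
      name: my_table
      columns:
        - name: name
          type: text
          nullable: true
```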
15 | 16 | ### Read SQL statements from stdin 17 | 18 | If a file name is not specified, `pgroll convert` reads the SQL statements from stdin. 19 | 20 | ``` 21 | $ echo 'CREATE TABLE my_table(name text);' | pgroll convert 22 | ``` 23 | 24 | 25 | The generated pgroll migrations might include `up` and `down` migrations. These must be filled in manually, because `pgroll` is currently unable to infer correct up and down migrations. 26 | -------------------------------------------------------------------------------- /docs/cli/create.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Create 3 | description: Create migration files interactively 4 | --- 5 | 6 | ## Command 7 | 8 | ``` 9 | $ pgroll create 10 | ``` 11 | 12 | This command helps you generate `pgroll` migrations interactively, and saves the file to disk in YAML format. 13 | 14 | The optional `--name` flag can be used to specify the name of the migration. 15 | 16 | The optional `--empty` flag can be used to generate empty migration files. 17 | 18 | The optional `--json` flag can be used to write migration files in JSON. 19 | 20 | If both `--name` and `--empty` flags are set, the command does not prompt you for anything, so it can be used to generate new migrations from scripts. 21 | 22 | 23 | ### Examples 24 | 25 | #### Generate empty YAML migrations 26 | 27 | ``` 28 | pgroll create --name my_migration --empty 29 | ``` 30 | 31 | #### Generate empty JSON migrations 32 | 33 | ``` 34 | pgroll create --name my_migration --empty --json 35 | ``` 36 | 37 | 38 | The generated migration files are not validated. Depending on the user input, the command may produce invalid migrations; for example, required fields might not be set. Please always validate the generated migration file. 39 | 40 | -------------------------------------------------------------------------------- /docs/cli/init.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Init 3 | description: Initializes pgroll for first use. 4 | --- 5 | 6 | ## Command 7 | 8 | ``` 9 | $ pgroll init 10 | ``` 11 | 12 | This will create a new schema in the database called `pgroll` (or whatever value is specified with the `--pgroll-schema` switch). 13 | 14 | The tables and functions in this schema store `pgroll`'s internal state and are not intended to be modified outside of the `pgroll` CLI. 15 | -------------------------------------------------------------------------------- /docs/cli/latest.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Latest 3 | description: Prints the latest schema version or migration name in either the target database or a local directory of migration files. 4 | --- 5 | 6 | ## Command 7 | 8 | The `pgroll latest` command has two subcommands: 9 | 10 | - `pgroll latest schema` - prints the latest schema version (migration name prefixed with schema name) 11 | - `pgroll latest migration` - prints the latest migration name (without schema prefix) 12 | 13 | Both subcommands support the `--local` flag to retrieve the latest version from a local directory of migration files instead of the target database.
14 | 15 | ### Schema Command 16 | 17 | #### Database 18 | 19 | Assuming that the [example migrations](https://github.com/xataio/pgroll/tree/main/examples) have been applied to the `public` schema in the target database, running: 20 | 21 | ``` 22 | $ pgroll latest schema 23 | ``` 24 | 25 | will print the latest schema version in the target database: 26 | 27 | ``` 28 | public_55_add_primary_key_constraint_to_table 29 | ``` 30 | 31 | #### Local 32 | 33 | Assuming that the [example migrations](https://github.com/xataio/pgroll/tree/main/examples) are on disk in a directory called `examples`, running: 34 | 35 | ``` 36 | $ pgroll latest schema --local examples/ 37 | ``` 38 | 39 | will print the latest schema version in the directory: 40 | 41 | ``` 42 | public_55_add_primary_key_constraint_to_table 43 | ``` 44 | 45 | ### Migration Command 46 | 47 | #### Database 48 | 49 | Assuming that the [example migrations](https://github.com/xataio/pgroll/tree/main/examples) have been applied to the `public` schema in the target database, running: 50 | 51 | ``` 52 | $ pgroll latest migration 53 | ``` 54 | 55 | will print the latest migration name in the target database: 56 | 57 | ``` 58 | 55_add_primary_key_constraint_to_table 59 | ``` 60 | 61 | #### Local 62 | 63 | Assuming that the [example migrations](https://github.com/xataio/pgroll/tree/main/examples) are on disk in a directory called `examples`, running: 64 | 65 | ``` 66 | $ pgroll latest migration --local examples/ 67 | ``` 68 | 69 | will print the latest migration name in the directory: 70 | 71 | ``` 72 | 55_add_primary_key_constraint_to_table 73 | ``` 74 | 75 | The exact output will vary as the `examples/` directory is updated. 76 | -------------------------------------------------------------------------------- /docs/cli/migrate.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Migrate 3 | description: Applies all outstanding migrations from a directory to the target database. 4 | --- 5 | 6 | ## Command 7 | 8 | Assuming that migrations up to and including migration `40_create_enum_type` from the [example migrations](https://github.com/xataio/pgroll/tree/main/examples) directory have been applied, running: 9 | 10 | ``` 11 | $ pgroll migrate examples/ 12 | ``` 13 | 14 | will apply migrations from `41_add_enum_column` onwards to the target database. 15 | 16 | If the `--complete` flag is passed to `pgroll migrate` the final migration to be applied will be completed. Otherwise the final migration will be left active (started but not completed). 17 | 18 | If any of the migration files are incompatible with your `pgroll` version, the command will report the errors and exit before running any migrations. 19 | 20 | ## Existing Database Schema 21 | 22 | If you attempt to run `pgroll migrate` against a database that has existing tables but no migration history, the command will fail with an error message. In this case, you should first run `pgroll baseline` to establish a baseline migration that captures the current schema state before applying any new migrations. 
23 | -------------------------------------------------------------------------------- /docs/cli/pull.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Pull 3 | description: Pull migrations from the target database into a local migrations directory 4 | --- 5 | 6 | ## Command 7 | 8 | Assuming that all [example migrations](https://github.com/xataio/pgroll/tree/main/examples) have been applied, running: 9 | 10 | ``` 11 | $ pgroll pull migrations/ 12 | ``` 13 | 14 | will write the complete schema history as `.yaml` files to the `migrations/` directory: 15 | 16 | ``` 17 | $ ls migrations/ 18 | 19 | 01_create_tables.yaml 20 | 02_create_another_table.yaml 21 | 03_add_column_to_products.yaml 22 | 04_rename_table.yaml 23 | 05_sql.yaml 24 | 06_add_column_to_sql_table.yaml 25 | ... 26 | ``` 27 | 28 | The command takes an optional `--with-prefixes` flag, which writes each filename prefixed with its position in the schema history: 29 | 30 | ``` 31 | $ ls migrations/ 32 | 33 | 0001_01_create_tables.yaml 34 | 0002_02_create_another_table.yaml 35 | 0003_03_add_column_to_products.yaml 36 | 0004_04_rename_table.yaml 37 | 0005_05_sql.yaml 38 | 0006_06_add_column_to_sql_table.yaml 39 | ... 40 | ``` 41 | 42 | The `--with-prefixes` flag ensures that files are sorted lexicographically by their time of application. 43 | 44 | Use the `--json` flag to pull migrations in JSON format rather than YAML. 45 | 46 | If the target directory given to `pgroll pull` does not exist, `pgroll pull` will create it. 47 | 48 | If the target directory is empty, `pgroll pull` will pull all migrations from the target database. If the target directory contains migration files, `pgroll pull` will pull only those migrations that don't already exist in the directory. 49 | -------------------------------------------------------------------------------- /docs/cli/rollback.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Rollback 3 | description: Roll back the currently active migration. 4 | --- 5 | 6 | ## Command 7 | 8 | ``` 9 | $ pgroll rollback 10 | ``` 11 | 12 | This rolls back the currently active migration (an active migration is one that has been started but not yet completed). 13 | 14 | Rolling back a `pgroll` migration means removing the new schema version. The old schema version remained present throughout the migration and requires no modification. 15 | 16 | Migrations cannot be rolled back once completed. Attempting to roll back a migration that has already been completed is a no-op. 17 | 18 | 19 | Before running `pgroll rollback` ensure that any new versions of applications 20 | that depend on the new database schema are no longer live. Prematurely running 21 | `pgroll rollback` can cause downtime of new application instances that depend 22 | on the new schema. 23 | 24 | -------------------------------------------------------------------------------- /docs/cli/start.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Start 3 | description: Start a pgroll migration 4 | --- 5 | 6 | ## Command 7 | 8 | ``` 9 | $ pgroll start sql/03_add_column.yaml 10 | ``` 11 | 12 | This starts the migration defined in the `sql/03_add_column.yaml` file. 13 | 14 | After starting a migration there will be two schema versions in the database: one for the old schema before the migration (e.g. `public_02_create_table`) and one for the new version with the schema changes (e.g.
`public_03_add_column`). Each of these schemas merely contains views on the tables in the `public` schema. 15 | 16 | ### Using `pgroll start` with the `--complete` flag 17 | 18 | A migration can be started and completed with one command by specifying the `--complete` flag: 19 | 20 | ``` 21 | $ pgroll start sql/03_add_column.yaml --complete 22 | ``` 23 | 24 | This is equivalent to running `pgroll start` immediately followed by `pgroll complete`. 25 | 26 | 27 | Using the `--complete` flag is appropriate only when there are no applications 28 | running against the old database schema. In most cases, the recommended 29 | workflow is to run `pgroll start`, then gracefully shut down old applications 30 | before running `pgroll complete` as a separate step. 31 | 32 | 33 | ## Existing Database Schema 34 | 35 | If you attempt to run `pgroll start` against a database that has existing tables but no migration history, the command will fail with an error message. In this case, you should first run `pgroll baseline` to establish a baseline migration that captures the current schema state before starting any new migrations. 36 | -------------------------------------------------------------------------------- /docs/cli/status.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Status 3 | description: Show the current status of pgroll within a given schema. 4 | --- 5 | 6 | ## Command 7 | 8 | ``` 9 | $ pgroll status 10 | ``` 11 | 12 | ```json 13 | { 14 | "Schema": "public", 15 | "Version": "27_drop_unique_constraint", 16 | "Status": "Complete" 17 | } 18 | ``` 19 | 20 | The status field can be one of the following values: 21 | 22 | - `"No migrations"` - no migrations have been applied in this schema yet. 23 | - `"In progress"` - a migration has been started, but not yet completed. 24 | - `"Complete"` - the most recent migration was completed. 25 | 26 | The `Version` field gives the name of the latest schema version. 27 | 28 | If a migration is `In progress` the schemas for both the latest version indicated by the `Version` field and the previous version will exist in the database. 29 | 30 | If a migration is `Complete` only the latest version of the schema will exist in the database. 31 | 32 | The top-level `--schema` flag can be used to view the status of `pgroll` in a different schema: 33 | 34 | ``` 35 | $ pgroll status --schema schema_a 36 | ``` 37 | 38 | ```json 39 | { 40 | "Schema": "schema_a", 41 | "Version": "01_create_tables", 42 | "Status": "Complete" 43 | } 44 | ``` 45 | -------------------------------------------------------------------------------- /docs/cli/validate.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Validate 3 | description: Validate a pgroll migration 4 | --- 5 | 6 | ## Command 7 | 8 | ``` 9 | $ pgroll validate sql/03_add_column.yaml 10 | ``` 11 | 12 | This validates the migration defined in the `sql/03_add_column.yaml` file. 13 | 14 | The command can detect the following errors: 15 | 16 | * syntax error in pgroll migration format 17 | * unknown/invalid configuration options and settings in the migration file 18 | * reference to unknown database objects 19 | -------------------------------------------------------------------------------- /docs/concepts.md: -------------------------------------------------------------------------------- 1 | # Concepts 2 | 3 | `pgroll` introduces a few concepts that are important to understand before using the tool. 
4 | 5 | ## Migration workflow 6 | 7 | `pgroll` migrations are applied in two steps, following an [expand/contract pattern](https://openpracticelibrary.com/practice/expand-and-contract-pattern/). 8 | 9 | ![migration flow](img/schema-changes-flow@2x.png) 10 | 11 | During the migration start phase, `pgroll` will perform only additive changes to the database schema. This includes creating new tables, adding new columns, and creating new indexes. In cases where a required change is not backwards compatible, `pgroll` will take the necessary steps to ensure that the current schema is still valid. For example, if a new column is added to a table with a `NOT NULL` constraint, `pgroll` will backfill the new column with a default value. 12 | 13 | After a successful migration start, the database will contain two versions of the schema: the old version and the new version. The old version of the schema is still available to client applications. This allows client applications to be updated to use the new version of the schema without any downtime. 14 | 15 | Once all client applications have been updated to use the latest version of the schema, the complete phase can be run. During the complete phase, `pgroll` will perform all non-additive changes to the database schema. This includes dropping tables, dropping columns, and dropping indexes, effectively breaking the old version of the schema. 16 | 17 | ## Multiple schema versions 18 | 19 | `pgroll` maintains multiple versions of the database schema side-by-side. This is achieved by creating a new Postgres schema for each migration that is applied to the database. The schema will contain views on the underlying tables. These views are used to expose different tables or columns to client applications depending on which version of the schema they are configured to use. 20 | 21 | For instance, a rename column migration will create a new schema containing a view on the underlying table with the new column name. This allows the new version of the schema to become available without breaking existing client applications that are still using the old name. In the migration complete phase, the old schema is dropped and the actual column is renamed (views are updated to point to the new column name automatically). 22 | 23 | ![multiple schema versions](img/migration-schemas@2x.png) 24 | 25 | For other, more complex changes, like adding a `NOT NULL` constraint to a column, `pgroll` will duplicate the affected column and backfill it with the values from the old one. For some time the old and new columns will coexist in the same table. This allows the new version of the schema to expose the column that fulfils the constraint, while the old version still uses the old column. `pgroll` will take care of copying the values from the old column to the new one, and vice versa, as needed, both by executing the backfill and by installing triggers to keep the columns in sync during updates. 26 | -------------------------------------------------------------------------------- /docs/getting-started.md: -------------------------------------------------------------------------------- 1 | # Welcome to pgroll's documentation 2 | 3 | `pgroll` is a migration tool for PostgreSQL that makes it easy to apply schema changes safely, without application downtime. It supports instant rollbacks and helps ensure applications stay available during database schema changes by allowing applications to access different versions of your database schema.
4 | 5 | ## Getting started 6 | 7 | Learn how `pgroll` simplifies zero-downtime migrations for you 8 | 9 | * [Why use pgroll](why-use-pgroll) 10 | * [Installation](installation) 11 | * [Concepts](concepts) 12 | * [Write your first migration](tutorial) 13 | 14 | ## Guides 15 | 16 | Learn how to use `pgroll` in your developer workflow 17 | 18 | * [Integrate pgroll into your project](guides/clientapps) 19 | * [Use pgroll with ORMs](guides/orms) 20 | * [Writing up and down migrations](guides/updown) 21 | 22 | ## Connect with us 23 | 24 | If you have questions, reach out to us on our 25 | * [Discord server](https://xata.io/discord) 26 | * [GitHub Discussions board](https://github.com/xataio/pgroll/discussions) 27 | 28 | If you want to report issues or submit feedback, visit 29 | * [GitHub Issues](https://github.com/xataio/pgroll/issues) 30 | 31 | ## Supported Postgres versions 32 | 33 | `pgroll` supports Postgres versions >= 14. 34 | 35 | :warning: In Postgres 14, row level security policies on tables are not respected by `pgroll`'s versioned views. This is because `pgroll` is unable to create the views with the `(security_invoker = true)` option, as the ability to do so was added in Postgres 15. If you use RLS in Postgres 14, `pgroll` is likely a poor choice of migration tool. All other `pgroll` features are fully supported across all supported Postgres versions. 36 | 37 | -------------------------------------------------------------------------------- /docs/guides/README.md: -------------------------------------------------------------------------------- 1 | # Guides to using pgroll 2 | -------------------------------------------------------------------------------- /docs/img/migration-schemas@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xataio/pgroll/f8bf96f308e0da6b1cecbbc370f59d08e5d5ce0f/docs/img/migration-schemas@2x.png -------------------------------------------------------------------------------- /docs/img/schema-changes-flow@2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xataio/pgroll/f8bf96f308e0da6b1cecbbc370f59d08e5d5ce0f/docs/img/schema-changes-flow@2x.png -------------------------------------------------------------------------------- /docs/install-pgroll.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | ## Binaries 4 | 5 | Binaries are available for Linux, macOS & Windows on our [Releases](https://github.com/xataio/pgroll/releases) page. 6 | 7 | ## From source 8 | 9 | To install `pgroll` from source, run the following command: 10 | 11 | ```sh 12 | go install github.com/xataio/pgroll@latest 13 | ``` 14 | 15 | Note: requires [Go 1.24](https://golang.org/doc/install) or later. 16 | 17 | ## From package manager - Homebrew 18 | 19 | To install `pgroll` with Homebrew, run the following command: 20 | 21 | ```sh 22 | # macOS or Linux 23 | brew tap xataio/pgroll 24 | brew install pgroll 25 | ``` 26 | 27 | -------------------------------------------------------------------------------- /docs/operations/README.md: -------------------------------------------------------------------------------- 1 | # Operations reference 2 | 3 | `pgroll` migrations are specified as YAML or JSON files. All migrations follow the same basic structure: 4 | 5 | YAML migration: 6 | 7 | ```yaml 8 | operations: [...] 9 | ``` 10 | 11 | JSON migration: 12 | 13 | ```json 14 | { 15 | "operations": [...]
16 | } 17 | ``` 18 | 19 | -------------------------------------------------------------------------------- /docs/operations/alter_column/README.md: -------------------------------------------------------------------------------- 1 | # Alter column 2 | 3 | An alter column operation alters the properties of a column. The operation supports several sub-operations, described below. 4 | 5 | An alter column operation may contain multiple sub-operations. For example, a single alter column operation may change a column's type and add a check constraint. 6 | -------------------------------------------------------------------------------- /docs/operations/alter_column/add_check_constraint.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Add check constraint 3 | description: An add check constraint operation adds a `CHECK` constraint to a column. 4 | --- 5 | 6 | ## Structure 7 | 8 | 9 | ```yaml 10 | alter_column: 11 | table: table name 12 | column: column name 13 | check: 14 | name: check constraint name 15 | constraint: constraint expression 16 | up: SQL expression 17 | down: SQL expression 18 | ``` 19 | ```json 20 | { 21 | "alter_column": { 22 | "table": "table name", 23 | "column": "column name", 24 | "check": { 25 | "name": "check constraint name", 26 | "constraint": "constraint expression" 27 | }, 28 | "up": "SQL expression", 29 | "down": "SQL expression" 30 | } 31 | } 32 | ``` 33 | 34 | 35 | The `up` SQL expression is used to migrate values from the column in the old schema version that aren't subject to the constraint to values in the new schema version that are subject to the constraint. 36 | 37 | ## Examples 38 | 39 | ### Add a `CHECK` constraint 40 | 41 | Add a `CHECK` constraint to the `title` column in the `posts` table. 42 | 43 | The `up` SQL migrates values to ensure they meet the constraint. The `down` SQL copies values without modification from the column in the new schema version to the column in the old schema version: 44 | 45 | 46 | -------------------------------------------------------------------------------- /docs/operations/alter_column/add_foreign_key.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Add foreign key 3 | description: Add a foreign key constraint to a column. 4 | --- 5 | 6 | ## Structure 7 | 8 | 9 | ```yaml 10 | alter_column: 11 | table: table name 12 | column: column name 13 | references: 14 | name: name of foreign key reference 15 | table: name of referenced table 16 | column: name of referenced column 17 | on_delete: ON DELETE behaviour, can be CASCADE, SET NULL, RESTRICT, or NO ACTION. Default is NO ACTION 18 | up: SQL expression 19 | down: SQL expression 20 | ``` 21 | ```json 22 | { 23 | "alter_column": { 24 | "table": "table name", 25 | "column": "column name", 26 | "references": { 27 | "name": "name of foreign key reference", 28 | "table": "name of referenced table", 29 | "column": "name of referenced column", 30 | "on_delete": "ON DELETE behaviour, can be CASCADE, SET NULL, RESTRICT, or NO ACTION.
Default is NO ACTION" 31 | }, 32 | "up": "SQL expression", 33 | "down": "SQL expression" 34 | } 35 | } 36 | ``` 37 | 38 | 39 | ## Examples 40 | 41 | ### Add a foreign key constraint 42 | 43 | Add a `FOREIGN KEY` constraint to the `user_id` column in the `posts` table: 44 | 45 | 46 | -------------------------------------------------------------------------------- /docs/operations/alter_column/add_not_null_constraint.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Add not null constraint 3 | description: Add not null operations add a `NOT NULL` constraint to a column. 4 | --- 5 | 6 | ## Structure 7 | 8 | 9 | ```yaml 10 | alter_column: 11 | table: table name 12 | column: column name 13 | nullable: false 14 | up: SQL expression 15 | down: SQL expression 16 | ``` 17 | ```json 18 | { 19 | "alter_column": { 20 | "table": "table name", 21 | "column": "column name", 22 | "nullable": false, 23 | "up": "SQL expression", 24 | "down": "SQL expression" 25 | } 26 | } 27 | ``` 28 | 29 | 30 | Use `up` to migrate values from the nullable column in the old schema view to the `NOT NULL` column in the new schema version. `down` is used to migrate values in the other direction. 31 | 32 | ## Examples 33 | 34 | ### Add a `NOT NULL` constraint 35 | 36 | Add a `NOT NULL` constraint to the `review` column in the `reviews` table. 37 | 38 | 39 | -------------------------------------------------------------------------------- /docs/operations/alter_column/add_unique_constraint.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Add unique constraint 3 | description: Add unique operations add a `UNIQUE` constraint to a column. 4 | --- 5 | 6 | ## Structure 7 | 8 | 9 | ```yaml 10 | alter_column: 11 | table: table name 12 | column: column name 13 | unique: 14 | name: name of unique constraint 15 | up: SQL expression 16 | down: SQL expression 17 | ``` 18 | ```json 19 | { 20 | "alter_column": { 21 | "table": "table name", 22 | "column": "column name", 23 | "unique": { 24 | "name": "name of unique constraint" 25 | }, 26 | "up": "SQL expression", 27 | "down": "SQL expression" 28 | } 29 | } 30 | ``` 31 | 32 | 33 | Use the `up` SQL expression to migrate values from the old non-unique column in the old schema to the `UNIQUE` column in the new schema. 34 | 35 | ## Examples 36 | 37 | ### Add a `UNIQUE` constraint 38 | 39 | Add a `UNIQUE` constraint to the `review` column in the `reviews` table. The `up` SQL appends a random suffix to ensure uniqueness: 40 | 41 | 42 | -------------------------------------------------------------------------------- /docs/operations/alter_column/change_comment.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Change comment 3 | description: A change comment operation changes the comment on a column. 4 | --- 5 | 6 | ## Structure 7 | 8 | 9 | ```yaml 10 | alter_column: 11 | table: table name 12 | column: column name 13 | comment: new comment for column | null 14 | up: SQL expression 15 | down: SQL expression 16 | ``` 17 | ```json 18 | { 19 | "alter_column": { 20 | "table": "table name", 21 | "column": "column name", 22 | "comment": "new comment for column" | null, 23 | "up": "SQL expression", 24 | "down": "SQL expression" 25 | } 26 | } 27 | ``` 28 | 29 | 30 | The comment is added directly to the column on migration start. 
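For example, here is a sketch of a migration that removes a column comment by setting it to `null` (mirroring `examples/36_set_comment_to_null.yaml` from this repository):

```yaml
operations:
  - alter_column:
      table: events
      column: name
      comment: null # a null comment removes any existing comment
      up: name # values are copied through unchanged in both directions
      down: name
```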
31 | 32 | ## Examples 33 | 34 | ### Alter many column properties 35 | 36 | An alter column migration performs many operations, including setting a comment: 37 | 38 | 39 | 40 | ### Remove a comment 41 | 42 | To remove a comment from a column set `comment` to `NULL`: 43 | 44 | 45 | -------------------------------------------------------------------------------- /docs/operations/alter_column/change_default.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Change default 3 | description: A change default operation changes the default value of a column. 4 | --- 5 | 6 | ## Structure 7 | 8 | 9 | ```yaml 10 | alter_column: 11 | table: table name 12 | column: column name 13 | default: new default value | null 14 | up: SQL expression 15 | down: SQL expression 16 | ``` 17 | ```json 18 | { 19 | "alter_column": { 20 | "table": "table name", 21 | "column": "column name", 22 | "default": "new default value" | null, 23 | "up": "SQL expression", 24 | "down": "SQL expression" 25 | } 26 | } 27 | ``` 28 | 29 | 30 | The `default` expression is subject to the usual SQL quoting rules. In particular, string literals should be surrounded with `''`. 31 | 32 | To remove a column default, set the `default` field to `NULL`. 33 | 34 | ## Examples 35 | 36 | ### Make multiple column changes 37 | 38 | An alter column migration that makes multiple changes including setting the default: 39 | 40 | 41 | 42 | ### Drop a column default 43 | 44 | Drop a default by setting the `default` field to `null`. 45 | 46 | 47 | -------------------------------------------------------------------------------- /docs/operations/alter_column/change_type.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Change type 3 | description: A change type operation changes the type of a column. 4 | --- 5 | 6 | ## Structure 7 | 8 | 9 | ```yaml 10 | alter_column: 11 | table: table name 12 | column: column name 13 | type: new type of column 14 | up: SQL expression 15 | down: SQL expression 16 | ``` 17 | ```json 18 | { 19 | "alter_column": { 20 | "table": "table name", 21 | "column": "column name", 22 | "type": "new type of column", 23 | "up": "SQL expression", 24 | "down": "SQL expression" 25 | } 26 | } 27 | ``` 28 | 29 | 30 | Use the `up` SQL expression to do data conversion from the old column type to the new type. In the old schema version, the column will have its old data type; in the new version the column will have its new type. 31 | 32 | Use the `down` SQL expression to do data conversion in the other direction; from the new data type back to the old. 33 | 34 | ## Examples 35 | 36 | ### Change column type 37 | 38 | Change the type of the `rating` column on the `reviews` table: 39 | 40 | 41 | -------------------------------------------------------------------------------- /docs/operations/alter_column/drop_not_null_constraint.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Drop not null constraint 3 | description: Drop not null operations drop a `NOT NULL` constraint from a column. 
4 | --- 5 | 6 | ## Structure 7 | 8 | 9 | ```yaml 10 | alter_column: 11 | table: table name 12 | column: column name 13 | nullable: true 14 | up: SQL expression 15 | down: SQL expression 16 | ``` 17 | ```json 18 | { 19 | "alter_column": { 20 | "table": "table name", 21 | "column": "column name", 22 | "nullable": true, 23 | "up": "SQL expression", 24 | "down": "SQL expression" 25 | } 26 | } 27 | ``` 28 | 29 | 30 | ## Examples 31 | 32 | ### Remove `NOT NULL` from a column 33 | 34 | Remove `NOT NULL` from the `title` column in the `posts` table: 35 | 36 | 37 | -------------------------------------------------------------------------------- /docs/operations/create_index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Create index 3 | description: A create index operation creates a new index on a set of columns. 4 | --- 5 | 6 | ## Structure 7 | 8 | 9 | ```yaml 10 | create_index: 11 | table: name of table on which to define the index 12 | name: index name 13 | columns: 14 | column_name: 15 | collate: collation name 16 | sort: ASC | DESC 17 | nulls: FIRST | LAST 18 | opclass: 19 | name: operator_class_name 20 | params: 21 | - param1=val 22 | - param2=val 23 | predicate: conditional expression for defining a partial index 24 | storage_parameters: comma-separated list of storage parameters 25 | unique: true | false 26 | method: btree 27 | ``` 28 | ```json 29 | { 30 | "create_index": { 31 | "table": "name of table on which to define the index", 32 | "name": "index name", 33 | "columns": { 34 | "column_name": { 35 | "collate": "collation name", 36 | "sort": "ASC | DESC", 37 | "nulls": "FIRST | LAST", 38 | "opclass": { 39 | "name": "operator_class_name", 40 | "params": [ 41 | "param1=val", 42 | "param2=val" 43 | ] 44 | } 45 | } 46 | }, 47 | "predicate": "conditional expression for defining a partial index", 48 | "storage_parameters": "comma-separated list of storage parameters", 49 | "unique": true | false, 50 | "method": "btree" 51 | } 52 | } 53 | ``` 54 | 55 | 56 | * The field `method` can be `btree`, `hash`, `gist`, `spgist`, `gin`, `brin`. 57 | * You can also specify storage parameters for the index in `storage_parameters`. 58 | * To create a unique index, set `unique` to `true`. 59 | 60 | ## Examples 61 | 62 | ### Create a `btree` index 63 | 64 | Create a `btree` index on the `name` column in the `fruits` table: 65 | 66 | 67 | 68 | ### Create a partial index 69 | 70 | Create a partial index on the `id` column in the `fruits` table: 71 | 72 | 73 | 74 | ### Create an index with storage parameters 75 | 76 | Set storage parameters and index method: 77 | 78 | 82 | 83 | ### Create a unique index 84 | 85 | Create a unique index: 86 | 87 | 88 | 89 | ### Create an index with custom operator class 90 | 91 | Create an index with a custom operator class: 92 | 93 | 94 | -------------------------------------------------------------------------------- /docs/operations/drop_column.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Drop column 3 | description: A drop column operation drops a column from an existing table.
4 | --- 5 | 6 | ## Structure 7 | 8 | 9 | ```yaml 10 | drop_column: 11 | table: name of table 12 | column: name of column to drop 13 | down: SQL expression 14 | ``` 15 | ```json 16 | { 17 | "drop_column": { 18 | "table": "name of table", 19 | "column": "name of column to drop", 20 | "down": "SQL expression" 21 | } 22 | } 23 | ``` 24 | 25 | 26 | The `down` field above is required in order to backfill the previous version of the schema during an active migration. 27 | 28 | ## Examples 29 | 30 | ### Drop a column 31 | 32 | Drop a column - if a new row is inserted against the new schema without a `price` column, the old schema `price` column will be set to `0`. 33 | 34 | 35 | -------------------------------------------------------------------------------- /docs/operations/drop_constraint.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Drop constraint 3 | description: A drop constraint operation drops a single-column constraint from an existing table. 4 | --- 5 | 6 | 7 | The **drop constraint** operation is deprecated. Please use the [drop 8 | multi-column constraint](#drop-multi-column-constraint) operation instead. The 9 | **drop_constraint** operation will be removed in a future release of `pgroll`. 10 | 11 | 12 | ## Structure 13 | 14 | Only `CHECK`, `FOREIGN KEY`, and `UNIQUE` constraints can be dropped. 15 | 16 | 17 | ```yaml 18 | drop_constraint: 19 | table: name of table 20 | name: name of constraint to drop 21 | up: SQL expression 22 | down: SQL expression 23 | ``` 24 | ```json 25 | { 26 | "drop_constraint": { 27 | "table": "name of table", 28 | "name": "name of constraint to drop", 29 | "up": "SQL expression", 30 | "down": "SQL expression" 31 | } 32 | } 33 | ``` 34 | 35 | 36 | ## Examples 37 | 38 | ### Drop a `CHECK` constraint: 39 | 40 | Drop a `CHECK` constraint: 41 | 42 | 43 | 44 | ### Drop a `FOREIGN KEY` constraint: 45 | 46 | Drop a `FOREIGN KEY` constraint: 47 | 48 | 49 | 50 | ### Drop a `UNIQUE` constraint: 51 | 52 | Drop a `UNIQUE` constraint: 53 | 54 | 55 | -------------------------------------------------------------------------------- /docs/operations/drop_index.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Drop index 3 | description: A drop index operation drops an index from a table. 4 | --- 5 | 6 | ## Structure 7 | 8 | 9 | ```yaml 10 | drop_index: 11 | name: name of index to drop 12 | ``` 13 | ```json 14 | { 15 | "drop_index": { 16 | "name": "name of index to drop" 17 | } 18 | } 19 | ``` 20 | 21 | 22 | ## Examples 23 | 24 | ### Drop an index 25 | 26 | Drop an index defined on the `fruits` table: 27 | 28 | 29 | -------------------------------------------------------------------------------- /docs/operations/drop_multi_column_constraint.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Drop multi-column constraint 3 | description: A drop constraint operation drops a multi-column constraint from an existing table. 4 | --- 5 | 6 | ## Structure 7 | 8 | Only `CHECK`, `FOREIGN KEY`, and `UNIQUE` constraints can be dropped. 9 | 10 | 11 | ```yaml 12 | drop_multicolumn_constraint: 13 | table: name of table 14 | name: name of constraint to drop 15 | up: 16 | column1: up SQL expressions for each column covered by the constraint 17 | ... 18 | down: 19 | column1: down SQL expressions for each column covered by the constraint 20 | ... 
21 | ``` 22 | ```json 23 | { 24 | "drop_multicolumn_constraint": { 25 | "table": "name of table", 26 | "name": "name of constraint to drop", 27 | "up": { 28 | "column1": "up SQL expressions for each column covered by the constraint", 29 | ... 30 | }, 31 | "down": { 32 | "column1": "down SQL expressions for each column covered by the constraint", 33 | ... 34 | } 35 | } 36 | } 37 | ``` 38 | 39 | 40 | This operation can also be used to drop single-column constraints and replaces the deprecated [drop constraint](#drop-constraint) operation. 41 | 42 | An `up` and `down` SQL expression is required for each column covered by the constraint, and no other column names are permitted. For example, when dropping a constraint covering columns `a` and `b`, the `up` and `down` fields should look like: 43 | 44 | 45 | ```yaml 46 | up: 47 | a: up SQL expression for column a 48 | b: up SQL expression for column b 49 | down: 50 | a: down SQL expression for column a 51 | b: down SQL expression for column b 52 | ``` 53 | ```json 54 | { 55 | "up": { 56 | "a": "up SQL expression for column a", 57 | "b": "up SQL expression for column b" 58 | }, 59 | "down": { 60 | "a": "down SQL expression for column a", 61 | "b": "down SQL expression for column b" 62 | } 63 | } 64 | ``` 65 | 66 | 67 | The new versions of the columns will no longer have the constraint, but in the old view of the table the columns will still be covered by the constraint; the `down` expressions should therefore be used to ensure that the combination of values meets the constraint being dropped. 68 | 69 | ## Examples 70 | 71 | ### Drop a `CHECK` constraint 72 | 73 | Drop a `CHECK` constraint defined on the `tickets` table. 74 | 75 | 76 | -------------------------------------------------------------------------------- /docs/operations/drop_table.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Drop table 3 | description: A drop table operation drops a table. 4 | --- 5 | 6 | ## Structure 7 | 8 | 9 | ```yaml 10 | drop_table: 11 | name: name of table to drop 12 | ``` 13 | ```json 14 | { 15 | "drop_table": { 16 | "name": "name of table to drop" 17 | } 18 | } 19 | ``` 20 | 21 | 22 | The table is not visible in the new version of the schema created when the migration is started, but remains visible to the old version of the schema. The table is dropped on migration completion. 23 | 24 | ## Examples 25 | 26 | ### Drop a table 27 | 28 | Drop the products table: 29 | 30 | 31 | -------------------------------------------------------------------------------- /docs/operations/raw_sql.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Raw SQL 3 | description: A raw SQL operation runs arbitrary SQL against the database. 4 | --- 5 | 6 | 7 | `pgroll` is unable to guarantee that raw SQL migrations are safe and will not 8 | result in application downtime. 9 | 10 | 11 | ## Structure 12 | 13 | This is intended as an 'escape hatch' to allow a migration to perform operations that are otherwise not supported by `pgroll`. 14 | 15 | 16 | ```yaml 17 | sql: 18 | up: SQL expression 19 | down: SQL expression 20 | ``` 21 | ```json 22 | { 23 | "sql": { 24 | "up": "SQL expression", 25 | "down": "SQL expression" 26 | } 27 | } 28 | ``` 29 | 30 | 31 | By default, a `sql` operation cannot run together with other operations in the same migration. This is to ensure `pgroll` can correctly track the state of the database. However, it is possible to run a `sql` operation together with other operations by setting the `onComplete` flag to `true`. 32 | 33 | The `onComplete` flag will make this operation run the `up` expression on the complete phase (instead of the default, which is to run it on the start phase). 34 | 35 | The `onComplete` flag is incompatible with the `down` expression, as `pgroll` does not support running a rollback after the complete phase has been executed. 36 | 37 | 38 | ```yaml 39 | sql: 40 | up: SQL expression 41 | onComplete: true 42 | ``` 43 | ```json 44 | { 45 | "sql": { 46 | "up": "SQL expression", 47 | "onComplete": true 48 | } 49 | } 50 | ``` 51 | 52 | 53 | 54 | 55 | The `down` migration must be idempotent. When an `up` migration fails, `pgroll` automatically runs the corresponding `down` migration to clean up leftover objects. If the `down` migration is not idempotent (does not contain `IF EXISTS`), the rollback will fail. 56 | 57 | 58 | ## Examples 59 | 60 | ### Create a table with a raw SQL migration 61 | 62 | A raw SQL migration to create a table: 63 | 64 | 65 | 66 | ### Run a SQL migration on migration complete 67 | 68 | A raw SQL migration run on migration completion rather than start: 69 | 70 | 71 | -------------------------------------------------------------------------------- /docs/operations/rename_column.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Rename column 3 | description: A rename column operation renames a column. 4 | --- 5 | 6 | ## Structure 7 | 8 | ```json 9 | { 10 | "rename_column": { 11 | "table": "table name", 12 | "column": "old column name", 13 | "name": "new column name" 14 | } 15 | } 16 | ``` 17 | 18 | In the new schema version, the column will have its new name. In the old schema version the column still has its old name. 19 | 20 | ## Examples 21 | 22 | ### Rename a column 23 | 24 | Rename the `role` column in the `employees` table: 25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/operations/rename_constraint.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Rename constraint 3 | description: A rename constraint operation renames a constraint. 4 | --- 5 | 6 | ## Structure 7 | 8 | ```json 9 | { 10 | "rename_constraint": { 11 | "table": "table name", 12 | "from": "old constraint name", 13 | "to": "new constraint name" 14 | } 15 | } 16 | ``` 17 | 18 | The constraint is renamed on migration completion; it retains its old name during the active migration period. 19 | 20 | ## Examples 21 | 22 | ### Rename a `CHECK` constraint 23 | 24 | Rename a `CHECK` constraint: 25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/operations/rename_table.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Rename table 3 | description: A rename table operation renames a table. 4 | --- 5 | 6 | ## Structure 7 | 8 | ```json 9 | { 10 | "rename_table": { 11 | "from": "old table name", 12 | "to": "new table name" 13 | } 14 | } 15 | ``` 16 | 17 | The table is accessible by its old name in the old version of the schema, and by its new name in the new version of the schema. 18 | 19 | The table itself is renamed on migration completion.
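For instance, here is the JSON equivalent of `examples/04_rename_table.yaml` from this repository, which renames the `customers` table to `clients`:

```json
{
  "operations": [
    {
      "rename_table": {
        "from": "customers",
        "to": "clients"
      }
    }
  ]
}
```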
20 | 21 | ## Examples 22 | 23 | ### Rename a table 24 | 25 | Rename the customers table: 26 | 27 | 28 | -------------------------------------------------------------------------------- /docs/operations/set_replica_identity.mdx: -------------------------------------------------------------------------------- 1 | --- 2 | title: Set replica identity 3 | description: A set replica identity operation sets the replica identity for a table. 4 | --- 5 | 6 | 7 | The **set replica identity** operation is deprecated and will be removed in a 8 | future release. Please use a [raw SQL operation](#raw-sql) to set the replica 9 | identity of a table instead. 10 | 11 | 12 | ## Structure 13 | 14 | ```json 15 | { 16 | "set_replica_identity": { 17 | "table": "name of the table", 18 | "identity": { 19 | "type": "full | default | nothing | index", 20 | "index": "name of the index, if type is 'index'" 21 | } 22 | } 23 | } 24 | ``` 25 | 26 | 27 | A **set replica identity** operation is applied directly to the underlying 28 | table on migration start. This means that both versions of the table exposed 29 | in the old and new version schemas will have the new replica identity set. 30 | 31 | 32 | ## Examples 33 | 34 | ### Set replica identity 35 | 36 | 37 | -------------------------------------------------------------------------------- /docs/why-use-pgroll.md: -------------------------------------------------------------------------------- 1 | # Why use pgroll? 2 | 3 | [pgroll](https://pgroll.com/) is a schema migration tool for Postgres. It is designed for application developers working on applications that require frequent schema changes but also need to maintain zero downtime around those schema changes. `pgroll` takes a different approach compared to most other migration tools on the market. 4 | 5 | There are two aspects that characterize `pgroll`'s approach to migrations. 6 | 7 | ## Multi-version migrations 8 | 9 | Making a schema change with `pgroll` results in two versions of the schema: the one before the change and the one after the change. This allows applications to select which version of the schema they want to work with, and enables the side-by-side rollout of applications that require the new schema changes alongside old applications that may be incompatible with them. 10 | 11 | ## Lock-safe migrations 12 | 13 | Migrations using `pgroll` are expressed declaratively, rather than using SQL directly. This allows `pgroll` to implement the steps required to perform the schema change in a safe manner, ensuring that any locks required on the affected objects are held for the shortest possible time. 14 | 15 | If you want to avoid worrying about schema changes, install `pgroll` and create your next migration with us.
16 | -------------------------------------------------------------------------------- /examples/.ledger: -------------------------------------------------------------------------------- 1 | 01_create_tables.yaml 2 | 02_create_another_table.yaml 3 | 03_add_column.yaml 4 | 04_rename_table.yaml 5 | 05_sql.yaml 6 | 06_add_column_to_sql_table.yaml 7 | 07_drop_table.yaml 8 | 08_create_fruits_table.yaml 9 | 09_drop_column.yaml 10 | 10_create_index.yaml 11 | 11_drop_index.yaml 12 | 12_create_employees_table.yaml 13 | 13_rename_column.yaml 14 | 14_add_reviews_table.yaml 15 | 15_set_column_unique.yaml 16 | 16_set_nullable.yaml 17 | 17_add_rating_column.yaml 18 | 18_change_column_type.yaml 19 | 19_create_orders_table.yaml 20 | 20_create_posts_table.yaml 21 | 21_add_foreign_key_constraint.yaml 22 | 22_add_check_constraint.yaml 23 | 23_drop_check_constraint.yaml 24 | 24_drop_foreign_key_constraint.yaml 25 | 25_add_table_with_check_constraint.yaml 26 | 26_add_column_with_check_constraint.yaml 27 | 27_drop_unique_constraint.yaml 28 | 28_different_defaults.yaml 29 | 29_set_replica_identity.yaml 30 | 30_add_column_simple_up.yaml 31 | 31_unset_not_null.yaml 32 | 32_sql_on_complete.yaml 33 | 33_rename_check_constraint.yaml 34 | 34_create_events_table.yaml 35 | 35_alter_column_multiple.yaml 36 | 36_set_comment_to_null.yaml 37 | 37_create_partial_index.yaml 38 | 38_create_hash_index_with_fillfactor.yaml 39 | 39_add_column_with_multiple_pk_in_table.yaml 40 | 40_create_enum_type.yaml 41 | 41_add_enum_column.yaml 42 | 42_create_unique_index.yaml 43 | 43_create_tickets_table.yaml 44 | 44_add_table_unique_constraint.yaml 45 | 45_add_table_check_constraint.yaml 46 | 46_alter_column_drop_default.yaml 47 | 47_add_table_foreign_key_constraint.yaml 48 | 48_drop_tickets_check.yaml 49 | 49_unset_not_null_on_indexed_column.yaml 50 | 50_create_table_with_table_constraint.yaml 51 | 51_create_table_with_table_foreign_key_constraint.yaml 52 | 52_create_table_with_exclusion_constraint.yaml 53 | 53_add_column_with_volatile_default.yaml 54 | 54_create_index_with_opclass.yaml 55 | 55_add_primary_key_constraint_to_table.yaml 56 | -------------------------------------------------------------------------------- /examples/01_create_tables.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: customers 4 | columns: 5 | - name: id 6 | type: integer 7 | pk: true 8 | - name: name 9 | type: varchar(255) 10 | unique: true 11 | - name: credit_card 12 | type: text 13 | nullable: true 14 | - create_table: 15 | name: bills 16 | columns: 17 | - name: id 18 | type: integer 19 | pk: true 20 | - name: date 21 | type: time with time zone 22 | - name: quantity 23 | type: integer 24 | - create_table: 25 | name: sellers 26 | columns: 27 | - name: name 28 | type: varchar(255) 29 | pk: true 30 | - name: zip 31 | type: integer 32 | pk: true 33 | - name: description 34 | type: varchar(255) 35 | nullable: true 36 | -------------------------------------------------------------------------------- /examples/02_create_another_table.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: products 4 | columns: 5 | - name: id 6 | type: serial 7 | pk: true 8 | - name: name 9 | type: varchar(255) 10 | unique: true 11 | - name: price 12 | type: decimal(10,2) 13 | -------------------------------------------------------------------------------- /examples/03_add_column.yaml: 
-------------------------------------------------------------------------------- 1 | operations: 2 | - add_column: 3 | table: products 4 | up: UPPER(name) 5 | column: 6 | name: description 7 | type: varchar(255) 8 | nullable: true 9 | - add_column: 10 | table: products 11 | column: 12 | name: stock 13 | type: int 14 | nullable: false 15 | default: "100" 16 | - add_column: 17 | table: products 18 | up: name || '-category' 19 | column: 20 | name: category 21 | type: varchar(255) 22 | nullable: false 23 | -------------------------------------------------------------------------------- /examples/04_rename_table.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - rename_table: 3 | from: customers 4 | to: clients 5 | -------------------------------------------------------------------------------- /examples/05_sql.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - sql: 3 | up: CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT) 4 | down: DROP TABLE users 5 | -------------------------------------------------------------------------------- /examples/06_add_column_to_sql_table.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - add_column: 3 | table: users 4 | up: UPPER(name) 5 | column: 6 | name: description 7 | type: varchar(255) 8 | nullable: true 9 | -------------------------------------------------------------------------------- /examples/07_drop_table.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - drop_table: 3 | name: products 4 | -------------------------------------------------------------------------------- /examples/08_create_fruits_table.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: fruits 4 | columns: 5 | - name: id 6 | type: bigint 7 | pk: true 8 | generated: 9 | identity: 10 | user_specified_values: BY DEFAULT 11 | - name: name 12 | type: varchar(255) 13 | unique: true 14 | - name: price 15 | type: decimal(10,2) 16 | -------------------------------------------------------------------------------- /examples/09_drop_column.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - drop_column: 3 | table: fruits 4 | column: price 5 | down: "0" 6 | -------------------------------------------------------------------------------- /examples/10_create_index.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_index: 3 | name: idx_fruits_name 4 | table: fruits 5 | columns: 6 | name: {} 7 | -------------------------------------------------------------------------------- /examples/11_drop_index.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - drop_index: 3 | name: idx_fruits_name 4 | -------------------------------------------------------------------------------- /examples/12_create_employees_table.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: employees 4 | comment: This is a comment for the employees table 5 | columns: 6 | - name: id 7 | type: serial 8 | pk: true 9 | - name: role 10 | type: varchar(255) 11 | comment: This is a comment for the role column 12 | 
-------------------------------------------------------------------------------- /examples/13_rename_column.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - rename_column: 3 | table: employees 4 | from: role 5 | to: job_title 6 | -------------------------------------------------------------------------------- /examples/14_add_reviews_table.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: reviews 4 | columns: 5 | - name: id 6 | type: serial 7 | pk: true 8 | - name: username 9 | type: text 10 | - name: product 11 | type: text 12 | - name: review 13 | type: text 14 | nullable: true 15 | -------------------------------------------------------------------------------- /examples/15_set_column_unique.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - alter_column: 3 | table: reviews 4 | column: review 5 | unique: 6 | name: reviews_review_unique 7 | up: review || '-' || (random()*1000000)::integer 8 | down: review 9 | -------------------------------------------------------------------------------- /examples/16_set_nullable.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - alter_column: 3 | table: reviews 4 | column: review 5 | nullable: false 6 | up: SELECT CASE WHEN review IS NULL THEN product || ' is good' ELSE review END 7 | down: review 8 | -------------------------------------------------------------------------------- /examples/17_add_rating_column.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - add_column: 3 | table: reviews 4 | column: 5 | name: rating 6 | type: text 7 | default: "0" 8 | -------------------------------------------------------------------------------- /examples/18_change_column_type.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - alter_column: 3 | table: reviews 4 | column: rating 5 | type: integer 6 | up: CAST(rating AS integer) 7 | down: CAST(rating AS text) 8 | -------------------------------------------------------------------------------- /examples/19_create_orders_table.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: orders 4 | columns: 5 | - name: id 6 | type: serial 7 | pk: true 8 | - name: user_id 9 | type: integer 10 | references: 11 | name: fk_users_id 12 | table: users 13 | column: id 14 | - name: quantity 15 | type: int 16 | -------------------------------------------------------------------------------- /examples/20_create_posts_table.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: posts 4 | columns: 5 | - name: id 6 | type: serial 7 | pk: true 8 | - name: title 9 | type: varchar(255) 10 | - name: user_id 11 | type: integer 12 | nullable: true 13 | -------------------------------------------------------------------------------- /examples/21_add_foreign_key_constraint.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - alter_column: 3 | table: posts 4 | column: user_id 5 | references: 6 | name: fk_users_id 7 | table: users 8 | column: id 9 | on_delete: CASCADE 10 | up: SELECT CASE WHEN EXISTS (SELECT 1 FROM users WHERE users.id = user_id) THEN user_id ELSE NULL 
END 11 | down: user_id 12 | -------------------------------------------------------------------------------- /examples/22_add_check_constraint.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - alter_column: 3 | table: posts 4 | column: title 5 | check: 6 | name: title_length 7 | constraint: length(title) > 3 8 | up: SELECT CASE WHEN length(title) <= 3 THEN LPAD(title, 4, '-') ELSE title END 9 | down: title 10 | -------------------------------------------------------------------------------- /examples/23_drop_check_constraint.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - drop_constraint: 3 | table: posts 4 | name: title_length 5 | up: title 6 | down: SELECT CASE WHEN length(title) <= 3 THEN LPAD(title, 4, '-') ELSE title END 7 | -------------------------------------------------------------------------------- /examples/24_drop_foreign_key_constraint.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - drop_constraint: 3 | table: posts 4 | name: fk_users_id 5 | up: user_id 6 | down: SELECT CASE WHEN EXISTS (SELECT 1 FROM users WHERE users.id = user_id) THEN user_id ELSE NULL END 7 | -------------------------------------------------------------------------------- /examples/25_add_table_with_check_constraint.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: people 4 | columns: 5 | - name: id 6 | type: integer 7 | pk: true 8 | - name: name 9 | type: varchar(255) 10 | check: 11 | name: name_length 12 | constraint: length(name) > 3 13 | -------------------------------------------------------------------------------- /examples/26_add_column_with_check_constraint.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - add_column: 3 | table: people 4 | column: 5 | name: age 6 | type: integer 7 | default: "18" 8 | check: 9 | name: age_check 10 | constraint: age >= 18 11 | -------------------------------------------------------------------------------- /examples/27_drop_unique_constraint.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - drop_constraint: 3 | table: reviews 4 | name: reviews_review_unique 5 | up: review 6 | down: review || '-' || (random()*1000000)::integer 7 | -------------------------------------------------------------------------------- /examples/28_different_defaults.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: items 4 | columns: 5 | - name: id 6 | type: serial 7 | pk: true 8 | - name: name 9 | type: varchar(255) 10 | default: '''unnamed''' 11 | - name: price 12 | type: decimal(10,2) 13 | default: "0.00" 14 | - name: created_at 15 | type: timestamptz 16 | default: now() 17 | -------------------------------------------------------------------------------- /examples/29_set_replica_identity.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - set_replica_identity: 3 | table: fruits 4 | identity: 5 | type: index 6 | index: fruits_pkey 7 | -------------------------------------------------------------------------------- /examples/30_add_column_simple_up.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - add_column: 3 | 
table: people 4 | up: '''temporary-description''' 5 | column: 6 | name: description 7 | type: varchar(255) 8 | nullable: false 9 | comment: This is a comment for the description column 10 | -------------------------------------------------------------------------------- /examples/31_unset_not_null.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - alter_column: 3 | table: posts 4 | column: title 5 | nullable: true 6 | up: title 7 | down: SELECT CASE WHEN title IS NULL THEN 'placeholder title' ELSE title END 8 | -------------------------------------------------------------------------------- /examples/32_sql_on_complete.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - sql: 3 | up: ALTER TABLE people ADD COLUMN birth_date timestamp 4 | onComplete: true 5 | -------------------------------------------------------------------------------- /examples/33_rename_check_constraint.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - rename_constraint: 3 | table: people 4 | from: name_length 5 | to: name_length_check 6 | -------------------------------------------------------------------------------- /examples/34_create_events_table.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: events 4 | columns: 5 | - name: id 6 | type: serial 7 | pk: true 8 | - name: name 9 | type: varchar(255) 10 | nullable: true 11 | default: '''''' 12 | comment: the name of the event 13 | -------------------------------------------------------------------------------- /examples/35_alter_column_multiple.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - alter_column: 3 | table: events 4 | column: name 5 | type: text 6 | default: '''unknown event''' 7 | nullable: false 8 | comment: the full name of the event 9 | unique: 10 | name: events_event_name_unique 11 | check: 12 | name: event_name_length 13 | constraint: length(name) > 3 14 | up: SELECT CASE WHEN name IS NULL OR LENGTH(name) <= 3 THEN 'placeholder' ELSE name END 15 | down: name 16 | -------------------------------------------------------------------------------- /examples/36_set_comment_to_null.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - alter_column: 3 | table: events 4 | column: name 5 | comment: null 6 | up: name 7 | down: name 8 | -------------------------------------------------------------------------------- /examples/37_create_partial_index.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_index: 3 | name: idx_fruits_id_gt_10 4 | table: fruits 5 | columns: 6 | id: {} 7 | predicate: id > 10 8 | -------------------------------------------------------------------------------- /examples/38_create_hash_index_with_fillfactor.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_index: 3 | name: idx_fruits_name 4 | table: fruits 5 | columns: 6 | name: {} 7 | method: hash 8 | storage_parameters: fillfactor = 70 9 | -------------------------------------------------------------------------------- /examples/39_add_column_with_multiple_pk_in_table.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - add_column: 3 |
table: sellers 4 | column: 5 | name: rating 6 | type: int 7 | default: "10" 8 | -------------------------------------------------------------------------------- /examples/40_create_enum_type.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - sql: 3 | up: CREATE TYPE fruit_size AS ENUM ('small', 'medium', 'large'); 4 | -------------------------------------------------------------------------------- /examples/41_add_enum_column.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - add_column: 3 | table: fruits 4 | column: 5 | name: size 6 | type: fruit_size 7 | default: '''small''' 8 | -------------------------------------------------------------------------------- /examples/42_create_unique_index.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_index: 3 | name: idx_fruits_unique_name 4 | table: fruits 5 | columns: 6 | name: {} 7 | unique: true 8 | -------------------------------------------------------------------------------- /examples/43_create_tickets_table.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: tickets 4 | columns: 5 | - name: ticket_id 6 | type: serial 7 | pk: true 8 | - name: sellers_name 9 | type: varchar(255) 10 | - name: sellers_zip 11 | type: integer 12 | - name: ticket_type 13 | type: varchar(255) 14 | default: '''paper''' 15 | -------------------------------------------------------------------------------- /examples/44_add_table_unique_constraint.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_constraint: 3 | type: unique 4 | table: tickets 5 | name: unique_zip_name 6 | columns: 7 | - sellers_name 8 | - sellers_zip 9 | up: 10 | sellers_name: sellers_name 11 | sellers_zip: sellers_zip 12 | down: 13 | sellers_name: sellers_name 14 | sellers_zip: sellers_zip 15 | -------------------------------------------------------------------------------- /examples/45_add_table_check_constraint.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_constraint: 3 | type: check 4 | table: tickets 5 | name: check_zip_name 6 | columns: 7 | - sellers_name 8 | - sellers_zip 9 | check: sellers_name = 'alice' OR sellers_zip > 0 10 | up: 11 | sellers_name: sellers_name 12 | sellers_zip: SELECT CASE WHEN sellers_name != 'alice' AND sellers_zip <= 0 THEN 123 WHEN sellers_name != 'alice' THEN sellers_zip ELSE sellers_zip END 13 | down: 14 | sellers_name: sellers_name 15 | sellers_zip: sellers_zip 16 | -------------------------------------------------------------------------------- /examples/46_alter_column_drop_default.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - alter_column: 3 | table: tickets 4 | column: ticket_type 5 | default: null 6 | up: ticket_type 7 | down: ticket_type 8 | -------------------------------------------------------------------------------- /examples/47_add_table_foreign_key_constraint.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_constraint: 3 | type: foreign_key 4 | table: tickets 5 | name: fk_sellers 6 | columns: 7 | - sellers_name 8 | - sellers_zip 9 | references: 10 | table: sellers 11 | columns: 12 | - name 13 | - zip 14 | up: 15 | sellers_name: 
sellers_name 16 | sellers_zip: sellers_zip 17 | down: 18 | sellers_name: sellers_name 19 | sellers_zip: sellers_zip 20 | -------------------------------------------------------------------------------- /examples/48_drop_tickets_check.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - drop_multicolumn_constraint: 3 | table: tickets 4 | name: check_zip_name 5 | up: 6 | sellers_name: sellers_name 7 | sellers_zip: sellers_zip 8 | down: 9 | sellers_name: sellers_name 10 | sellers_zip: sellers_zip 11 | -------------------------------------------------------------------------------- /examples/49_unset_not_null_on_indexed_column.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - alter_column: 3 | table: fruits 4 | column: name 5 | nullable: true 6 | up: SELECT CASE WHEN name IS NULL THEN 'unknown fruit' ELSE name END 7 | down: name 8 | -------------------------------------------------------------------------------- /examples/50_create_table_with_table_constraint.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: telephone_providers 4 | columns: 5 | - name: id 6 | type: serial 7 | - name: name 8 | type: varchar(255) 9 | - name: tax_id 10 | type: varchar(255) 11 | - name: headquarters 12 | type: varchar(255) 13 | constraints: 14 | - name: provider_pk 15 | type: primary_key 16 | columns: 17 | - id 18 | - name: unique_tax_id 19 | type: unique 20 | columns: 21 | - tax_id 22 | - name: name_must_be_present 23 | type: check 24 | check: length(name) > 0 25 | -------------------------------------------------------------------------------- /examples/51_create_table_with_table_foreign_key_constraint.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: phonebook 4 | columns: 5 | - name: id 6 | type: serial 7 | - name: provider_id 8 | type: serial 9 | - name: name 10 | type: varchar(255) 11 | - name: city 12 | type: varchar(255) 13 | - name: phone 14 | type: varchar(255) 15 | constraints: 16 | - name: phonebook_pk 17 | type: primary_key 18 | columns: 19 | - id 20 | - name: provider_fk 21 | type: foreign_key 22 | columns: 23 | - provider_id 24 | deferrable: false 25 | references: 26 | table: telephone_providers 27 | columns: 28 | - id 29 | on_delete: CASCADE 30 | on_update: CASCADE 31 | match_type: SIMPLE 32 | - name: unique_numbers 33 | type: unique 34 | columns: 35 | - phone 36 | index_parameters: 37 | include_columns: 38 | - name 39 | - name: name_must_be_present 40 | type: check 41 | check: length(name) > 0 42 | -------------------------------------------------------------------------------- /examples/52_create_table_with_exclusion_constraint.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: library 4 | columns: 5 | - name: id 6 | type: serial 7 | - name: returned 8 | type: timestamp 9 | - name: title 10 | type: text 11 | - name: summary 12 | type: text 13 | constraints: 14 | - name: rooms_pk 15 | type: primary_key 16 | columns: 17 | - id 18 | - name: forbid_duplicated_titles 19 | type: exclude 20 | exclude: 21 | index_method: btree 22 | elements: title WITH = 23 | predicate: title IS NOT NULL 24 | -------------------------------------------------------------------------------- /examples/53_add_column_with_volatile_default.yaml: 
-------------------------------------------------------------------------------- 1 | operations: 2 | - add_column: 3 | table: library 4 | up: (random() * 1000)::int 5 | column: 6 | name: value 7 | type: int 8 | default: (random() * 1000)::int 9 | -------------------------------------------------------------------------------- /examples/54_create_index_with_opclass.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_index: 3 | name: idx_fruits_custom_opclass 4 | table: fruits 5 | columns: 6 | id: 7 | opclass: 8 | name: int8_ops 9 | -------------------------------------------------------------------------------- /examples/55_add_primary_key_constraint_to_table.yaml: -------------------------------------------------------------------------------- 1 | operations: 2 | - create_table: 3 | name: tasks 4 | columns: 5 | - name: id 6 | type: serial 7 | - name: title 8 | type: varchar(255) 9 | nullable: false 10 | - name: description 11 | type: varchar(255) 12 | - name: deadline 13 | type: time with time zone 14 | - create_constraint: 15 | name: tasks_pkey 16 | table: tasks 17 | columns: [id] 18 | type: primary_key 19 | up: 20 | id: id 21 | down: 22 | id: id 23 | -------------------------------------------------------------------------------- /internal/defaults/fastpath.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package defaults 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | 9 | "github.com/lib/pq" 10 | "github.com/xataio/pgroll/pkg/db" 11 | ) 12 | 13 | const ( 14 | cNewTableName = "_pgroll_temp_fastpath_check" 15 | cNewColumnName = "_pgroll_fastpath_check_column" 16 | ) 17 | 18 | // UsesFastPath returns true if [defaultExpr] will use the fast-path 19 | // optimization added in Postgres 11 to avoid taking an `ACCESS_EXCLUSIVE` lock 20 | // when adding a new column with a `DEFAULT` value. 21 | // 22 | // The implementation works by creating a schema-only copy of [tableName], 23 | // adding a new column with a `DEFAULT` value of [defaultExpr] and checking 24 | // system catalogs to see if the fast-path optimization was applied. 
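// (Editor's illustration, not part of the original source: the catalog check
// this function performs can be reproduced by hand. Assuming a table "t" that
// gained a column "c" through the fast path, the following query returns true:
//
//	SELECT atthasmissing FROM pg_attribute
//	WHERE attrelid = 't'::regclass AND attname = 'c';
//
// Postgres sets `atthasmissing` when it records the default value in the
// catalog instead of rewriting every existing row.)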
25 | func UsesFastPath(ctx context.Context, conn db.DB, tableName, columnType, defaultExpr string) (bool, error) { 26 | // Check if we have a real connection or a fake one 27 | if _, ok := conn.(*db.FakeDB); ok { 28 | return true, nil 29 | } 30 | 31 | // Create a schema-only copy of the table 32 | _, err := conn.ExecContext(ctx, fmt.Sprintf("CREATE UNLOGGED TABLE %s AS SELECT * FROM %s WHERE false", 33 | pq.QuoteIdentifier(cNewTableName), 34 | pq.QuoteIdentifier(tableName))) 35 | if err != nil { 36 | return false, fmt.Errorf("failed to create schema-only copy of table: %w", err) 37 | } 38 | 39 | // Ensure that the schema-only copy is removed 40 | defer cleanup(ctx, conn) 41 | 42 | // Add a new column with the default value 43 | _, err = conn.ExecContext(ctx, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s DEFAULT %s", 44 | pq.QuoteIdentifier(cNewTableName), 45 | pq.QuoteIdentifier(cNewColumnName), 46 | columnType, 47 | defaultExpr)) 48 | if err != nil { 49 | return false, fmt.Errorf("failed to add column to schema-only copy: %w", err) 50 | } 51 | 52 | // Inspect the system catalogs to see if the fast-path optimization was applied 53 | rows, err := conn.QueryContext(ctx, 54 | "SELECT atthasmissing FROM pg_attribute WHERE attrelid::regclass = $1::regclass AND attname = $2", 55 | cNewTableName, 56 | cNewColumnName) 57 | if err != nil { 58 | return false, fmt.Errorf("failed to query pg_attribute: %w", err) 59 | } 60 | defer rows.Close() 61 | 62 | // Read the `atthasmissing` column from the result to determine if the fast-path 63 | // optimization was applied 64 | var hasMissing bool 65 | if err := db.ScanFirstValue(rows, &hasMissing); err != nil { 66 | return false, fmt.Errorf("failed to read pg_attribute result: %w", err) 67 | } 68 | 69 | return hasMissing, nil 70 | } 71 | 72 | func cleanup(ctx context.Context, conn db.DB) { 73 | conn.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", pq.QuoteIdentifier(cNewTableName))) 74 | } 75 | -------------------------------------------------------------------------------- /internal/defaults/fastpath_test.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package defaults_test 4 | 5 | import ( 6 | "context" 7 | "database/sql" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/require" 11 | "github.com/xataio/pgroll/pkg/db" 12 | "github.com/xataio/pgroll/pkg/roll" 13 | 14 | "github.com/xataio/pgroll/internal/defaults" 15 | "github.com/xataio/pgroll/internal/testutils" 16 | ) 17 | 18 | func TestMain(m *testing.M) { 19 | testutils.SharedTestMain(m) 20 | } 21 | 22 | func TestFastPathDefaults(t *testing.T) { 23 | t.Parallel() 24 | 25 | testCases := []struct { 26 | Name string 27 | ColumnType string 28 | Default string 29 | ExpectedFastPath bool 30 | }{ 31 | { 32 | Name: "constant integer", 33 | ColumnType: "int", 34 | Default: "10", 35 | ExpectedFastPath: true, 36 | }, 37 | { 38 | Name: "constant boolean", 39 | ColumnType: "bool", 40 | Default: "true", 41 | ExpectedFastPath: true, 42 | }, 43 | { 44 | Name: "simple arithmetic", 45 | ColumnType: "int", 46 | Default: "1 + 2 + 3", 47 | ExpectedFastPath: true, 48 | }, 49 | { 50 | Name: "random() function", 51 | ColumnType: "double precision", 52 | Default: "random()", 53 | ExpectedFastPath: false, 54 | }, 55 | { 56 | Name: "random() function with typecast", 57 | ColumnType: "integer", 58 | Default: "(random()*1000)::integer", 59 | ExpectedFastPath: false, 60 | }, 61 | { 62 | Name: "timeofday() function", 63 | ColumnType: "text",
64 | Default: "timeofday()", 65 | ExpectedFastPath: false, 66 | }, 67 | } 68 | 69 | for _, tc := range testCases { 70 | t.Run(tc.Name, func(t *testing.T) { 71 | t.Parallel() 72 | 73 | testutils.WithMigratorAndConnectionToContainer(t, func(mig *roll.Roll, conn *sql.DB) { 74 | ctx := context.Background() 75 | rdb := &db.RDB{DB: conn} 76 | 77 | createTestTable(t, conn) 78 | 79 | fp, err := defaults.UsesFastPath(ctx, rdb, "test_table", tc.ColumnType, tc.Default) 80 | require.NoError(t, err) 81 | 82 | require.Equal(t, tc.ExpectedFastPath, fp) 83 | }) 84 | }) 85 | } 86 | } 87 | 88 | func createTestTable(t *testing.T, conn *sql.DB) { 89 | t.Helper() 90 | 91 | _, err := conn.Exec(` 92 | CREATE TABLE test_table ( 93 | id SERIAL PRIMARY KEY, 94 | name TEXT NOT NULL 95 | ) 96 | `) 97 | require.NoError(t, err) 98 | } 99 | -------------------------------------------------------------------------------- /internal/jsonschema/jsonschema_test.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package jsonschema 4 | 5 | import ( 6 | "encoding/json" 7 | "os" 8 | "path/filepath" 9 | "strconv" 10 | "strings" 11 | "testing" 12 | 13 | "github.com/santhosh-tekuri/jsonschema/v6" 14 | "github.com/stretchr/testify/assert" 15 | "golang.org/x/tools/txtar" 16 | ) 17 | 18 | const ( 19 | schemaPath = "../../schema.json" 20 | testDataDir = "./testdata" 21 | ) 22 | 23 | func TestJSONSchemaValidation(t *testing.T) { 24 | t.Parallel() 25 | 26 | compiler := jsonschema.NewCompiler() 27 | sch, err := compiler.Compile(schemaPath) 28 | assert.NoError(t, err) 29 | 30 | files, err := os.ReadDir(testDataDir) 31 | assert.NoError(t, err) 32 | 33 | for _, file := range files { 34 | t.Run(file.Name(), func(t *testing.T) { 35 | ac, err := txtar.ParseFile(filepath.Join(testDataDir, file.Name())) 36 | assert.NoError(t, err) 37 | 38 | assert.Len(t, ac.Files, 2) 39 | 40 | var v map[string]any 41 | assert.NoError(t, json.Unmarshal(ac.Files[0].Data, &v)) 42 | 43 | shouldValidate, err := strconv.ParseBool(strings.TrimSpace(string(ac.Files[1].Data))) 44 | assert.NoError(t, err) 45 | 46 | err = sch.Validate(v) 47 | if shouldValidate && err != nil { 48 | t.Errorf("%#v", err) 49 | } else if !shouldValidate && err == nil { 50 | t.Errorf("expected %q to be invalid", ac.Files[0].Name) 51 | } 52 | }) 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /internal/jsonschema/testdata/add-column-1.txtar: -------------------------------------------------------------------------------- 1 | This is a valid 'add_column' migration. 2 | Some optional column fields are specified and some aren't. 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "add_column": { 10 | "table": "reviews", 11 | "column": { 12 | "name": "rating", 13 | "type": "text", 14 | "default": "0" 15 | } 16 | } 17 | } 18 | ] 19 | } 20 | 21 | -- valid -- 22 | true 23 | -------------------------------------------------------------------------------- /internal/jsonschema/testdata/alter-column-1.txtar: -------------------------------------------------------------------------------- 1 | This is a valid 'alter_column' migration. 2 | It sets `nullable`, `up` and `down`. 
3 | 4 | -- alter_column.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "alter_column": { 10 | "table": "reviews", 11 | "column": "review", 12 | "nullable": false, 13 | "up": "foo", 14 | "down": "foo" 15 | } 16 | } 17 | ] 18 | } 19 | 20 | -- valid -- 21 | true 22 | -------------------------------------------------------------------------------- /internal/jsonschema/testdata/alter-column-2.txtar: -------------------------------------------------------------------------------- 1 | This is a valid 'alter_column' migration. 2 | It sets both `type` and `nullable`. 3 | 4 | -- alter_column.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "alter_column": { 10 | "table": "reviews", 11 | "column": "review", 12 | "type": "integer", 13 | "nullable": true, 14 | "up": "foo", 15 | "down": "foo" 16 | } 17 | } 18 | ] 19 | } 20 | 21 | -- valid -- 22 | true 23 | -------------------------------------------------------------------------------- /internal/jsonschema/testdata/alter-column-3.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'alter_column' migration. 2 | It specifies a table and column but no changes. 3 | 4 | -- alter_column.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "alter_column": { 10 | "table": "reviews", 11 | "column": "review" 12 | } 13 | } 14 | ] 15 | } 16 | 17 | -- valid -- 18 | false 19 | -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-constraint-1-invalid-check.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_constraint' migration. 2 | Check constraint must have the option 'check' configured. 3 | 4 | -- create_constraint.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_constraint": { 10 | "name": "my_invalid_check", 11 | "table": "my_table", 12 | "type": "check", 13 | "columns": [ 14 | "my_column" 15 | ], 16 | "up": { 17 | "my_column": "my_column" 18 | }, 19 | "down": { 20 | "my_column": "my_column" 21 | } 22 | } 23 | } 24 | ] 25 | } 26 | 27 | -- valid -- 28 | false 29 | -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-constraint-2-invalid-no-inherit.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_constraint' migration. 2 | Only check constraints have no_inherit flag. 3 | 4 | -- create_constraint.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_constraint": { 10 | "name": "my_invalid_check", 11 | "table": "my_table", 12 | "type": "foreign_key", 13 | "no_inherit": true, 14 | "columns": [ 15 | "my_column" 16 | ], 17 | "up": { 18 | "my_column": "my_column" 19 | }, 20 | "down": { 21 | "my_column": "my_column" 22 | } 23 | } 24 | } 25 | ] 26 | } 27 | 28 | -- valid -- 29 | false 30 | -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-invalid-index.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_index' migration. 
2 | 3 | -- create_table.json -- 4 | { 5 | "name": "migration_name", 6 | "operations": [ 7 | { 8 | "create_index": { 9 | "name": "reviews_index", 10 | "columns": [ 11 | "my-column" 12 | ], 13 | "method": "no_such_index_method" 14 | } 15 | } 16 | ] 17 | } 18 | 19 | -- valid -- 20 | false 21 | -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-1.txtar: -------------------------------------------------------------------------------- 1 | This is a valid 'create_table' migration. 2 | Some optional column fields are specified and some aren't. 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "id", 14 | "type": "serial", 15 | "pk": true 16 | }, 17 | { 18 | "name": "title", 19 | "type": "varchar(255)" 20 | }, 21 | { 22 | "name": "user_id", 23 | "type": "integer", 24 | "nullable": true 25 | } 26 | ] 27 | } 28 | } 29 | ] 30 | } 31 | 32 | -- valid -- 33 | true 34 | -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-10-invalid-primary-key-constraints-extra-check.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration. 2 | Primary key constraint must not contain a check expression. 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_pk", 25 | "type": "primary_key", 26 | "columns": [ 27 | "title" 28 | ], 29 | "check": "this should not be set" 30 | } 31 | ] 32 | } 33 | } 34 | ] 35 | } 36 | 37 | -- valid -- 38 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-10-invalid-unique-missing-columns.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration. 2 | Unique constraint must have a list of columns configured 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_unique", 25 | "type": "unique" 26 | } 27 | ] 28 | } 29 | } 30 | ] 31 | } 32 | 33 | -- valid -- 34 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-11-invalid-primary-key-constraints-missing-columns.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration.
2 | Primary key constraint must have columns set 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_pk", 25 | "type": "primary_key" 26 | } 27 | ] 28 | } 29 | } 30 | ] 31 | } 32 | 33 | -- valid -- 34 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-12-valid-primary-key-constraint.txtar: -------------------------------------------------------------------------------- 1 | This is a valid 'create_table' migration. 2 | 3 | -- create_table.json -- 4 | { 5 | "name": "migration_name", 6 | "operations": [ 7 | { 8 | "create_table": { 9 | "name": "posts", 10 | "columns": [ 11 | { 12 | "name": "title", 13 | "type": "varchar(255)" 14 | }, 15 | { 16 | "name": "user_id", 17 | "type": "integer" 18 | } 19 | ], 20 | "constraints": [ 21 | { 22 | "name": "my_pk", 23 | "type": "primary_key", 24 | "columns": [ 25 | "title", 26 | "user_id" 27 | ] 28 | } 29 | ] 30 | } 31 | } 32 | ] 33 | } 34 | 35 | -- valid -- 36 | true -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-13-invalid-fk-missing-references.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration. 2 | Foreign key constraints must have references configured 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_fk", 25 | "type": "foreign_key", 26 | "columns": ["title"] 27 | } 28 | ] 29 | } 30 | } 31 | ] 32 | } 33 | 34 | -- valid -- 35 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-14-invalid-fk-missing-referenced-table.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration. 2 | Foreign key constraints must have referenced table configured in references.table 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_fk", 25 | "type": "foreign_key", 26 | "columns": ["title"], 27 | "references": { 28 | "columns": [ 29 | "referenced" 30 | ] 31 | } 32 | } 33 | ] 34 | } 35 | } 36 | ] 37 | } 38 | 39 | -- valid -- 40 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-15-invalid-fk-missing-columns.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration. 
2 | Foreign key constraints must have columns configured 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_fk", 25 | "type": "foreign_key", 26 | "references": { 27 | "columns": ["title"], 28 | "table": "referenced", 29 | "on_delete": "CASCADE", 30 | "on_update": "SET NULL" 31 | } 32 | } 33 | ] 34 | } 35 | } 36 | ] 37 | } 38 | 39 | -- valid -- 40 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-16-invalid-exclusion-missing-exclusion.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration. 2 | Exclusion constraints must have exclude configured 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_fk", 25 | "type": "exclude" 26 | } 27 | ] 28 | } 29 | } 30 | ] 31 | } 32 | 33 | -- valid -- 34 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-17-invalid-exclusion-columns-set.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration. 2 | Exclusion constraints mustn't have columns configured 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_exclusion", 25 | "type": "exclude", 26 | "columns": ["invalid"], 27 | "exclude": { 28 | "index_method": "btree", 29 | "elements": "title WITH =" 30 | } 31 | } 32 | ] 33 | } 34 | } 35 | ] 36 | } 37 | 38 | -- valid -- 39 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-2-check-constraints.txtar: -------------------------------------------------------------------------------- 1 | This is a valid 'create_table' migration. 
2 | 3 | -- create_table.json -- 4 | { 5 | "name": "migration_name", 6 | "operations": [ 7 | { 8 | "create_table": { 9 | "name": "posts", 10 | "columns": [ 11 | { 12 | "name": "id", 13 | "type": "serial", 14 | "pk": true 15 | }, 16 | { 17 | "name": "title", 18 | "type": "varchar(255)" 19 | }, 20 | { 21 | "name": "user_id", 22 | "type": "integer", 23 | "nullable": true 24 | } 25 | ], 26 | "constraints": [ 27 | { 28 | "name": "my_check", 29 | "type": "check", 30 | "check": "length(title) > 30" 31 | } 32 | ] 33 | } 34 | } 35 | ] 36 | } 37 | 38 | -- valid -- 39 | true -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-3-invalid-check-constraints.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration. 2 | Check constraint is missing the expression 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_check", 25 | "type": "check" 26 | } 27 | ] 28 | } 29 | } 30 | ] 31 | } 32 | 33 | -- valid -- 34 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-3-unique-constraint.txtar: -------------------------------------------------------------------------------- 1 | This is a valid 'create_table' migration. 2 | 3 | -- create_table.json -- 4 | { 5 | "name": "migration_name", 6 | "operations": [ 7 | { 8 | "create_table": { 9 | "name": "posts", 10 | "columns": [ 11 | { 12 | "name": "title", 13 | "type": "varchar(255)" 14 | }, 15 | { 16 | "name": "user_id", 17 | "type": "integer", 18 | "nullable": true 19 | } 20 | ], 21 | "constraints": [ 22 | { 23 | "name": "my_invalid_unique", 24 | "type": "unique", 25 | "columns": ["title"] 26 | } 27 | ] 28 | } 29 | } 30 | ] 31 | } 32 | 33 | -- valid -- 34 | true -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-4-invalid-check-options-deferrable.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration. 2 | Check constraint is not deferrable 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_check", 25 | "type": "check", 26 | "check": "length(title) > 10", 27 | "deferrable": true 28 | } 29 | ] 30 | } 31 | } 32 | ] 33 | } 34 | 35 | -- valid -- 36 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-5-invalid-check-options-initially-defer.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration.
2 | Check constraints cannot be initially deferred 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_check", 25 | "type": "check", 26 | "check": "length(title) > 10", 27 | "initially_deferred": true 28 | } 29 | ] 30 | } 31 | } 32 | ] 33 | } 34 | 35 | -- valid -- 36 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-6-invalid-check-options-nulls-not-distinct.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration. 2 | Check constraints do not support the nulls not distinct option 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_check", 25 | "type": "check", 26 | "check": "length(title) > 10", 27 | "nulls_not_distinct": true 28 | } 29 | ] 30 | } 31 | } 32 | ] 33 | } 34 | 35 | -- valid -- 36 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-7-invalid-check-options-forbidden-index-params.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration. 2 | Check constraints do not support index settings 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_check", 25 | "type": "check", 26 | "check": "length(title) > 10", 27 | "index_params": { 28 | "include_columns": [ 29 | "title" 30 | ] 31 | } 32 | } 33 | ] 34 | } 35 | } 36 | ] 37 | } 38 | 39 | -- valid -- 40 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-8-invalid-unique-forbidden-check-param.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration.
2 | Unique constraint cannot have a check expression 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_unique", 25 | "type": "unique", 26 | "columns": ["title"], 27 | "check": "length(title) > 10" 28 | } 29 | ] 30 | } 31 | } 32 | ] 33 | } 34 | 35 | -- valid -- 36 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/create-table-9-invalid-unique-forbidden-no-inherit.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'create_table' migration. 2 | Unique constraint does not support inheritance settings 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "create_table": { 10 | "name": "posts", 11 | "columns": [ 12 | { 13 | "name": "title", 14 | "type": "varchar(255)" 15 | }, 16 | { 17 | "name": "user_id", 18 | "type": "integer", 19 | "nullable": true 20 | } 21 | ], 22 | "constraints": [ 23 | { 24 | "name": "my_invalid_unique", 25 | "type": "unique", 26 | "columns": ["title"], 27 | "no_inherit": true 28 | } 29 | ] 30 | } 31 | } 32 | ] 33 | } 34 | 35 | -- valid -- 36 | false -------------------------------------------------------------------------------- /internal/jsonschema/testdata/rename-constraint-1.txtar: -------------------------------------------------------------------------------- 1 | This is a valid 'rename constraint' migration. 2 | 3 | -- rename_constraint.json -- 4 | { 5 | "name": "migration_name", 6 | "operations": [ 7 | { 8 | "rename_constraint": { 9 | "table": "people", 10 | "from": "name_length", 11 | "to": "name_length_check" 12 | } 13 | } 14 | ] 15 | } 16 | 17 | -- valid -- 18 | true 19 | -------------------------------------------------------------------------------- /internal/jsonschema/testdata/sql-1.txtar: -------------------------------------------------------------------------------- 1 | This is a valid 'sql' migration. 2 | It specifies both `up` and `down`. 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "sql": { 10 | "up": "CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)", 11 | "down": "DROP TABLE users" 12 | } 13 | } 14 | ] 15 | } 16 | 17 | -- valid -- 18 | true 19 | -------------------------------------------------------------------------------- /internal/jsonschema/testdata/sql-2.txtar: -------------------------------------------------------------------------------- 1 | This is a valid 'sql' migration. 2 | It specifies `up` but not `down`. 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "sql": { 10 | "up": "CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)" 11 | } 12 | } 13 | ] 14 | } 15 | 16 | -- valid -- 17 | true 18 | -------------------------------------------------------------------------------- /internal/jsonschema/testdata/sql-3.txtar: -------------------------------------------------------------------------------- 1 | This is a valid 'sql' migration. 
2 | It specifies `up` and `onComplete` 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "sql": { 10 | "up": "CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)", 11 | "onComplete": true 12 | } 13 | } 14 | ] 15 | } 16 | 17 | -- valid -- 18 | true 19 | -------------------------------------------------------------------------------- /internal/jsonschema/testdata/sql-4.txtar: -------------------------------------------------------------------------------- 1 | This is an invalid 'sql' migration. 2 | It specifies `up`, `down` and `onComplete` 3 | 4 | -- create_table.json -- 5 | { 6 | "name": "migration_name", 7 | "operations": [ 8 | { 9 | "sql": { 10 | "up": "CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)", 11 | "down": "DROP TABLE users", 12 | "onComplete": true 13 | } 14 | } 15 | ] 16 | } 17 | 18 | -- valid -- 19 | false 20 | -------------------------------------------------------------------------------- /internal/testutils/db.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package testutils 4 | 5 | import "math/rand" 6 | 7 | func randomDBName() string { 8 | const length = 15 9 | const charset = "abcdefghijklmnopqrstuvwxyz" 10 | 11 | b := make([]byte, length) 12 | for i := range b { 13 | b[i] = charset[rand.Intn(len(charset))] // #nosec G404 14 | } 15 | 16 | return "testdb_" + string(b) 17 | } 18 | -------------------------------------------------------------------------------- /internal/testutils/error_codes.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package testutils 4 | 5 | const ( 6 | CheckViolationErrorCode string = "check_violation" 7 | ExclusionViolationErrorCode string = "exclusion_violation" 8 | FKViolationErrorCode string = "foreign_key_violation" 9 | NotNullViolationErrorCode string = "not_null_violation" 10 | UndefinedColumnErrorCode string = "undefined_column" 11 | UndefinedTableErrorCode string = "undefined_table" 12 | UniqueViolationErrorCode string = "unique_violation" 13 | ) 14 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package main 4 | 5 | import ( 6 | "os", 7 | 8 | "github.com/xataio/pgroll/cmd" 9 | ) 10 | 11 | func main() { 12 | if err := cmd.Execute(); err != nil { 13 | os.Exit(1) 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /pkg/backfill/config.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package backfill 4 | 5 | import ( 6 | "time" 7 | ) 8 | 9 | type Config struct { 10 | batchSize int 11 | batchDelay time.Duration 12 | callbacks []CallbackFn 13 | } 14 | 15 | const ( 16 | DefaultBatchSize int = 1000 17 | DefaultDelay time.Duration = 0 18 | ) 19 | 20 | type OptionFn func(*Config) 21 | 22 | func NewConfig(opts ...OptionFn) *Config { 23 | c := &Config{ 24 | batchSize: DefaultBatchSize, 25 | batchDelay: DefaultDelay, 26 | callbacks: make([]CallbackFn, 0), 27 | } 28 | 29 | for _, opt := range opts { 30 | opt(c) 31 | } 32 | return c 33 | } 34 | 35 | // WithBatchSize sets the batch size for the backfill operation.
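// (Editor's sketch, not part of the original source: a typical construction
// using the options below might look like
//
//	cfg := backfill.NewConfig(
//		backfill.WithBatchSize(500),
//		backfill.WithBatchDelay(100*time.Millisecond),
//	)
//
// with any unset option keeping the defaults declared above.)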
36 | func WithBatchSize(batchSize int) OptionFn { 37 | return func(o *Config) { 38 | o.batchSize = batchSize 39 | } 40 | } 41 | 42 | // WithBatchDelay sets the delay between batches for the backfill operation. 43 | func WithBatchDelay(delay time.Duration) OptionFn { 44 | return func(o *Config) { 45 | o.batchDelay = delay 46 | } 47 | } 48 | 49 | // AddCallback adds a callback to the backfill operation. 50 | // Callbacks are invoked after each batch is processed. 51 | func (c *Config) AddCallback(fn CallbackFn) { 52 | c.callbacks = append(c.callbacks, fn) 53 | } 54 | -------------------------------------------------------------------------------- /pkg/backfill/errors.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package backfill 4 | -------------------------------------------------------------------------------- /pkg/backfill/templates/build.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package templates 4 | 5 | import ( 6 | "bytes" 7 | "strings" 8 | "text/template" 9 | 10 | "github.com/lib/pq" 11 | ) 12 | 13 | type BatchConfig struct { 14 | TableName string 15 | PrimaryKey []string 16 | LastValue []string 17 | BatchSize int 18 | NeedsBackfillColumn string 19 | } 20 | 21 | func BuildSQL(cfg BatchConfig) (string, error) { 22 | return executeTemplate("sql", SQL, cfg) 23 | } 24 | 25 | func executeTemplate(name, content string, cfg BatchConfig) (string, error) { 26 | ql := pq.QuoteLiteral 27 | qi := pq.QuoteIdentifier 28 | 29 | tmpl := template.Must(template.New(name). 30 | Funcs(template.FuncMap{ 31 | "ql": ql, 32 | "qi": qi, 33 | "commaSeparate": func(slice []string) string { 34 | return strings.Join(slice, ", ") 35 | }, 36 | "quoteIdentifiers": func(slice []string) []string { 37 | quoted := make([]string, len(slice)) 38 | for i, s := range slice { 39 | quoted[i] = qi(s) 40 | } 41 | return quoted 42 | }, 43 | "quoteLiterals": func(slice []string) []string { 44 | quoted := make([]string, len(slice)) 45 | for i, s := range slice { 46 | quoted[i] = ql(s) 47 | } 48 | return quoted 49 | }, 50 | "updateSetClause": func(tableName string, columns []string) string { 51 | quoted := make([]string, len(columns)) 52 | for i, c := range columns { 53 | quoted[i] = qi(c) + " = " + qi(tableName) + "." + qi(c) 54 | } 55 | return strings.Join(quoted, ", ") 56 | }, 57 | "updateWhereClause": func(tableName string, columns []string) string { 58 | quoted := make([]string, len(columns)) 59 | for i, c := range columns { 60 | quoted[i] = qi(tableName) + "." + qi(c) + " = batch." + qi(c) 61 | } 62 | return strings.Join(quoted, " AND ") 63 | }, 64 | "updateReturnClause": func(tableName string, columns []string) string { 65 | quoted := make([]string, len(columns)) 66 | for i, c := range columns { 67 | quoted[i] = qi(tableName) + "." + qi(c) 68 | } 69 | return strings.Join(quoted, ", ") 70 | }, 71 | "selectLastValue": func(columns []string) string { 72 | quoted := make([]string, len(columns)) 73 | for i, c := range columns { 74 | quoted[i] = "LAST_VALUE(" + qi(c) + ") OVER()" 75 | } 76 | return strings.Join(quoted, ", ") 77 | }, 78 | }). 
79 | Parse(content)) 80 | 81 | buf := bytes.Buffer{} 82 | if err := tmpl.Execute(&buf, cfg); err != nil { 83 | return "", err 84 | } 85 | 86 | return buf.String(), nil 87 | } 88 | -------------------------------------------------------------------------------- /pkg/backfill/templates/sql.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package templates 4 | 5 | const SQL = `WITH batch AS 6 | ( 7 | SELECT {{ commaSeparate (quoteIdentifiers .PrimaryKey) }} 8 | FROM {{ .TableName | qi}} 9 | WHERE {{ .NeedsBackfillColumn | qi }} = true 10 | {{ if .LastValue -}} 11 | AND ({{ commaSeparate (quoteIdentifiers .PrimaryKey) }}) > ({{ commaSeparate (quoteLiterals .LastValue) }}) 12 | {{ end -}} 13 | ORDER BY {{ commaSeparate (quoteIdentifiers .PrimaryKey) }} 14 | LIMIT {{ .BatchSize }} 15 | FOR NO KEY UPDATE 16 | ), 17 | update AS 18 | ( 19 | UPDATE {{ .TableName | qi }} 20 | SET {{ updateSetClause .TableName .PrimaryKey }} 21 | FROM batch 22 | WHERE {{ updateWhereClause .TableName .PrimaryKey }} 23 | RETURNING {{ updateReturnClause .TableName .PrimaryKey }} 24 | ) 25 | SELECT {{ selectLastValue .PrimaryKey }} 26 | FROM update 27 | ` 28 | -------------------------------------------------------------------------------- /pkg/db/fake.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package db 4 | 5 | import ( 6 | "context" 7 | "database/sql" 8 | ) 9 | 10 | // FakeDB is a fake implementation of `DB`. All methods on `FakeDB` are 11 | // implemented as no-ops 12 | type FakeDB struct{} 13 | 14 | func (db *FakeDB) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { 15 | return nil, nil 16 | } 17 | 18 | func (db *FakeDB) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { 19 | return nil, nil 20 | } 21 | 22 | func (db *FakeDB) WithRetryableTransaction(ctx context.Context, f func(context.Context, *sql.Tx) error) error { 23 | return nil 24 | } 25 | 26 | func (db *FakeDB) Close() error { 27 | return nil 28 | } 29 | -------------------------------------------------------------------------------- /pkg/migrations/check.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | // Validate checks that the CheckConstraint is valid 6 | func (c *CheckConstraint) Validate() error { 7 | if c.Name == "" { 8 | return FieldRequiredError{Name: "name"} 9 | } 10 | 11 | if err := ValidateIdentifierLength(c.Name); err != nil { 12 | return err 13 | } 14 | 15 | if c.Constraint == "" { 16 | return FieldRequiredError{Name: "constraint"} 17 | } 18 | 19 | return nil 20 | } 21 | -------------------------------------------------------------------------------- /pkg/migrations/check_test.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "strings" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestCheckConstraintValidate(t *testing.T) { 13 | t.Parallel() 14 | 15 | t.Run("Name required", func(t *testing.T) { 16 | check := &CheckConstraint{ 17 | Name: "", 18 | } 19 | err := check.Validate() 20 | assert.EqualError(t, err, `field "name" is required`) 21 | }) 22 | t.Run("Name length", func(t *testing.T) { 23 | check := &CheckConstraint{ 24 
| Name: strings.Repeat("x", maxIdentifierLength+1), 25 | } 26 | err := check.Validate() 27 | assert.EqualError(t, err, `length of "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" (64) exceeds maximum length of 63`) 28 | }) 29 | } 30 | -------------------------------------------------------------------------------- /pkg/migrations/column.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | // IsNullable returns true if the column is nullable 6 | func (c *Column) IsNullable() bool { 7 | return c.Nullable 8 | } 9 | 10 | // IsUnique returns true if the column values must be unique 11 | func (c *Column) IsUnique() bool { 12 | return c.Unique 13 | } 14 | 15 | // IsPrimaryKey returns true if the column is part of the primary key 16 | func (c *Column) IsPrimaryKey() bool { 17 | return c.Pk 18 | } 19 | 20 | // HasDefault returns true if the column has a default value 21 | func (c *Column) HasDefault() bool { 22 | return c.Default != nil 23 | } 24 | 25 | // HasImplicitDefault returns true if the column has an implicit default value 26 | func (c *Column) HasImplicitDefault() bool { 27 | switch c.Type { 28 | case "smallserial", "serial", "bigserial": 29 | return true 30 | default: 31 | return false 32 | } 33 | } 34 | 35 | // Validate returns true iff the column contains all fields required to create 36 | // the column 37 | func (c *Column) Validate() bool { 38 | if c.Name == "" { 39 | return false 40 | } 41 | if c.Type == "" { 42 | return false 43 | } 44 | return true 45 | } 46 | -------------------------------------------------------------------------------- /pkg/migrations/fk_reference.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "strings" 7 | 8 | "github.com/xataio/pgroll/pkg/schema" 9 | ) 10 | 11 | // Validate checks that the ForeignKeyReference is valid 12 | func (f *ForeignKeyReference) Validate(s *schema.Schema) error { 13 | if f.Name == "" { 14 | return FieldRequiredError{Name: "name"} 15 | } 16 | 17 | if err := ValidateIdentifierLength(f.Name); err != nil { 18 | return err 19 | } 20 | 21 | table := s.GetTable(f.Table) 22 | if table == nil { 23 | return TableDoesNotExistError{Name: f.Table} 24 | } 25 | 26 | column := table.GetColumn(f.Column) 27 | if column == nil { 28 | return ColumnDoesNotExistError{Table: f.Table, Name: f.Column} 29 | } 30 | 31 | switch strings.ToUpper(string(f.OnDelete)) { 32 | case string(ForeignKeyActionNOACTION): 33 | case string(ForeignKeyActionRESTRICT): 34 | case string(ForeignKeyActionSETDEFAULT): 35 | case string(ForeignKeyActionSETNULL): 36 | case string(ForeignKeyActionCASCADE): 37 | case "": 38 | break 39 | default: 40 | return InvalidOnDeleteSettingError{Name: f.Name, Setting: string(f.OnDelete)} 41 | } 42 | 43 | return nil 44 | } 45 | -------------------------------------------------------------------------------- /pkg/migrations/fk_reference_test.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "strings" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestForeignKeyReferenceValidate(t *testing.T) { 13 | t.Parallel() 14 | 15 | t.Run("Name required", func(t *testing.T) { 16 | fk := &ForeignKeyReference{ 17 | Name: "", 18 | } 19 | // For now none of 
the tests use the schema 20 | err := fk.Validate(nil) 21 | assert.EqualError(t, err, `field "name" is required`) 22 | }) 23 | t.Run("Name length", func(t *testing.T) { 24 | fk := &ForeignKeyReference{ 25 | Name: strings.Repeat("x", maxIdentifierLength+1), 26 | } 27 | // For now none of the tests use the schema 28 | err := fk.Validate(nil) 29 | assert.EqualError(t, err, `length of "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" (64) exceeds maximum length of 63`) 30 | }) 31 | } 32 | -------------------------------------------------------------------------------- /pkg/migrations/op_change_type.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "context" 7 | 8 | "github.com/xataio/pgroll/pkg/db" 9 | "github.com/xataio/pgroll/pkg/schema" 10 | ) 11 | 12 | type OpChangeType struct { 13 | Table string `json:"table"` 14 | Column string `json:"column"` 15 | Type string `json:"type"` 16 | Up string `json:"up"` 17 | Down string `json:"down"` 18 | } 19 | 20 | var _ Operation = (*OpChangeType)(nil) 21 | 22 | func (o *OpChangeType) Start(ctx context.Context, l Logger, conn db.DB, latestSchema string, s *schema.Schema) (*schema.Table, error) { 23 | l.LogOperationStart(o) 24 | 25 | table := s.GetTable(o.Table) 26 | if table == nil { 27 | return nil, TableDoesNotExistError{Name: o.Table} 28 | } 29 | 30 | return table, nil 31 | } 32 | 33 | func (o *OpChangeType) Complete(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 34 | l.LogOperationComplete(o) 35 | 36 | return nil 37 | } 38 | 39 | func (o *OpChangeType) Rollback(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 40 | l.LogOperationRollback(o) 41 | 42 | return nil 43 | } 44 | 45 | func (o *OpChangeType) Validate(ctx context.Context, s *schema.Schema) error { 46 | if o.Up == "" { 47 | return FieldRequiredError{Name: "up"} 48 | } 49 | 50 | if o.Down == "" { 51 | return FieldRequiredError{Name: "down"} 52 | } 53 | return nil 54 | } 55 | -------------------------------------------------------------------------------- /pkg/migrations/op_drop_index.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "context" 7 | 8 | "github.com/xataio/pgroll/pkg/db" 9 | "github.com/xataio/pgroll/pkg/schema" 10 | ) 11 | 12 | var ( 13 | _ Operation = (*OpDropIndex)(nil) 14 | _ Createable = (*OpDropIndex)(nil) 15 | ) 16 | 17 | func (o *OpDropIndex) Start(ctx context.Context, l Logger, conn db.DB, latestSchema string, s *schema.Schema) (*schema.Table, error) { 18 | l.LogOperationStart(o) 19 | 20 | // no-op 21 | return nil, nil 22 | } 23 | 24 | func (o *OpDropIndex) Complete(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 25 | l.LogOperationComplete(o) 26 | 27 | return NewDropIndexAction(conn, o.Name).Execute(ctx) 28 | } 29 | 30 | func (o *OpDropIndex) Rollback(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 31 | l.LogOperationRollback(o) 32 | 33 | // no-op 34 | return nil 35 | } 36 | 37 | func (o *OpDropIndex) Validate(ctx context.Context, s *schema.Schema) error { 38 | for _, table := range s.Tables { 39 | _, ok := table.Indexes[o.Name] 40 | if ok { 41 | return nil 42 | } 43 | } 44 | return IndexDoesNotExistError{Name: o.Name} 45 | } 46 | -------------------------------------------------------------------------------- 
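Editor's note (not part of the repository dump): the operations above share the package's three-phase lifecycle: `Start` performs any preparatory work (for a destructive operation like `drop_index` it is a no-op), `Complete` applies the irreversible DDL once clients have moved to the new schema version, and `Rollback` undoes whatever `Start` did. A minimal, hypothetical sketch of driving one operation through that lifecycle, using only the signatures shown in this package:

    var op migrations.Operation = &migrations.OpDropIndex{Name: "idx_fruits_name"}
    if err := op.Validate(ctx, virtualSchema); err != nil {
        return err // reject the migration before touching the database
    }
    if _, err := op.Start(ctx, logger, conn, latestSchema, virtualSchema); err != nil {
        return err
    }
    // ... applications cut over to the new schema version ...
    if err := op.Complete(ctx, logger, conn, virtualSchema); err != nil {
        return err // for drop_index, this is where the index is actually dropped
    }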
/pkg/migrations/op_drop_not_null.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "context" 7 | 8 | "github.com/xataio/pgroll/pkg/db" 9 | "github.com/xataio/pgroll/pkg/schema" 10 | ) 11 | 12 | // OpDropNotNull is an operation that drops the NOT NULL constraint from a column 13 | type OpDropNotNull struct { 14 | Table string `json:"table"` 15 | Column string `json:"column"` 16 | Up string `json:"up"` 17 | Down string `json:"down"` 18 | } 19 | 20 | var _ Operation = (*OpDropNotNull)(nil) 21 | 22 | func (o *OpDropNotNull) Start(ctx context.Context, l Logger, conn db.DB, latestSchema string, s *schema.Schema) (*schema.Table, error) { 23 | l.LogOperationStart(o) 24 | 25 | table := s.GetTable(o.Table) 26 | if table == nil { 27 | return nil, TableDoesNotExistError{Name: o.Table} 28 | } 29 | 30 | return table, nil 31 | } 32 | 33 | func (o *OpDropNotNull) Complete(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 34 | l.LogOperationComplete(o) 35 | return nil 36 | } 37 | 38 | func (o *OpDropNotNull) Rollback(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 39 | l.LogOperationRollback(o) 40 | return nil 41 | } 42 | 43 | func (o *OpDropNotNull) Validate(ctx context.Context, s *schema.Schema) error { 44 | column := s.GetTable(o.Table).GetColumn(o.Column) 45 | if column.Nullable { 46 | return ColumnIsNullableError{Table: o.Table, Name: o.Column} 47 | } 48 | 49 | if o.Down == "" { 50 | return FieldRequiredError{Name: "down"} 51 | } 52 | 53 | return nil 54 | } 55 | -------------------------------------------------------------------------------- /pkg/migrations/op_drop_table.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | 9 | "github.com/xataio/pgroll/pkg/db" 10 | "github.com/xataio/pgroll/pkg/schema" 11 | ) 12 | 13 | var ( 14 | _ Operation = (*OpDropTable)(nil) 15 | _ Createable = (*OpDropTable)(nil) 16 | ) 17 | 18 | func (o *OpDropTable) Start(ctx context.Context, l Logger, conn db.DB, latestSchema string, s *schema.Schema) (*schema.Table, error) { 19 | l.LogOperationStart(o) 20 | 21 | table := s.GetTable(o.Name) 22 | if table == nil { 23 | return nil, TableDoesNotExistError{Name: o.Name} 24 | } 25 | 26 | // Soft-delete the table in order that a create table operation in the same 27 | // migration can create a table with the same name 28 | _, err := conn.ExecContext(ctx, fmt.Sprintf("ALTER TABLE IF EXISTS %s RENAME TO %s", 29 | table.Name, 30 | DeletionName(table.Name))) 31 | if err != nil { 32 | return nil, fmt.Errorf("failed to rename table %s: %w", o.Name, err) 33 | } 34 | 35 | s.RemoveTable(o.Name) 36 | return nil, nil 37 | } 38 | 39 | func (o *OpDropTable) Complete(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 40 | l.LogOperationComplete(o) 41 | 42 | deletionName := DeletionName(o.Name) 43 | 44 | // Perform the actual deletion of the soft-deleted table 45 | return NewDropTableAction(conn, deletionName).Execute(ctx) 46 | } 47 | 48 | func (o *OpDropTable) Rollback(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 49 | l.LogOperationRollback(o) 50 | 51 | // Mark the table as no longer deleted so that it is visible to preceding 52 | // Rollbacks in the same migration 53 | s.UnRemoveTable(o.Name) 54 | 55 | // Rename the table back to its original 
name from its soft-deleted name 56 | table := s.GetTable(o.Name) 57 | _, err := conn.ExecContext(ctx, fmt.Sprintf("ALTER TABLE IF EXISTS %s RENAME TO %s", 58 | DeletionName(table.Name), 59 | table.Name)) 60 | if err != nil { 61 | return fmt.Errorf("failed to rename table %s: %w", o.Name, err) 62 | } 63 | return nil 64 | } 65 | 66 | func (o *OpDropTable) Validate(ctx context.Context, s *schema.Schema) error { 67 | table := s.GetTable(o.Name) 68 | 69 | if table == nil { 70 | return TableDoesNotExistError{Name: o.Name} 71 | } 72 | 73 | s.RemoveTable(table.Name) 74 | return nil 75 | } 76 | -------------------------------------------------------------------------------- /pkg/migrations/op_raw_sql.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "context" 7 | 8 | "github.com/xataio/pgroll/pkg/db" 9 | "github.com/xataio/pgroll/pkg/schema" 10 | ) 11 | 12 | var ( 13 | _ Operation = (*OpRawSQL)(nil) 14 | _ Createable = (*OpRawSQL)(nil) 15 | ) 16 | 17 | func (o *OpRawSQL) Start(ctx context.Context, l Logger, conn db.DB, latestSchema string, s *schema.Schema) (*schema.Table, error) { 18 | l.LogOperationStart(o) 19 | 20 | if o.OnComplete { 21 | return nil, nil 22 | } 23 | 24 | _, err := conn.ExecContext(ctx, o.Up) 25 | return nil, err 26 | } 27 | 28 | func (o *OpRawSQL) Complete(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 29 | l.LogOperationComplete(o) 30 | 31 | if !o.OnComplete { 32 | return nil 33 | } 34 | 35 | _, err := conn.ExecContext(ctx, o.Up) 36 | return err 37 | } 38 | 39 | func (o *OpRawSQL) Rollback(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 40 | l.LogOperationRollback(o) 41 | 42 | if o.Down == "" { 43 | return nil 44 | } 45 | 46 | _, err := conn.ExecContext(ctx, o.Down) 47 | return err 48 | } 49 | 50 | func (o *OpRawSQL) Validate(ctx context.Context, s *schema.Schema) error { 51 | if o.Up == "" { 52 | return EmptyMigrationError{} 53 | } 54 | 55 | if o.OnComplete && o.Down != "" { 56 | return InvalidMigrationError{Reason: "down is not allowed with onComplete"} 57 | } 58 | 59 | return nil 60 | } 61 | 62 | // IsIsolated returns true if the operation is isolated and must be run separately from other operations. 63 | func (o *OpRawSQL) IsIsolated() bool { 64 | return !o.OnComplete 65 | } 66 | 67 | func (o *OpRawSQL) RequiresSchemaRefresh() {} 68 | -------------------------------------------------------------------------------- /pkg/migrations/op_rename_column.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "context" 7 | 8 | "github.com/xataio/pgroll/pkg/db" 9 | "github.com/xataio/pgroll/pkg/schema" 10 | ) 11 | 12 | var _ Operation = (*OpRenameColumn)(nil) 13 | 14 | func (o *OpRenameColumn) Start(ctx context.Context, l Logger, conn db.DB, latestSchema string, s *schema.Schema) (*schema.Table, error) { 15 | l.LogOperationStart(o) 16 | 17 | // Rename the column in the in-memory schema. 18 | table := s.GetTable(o.Table) 19 | if table == nil { 20 | return nil, TableDoesNotExistError{Name: o.Table} 21 | } 22 | column := table.GetColumn(o.From) 23 | if column == nil { 24 | return nil, ColumnDoesNotExistError{Table: o.Table, Name: o.From} 25 | } 26 | table.RenameColumn(o.From, o.To) 27 | 28 | // Update the name of the column in any constraints that reference the 29 | // renamed column.
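// (Editor's note: this keeps the in-memory constraint metadata in step with
// the rename, so that later operations in the same migration refer to the
// column by its new name rather than the old one.)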
30 | table.RenameConstraintColumns(o.From, o.To) 31 | 32 | return nil, nil 33 | } 34 | 35 | func (o *OpRenameColumn) Complete(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 36 | l.LogOperationComplete(o) 37 | return NewRenameColumnAction(conn, o.Table, o.From, o.To).Execute(ctx) 38 | } 39 | 40 | func (o *OpRenameColumn) Rollback(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 41 | l.LogOperationRollback(o) 42 | 43 | // Rename the column back to the original name in the in-memory schema. 44 | table := s.GetTable(o.Table) 45 | table.RenameColumn(o.To, o.From) 46 | 47 | return nil 48 | } 49 | 50 | func (o *OpRenameColumn) Validate(ctx context.Context, s *schema.Schema) error { 51 | table := s.GetTable(o.Table) 52 | 53 | // Ensure that the `from` field is not empty 54 | if o.From == "" { 55 | return FieldRequiredError{Name: "from"} 56 | } 57 | 58 | // Ensure that the `to` field is not empty 59 | if o.To == "" { 60 | return FieldRequiredError{Name: "to"} 61 | } 62 | 63 | // Ensure that the table exists. 64 | if table == nil { 65 | return TableDoesNotExistError{Name: o.Table} 66 | } 67 | 68 | // Ensure that the column exists. 69 | if table.GetColumn(o.From) == nil { 70 | return ColumnDoesNotExistError{Table: o.Table, Name: o.From} 71 | } 72 | 73 | // Ensure that the new column name does not already exist 74 | if table.GetColumn(o.To) != nil { 75 | return ColumnAlreadyExistsError{Table: o.Table, Name: o.To} 76 | } 77 | 78 | // Update the in-memory schema to reflect the column rename so that it is 79 | // visible to subsequent operations' validation steps. 80 | table.RenameColumn(o.From, o.To) 81 | table.RenameConstraintColumns(o.From, o.To) 82 | 83 | return nil 84 | } 85 | -------------------------------------------------------------------------------- /pkg/migrations/op_rename_constraint.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "context" 7 | 8 | "github.com/xataio/pgroll/pkg/db" 9 | "github.com/xataio/pgroll/pkg/schema" 10 | ) 11 | 12 | var _ Operation = (*OpRenameConstraint)(nil) 13 | 14 | func (o *OpRenameConstraint) Start(ctx context.Context, l Logger, conn db.DB, latestSchema string, s *schema.Schema) (*schema.Table, error) { 15 | l.LogOperationStart(o) 16 | 17 | // no-op 18 | return nil, nil 19 | } 20 | 21 | func (o *OpRenameConstraint) Complete(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 22 | l.LogOperationComplete(o) 23 | 24 | // rename the constraint in the underlying table 25 | return NewRenameConstraintAction(conn, o.Table, o.From, o.To).Execute(ctx) 26 | } 27 | 28 | func (o *OpRenameConstraint) Rollback(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 29 | l.LogOperationRollback(o) 30 | 31 | // no-op 32 | return nil 33 | } 34 | 35 | func (o *OpRenameConstraint) Validate(ctx context.Context, s *schema.Schema) error { 36 | table := s.GetTable(o.Table) 37 | 38 | if table == nil { 39 | return TableDoesNotExistError{Name: o.Table} 40 | } 41 | 42 | if !table.ConstraintExists(o.From) { 43 | return ConstraintDoesNotExistError{Table: o.Table, Constraint: o.From} 44 | } 45 | 46 | if table.ConstraintExists(o.To) { 47 | return ConstraintAlreadyExistsError{Table: o.Table, Constraint: o.To} 48 | } 49 | 50 | if err := ValidateIdentifierLength(o.To); err != nil { 51 | return err 52 | } 53 | 54 | return nil 55 | } 56 | 
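The rename operations above touch only the in-memory schema during Start and defer the physical ALTER TABLE ... RENAME to Complete. A minimal sketch of driving such an operation, reusing the Migration/Start pattern from the roll package tests later in this listing (names and values are illustrative, and m is assumed to be a *roll.Roll):

	mig := &migrations.Migration{
		Name: "02_rename_constraint",
		Operations: migrations.Operations{
			&migrations.OpRenameConstraint{Table: "foo", From: "bar", To: "baz"},
		},
	}
	if err := m.Start(ctx, mig, backfill.NewConfig()); err != nil {
		// handle the error
	}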
-------------------------------------------------------------------------------- /pkg/migrations/op_rename_table.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | 9 | "github.com/lib/pq" 10 | 11 | "github.com/xataio/pgroll/pkg/db" 12 | "github.com/xataio/pgroll/pkg/schema" 13 | ) 14 | 15 | var _ Operation = (*OpRenameTable)(nil) 16 | 17 | func (o *OpRenameTable) Start(ctx context.Context, l Logger, conn db.DB, latestSchema string, s *schema.Schema) (*schema.Table, error) { 18 | l.LogOperationStart(o) 19 | 20 | return nil, s.RenameTable(o.From, o.To) 21 | } 22 | 23 | func (o *OpRenameTable) Complete(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 24 | l.LogOperationComplete(o) 25 | 26 | _, err := conn.ExecContext(ctx, fmt.Sprintf("ALTER TABLE IF EXISTS %s RENAME TO %s", 27 | pq.QuoteIdentifier(o.From), 28 | pq.QuoteIdentifier(o.To))) 29 | 30 | return err 31 | } 32 | 33 | func (o *OpRenameTable) Rollback(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 34 | l.LogOperationRollback(o) 35 | 36 | s.RenameTable(o.To, o.From) 37 | return nil 38 | } 39 | 40 | func (o *OpRenameTable) Validate(ctx context.Context, s *schema.Schema) error { 41 | if s.GetTable(o.From) == nil { 42 | return TableDoesNotExistError{Name: o.From} 43 | } 44 | if s.GetTable(o.To) != nil { 45 | return TableAlreadyExistsError{Name: o.To} 46 | } 47 | if err := ValidateIdentifierLength(o.To); err != nil { 48 | return err 49 | } 50 | 51 | s.RenameTable(o.From, o.To) 52 | return nil 53 | } 54 | -------------------------------------------------------------------------------- /pkg/migrations/op_set_check.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | 9 | "github.com/xataio/pgroll/pkg/db" 10 | "github.com/xataio/pgroll/pkg/schema" 11 | ) 12 | 13 | type OpSetCheckConstraint struct { 14 | Table string `json:"table"` 15 | Column string `json:"column"` 16 | Check CheckConstraint `json:"check"` 17 | Up string `json:"up"` 18 | Down string `json:"down"` 19 | } 20 | 21 | var _ Operation = (*OpSetCheckConstraint)(nil) 22 | 23 | func (o *OpSetCheckConstraint) Start(ctx context.Context, l Logger, conn db.DB, latestSchema string, s *schema.Schema) (*schema.Table, error) { 24 | l.LogOperationStart(o) 25 | 26 | table := s.GetTable(o.Table) 27 | if table == nil { 28 | return nil, TableDoesNotExistError{Name: o.Table} 29 | } 30 | 31 | // Add the check constraint to the new column as NOT VALID. 
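// (NOT VALID keeps the ALTER TABLE cheap: Postgres enforces the new check
// for subsequent writes but does not scan existing rows; the backlog is
// checked later when Complete validates the constraint. The trailing `true`
// argument to the action below is presumably the flag selecting NOT VALID.)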
32 | if err := NewCreateCheckConstraintAction(conn, table.Name, o.Check.Name, o.Check.Constraint, []string{o.Column}, o.Check.NoInherit, true).Execute(ctx); err != nil { 33 | return nil, fmt.Errorf("failed to add check constraint: %w", err) 34 | } 35 | 36 | return table, nil 37 | } 38 | 39 | func (o *OpSetCheckConstraint) Complete(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 40 | l.LogOperationComplete(o) 41 | 42 | // Validate the check constraint 43 | err := NewValidateConstraintAction(conn, o.Table, o.Check.Name).Execute(ctx) 44 | if err != nil { 45 | return err 46 | } 47 | 48 | return nil 49 | } 50 | 51 | func (o *OpSetCheckConstraint) Rollback(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 52 | l.LogOperationRollback(o) 53 | 54 | return nil 55 | } 56 | 57 | func (o *OpSetCheckConstraint) Validate(ctx context.Context, s *schema.Schema) error { 58 | if err := o.Check.Validate(); err != nil { 59 | return CheckConstraintError{ 60 | Table: o.Table, 61 | Column: o.Column, 62 | Err: err, 63 | } 64 | } 65 | 66 | table := s.GetTable(o.Table) 67 | if table == nil { 68 | return TableDoesNotExistError{Name: o.Table} 69 | } 70 | 71 | if table.ConstraintExists(o.Check.Name) { 72 | return ConstraintAlreadyExistsError{ 73 | Table: table.Name, 74 | Constraint: o.Check.Name, 75 | } 76 | } 77 | 78 | if o.Up == "" { 79 | return FieldRequiredError{Name: "up"} 80 | } 81 | 82 | if o.Down == "" { 83 | return FieldRequiredError{Name: "down"} 84 | } 85 | 86 | return nil 87 | } 88 | -------------------------------------------------------------------------------- /pkg/migrations/op_set_comment.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "context" 7 | 8 | "github.com/xataio/pgroll/pkg/db" 9 | "github.com/xataio/pgroll/pkg/schema" 10 | ) 11 | 12 | // OpSetComment is an operation that sets a comment on an object.
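// A minimal sketch of the operation as a Go literal (values are
// illustrative and ptr is a hypothetical helper returning a pointer to its
// argument; Comment is a *string, presumably so that a nil value can clear
// an existing comment):
//
//	op := &OpSetComment{Table: "users", Column: "bio", Comment: ptr("free-form text"), Up: "bio", Down: "bio"}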
13 | type OpSetComment struct { 14 | Table string `json:"table"` 15 | Column string `json:"column"` 16 | Comment *string `json:"comment"` 17 | Up string `json:"up"` 18 | Down string `json:"down"` 19 | } 20 | 21 | var _ Operation = (*OpSetComment)(nil) 22 | 23 | func (o *OpSetComment) Start(ctx context.Context, l Logger, conn db.DB, latestSchema string, s *schema.Schema) (*schema.Table, error) { 24 | l.LogOperationStart(o) 25 | 26 | tbl := s.GetTable(o.Table) 27 | if tbl == nil { 28 | return nil, TableDoesNotExistError{Name: o.Table} 29 | } 30 | 31 | return tbl, NewCommentColumnAction(conn, o.Table, TemporaryName(o.Column), o.Comment).Execute(ctx) 32 | } 33 | 34 | func (o *OpSetComment) Complete(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 35 | l.LogOperationComplete(o) 36 | 37 | return NewCommentColumnAction(conn, o.Table, o.Column, o.Comment).Execute(ctx) 38 | } 39 | 40 | func (o *OpSetComment) Rollback(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 41 | l.LogOperationRollback(o) 42 | 43 | return nil 44 | } 45 | 46 | func (o *OpSetComment) Validate(ctx context.Context, s *schema.Schema) error { 47 | return nil 48 | } 49 | -------------------------------------------------------------------------------- /pkg/migrations/op_set_default.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | 9 | "github.com/lib/pq" 10 | 11 | "github.com/xataio/pgroll/pkg/db" 12 | "github.com/xataio/pgroll/pkg/schema" 13 | ) 14 | 15 | type OpSetDefault struct { 16 | Table string `json:"table"` 17 | Column string `json:"column"` 18 | Default *string `json:"default"` 19 | Up string `json:"up"` 20 | Down string `json:"down"` 21 | } 22 | 23 | var _ Operation = (*OpSetDefault)(nil) 24 | 25 | func (o *OpSetDefault) Start(ctx context.Context, l Logger, conn db.DB, latestSchema string, s *schema.Schema) (*schema.Table, error) { 26 | l.LogOperationStart(o) 27 | 28 | table := s.GetTable(o.Table) 29 | if table == nil { 30 | return nil, TableDoesNotExistError{Name: o.Table} 31 | } 32 | column := table.GetColumn(o.Column) 33 | if column == nil { 34 | return nil, ColumnDoesNotExistError{Table: o.Table, Name: o.Column} 35 | } 36 | 37 | var err error 38 | if o.Default == nil { 39 | _, err = conn.ExecContext(ctx, fmt.Sprintf(`ALTER TABLE %s ALTER COLUMN %s DROP DEFAULT`, 40 | pq.QuoteIdentifier(table.Name), 41 | pq.QuoteIdentifier(column.Name))) 42 | } else { 43 | _, err = conn.ExecContext(ctx, fmt.Sprintf(`ALTER TABLE %s ALTER COLUMN %s SET DEFAULT %s`, 44 | pq.QuoteIdentifier(table.Name), 45 | pq.QuoteIdentifier(column.Name), 46 | *o.Default)) 47 | } 48 | if err != nil { 49 | return nil, err 50 | } 51 | 52 | return table, nil 53 | } 54 | 55 | func (o *OpSetDefault) Complete(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 56 | l.LogOperationComplete(o) 57 | 58 | return nil 59 | } 60 | 61 | func (o *OpSetDefault) Rollback(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 62 | l.LogOperationRollback(o) 63 | 64 | return nil 65 | } 66 | 67 | func (o *OpSetDefault) Validate(ctx context.Context, s *schema.Schema) error { 68 | return nil 69 | } 70 | -------------------------------------------------------------------------------- /pkg/migrations/op_set_notnull.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package 
migrations 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | 9 | "github.com/lib/pq" 10 | 11 | "github.com/xataio/pgroll/pkg/db" 12 | "github.com/xataio/pgroll/pkg/schema" 13 | ) 14 | 15 | type OpSetNotNull struct { 16 | Table string `json:"table"` 17 | Column string `json:"column"` 18 | Up string `json:"up"` 19 | Down string `json:"down"` 20 | } 21 | 22 | var _ Operation = (*OpSetNotNull)(nil) 23 | 24 | func (o *OpSetNotNull) Start(ctx context.Context, l Logger, conn db.DB, latestSchema string, s *schema.Schema) (*schema.Table, error) { 25 | l.LogOperationStart(o) 26 | 27 | table := s.GetTable(o.Table) 28 | if table == nil { 29 | return nil, TableDoesNotExistError{Name: o.Table} 30 | } 31 | column := table.GetColumn(o.Column) 32 | if column == nil { 33 | return nil, ColumnDoesNotExistError{Table: o.Table, Name: o.Column} 34 | } 35 | 36 | // Add an unchecked NOT NULL constraint to the new column. 37 | if err := addNotNullConstraint(ctx, conn, table.Name, o.Column, column.Name); err != nil { 38 | return nil, fmt.Errorf("failed to add not null constraint: %w", err) 39 | } 40 | 41 | return table, nil 42 | } 43 | 44 | func (o *OpSetNotNull) Complete(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 45 | l.LogOperationComplete(o) 46 | 47 | // Validate the NOT NULL constraint on the old column. 48 | // The constraint must be valid because: 49 | // * Existing NULL values in the old column were rewritten using the `up` SQL during backfill. 50 | // * New NULL values written to the old column during the migration period were also rewritten using `up` SQL. 51 | err := NewValidateConstraintAction(conn, o.Table, NotNullConstraintName(o.Column)).Execute(ctx) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | // Use the validated constraint to add `NOT NULL` to the new column 57 | _, err = conn.ExecContext(ctx, fmt.Sprintf("ALTER TABLE IF EXISTS %s ALTER COLUMN %s SET NOT NULL", 58 | pq.QuoteIdentifier(o.Table), 59 | pq.QuoteIdentifier(TemporaryName(o.Column)))) 60 | if err != nil { 61 | return err 62 | } 63 | 64 | // Drop the NOT NULL constraint 65 | err = NewDropConstraintAction(conn, o.Table, NotNullConstraintName(o.Column)).Execute(ctx) 66 | if err != nil { 67 | return err 68 | } 69 | 70 | return nil 71 | } 72 | 73 | func (o *OpSetNotNull) Rollback(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 74 | l.LogOperationRollback(o) 75 | 76 | return nil 77 | } 78 | 79 | func (o *OpSetNotNull) Validate(ctx context.Context, s *schema.Schema) error { 80 | column := s.GetTable(o.Table).GetColumn(o.Column) 81 | 82 | if !column.Nullable { 83 | return ColumnIsNotNullableError{Table: o.Table, Name: o.Column} 84 | } 85 | 86 | if o.Up == "" { 87 | return FieldRequiredError{Name: "up"} 88 | } 89 | 90 | return nil 91 | } 92 | -------------------------------------------------------------------------------- /pkg/migrations/op_set_replica_identity.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | "slices" 9 | "strings" 10 | 11 | "github.com/lib/pq" 12 | 13 | "github.com/xataio/pgroll/pkg/db" 14 | "github.com/xataio/pgroll/pkg/schema" 15 | ) 16 | 17 | var _ Operation = (*OpSetReplicaIdentity)(nil) 18 | 19 | func (o *OpSetReplicaIdentity) Start(ctx context.Context, l Logger, conn db.DB, latestSchema string, s *schema.Schema) (*schema.Table, error) { 20 | l.LogOperationStart(o) 21 | 22 | // build the correct form of the `SET REPLICA 
IDENTITY` statement based on the identity type 23 | identitySQL := strings.ToUpper(o.Identity.Type) 24 | if identitySQL == "INDEX" { 25 | identitySQL = fmt.Sprintf("USING INDEX %s", pq.QuoteIdentifier(o.Identity.Index)) 26 | } 27 | 28 | // set the replica identity on the underlying table 29 | _, err := conn.ExecContext(ctx, fmt.Sprintf("ALTER TABLE %s REPLICA IDENTITY %s", 30 | pq.QuoteIdentifier(o.Table), 31 | identitySQL)) 32 | return nil, err 33 | } 34 | 35 | func (o *OpSetReplicaIdentity) Complete(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 36 | l.LogOperationComplete(o) 37 | 38 | // No-op 39 | return nil 40 | } 41 | 42 | func (o *OpSetReplicaIdentity) Rollback(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 43 | l.LogOperationRollback(o) 44 | 45 | // No-op 46 | return nil 47 | } 48 | 49 | func (o *OpSetReplicaIdentity) Validate(ctx context.Context, s *schema.Schema) error { 50 | identityType := strings.ToUpper(o.Identity.Type) 51 | 52 | table := s.GetTable(o.Table) 53 | if table == nil { 54 | return TableDoesNotExistError{Name: o.Table} 55 | } 56 | 57 | identities := []string{"NOTHING", "DEFAULT", "INDEX", "FULL"} 58 | if !slices.Contains(identities, identityType) { 59 | return InvalidReplicaIdentityError{Table: o.Table, Identity: o.Identity.Type} 60 | } 61 | 62 | if identityType == "INDEX" { 63 | if _, ok := table.Indexes[o.Identity.Index]; !ok { 64 | return IndexDoesNotExistError{Name: o.Identity.Index} 65 | } 66 | } 67 | 68 | return nil 69 | } 70 | -------------------------------------------------------------------------------- /pkg/migrations/op_set_unique.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | 9 | "github.com/lib/pq" 10 | 11 | "github.com/xataio/pgroll/pkg/db" 12 | "github.com/xataio/pgroll/pkg/schema" 13 | ) 14 | 15 | type OpSetUnique struct { 16 | Name string `json:"name"` 17 | Table string `json:"table"` 18 | Column string `json:"column"` 19 | Up string `json:"up"` 20 | Down string `json:"down"` 21 | } 22 | 23 | var _ Operation = (*OpSetUnique)(nil) 24 | 25 | func (o *OpSetUnique) Start(ctx context.Context, l Logger, conn db.DB, latestSchema string, s *schema.Schema) (*schema.Table, error) { 26 | l.LogOperationStart(o) 27 | 28 | table := s.GetTable(o.Table) 29 | if table == nil { 30 | return nil, TableDoesNotExistError{Name: o.Table} 31 | } 32 | column := table.GetColumn(o.Column) 33 | if column == nil { 34 | return nil, ColumnDoesNotExistError{Table: o.Table, Name: o.Column} 35 | } 36 | 37 | return table, NewCreateUniqueIndexConcurrentlyAction(conn, s.Name, o.Name, table.Name, column.Name).Execute(ctx) 38 | } 39 | 40 | func (o *OpSetUnique) Complete(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 41 | l.LogOperationComplete(o) 42 | 43 | // Create a unique constraint using the unique index 44 | _, err := conn.ExecContext(ctx, fmt.Sprintf("ALTER TABLE IF EXISTS %s ADD CONSTRAINT %s UNIQUE USING INDEX %s", 45 | pq.QuoteIdentifier(o.Table), 46 | pq.QuoteIdentifier(o.Name), 47 | pq.QuoteIdentifier(o.Name))) 48 | if err != nil { 49 | return err 50 | } 51 | 52 | return nil 53 | } 54 | 55 | func (o *OpSetUnique) Rollback(ctx context.Context, l Logger, conn db.DB, s *schema.Schema) error { 56 | l.LogOperationRollback(o) 57 | 58 | return nil 59 | } 60 | 61 | func (o *OpSetUnique) Validate(ctx context.Context, s *schema.Schema) error { 62 | if o.Name == "" { 63 |
return FieldRequiredError{Name: "name"} 64 | } 65 | 66 | table := s.GetTable(o.Table) 67 | if table == nil { 68 | return TableDoesNotExistError{Name: o.Table} 69 | } 70 | 71 | if table.GetColumn(o.Column) == nil { 72 | return ColumnDoesNotExistError{Table: o.Table, Name: o.Column} 73 | } 74 | 75 | if table.ConstraintExists(o.Name) { 76 | return ConstraintAlreadyExistsError{ 77 | Table: table.Name, 78 | Constraint: o.Name, 79 | } 80 | } 81 | 82 | return nil 83 | } 84 | -------------------------------------------------------------------------------- /pkg/migrations/templates/function.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package templates 4 | 5 | const Function = `CREATE OR REPLACE FUNCTION {{ .Name | qi }}() 6 | RETURNS TRIGGER 7 | LANGUAGE PLPGSQL 8 | AS $$ 9 | DECLARE 10 | {{- $schemaName := .SchemaName }} 11 | {{- $tableName := .TableName }} 12 | {{ range $name, $col := .Columns }} 13 | {{- $name | qi }} {{ $schemaName | qi }}.{{ $tableName | qi}}.{{ $col.Name | qi }}%TYPE := NEW.{{ $col.Name | qi }}; 14 | {{ end -}} 15 | latest_schema text; 16 | search_path text; 17 | BEGIN 18 | SELECT current_setting 19 | INTO search_path 20 | FROM current_setting('search_path'); 21 | 22 | IF search_path {{- if eq .Direction "up" }} != {{- else }} = {{- end }} {{ .LatestSchema | ql }} THEN 23 | NEW.{{ .PhysicalColumn | qi }} = {{ .SQL }}; 24 | NEW.{{ .NeedsBackfillColumn | qi }} = false; 25 | END IF; 26 | 27 | RETURN NEW; 28 | END; $$ 29 | ` 30 | -------------------------------------------------------------------------------- /pkg/migrations/templates/trigger.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package templates 4 | 5 | const Trigger = `CREATE OR REPLACE TRIGGER {{ .Name | qi }} 6 | BEFORE UPDATE OR INSERT 7 | ON {{ .TableName | qi }} 8 | FOR EACH ROW 9 | EXECUTE PROCEDURE {{ .Name | qi }}(); 10 | ` 11 | -------------------------------------------------------------------------------- /pkg/migrations/unique.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | // Validate validates the UniqueConstraint 6 | func (c *UniqueConstraint) Validate() error { 7 | if c.Name == "" { 8 | return FieldRequiredError{Name: "name"} 9 | } 10 | 11 | return nil 12 | } 13 | -------------------------------------------------------------------------------- /pkg/migrations/writer.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package migrations 4 | 5 | import ( 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | "io" 10 | 11 | "sigs.k8s.io/yaml" 12 | ) 13 | 14 | type MigrationFormat int 15 | 16 | const ( 17 | YAMLMigrationFormat MigrationFormat = iota 18 | JSONMigrationFormat 19 | ) 20 | 21 | var ErrInvalidMigrationFormat = errors.New("invalid migration format") 22 | 23 | // MigrationWriter is responsible for writing Migrations and RawMigrations 24 | // to the configured io.Writer instance in either YAML or JSON. 
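// A typical use, sketched (os.Stdout and YAML are chosen purely for
// illustration):
//
//	w := NewWriter(os.Stdout, NewMigrationFormat(false)) // false selects YAML
//	if err := w.Write(migration); err != nil {
//		// handle the error
//	}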
25 | type MigrationWriter struct { 26 | writer io.Writer 27 | format MigrationFormat 28 | } 29 | 30 | // NewMigrationFormat returns YAML or JSON format 31 | func NewMigrationFormat(useJSON bool) MigrationFormat { 32 | if useJSON { 33 | return JSONMigrationFormat 34 | } 35 | return YAMLMigrationFormat 36 | } 37 | 38 | // Extension returns the extension name for the migration file 39 | func (f MigrationFormat) Extension() string { 40 | switch f { 41 | case YAMLMigrationFormat: 42 | return "yaml" 43 | case JSONMigrationFormat: 44 | return "json" 45 | } 46 | return "" 47 | } 48 | 49 | // NewWriter creates a new MigrationWriter 50 | func NewWriter(w io.Writer, f MigrationFormat) *MigrationWriter { 51 | return &MigrationWriter{ 52 | writer: w, 53 | format: f, 54 | } 55 | } 56 | 57 | // Write writes a migration.Migration to the configured writer. 58 | func (w *MigrationWriter) Write(m *Migration) error { 59 | return w.writeAny(m) 60 | } 61 | 62 | // WriteRaw writes a migration.RawMigration to the configured writer. 63 | func (w *MigrationWriter) WriteRaw(m *RawMigration) error { 64 | return w.writeAny(m) 65 | } 66 | 67 | func (w *MigrationWriter) writeAny(migration any) error { 68 | switch w.format { 69 | case YAMLMigrationFormat: 70 | yml, err := yaml.Marshal(migration) 71 | if err != nil { 72 | return err 73 | } 74 | _, err = w.writer.Write(yml) 75 | if err != nil { 76 | return fmt.Errorf("encode yaml migration: %w", err) 77 | } 78 | case JSONMigrationFormat: 79 | enc := json.NewEncoder(w.writer) 80 | enc.SetIndent("", " ") 81 | if err := enc.Encode(migration); err != nil { 82 | return fmt.Errorf("encode json migration: %w", err) 83 | } 84 | default: 85 | return ErrInvalidMigrationFormat 86 | } 87 | return nil 88 | } 89 | -------------------------------------------------------------------------------- /pkg/roll/baseline.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package roll 4 | 5 | import ( 6 | "context" 7 | ) 8 | 9 | // CreateBaseline creates a baseline migration for an existing database schema. 10 | // This is used when starting pgroll with an existing database - it captures 11 | // the current schema state as a baseline version without applying any changes. 12 | // Future migrations will build upon this baseline version.
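// For example, capturing the current state of a schema as its first
// version, mirroring the usage in the baseline tests below:
//
//	if err := m.CreateBaseline(ctx, "01_initial_version"); err != nil {
//		// handle the error
//	}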
13 | func (m *Roll) CreateBaseline(ctx context.Context, baselineVersion string) error { 14 | // Log the operation 15 | m.logger.Info("Creating baseline version %q for schema %q", baselineVersion, m.schema) 16 | 17 | // Delegate to state to create the actual baseline migration record 18 | return m.state.CreateBaseline(ctx, m.schema, baselineVersion) 19 | } 20 | -------------------------------------------------------------------------------- /pkg/roll/baseline_test.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package roll_test 4 | 5 | import ( 6 | "context" 7 | "database/sql" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/require" 11 | "github.com/xataio/pgroll/internal/testutils" 12 | "github.com/xataio/pgroll/pkg/roll" 13 | "github.com/xataio/pgroll/pkg/schema" 14 | "github.com/xataio/pgroll/pkg/state" 15 | ) 16 | 17 | func TestBaseline(t *testing.T) { 18 | t.Parallel() 19 | 20 | t.Run("baseline migration captures the current schema", func(t *testing.T) { 21 | testutils.WithMigratorAndStateAndConnectionToContainerWithOptions(t, nil, func(roll *roll.Roll, st *state.State, db *sql.DB) { 22 | ctx := context.Background() 23 | 24 | // Create a table in the database to simulate an existing schema 25 | _, err := db.ExecContext(ctx, "CREATE TABLE users (id int)") 26 | require.NoError(t, err) 27 | 28 | // Create a baseline migration 29 | err = roll.CreateBaseline(ctx, "01_initial_version") 30 | require.NoError(t, err) 31 | 32 | // Get the captured database schema after the baseline migration was applied 33 | sc, err := st.SchemaAfterMigration(ctx, "public", "01_initial_version") 34 | require.NoError(t, err) 35 | 36 | // Define the expected schema 37 | wantSchema := &schema.Schema{ 38 | Name: "public", 39 | Tables: map[string]*schema.Table{ 40 | "users": { 41 | Name: "users", 42 | Columns: map[string]*schema.Column{ 43 | "id": { 44 | Name: "id", 45 | Type: "integer", 46 | Nullable: true, 47 | PostgresType: "base", 48 | }, 49 | }, 50 | PrimaryKey: []string{}, 51 | Indexes: map[string]*schema.Index{}, 52 | ForeignKeys: map[string]*schema.ForeignKey{}, 53 | CheckConstraints: map[string]*schema.CheckConstraint{}, 54 | UniqueConstraints: map[string]*schema.UniqueConstraint{}, 55 | ExcludeConstraints: map[string]*schema.ExcludeConstraint{}, 56 | }, 57 | }, 58 | } 59 | 60 | // Clear OIDs from the schema to avoid comparison issues 61 | clearOIDS(sc) 62 | 63 | // Assert that the schema matches the expected schema 64 | require.Equal(t, wantSchema, sc) 65 | }) 66 | }) 67 | } 68 | 69 | func clearOIDS(s *schema.Schema) { 70 | for k := range s.Tables { 71 | c := s.Tables[k] 72 | c.OID = "" 73 | s.Tables[k] = c 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /pkg/roll/latest.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package roll 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | "io/fs" 9 | 10 | "github.com/xataio/pgroll/pkg/migrations" 11 | ) 12 | 13 | var ( 14 | ErrNoMigrationFiles = fmt.Errorf("no migration files found") 15 | ErrNoMigrationApplied = fmt.Errorf("no migrations applied") 16 | ) 17 | 18 | // LatestVersionLocal returns the name of the last migration in `dir`, where the 19 | // migration files are lexicographically ordered by filename.
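// For example, given files 01_a.json, 02_b.json and 03_c.json, the name of
// the migration defined in 03_c.json is returned. A sketch (os.DirFS is one
// way to obtain the fs.FS):
//
//	latest, err := roll.LatestVersionLocal(ctx, os.DirFS("migrations"))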
20 | func LatestVersionLocal(ctx context.Context, dir fs.FS) (string, error) { 21 | files, err := migrations.CollectFilesFromDir(dir) 22 | if err != nil { 23 | return "", fmt.Errorf("getting migration files from dir: %w", err) 24 | } 25 | 26 | if len(files) == 0 { 27 | return "", ErrNoMigrationFiles 28 | } 29 | 30 | latest := files[len(files)-1] 31 | 32 | migration, err := migrations.ReadMigration(dir, latest) 33 | if err != nil { 34 | return "", fmt.Errorf("reading migration file %q: %w", latest, err) 35 | } 36 | 37 | return migration.Name, nil 38 | } 39 | 40 | // LatestVersionRemote returns the name of the last migration to have been 41 | // applied to the target schema. 42 | func (m *Roll) LatestVersionRemote(ctx context.Context) (string, error) { 43 | latestVersion, err := m.State().LatestVersion(ctx, m.Schema()) 44 | if err != nil { 45 | return "", fmt.Errorf("failed to get latest version: %w", err) 46 | } 47 | 48 | if latestVersion == nil { 49 | return "", ErrNoMigrationApplied 50 | } 51 | 52 | return *latestVersion, nil 53 | } 54 | -------------------------------------------------------------------------------- /pkg/roll/latest_test.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package roll_test 4 | 5 | import ( 6 | "context" 7 | "database/sql" 8 | "testing" 9 | "testing/fstest" 10 | 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/require" 13 | "github.com/xataio/pgroll/internal/testutils" 14 | "github.com/xataio/pgroll/pkg/backfill" 15 | "github.com/xataio/pgroll/pkg/migrations" 16 | "github.com/xataio/pgroll/pkg/roll" 17 | ) 18 | 19 | func TestLatestVersionLocal(t *testing.T) { 20 | t.Parallel() 21 | 22 | t.Run("returns the name of the last migration in the directory", func(t *testing.T) { 23 | fs := fstest.MapFS{ 24 | "01_migration_1.json": &fstest.MapFile{Data: exampleMigration(t, "01_migration_1")}, 25 | "02_migration_2.json": &fstest.MapFile{Data: exampleMigration(t, "02_migration_2")}, 26 | "03_migration_3.json": &fstest.MapFile{Data: exampleMigration(t, "03_migration_3")}, 27 | } 28 | 29 | ctx := context.Background() 30 | 31 | // Get the latest migration in the directory 32 | latest, err := roll.LatestVersionLocal(ctx, fs) 33 | require.NoError(t, err) 34 | 35 | // Assert last migration name 36 | assert.Equal(t, "03_migration_3", latest) 37 | }) 38 | 39 | t.Run("returns an error if the directory is empty", func(t *testing.T) { 40 | fs := fstest.MapFS{} 41 | 42 | ctx := context.Background() 43 | 44 | // Get the latest migration in the directory 45 | _, err := roll.LatestVersionLocal(ctx, fs) 46 | 47 | // Assert expected error 48 | assert.ErrorIs(t, err, roll.ErrNoMigrationFiles) 49 | }) 50 | } 51 | 52 | func TestLatestVersionRemote(t *testing.T) { 53 | t.Parallel() 54 | 55 | t.Run("returns the name of the latest version in the target schema", func(t *testing.T) { 56 | testutils.WithMigratorAndConnectionToContainer(t, func(m *roll.Roll, _ *sql.DB) { 57 | ctx := context.Background() 58 | 59 | // Start and complete a migration 60 | err := m.Start(ctx, &migrations.Migration{ 61 | Name: "01_first_migration", 62 | Operations: migrations.Operations{ 63 | &migrations.OpRawSQL{Up: "SELECT 1"}, 64 | }, 65 | }, backfill.NewConfig()) 66 | require.NoError(t, err) 67 | err = m.Complete(ctx) 68 | require.NoError(t, err) 69 | 70 | // Get the latest version in the target schema 71 | latestVersion, err := m.LatestVersionRemote(ctx) 72 | require.NoError(t, err) 73 | 74 | // 
Assert latest migration name 75 | assert.Equal(t, "01_first_migration", latestVersion) 76 | }) 77 | }) 78 | 79 | t.Run("returns an error if no migrations have been applied", func(t *testing.T) { 80 | testutils.WithMigratorAndConnectionToContainer(t, func(m *roll.Roll, _ *sql.DB) { 81 | ctx := context.Background() 82 | 83 | // Get the latest version in the target schema 84 | _, err := m.LatestVersionRemote(ctx) 85 | 86 | // Assert expected error 87 | assert.ErrorIs(t, err, roll.ErrNoMigrationApplied) 88 | }) 89 | }) 90 | } 91 | -------------------------------------------------------------------------------- /pkg/roll/missing.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package roll 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | "io/fs" 9 | 10 | "github.com/xataio/pgroll/pkg/migrations" 11 | ) 12 | 13 | // MissingMigrations returns the slice of migrations that have been applied to 14 | // the target database but are missing from the local migrations directory 15 | // `dir`. 16 | func (m *Roll) MissingMigrations(ctx context.Context, dir fs.FS) ([]*migrations.RawMigration, error) { 17 | // Determine the latest version of the database 18 | latestVersion, err := m.State().LatestVersion(ctx, m.Schema()) 19 | if err != nil { 20 | return nil, fmt.Errorf("determining latest version: %w", err) 21 | } 22 | 23 | // If no migrations are applied, return a nil slice 24 | if latestVersion == nil { 25 | return nil, nil 26 | } 27 | 28 | // Collect all migration files from the directory 29 | files, err := migrations.CollectFilesFromDir(dir) 30 | if err != nil { 31 | return nil, fmt.Errorf("reading migration files: %w", err) 32 | } 33 | 34 | // Create a set of local migration names for fast lookup 35 | localMigNames := make(map[string]struct{}, len(files)) 36 | for _, file := range files { 37 | mig, err := migrations.ReadRawMigration(dir, file) 38 | if err != nil { 39 | return nil, fmt.Errorf("reading migration file %s: %w", file, err) 40 | } 41 | localMigNames[mig.Name] = struct{}{} 42 | } 43 | 44 | // Get the full schema history from the database 45 | history, err := m.State().SchemaHistory(ctx, m.Schema()) 46 | if err != nil { 47 | return nil, fmt.Errorf("reading schema history: %w", err) 48 | } 49 | 50 | // Find all migrations that have been applied to the database but are missing 51 | // from the local directory 52 | migs := make([]*migrations.RawMigration, 0, len(history)) 53 | for _, h := range history { 54 | if _, ok := localMigNames[h.Migration.Name]; ok { 55 | continue 56 | } 57 | migs = append(migs, &h.Migration) 58 | } 59 | 60 | return migs, nil 61 | } 62 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/convert.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package sql2pgroll 4 | 5 | import ( 6 | "fmt" 7 | 8 | pgq "github.com/xataio/pg_query_go/v6" 9 | 10 | "github.com/xataio/pgroll/pkg/migrations" 11 | ) 12 | 13 | // Convert converts a SQL statement to a slice of pgroll operations.
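// For example, a sketch of the rename case exercised by the package tests
// below:
//
//	ops, err := sql2pgroll.Convert("ALTER TABLE foo RENAME TO bar")
//	// ops is migrations.Operations{&migrations.OpRenameTable{From: "foo", To: "bar"}}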
14 | func Convert(sql string) (migrations.Operations, error) { 15 | tree, err := pgq.Parse(sql) 16 | if err != nil { 17 | return nil, fmt.Errorf("parse error: %w", err) 18 | } 19 | 20 | var migOps migrations.Operations 21 | stmts := tree.GetStmts() 22 | for i, stmt := range stmts { 23 | if stmt.GetStmt() == nil { 24 | continue 25 | } 26 | node := stmts[i].GetStmt().GetNode() 27 | var ops migrations.Operations 28 | var err error 29 | switch node := (node).(type) { 30 | case *pgq.Node_CreateStmt: 31 | ops, err = convertCreateStmt(node.CreateStmt) 32 | case *pgq.Node_AlterTableStmt: 33 | ops, err = convertAlterTableStmt(node.AlterTableStmt) 34 | case *pgq.Node_RenameStmt: 35 | ops, err = convertRenameStmt(node.RenameStmt) 36 | case *pgq.Node_DropStmt: 37 | ops, err = convertDropStatement(node.DropStmt) 38 | case *pgq.Node_IndexStmt: 39 | ops, err = convertCreateIndexStmt(node.IndexStmt) 40 | default: 41 | // The SQL statement cannot be converted to a pgroll operation, 42 | // so fall back to a raw SQL operation 43 | ops = makeRawSQLOperation(sql, i) 44 | } 45 | if err != nil { 46 | return nil, err 47 | } 48 | if ops == nil { 49 | ops = makeRawSQLOperation(sql, i) 50 | } 51 | migOps = append(migOps, ops...) 52 | } 53 | return migOps, nil 54 | } 55 | 56 | func makeRawSQLOperation(sql string, idx int) migrations.Operations { 57 | stmts, err := pgq.SplitWithParser(sql, true) 58 | if err != nil { 59 | return migrations.Operations{ 60 | &migrations.OpRawSQL{Up: sql}, 61 | } 62 | } 63 | return migrations.Operations{ 64 | &migrations.OpRawSQL{Up: stmts[idx]}, 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/drop.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package sql2pgroll 4 | 5 | import ( 6 | "strings" 7 | 8 | pgq "github.com/xataio/pg_query_go/v6" 9 | 10 | "github.com/xataio/pgroll/pkg/migrations" 11 | ) 12 | 13 | // convertDropStatement converts supported drop statements to pgroll operations 14 | func convertDropStatement(stmt *pgq.DropStmt) (migrations.Operations, error) { 15 | switch stmt.RemoveType { 16 | case pgq.ObjectType_OBJECT_INDEX: 17 | return convertDropIndexStatement(stmt) 18 | case pgq.ObjectType_OBJECT_TABLE: 19 | return convertDropTableStatement(stmt) 20 | 21 | } 22 | return nil, nil 23 | } 24 | 25 | // convertDropIndexStatement converts simple DROP INDEX statements to pgroll operations 26 | func convertDropIndexStatement(stmt *pgq.DropStmt) (migrations.Operations, error) { 27 | if !canConvertDropIndex(stmt) { 28 | return nil, nil 29 | } 30 | items := stmt.GetObjects()[0].GetList().GetItems() 31 | parts := make([]string, len(items)) 32 | for i, item := range items { 33 | parts[i] = item.GetString_().GetSval() 34 | } 35 | 36 | return migrations.Operations{ 37 | &migrations.OpDropIndex{ 38 | Name: strings.Join(parts, "."), 39 | }, 40 | }, nil 41 | } 42 | 43 | // canConvertDropIndex checks whether we can convert the statement without losing any information.
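// Conversion is refused when the statement names more than one index or
// uses CASCADE, since OpDropIndex can express neither; Convert then falls
// back to emitting the original statement as a raw SQL operation.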
44 | func canConvertDropIndex(stmt *pgq.DropStmt) bool { 45 | if len(stmt.Objects) > 1 { 46 | return false 47 | } 48 | if stmt.Behavior == pgq.DropBehavior_DROP_CASCADE { 49 | return false 50 | } 51 | return true 52 | } 53 | 54 | // convertDropTableStatement converts simple DROP TABLE statements to pgroll operations 55 | func convertDropTableStatement(stmt *pgq.DropStmt) (migrations.Operations, error) { 56 | if !canConvertDropTable(stmt) { 57 | return nil, nil 58 | } 59 | 60 | items := stmt.GetObjects()[0].GetList().GetItems() 61 | parts := make([]string, len(items)) 62 | for i, item := range items { 63 | parts[i] = item.GetString_().GetSval() 64 | } 65 | 66 | return migrations.Operations{ 67 | &migrations.OpDropTable{ 68 | Name: strings.Join(parts, "."), 69 | }, 70 | }, nil 71 | } 72 | 73 | // canConvertDropTable checks whether we can convert the statement without losing any information. 74 | func canConvertDropTable(stmt *pgq.DropStmt) bool { 75 | return stmt.Behavior != pgq.DropBehavior_DROP_CASCADE 76 | } 77 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/drop_test.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package sql2pgroll_test 4 | 5 | import ( 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | 11 | "github.com/xataio/pgroll/pkg/migrations" 12 | "github.com/xataio/pgroll/pkg/sql2pgroll" 13 | "github.com/xataio/pgroll/pkg/sql2pgroll/expect" 14 | ) 15 | 16 | func TestDropIndexStatements(t *testing.T) { 17 | t.Parallel() 18 | 19 | tests := []struct { 20 | sql string 21 | expectedOp migrations.Operation 22 | }{ 23 | { 24 | sql: "DROP INDEX foo", 25 | expectedOp: expect.DropIndexOp1, 26 | }, 27 | { 28 | sql: "DROP INDEX myschema.foo", 29 | expectedOp: expect.DropIndexOp2, 30 | }, 31 | { 32 | sql: "DROP INDEX foo RESTRICT", 33 | expectedOp: expect.DropIndexOp1, 34 | }, 35 | { 36 | sql: "DROP INDEX IF EXISTS foo", 37 | expectedOp: expect.DropIndexOp1, 38 | }, 39 | { 40 | sql: "DROP INDEX CONCURRENTLY foo", 41 | expectedOp: expect.DropIndexOp1, 42 | }, 43 | } 44 | 45 | for _, tc := range tests { 46 | t.Run(tc.sql, func(t *testing.T) { 47 | ops, err := sql2pgroll.Convert(tc.sql) 48 | require.NoError(t, err) 49 | 50 | require.Len(t, ops, 1) 51 | 52 | assert.Equal(t, tc.expectedOp, ops[0]) 53 | }) 54 | } 55 | } 56 | 57 | func TestDropTableStatements(t *testing.T) { 58 | t.Parallel() 59 | 60 | tests := []struct { 61 | sql string 62 | expectedOp migrations.Operation 63 | }{ 64 | { 65 | sql: "DROP TABLE foo", 66 | expectedOp: expect.DropTableOp1, 67 | }, 68 | { 69 | sql: "DROP TABLE foo RESTRICT", 70 | expectedOp: expect.DropTableOp1, 71 | }, 72 | { 73 | sql: "DROP TABLE IF EXISTS foo", 74 | expectedOp: expect.DropTableOp1, 75 | }, 76 | { 77 | sql: "DROP TABLE foo.bar", 78 | expectedOp: expect.DropTableOp2, 79 | }, 80 | } 81 | 82 | for _, tc := range tests { 83 | t.Run(tc.sql, func(t *testing.T) { 84 | ops, err := sql2pgroll.Convert(tc.sql) 85 | require.NoError(t, err) 86 | 87 | require.Len(t, ops, 1) 88 | 89 | assert.Equal(t, tc.expectedOp, ops[0]) 90 | }) 91 | } 92 | } 93 | 94 | func TestUnconvertableDropStatements(t *testing.T) { 95 | t.Parallel() 96 | 97 | tests := []string{ 98 | // Drop index 99 | "DROP INDEX foo CASCADE", 100 | 101 | // Drop table 102 | "DROP TABLE foo CASCADE", 103 | } 104 | 105 | for _, sql := range tests { 106 | t.Run(sql, func(t *testing.T) { 107 | ops, err := 
sql2pgroll.Convert(sql) 108 | require.NoError(t, err) 109 | 110 | require.Len(t, ops, 1) 111 | 112 | assert.Equal(t, expect.RawSQLOp(sql), ops[0]) 113 | }) 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/expect/add_foreign_key.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package expect 4 | 5 | import ( 6 | "github.com/xataio/pgroll/pkg/migrations" 7 | "github.com/xataio/pgroll/pkg/sql2pgroll" 8 | ) 9 | 10 | func AddForeignKeyOp1WithParams(matchType migrations.ForeignKeyMatchType, onDelete, onUpdate migrations.ForeignKeyAction) *migrations.OpCreateConstraint { 11 | return &migrations.OpCreateConstraint{ 12 | Columns: []string{"a", "b"}, 13 | Name: "fk_bar_cd", 14 | References: &migrations.TableForeignKeyReference{ 15 | Columns: []string{"c", "d"}, 16 | OnDelete: onDelete, 17 | OnUpdate: onUpdate, 18 | MatchType: matchType, 19 | Table: "bar", 20 | }, 21 | Table: "foo", 22 | Type: migrations.OpCreateConstraintTypeForeignKey, 23 | Up: map[string]string{ 24 | "a": sql2pgroll.PlaceHolderSQL, 25 | "b": sql2pgroll.PlaceHolderSQL, 26 | }, 27 | Down: map[string]string{ 28 | "a": sql2pgroll.PlaceHolderSQL, 29 | "b": sql2pgroll.PlaceHolderSQL, 30 | }, 31 | } 32 | } 33 | 34 | var AddForeignKeyOp2 = &migrations.OpCreateConstraint{ 35 | Columns: []string{"a"}, 36 | Name: "fk_bar_c", 37 | References: &migrations.TableForeignKeyReference{ 38 | Columns: []string{"c"}, 39 | OnDelete: migrations.ForeignKeyActionNOACTION, 40 | OnUpdate: migrations.ForeignKeyActionNOACTION, 41 | MatchType: migrations.ForeignKeyMatchTypeSIMPLE, 42 | Table: "bar", 43 | }, 44 | Table: "foo", 45 | Type: migrations.OpCreateConstraintTypeForeignKey, 46 | Up: map[string]string{ 47 | "a": sql2pgroll.PlaceHolderSQL, 48 | }, 49 | Down: map[string]string{ 50 | "a": sql2pgroll.PlaceHolderSQL, 51 | }, 52 | } 53 | 54 | var AddForeignKeyOp3 = &migrations.OpCreateConstraint{ 55 | Columns: []string{"a"}, 56 | Name: "fk_bar_c", 57 | References: &migrations.TableForeignKeyReference{ 58 | Columns: []string{"c"}, 59 | OnDelete: migrations.ForeignKeyActionNOACTION, 60 | OnUpdate: migrations.ForeignKeyActionNOACTION, 61 | MatchType: migrations.ForeignKeyMatchTypeSIMPLE, 62 | Table: "schema_a.bar", 63 | }, 64 | Table: "schema_a.foo", 65 | Type: migrations.OpCreateConstraintTypeForeignKey, 66 | Up: map[string]string{ 67 | "a": sql2pgroll.PlaceHolderSQL, 68 | }, 69 | Down: map[string]string{ 70 | "a": sql2pgroll.PlaceHolderSQL, 71 | }, 72 | } 73 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/expect/alter_column.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package expect 4 | 5 | import ( 6 | "github.com/oapi-codegen/nullable" 7 | 8 | "github.com/xataio/pgroll/pkg/migrations" 9 | "github.com/xataio/pgroll/pkg/sql2pgroll" 10 | ) 11 | 12 | var AlterColumnOp1 = &migrations.OpAlterColumn{ 13 | Table: "foo", 14 | Column: "a", 15 | Nullable: ptr(false), 16 | Up: sql2pgroll.PlaceHolderSQL, 17 | Down: sql2pgroll.PlaceHolderSQL, 18 | } 19 | 20 | var AlterColumnOp2 = &migrations.OpAlterColumn{ 21 | Table: "foo", 22 | Column: "a", 23 | Nullable: ptr(true), 24 | Up: sql2pgroll.PlaceHolderSQL, 25 | Down: sql2pgroll.PlaceHolderSQL, 26 | } 27 | 28 | var AlterColumnOp3 = &migrations.OpAlterColumn{ 29 | Table: "foo", 30 | Column: "a", 31 | Type: ptr("text"), 32 | Up: 
sql2pgroll.PlaceHolderSQL, 33 | Down: sql2pgroll.PlaceHolderSQL, 34 | } 35 | 36 | var AlterColumnOp5 = &migrations.OpAlterColumn{ 37 | Table: "foo", 38 | Column: "bar", 39 | Default: nullable.NewNullableWithValue("'baz'"), 40 | Up: sql2pgroll.PlaceHolderSQL, 41 | Down: sql2pgroll.PlaceHolderSQL, 42 | } 43 | 44 | var AlterColumnOp6 = &migrations.OpAlterColumn{ 45 | Table: "foo", 46 | Column: "bar", 47 | Default: nullable.NewNullableWithValue("123"), 48 | Up: sql2pgroll.PlaceHolderSQL, 49 | Down: sql2pgroll.PlaceHolderSQL, 50 | } 51 | 52 | var AlterColumnOp7 = &migrations.OpAlterColumn{ 53 | Table: "foo", 54 | Column: "bar", 55 | Default: nullable.NewNullNullable[string](), 56 | Up: sql2pgroll.PlaceHolderSQL, 57 | Down: sql2pgroll.PlaceHolderSQL, 58 | } 59 | 60 | var AlterColumnOp8 = &migrations.OpAlterColumn{ 61 | Table: "foo", 62 | Column: "bar", 63 | Default: nullable.NewNullableWithValue("123.456"), 64 | Up: sql2pgroll.PlaceHolderSQL, 65 | Down: sql2pgroll.PlaceHolderSQL, 66 | } 67 | 68 | var AlterColumnOp9 = &migrations.OpAlterColumn{ 69 | Table: "foo", 70 | Column: "bar", 71 | Default: nullable.NewNullableWithValue("true"), 72 | Up: sql2pgroll.PlaceHolderSQL, 73 | Down: sql2pgroll.PlaceHolderSQL, 74 | } 75 | 76 | var AlterColumnOp10 = &migrations.OpAlterColumn{ 77 | Table: "foo", 78 | Column: "bar", 79 | Default: nullable.NewNullableWithValue("b'0101'"), 80 | Up: sql2pgroll.PlaceHolderSQL, 81 | Down: sql2pgroll.PlaceHolderSQL, 82 | } 83 | 84 | var AlterColumnOp11 = &migrations.OpAlterColumn{ 85 | Table: "foo", 86 | Column: "bar", 87 | Default: nullable.NewNullableWithValue("now()"), 88 | Up: sql2pgroll.PlaceHolderSQL, 89 | Down: sql2pgroll.PlaceHolderSQL, 90 | } 91 | 92 | var AlterColumnOp12 = &migrations.OpAlterColumn{ 93 | Table: "foo", 94 | Column: "bar", 95 | Default: nullable.NewNullableWithValue("(first_name || ' ') || last_name"), 96 | Up: sql2pgroll.PlaceHolderSQL, 97 | Down: sql2pgroll.PlaceHolderSQL, 98 | } 99 | 100 | func ptr[T any](v T) *T { 101 | return &v 102 | } 103 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/expect/create_constraint.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package expect 4 | 5 | import ( 6 | "github.com/xataio/pgroll/pkg/migrations" 7 | "github.com/xataio/pgroll/pkg/sql2pgroll" 8 | ) 9 | 10 | var CreateConstraintOp1 = &migrations.OpCreateConstraint{ 11 | Type: migrations.OpCreateConstraintTypeUnique, 12 | Name: "bar", 13 | Table: "foo", 14 | Columns: []string{"a"}, 15 | Down: map[string]string{"a": sql2pgroll.PlaceHolderSQL}, 16 | Up: map[string]string{"a": sql2pgroll.PlaceHolderSQL}, 17 | } 18 | 19 | var CreateConstraintOp2 = &migrations.OpCreateConstraint{ 20 | Type: migrations.OpCreateConstraintTypeUnique, 21 | Name: "bar", 22 | Table: "foo", 23 | Columns: []string{"a", "b"}, 24 | Down: map[string]string{ 25 | "a": sql2pgroll.PlaceHolderSQL, 26 | "b": sql2pgroll.PlaceHolderSQL, 27 | }, 28 | Up: map[string]string{ 29 | "a": sql2pgroll.PlaceHolderSQL, 30 | "b": sql2pgroll.PlaceHolderSQL, 31 | }, 32 | } 33 | 34 | var CreateConstraintOp3 = &migrations.OpCreateConstraint{ 35 | Type: migrations.OpCreateConstraintTypeCheck, 36 | Name: "bar", 37 | Table: "foo", 38 | Check: ptr("age > 0"), 39 | Columns: []string{sql2pgroll.PlaceHolderColumnName}, 40 | Up: map[string]string{ 41 | sql2pgroll.PlaceHolderColumnName: sql2pgroll.PlaceHolderSQL, 42 | }, 43 | Down: map[string]string{ 44 | 
sql2pgroll.PlaceHolderColumnName: sql2pgroll.PlaceHolderSQL, 45 | }, 46 | } 47 | 48 | var CreateConstraintOp4 = &migrations.OpCreateConstraint{ 49 | Type: migrations.OpCreateConstraintTypeCheck, 50 | Name: "bar", 51 | Table: "schema.foo", 52 | Check: ptr("age > 0"), 53 | Columns: []string{sql2pgroll.PlaceHolderColumnName}, 54 | Up: map[string]string{ 55 | sql2pgroll.PlaceHolderColumnName: sql2pgroll.PlaceHolderSQL, 56 | }, 57 | Down: map[string]string{ 58 | sql2pgroll.PlaceHolderColumnName: sql2pgroll.PlaceHolderSQL, 59 | }, 60 | } 61 | 62 | var CreateConstraintOp5 = &migrations.OpCreateConstraint{ 63 | Type: migrations.OpCreateConstraintTypeCheck, 64 | Name: "bar", 65 | Table: "foo", 66 | Check: ptr("age > 0"), 67 | NoInherit: true, 68 | Columns: []string{sql2pgroll.PlaceHolderColumnName}, 69 | Up: map[string]string{ 70 | sql2pgroll.PlaceHolderColumnName: sql2pgroll.PlaceHolderSQL, 71 | }, 72 | Down: map[string]string{ 73 | sql2pgroll.PlaceHolderColumnName: sql2pgroll.PlaceHolderSQL, 74 | }, 75 | } 76 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/expect/drop_column.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package expect 4 | 5 | import ( 6 | "github.com/xataio/pgroll/pkg/migrations" 7 | "github.com/xataio/pgroll/pkg/sql2pgroll" 8 | ) 9 | 10 | var DropColumnOp1 = &migrations.OpDropColumn{ 11 | Table: "foo", 12 | Column: "bar", 13 | Down: sql2pgroll.PlaceHolderSQL, 14 | } 15 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/expect/drop_constraint.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package expect 4 | 5 | import ( 6 | "github.com/xataio/pgroll/pkg/migrations" 7 | "github.com/xataio/pgroll/pkg/sql2pgroll" 8 | ) 9 | 10 | func OpDropConstraintWithTable(table string) *migrations.OpDropMultiColumnConstraint { 11 | return &migrations.OpDropMultiColumnConstraint{ 12 | Up: migrations.MultiColumnUpSQL{ 13 | sql2pgroll.PlaceHolderColumnName: sql2pgroll.PlaceHolderSQL, 14 | }, 15 | Down: migrations.MultiColumnDownSQL{ 16 | sql2pgroll.PlaceHolderColumnName: sql2pgroll.PlaceHolderSQL, 17 | }, 18 | Table: table, 19 | Name: "constraint_foo", 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/expect/drop_index.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package expect 4 | 5 | import ( 6 | "github.com/xataio/pgroll/pkg/migrations" 7 | ) 8 | 9 | var DropIndexOp1 = &migrations.OpDropIndex{ 10 | Name: "foo", 11 | } 12 | 13 | var DropIndexOp2 = &migrations.OpDropIndex{ 14 | Name: "myschema.foo", 15 | } 16 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/expect/drop_table.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package expect 4 | 5 | import ( 6 | "github.com/xataio/pgroll/pkg/migrations" 7 | ) 8 | 9 | var DropTableOp1 = &migrations.OpDropTable{ 10 | Name: "foo", 11 | } 12 | 13 | var DropTableOp2 = &migrations.OpDropTable{ 14 | Name: "foo.bar", 15 | } 16 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/expect/raw_sql.go: 
-------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package expect 4 | 5 | import "github.com/xataio/pgroll/pkg/migrations" 6 | 7 | func RawSQLOp(sql string) *migrations.OpRawSQL { 8 | return &migrations.OpRawSQL{Up: sql} 9 | } 10 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/expect/rename_column.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package expect 4 | 5 | import "github.com/xataio/pgroll/pkg/migrations" 6 | 7 | var RenameColumnOp1 = &migrations.OpRenameColumn{ 8 | Table: "foo", 9 | From: "a", 10 | To: "b", 11 | } 12 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/expect/rename_constraint.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package expect 4 | 5 | import "github.com/xataio/pgroll/pkg/migrations" 6 | 7 | var RenameConstraintOp1 = &migrations.OpRenameConstraint{ 8 | Table: "foo", 9 | From: "bar", 10 | To: "baz", 11 | } 12 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/expect/rename_table.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package expect 4 | 5 | import "github.com/xataio/pgroll/pkg/migrations" 6 | 7 | var RenameTableOp1 = &migrations.OpRenameTable{ 8 | From: "foo", 9 | To: "bar", 10 | } 11 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/rename.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package sql2pgroll 4 | 5 | import ( 6 | pgq "github.com/xataio/pg_query_go/v6" 7 | 8 | "github.com/xataio/pgroll/pkg/migrations" 9 | ) 10 | 11 | // convertRenameStmt converts RenameStmt nodes to pgroll operations. 12 | func convertRenameStmt(stmt *pgq.RenameStmt) (migrations.Operations, error) { 13 | switch stmt.GetRenameType() { 14 | case pgq.ObjectType_OBJECT_TABLE: 15 | return convertRenameTable(stmt) 16 | case pgq.ObjectType_OBJECT_COLUMN: 17 | return convertRenameColumn(stmt) 18 | case pgq.ObjectType_OBJECT_TABCONSTRAINT: 19 | return convertRenameConstraint(stmt) 20 | default: 21 | return nil, nil 22 | } 23 | } 24 | 25 | // convertRenameColumn converts SQL statements like: 26 | // 27 | // `ALTER TABLE foo RENAME COLUMN a TO b` 28 | // `ALTER TABLE foo RENAME a TO b` 29 | // 30 | // to an OpRenameColumn operation. 31 | func convertRenameColumn(stmt *pgq.RenameStmt) (migrations.Operations, error) { 32 | return migrations.Operations{ 33 | &migrations.OpRenameColumn{ 34 | Table: stmt.GetRelation().GetRelname(), 35 | From: stmt.GetSubname(), 36 | To: stmt.GetNewname(), 37 | }, 38 | }, nil 39 | } 40 | 41 | // convertRenameTable converts SQL statements like: 42 | // 43 | // `ALTER TABLE foo RENAME TO bar` 44 | // 45 | // to an OpRenameTable operation.
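// Note that only the relation name is used here and in the other
// conversions above: a schema qualifier on the table, if any, is not
// carried over into the resulting operation.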
46 | func convertRenameTable(stmt *pgq.RenameStmt) (migrations.Operations, error) { 47 | return migrations.Operations{ 48 | &migrations.OpRenameTable{ 49 | From: stmt.GetRelation().GetRelname(), 50 | To: stmt.GetNewname(), 51 | }, 52 | }, nil 53 | } 54 | 55 | // convertRenameConstraint converts SQL statements like: 56 | // 57 | // `ALTER TABLE foo RENAME CONSTRAINT a TO b` 58 | // 59 | // to an OpRenameConstraint operation. 60 | func convertRenameConstraint(stmt *pgq.RenameStmt) (migrations.Operations, error) { 61 | return migrations.Operations{ 62 | &migrations.OpRenameConstraint{ 63 | Table: stmt.GetRelation().GetRelname(), 64 | From: stmt.GetSubname(), 65 | To: stmt.GetNewname(), 66 | }, 67 | }, nil 68 | } 69 | -------------------------------------------------------------------------------- /pkg/sql2pgroll/rename_test.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package sql2pgroll_test 4 | 5 | import ( 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | 11 | "github.com/xataio/pgroll/pkg/migrations" 12 | "github.com/xataio/pgroll/pkg/sql2pgroll" 13 | "github.com/xataio/pgroll/pkg/sql2pgroll/expect" 14 | ) 15 | 16 | func TestConvertRenameStatements(t *testing.T) { 17 | t.Parallel() 18 | 19 | tests := []struct { 20 | sql string 21 | expectedOp migrations.Operation 22 | }{ 23 | { 24 | sql: "ALTER TABLE foo RENAME COLUMN a TO b", 25 | expectedOp: expect.RenameColumnOp1, 26 | }, 27 | { 28 | sql: "ALTER TABLE foo RENAME a TO b", 29 | expectedOp: expect.RenameColumnOp1, 30 | }, 31 | { 32 | sql: "ALTER TABLE foo RENAME TO bar", 33 | expectedOp: expect.RenameTableOp1, 34 | }, 35 | { 36 | sql: "ALTER TABLE foo RENAME CONSTRAINT bar TO baz", 37 | expectedOp: expect.RenameConstraintOp1, 38 | }, 39 | } 40 | 41 | for _, tc := range tests { 42 | t.Run(tc.sql, func(t *testing.T) { 43 | ops, err := sql2pgroll.Convert(tc.sql) 44 | require.NoError(t, err) 45 | 46 | require.Len(t, ops, 1) 47 | 48 | assert.Equal(t, tc.expectedOp, ops[0]) 49 | }) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /pkg/state/errors.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package state 4 | 5 | import "errors" 6 | 7 | var ErrNoActiveMigration = errors.New("no active migration") 8 | -------------------------------------------------------------------------------- /pkg/state/status.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: Apache-2.0 2 | 3 | package state 4 | 5 | type MigrationStatus string 6 | 7 | const ( 8 | NoneMigrationStatus MigrationStatus = "No migrations" 9 | InProgressMigrationStatus MigrationStatus = "In progress" 10 | CompleteMigrationStatus MigrationStatus = "Complete" 11 | ) 12 | 13 | // Status describes the current migration status of a database schema. 14 | type Status struct { 15 | // The schema name. 16 | Schema string `json:"schema"` 17 | 18 | // The name of the latest version schema. 19 | Version string `json:"version"` 20 | 21 | // The status of the most recent migration. 22 | Status MigrationStatus `json:"status"` 23 | } 24 | --------------------------------------------------------------------------------
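The Status struct above serializes through the JSON tags shown; a minimal, self-contained sketch of producing its JSON form (values illustrative):

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/xataio/pgroll/pkg/state"
	)

	func main() {
		st := state.Status{
			Schema:  "public",
			Version: "01_initial",
			Status:  state.CompleteMigrationStatus,
		}
		b, err := json.Marshal(st)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(b)) // {"schema":"public","version":"01_initial","status":"Complete"}
	}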