├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ └── build.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE.md ├── Makefile ├── README.md ├── _typos.toml ├── cliff.toml ├── coverage └── .gitignore ├── deny.toml ├── docs ├── architecture.md └── images │ ├── demo.gif │ └── renovate.png ├── fixtures ├── .gitignore ├── config │ └── test.yml ├── db │ ├── _meta │ │ └── settings.sql │ ├── foreign_keys.sql │ ├── tenant │ │ ├── schema.sql │ │ └── tables │ │ │ └── tenants.sql │ └── triggers.sql ├── db1 │ └── user │ │ ├── functions │ │ └── auth_uid.sql │ │ ├── schema.sql │ │ └── tables │ │ └── users.sql ├── samples │ ├── comment.sql │ ├── enum.sql │ ├── extension.sql │ ├── fk.sql │ ├── function.sql │ ├── grant.sql │ ├── grant_all.sql │ ├── index.sql │ ├── mview.sql │ ├── owner_to.sql │ ├── pk.sql │ ├── policy.sql │ ├── revoke.sql │ ├── rls.sql │ ├── sequence.sql │ ├── set_default.sql │ ├── table.sql │ ├── trigger.sql │ ├── type.sql │ └── view.sql └── simple.sql ├── rfcs ├── 0001-sql-migration.md ├── 0002-rewrite-create-table.md └── template.md ├── src ├── commands │ ├── generate │ │ ├── completion.rs │ │ └── mod.rs │ ├── mod.rs │ └── schema │ │ ├── apply.rs │ │ ├── fetch.rs │ │ ├── init.rs │ │ ├── mod.rs │ │ ├── normalize.rs │ │ └── plan.rs ├── config.rs ├── lib.rs ├── macros.rs ├── main.rs ├── parser │ ├── composite_type.rs │ ├── enum_type.rs │ ├── function.rs │ ├── mod.rs │ ├── mview.rs │ ├── privilege │ │ ├── mod.rs │ │ └── single_priv.rs │ ├── sequence.rs │ ├── table │ │ ├── alter_table.rs │ │ ├── column │ │ │ ├── constraint_info.rs │ │ │ └── mod.rs │ │ ├── mod.rs │ │ ├── table_constraint.rs │ │ ├── table_index.rs │ │ ├── table_owner.rs │ │ ├── table_policy.rs │ │ ├── table_rls.rs │ │ ├── table_sequence.rs │ │ └── table_trigger.rs │ ├── utils │ │ ├── macros.rs │ │ ├── mod.rs │ │ ├── node.rs │ │ └── parsec.rs │ └── view.rs ├── repo │ ├── applier.rs │ ├── git.rs │ ├── loader.rs │ ├── mod.rs │ └── saver.rs ├── schema.rs ├── types │ ├── differ.rs │ ├── mod.rs │ ├── node_delta.rs │ ├── relation_id.rs │ └── schema_id.rs └── utils.rs └── tests ├── cli_tests.rs └── cmd ├── help.toml ├── help_schema.toml ├── init.in └── .keep ├── init.toml ├── normalize.in ├── renovate.yml └── test.sql ├── normalize.out ├── public │ └── 04_tables.sql └── renovate.yml ├── normalize.toml ├── plan.in ├── renovate.yml └── test.sql └── plan.toml /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 
39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | branches: 9 | - master 10 | 11 | jobs: 12 | build-rust: 13 | strategy: 14 | matrix: 15 | platform: [ubuntu-latest] 16 | runs-on: ${{ matrix.platform }} 17 | services: 18 | postgres: 19 | image: postgres:14 20 | env: 21 | POSTGRES_USER: postgres 22 | POSTGRES_PASSWORD: postgres 23 | POSTGRES_DB: test 24 | ports: 25 | - 5432:5432 26 | # Set health checks to wait until postgres has started 27 | options: >- 28 | --health-cmd pg_isready 29 | --health-interval 10s 30 | --health-timeout 5s 31 | --health-retries 5 32 | steps: 33 | - uses: actions/checkout@v3 34 | with: 35 | fetch-depth: 0 36 | - name: Install Rust 37 | run: rustup toolchain install stable --component llvm-tools-preview 38 | - name: Install cargo-llvm-cov 39 | uses: taiki-e/install-action@cargo-llvm-cov 40 | - name: install nextest 41 | uses: taiki-e/install-action@nextest 42 | - uses: Swatinem/rust-cache@v1 43 | - name: Check code format 44 | run: cargo fmt -- --check 45 | - name: Check the package for errors 46 | run: cargo check --all 47 | - name: Lint rust sources 48 | run: cargo clippy --all-targets --all-features --tests --benches -- -D warnings 49 | - name: Run all tests 50 | run: cargo nextest run --all-features 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | fail_fast: false 2 | repos: 3 | - repo: https://github.com/pre-commit/pre-commit-hooks 4 | rev: v4.3.0 5 | hooks: 6 | - id: check-byte-order-marker 7 | - id: check-case-conflict 8 | - id: check-merge-conflict 9 | - id: check-symlinks 10 | - id: check-yaml 11 | - id: end-of-file-fixer 12 | - id: mixed-line-ending 13 | - id: trailing-whitespace 14 | - repo: https://github.com/psf/black 15 | rev: 22.10.0 16 | hooks: 17 | - id: black 18 | - repo: local 19 | hooks: 20 | - id: cargo-fmt 21 | name: cargo fmt 22 | description: Format files with rustfmt. 
23 | entry: bash -c 'cargo fmt -- --check' 24 | language: rust 25 | files: \.rs$ 26 | args: [] 27 | - id: cargo-deny 28 | name: cargo deny check 29 | description: Check cargo dependencies 30 | entry: bash -c 'cargo deny check -d' 31 | language: rust 32 | files: \.rs$ 33 | args: [] 34 | - id: typos 35 | name: typos 36 | description: check typo 37 | entry: bash -c 'typos' 38 | language: rust 39 | files: \.*$ 40 | pass_filenames: false 41 | - id: cargo-check 42 | name: cargo check 43 | description: Check the package for errors. 44 | entry: bash -c 'cargo check --all' 45 | language: rust 46 | files: \.rs$ 47 | pass_filenames: false 48 | - id: cargo-clippy 49 | name: cargo clippy 50 | description: Lint rust sources 51 | entry: bash -c 'cargo clippy --all-targets --all-features --tests --benches -- -D warnings' 52 | language: rust 53 | files: \.rs$ 54 | pass_filenames: false 55 | - id: cargo-test 56 | name: cargo test 57 | description: unit test for the project 58 | entry: bash -c 'cargo nextest run --all-features' 59 | language: rust 60 | files: \.rs$ 61 | pass_filenames: false 62 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "renovate" 3 | version = "0.2.23" 4 | edition = "2021" 5 | license = "MIT" 6 | documentation = "https://docs.rs/renovate" 7 | repository = "https://github.com/tyrchen/renovate" 8 | homepage = "https://github.com/tyrchen/renovate" 9 | description = """ 10 | A new way to handle Postgres schema migration. 11 | """ 12 | readme = "README.md" 13 | categories = ["database"] 14 | keywords = ["database", "postgres", "migration"] 15 | 16 | [lib] 17 | name = "renovate" 18 | path = "src/lib.rs" 19 | 20 | [[bin]] 21 | name = "renovate" 22 | path = "src/main.rs" 23 | 24 | [features] 25 | default = ["cli"] 26 | cli = ["clap-utils"] 27 | cli-test = [] 28 | 29 | [dependencies] 30 | anyhow = "1.0.68" 31 | async-process = "1.6.0" 32 | async-trait = "0.1.60" 33 | atty = "0.2.14" 34 | clap-utils = { version = "0.3.0", features = ["highlight"], optional = true } 35 | console = "0.15.4" 36 | derivative = "2.2.0" 37 | git2 = { version = "0.15.0", default-features = false } 38 | glob = "0.3.0" 39 | indexmap = "1.9.2" 40 | itertools = "0.10.5" 41 | nom = "7.1.2" 42 | pg_query = { version = "0.7.0", git = "https://github.com/pganalyze/pg_query.rs" } 43 | serde = { version = "1.0.152", features = ["derive"] } 44 | serde_yaml = "0.9.16" 45 | similar = { version = "2.2.1", features = ["inline"] } 46 | sqlformat = "0.2.0" 47 | sqlx = { version = "0.6.2", features = ["postgres", "runtime-tokio-rustls"] } 48 | tokio = { version = "1.23.1", features = ["fs", "rt", "macros", "rt-multi-thread", "tracing"] } 49 | tracing = "0.1.37" 50 | tracing-subscriber = "0.3.16" 51 | url = "2.3.1" 52 | uuid = { version = "1.2.2", features = ["v4"] } 53 | 54 | [dev-dependencies] 55 | tempfile = "3.3.0" 56 | trycmd = "0.14.10" 57 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright <2023> 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit 
persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | build:
2 | 	@cargo build
3 | 	@rm -f ~/.cargo/bin/renovate && cp target/debug/renovate ~/.cargo/bin/
4 |
5 | cov:
6 | 	@cargo llvm-cov nextest --all-features --workspace --lcov --output-path coverage/lcov-$(shell date +%F).info
7 |
8 | test:
9 | 	@cargo nextest run --all-features
10 |
11 | snapshot:
12 | 	@TRYCMD=overwrite cargo test --test cli_tests --all-features
13 |
14 | release:
15 | 	@cargo release tag --execute
16 | 	@git cliff -o CHANGELOG.md
17 | 	@git commit -a -m "Update CHANGELOG.md" || true
18 | 	@git push origin master
19 | 	@cargo release push --execute
20 |
21 | .PHONY: build cov test snapshot release
22 |
--------------------------------------------------------------------------------
/_typos.toml:
--------------------------------------------------------------------------------
1 | [default.extend-words]
2 |
3 | [files]
4 | extend-exclude = ["CHANGELOG.md"]
5 |
--------------------------------------------------------------------------------
/cliff.toml:
--------------------------------------------------------------------------------
1 | [changelog]
2 | # changelog header
3 | header = """
4 | # Changelog\n
5 | All notable changes to this project will be documented in this file.\n
6 | """
7 | # template for the changelog body
8 | # https://tera.netlify.app/docs/#introduction
9 | body = """
10 | {% if version %}\
11 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
12 | {% else %}\
13 | ## [unreleased]
14 | {% endif %}\
15 | {% if previous %}\
16 | {% if previous.commit_id %}
17 | [{{ previous.commit_id | truncate(length=7, end="") }}]({{ previous.commit_id }})...\
18 | [{{ commit_id | truncate(length=7, end="") }}]({{ commit_id }})
19 | {% endif %}\
20 | {% endif %}\
21 | {% for group, commits in commits | group_by(attribute="group") %}
22 | ### {{ group | upper_first }}
23 | {% for commit in commits %}
24 | - {{ commit.message | upper_first }} ([{{ commit.id | truncate(length=7, end="") }}]({{ commit.id }}) - {{ commit.author.timestamp | date }} by {{ commit.author.name }})\
25 | {% for footer in commit.footers -%}
26 | , {{ footer.token }}{{ footer.separator }}{{ footer.value }}\
27 | {% endfor %}\
28 | {% endfor %}
29 | {% endfor %}\n
30 | """
31 | # remove the leading and trailing whitespace from the template
32 | trim = true
33 | # changelog footer
34 | footer = """
35 |
36 | """
37 |
38 | [git]
39 | # parse the commits based on https://www.conventionalcommits.org
40 | conventional_commits = true
41 | # filter out the commits that are not conventional
42 | filter_unconventional = true
43 | # process each line of a commit as an individual commit
44 | split_commits = false
45 | # regex
for parsing and grouping commits 46 | commit_parsers = [ 47 | { message = "^feat", group = "Features"}, 48 | { message = "^fix", group = "Bug Fixes"}, 49 | { message = "^doc", group = "Documentation"}, 50 | { message = "^perf", group = "Performance"}, 51 | { message = "^refactor", group = "Refactor"}, 52 | { message = "^style", group = "Styling"}, 53 | { message = "^test", group = "Testing"}, 54 | { message = "^chore\\(release\\): prepare for", skip = true}, 55 | { message = "^chore", group = "Miscellaneous Tasks"}, 56 | { body = ".*security", group = "Security"}, 57 | ] 58 | # protect breaking changes from being skipped due to matching a skipping commit_parser 59 | protect_breaking_commits = false 60 | # filter out the commits that are not matched by commit parsers 61 | filter_commits = false 62 | # glob pattern for matching git tags 63 | tag_pattern = "v[0-9]*" 64 | # regex for skipping tags 65 | skip_tags = "v0.1.0-beta.1" 66 | # regex for ignoring tags 67 | ignore_tags = "" 68 | # sort the tags chronologically 69 | date_order = false 70 | # sort the commits inside sections by oldest/newest order 71 | sort_commits = "oldest" 72 | -------------------------------------------------------------------------------- /coverage/.gitignore: -------------------------------------------------------------------------------- 1 | *.info 2 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | # This template contains all of the possible sections and their default values 2 | 3 | # Note that all fields that take a lint level have these possible values: 4 | # * deny - An error will be produced and the check will fail 5 | # * warn - A warning will be produced, but the check will not fail 6 | # * allow - No warning or error will be produced, though in some cases a note 7 | # will be 8 | 9 | # The values provided in this template are the default values that will be used 10 | # when any section or field is not specified in your own configuration 11 | 12 | # If 1 or more target triples (and optionally, target_features) are specified, 13 | # only the specified targets will be checked when running `cargo deny check`. 14 | # This means, if a particular package is only ever used as a target specific 15 | # dependency, such as, for example, the `nix` crate only being used via the 16 | # `target_family = "unix"` configuration, that only having windows targets in 17 | # this list would mean the nix crate, as well as any of its exclusive 18 | # dependencies not shared by any other crates, would be ignored, as the target 19 | # list here is effectively saying which targets you are building for. 20 | targets = [ 21 | # The triple can be any string, but only the target triples built in to 22 | # rustc (as of 1.40) can be checked against actual config expressions 23 | #{ triple = "x86_64-unknown-linux-musl" }, 24 | # You can also specify which target_features you promise are enabled for a 25 | # particular target. target_features are currently not validated against 26 | # the actual valid features supported by the target architecture. 
27 | #{ triple = "wasm32-unknown-unknown", features = ["atomics"] }, 28 | ] 29 | 30 | # This section is considered when running `cargo deny check advisories` 31 | # More documentation for the advisories section can be found here: 32 | # https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html 33 | [advisories] 34 | # The path where the advisory database is cloned/fetched into 35 | db-path = "~/.cargo/advisory-db" 36 | # The url(s) of the advisory databases to use 37 | db-urls = ["https://github.com/rustsec/advisory-db"] 38 | # The lint level for security vulnerabilities 39 | vulnerability = "deny" 40 | # The lint level for unmaintained crates 41 | unmaintained = "warn" 42 | # The lint level for crates that have been yanked from their source registry 43 | yanked = "warn" 44 | # The lint level for crates with security notices. Note that as of 45 | # 2019-12-17 there are no security notice advisories in 46 | # https://github.com/rustsec/advisory-db 47 | notice = "warn" 48 | # A list of advisory IDs to ignore. Note that ignored advisories will still 49 | # output a note when they are encountered. 50 | ignore = [ 51 | #"RUSTSEC-0000-0000", 52 | ] 53 | # Threshold for security vulnerabilities, any vulnerability with a CVSS score 54 | # lower than the range specified will be ignored. Note that ignored advisories 55 | # will still output a note when they are encountered. 56 | # * None - CVSS Score 0.0 57 | # * Low - CVSS Score 0.1 - 3.9 58 | # * Medium - CVSS Score 4.0 - 6.9 59 | # * High - CVSS Score 7.0 - 8.9 60 | # * Critical - CVSS Score 9.0 - 10.0 61 | #severity-threshold = 62 | 63 | # This section is considered when running `cargo deny check licenses` 64 | # More documentation for the licenses section can be found here: 65 | # https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html 66 | [licenses] 67 | # The lint level for crates which do not have a detectable license 68 | unlicensed = "allow" 69 | # List of explicitly allowed licenses 70 | # See https://spdx.org/licenses/ for list of possible licenses 71 | # [possible values: any SPDX 3.7 short identifier (+ optional exception)]. 72 | allow = [ 73 | "MIT", 74 | "Apache-2.0", 75 | "Unicode-DFS-2016", 76 | "ISC", 77 | "BSD-3-Clause", 78 | ] 79 | # List of explicitly disallowed licenses 80 | # See https://spdx.org/licenses/ for list of possible licenses 81 | # [possible values: any SPDX 3.7 short identifier (+ optional exception)]. 82 | deny = [ 83 | #"Nokia", 84 | ] 85 | # Lint level for licenses considered copyleft 86 | copyleft = "warn" 87 | # Blanket approval or denial for OSI-approved or FSF Free/Libre licenses 88 | # * both - The license will be approved if it is both OSI-approved *AND* FSF 89 | # * either - The license will be approved if it is either OSI-approved *OR* FSF 90 | # * osi-only - The license will be approved if is OSI-approved *AND NOT* FSF 91 | # * fsf-only - The license will be approved if is FSF *AND NOT* OSI-approved 92 | # * neither - This predicate is ignored and the default lint level is used 93 | allow-osi-fsf-free = "neither" 94 | # Lint level used when no other predicates are matched 95 | # 1. License isn't in the allow or deny lists 96 | # 2. License isn't copyleft 97 | # 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither" 98 | default = "deny" 99 | # The confidence threshold for detecting a license from license text. 100 | # The higher the value, the more closely the license text must be to the 101 | # canonical license text of a valid SPDX license file. 
102 | # [possible values: any between 0.0 and 1.0]. 103 | confidence-threshold = 0.8 104 | # Allow 1 or more licenses on a per-crate basis, so that particular licenses 105 | # aren't accepted for every possible crate as with the normal allow list 106 | exceptions = [ 107 | # Each entry is the crate and version constraint, and its specific allow 108 | # list 109 | #{ allow = ["Zlib"], name = "adler32", version = "*" }, 110 | ] 111 | 112 | # Some crates don't have (easily) machine readable licensing information, 113 | # adding a clarification entry for it allows you to manually specify the 114 | # licensing information 115 | #[[licenses.clarify]] 116 | # The name of the crate the clarification applies to 117 | #name = "ring" 118 | # The optional version constraint for the crate 119 | #version = "*" 120 | # The SPDX expression for the license requirements of the crate 121 | #expression = "MIT AND ISC AND OpenSSL" 122 | # One or more files in the crate's source used as the "source of truth" for 123 | # the license expression. If the contents match, the clarification will be used 124 | # when running the license check, otherwise the clarification will be ignored 125 | # and the crate will be checked normally, which may produce warnings or errors 126 | # depending on the rest of your configuration 127 | #license-files = [ 128 | # Each entry is a crate relative path, and the (opaque) hash of its contents 129 | #{ path = "LICENSE", hash = 0xbd0eed23 } 130 | #] 131 | 132 | [licenses.private] 133 | # If true, ignores workspace crates that aren't published, or are only 134 | # published to private registries 135 | ignore = false 136 | # One or more private registries that you might publish crates to, if a crate 137 | # is only published to private registries, and ignore is true, the crate will 138 | # not have its license(s) checked 139 | registries = [ 140 | #"https://sekretz.com/registry 141 | ] 142 | 143 | # This section is considered when running `cargo deny check bans`. 144 | # More documentation about the 'bans' section can be found here: 145 | # https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html 146 | [bans] 147 | # Lint level for when multiple versions of the same crate are detected 148 | multiple-versions = "warn" 149 | # Lint level for when a crate version requirement is `*` 150 | wildcards = "allow" 151 | # The graph highlighting used when creating dotgraphs for crates 152 | # with multiple versions 153 | # * lowest-version - The path to the lowest versioned duplicate is highlighted 154 | # * simplest-path - The path to the version with the fewest edges is highlighted 155 | # * all - Both lowest-version and simplest-path are used 156 | highlight = "all" 157 | # List of crates that are allowed. Use with care! 158 | allow = [ 159 | #{ name = "ansi_term", version = "=0.11.0" }, 160 | ] 161 | # List of crates to deny 162 | deny = [ 163 | # Each entry the name of a crate and a version range. If version is 164 | # not specified, all versions will be matched. 165 | #{ name = "ansi_term", version = "=0.11.0" }, 166 | # 167 | # Wrapper crates can optionally be specified to allow the crate when it 168 | # is a direct dependency of the otherwise banned crate 169 | #{ name = "ansi_term", version = "=0.11.0", wrappers = [] }, 170 | ] 171 | # Certain crates/versions that will be skipped when doing duplicate detection. 172 | skip = [ 173 | #{ name = "ansi_term", version = "=0.11.0" }, 174 | ] 175 | # Similarly to `skip` allows you to skip certain crates during duplicate 176 | # detection. 
Unlike skip, it also includes the entire tree of transitive 177 | # dependencies starting at the specified crate, up to a certain depth, which is 178 | # by default infinite 179 | skip-tree = [ 180 | #{ name = "ansi_term", version = "=0.11.0", depth = 20 }, 181 | ] 182 | 183 | # This section is considered when running `cargo deny check sources`. 184 | # More documentation about the 'sources' section can be found here: 185 | # https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html 186 | [sources] 187 | # Lint level for what to happen when a crate from a crate registry that is not 188 | # in the allow list is encountered 189 | unknown-registry = "warn" 190 | # Lint level for what to happen when a crate from a git repository that is not 191 | # in the allow list is encountered 192 | unknown-git = "warn" 193 | # List of URLs for allowed crate registries. Defaults to the crates.io index 194 | # if not specified. If it is specified but empty, no registries are allowed. 195 | allow-registry = ["https://github.com/rust-lang/crates.io-index"] 196 | # List of URLs for allowed Git repositories 197 | allow-git = [] 198 | 199 | [sources.allow-org] 200 | # 1 or more github.com organizations to allow git sources for 201 | github = [] 202 | # 1 or more gitlab.com organizations to allow git sources for 203 | gitlab = [] 204 | # 1 or more bitbucket.org organizations to allow git sources for 205 | bitbucket = [] 206 | -------------------------------------------------------------------------------- /docs/architecture.md: -------------------------------------------------------------------------------- 1 | # Renovate architecture 2 | 3 | TBD 4 | -------------------------------------------------------------------------------- /docs/images/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tyrchen/renovate/26c122c47ede75f10cd0c469ac3e089f1d6eb67d/docs/images/demo.gif -------------------------------------------------------------------------------- /docs/images/renovate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tyrchen/renovate/26c122c47ede75f10cd0c469ac3e089f1d6eb67d/docs/images/renovate.png -------------------------------------------------------------------------------- /fixtures/.gitignore: -------------------------------------------------------------------------------- 1 | dump*.sql 2 | -------------------------------------------------------------------------------- /fixtures/config/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | url: postgres://localhost:5432/todo 3 | output: 4 | layout: nested 5 | path: /tmp/db 6 | -------------------------------------------------------------------------------- /fixtures/db/_meta/settings.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tyrchen/renovate/26c122c47ede75f10cd0c469ac3e089f1d6eb67d/fixtures/db/_meta/settings.sql -------------------------------------------------------------------------------- /fixtures/db/foreign_keys.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE ONLY tenant.instances 2 | ADD CONSTRAINT instances_name_fkey FOREIGN KEY (name) REFERENCES tenant.tenants(name) ON DELETE CASCADE; 3 | 4 | ALTER TABLE ONLY tenant.tenants 5 | ADD CONSTRAINT tenants_owner_id_fkey FOREIGN KEY (owner_id) REFERENCES auth.users(id) ON 
DELETE RESTRICT; 6 | -------------------------------------------------------------------------------- /fixtures/db/tenant/schema.sql: -------------------------------------------------------------------------------- 1 | CREATE SCHEMA tenant; 2 | -------------------------------------------------------------------------------- /fixtures/db/tenant/tables/tenants.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE tenant.tenants ( 2 | name character varying(64) NOT NULL, 3 | owner_id uuid, 4 | status tenant.tenant_status DEFAULT 'free'::tenant.tenant_status NOT NULL, 5 | next_billing_at timestamp with time zone, 6 | created_at timestamp with time zone DEFAULT now() NOT NULL, 7 | updated_at timestamp with time zone DEFAULT now() NOT NULL, 8 | CONSTRAINT tenants_name_check CHECK (((name)::text ~* '^[a-z][a-z0-9]{5,}$'::text)) 9 | ); 10 | 11 | CREATE TABLE tenant.instances ( 12 | name character varying(64) NOT NULL, 13 | env character varying(16) DEFAULT 'dev'::character varying NOT NULL, 14 | status tenant.instance_status DEFAULT 'ready'::tenant.instance_status NOT NULL, 15 | created_at timestamp with time zone DEFAULT now() NOT NULL, 16 | updated_at timestamp with time zone DEFAULT now() NOT NULL, 17 | CONSTRAINT instances_env_check CHECK (((env)::text ~* '^[a-z][a-z0-9]{2,}$'::text)) 18 | ); 19 | -------------------------------------------------------------------------------- /fixtures/db/triggers.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tyrchen/renovate/26c122c47ede75f10cd0c469ac3e089f1d6eb67d/fixtures/db/triggers.sql -------------------------------------------------------------------------------- /fixtures/db1/user/functions/auth_uid.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION auth.uid() 2 | RETURNS uuid 3 | LANGUAGE SQL stable 4 | AS $$ 5 | SELECT coalesce( 6 | current_setting('request.jwt.claim.sub', true), 7 | (current_setting('request.jwt.claims', true)::jsonb ->> 'sub') 8 | )::uuid 9 | $$; 10 | -------------------------------------------------------------------------------- /fixtures/db1/user/schema.sql: -------------------------------------------------------------------------------- 1 | CREATE SCHEMA user; 2 | -------------------------------------------------------------------------------- /fixtures/db1/user/tables/users.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE auth.users ( 2 | id uuid DEFAULT gen_random_uuid() NOT NULL, 3 | aud character varying(255), 4 | role character varying(64), 5 | email character varying(255), 6 | encrypted_password character varying(255), 7 | confirmed_at timestamp with time zone, 8 | invited_at timestamp with time zone, 9 | confirmation_token character varying(255), 10 | confirmation_sent_at timestamp with time zone, 11 | recovery_token character varying(255), 12 | recovery_sent_at timestamp with time zone, 13 | email_change_token character varying(255), 14 | email_change character varying(255), 15 | email_change_sent_at timestamp with time zone, 16 | last_sign_in_at timestamp with time zone, 17 | raw_app_meta_data jsonb, 18 | raw_user_meta_data jsonb, 19 | is_super_admin boolean, 20 | created_at timestamp with time zone, 21 | updated_at timestamp with time zone 22 | ); 23 | 24 | ALTER TABLE auth.users OWNER TO autoapi_auth_admin; 25 | COMMENT ON TABLE auth.users IS 'Auth: Stores user login data 
within a secure schema.'; 26 | 27 | ALTER TABLE ONLY auth.users 28 | ADD CONSTRAINT users_email_key UNIQUE (email); 29 | 30 | ALTER TABLE ONLY auth.users 31 | ADD CONSTRAINT users_pkey PRIMARY KEY (id); 32 | -------------------------------------------------------------------------------- /fixtures/samples/comment.sql: -------------------------------------------------------------------------------- 1 | COMMENT ON TABLE auth.audit_log_entries IS 'Auth: Audit trail for user actions.'; 2 | -------------------------------------------------------------------------------- /fixtures/samples/enum.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE tenant.instance_status AS ENUM ( 2 | 'ready', 3 | 'suspended' 4 | ); 5 | -------------------------------------------------------------------------------- /fixtures/samples/extension.sql: -------------------------------------------------------------------------------- 1 | CREATE EXTENSION IF NOT EXISTS moddatetime WITH SCHEMA extensions; 2 | -------------------------------------------------------------------------------- /fixtures/samples/fk.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE ONLY tenant.instances 2 | ADD CONSTRAINT instances_name_fkey FOREIGN KEY (name) REFERENCES tenant.tenants(name) ON DELETE CASCADE; 3 | -------------------------------------------------------------------------------- /fixtures/samples/function.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION auth.uid(name text, age integer) 2 | RETURNS uuid 3 | LANGUAGE SQL stable 4 | AS $$ 5 | SELECT coalesce( 6 | current_setting('request.jwt.claim.sub', true), 7 | (current_setting('request.jwt.claims', true)::jsonb ->> 'sub') 8 | )::uuid 9 | $$; 10 | -------------------------------------------------------------------------------- /fixtures/samples/grant.sql: -------------------------------------------------------------------------------- 1 | GRANT SELECT(id),UPDATE(id) ON TABLE public.tbl TO anon; 2 | -------------------------------------------------------------------------------- /fixtures/samples/grant_all.sql: -------------------------------------------------------------------------------- 1 | GRANT ALL ON TABLE audit.logged_actions TO autoapi_audit_admin; 2 | -------------------------------------------------------------------------------- /fixtures/samples/index.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX logged_actions_action_idx ON audit.logged_actions USING btree (action); 2 | -------------------------------------------------------------------------------- /fixtures/samples/mview.sql: -------------------------------------------------------------------------------- 1 | CREATE MATERIALIZED VIEW api_catalog.schemas AS 2 | SELECT (pg_namespace.oid)::integer AS id, 3 | (pg_namespace.nspname)::character varying AS name, 4 | (pg_namespace.nspowner)::integer AS owner 5 | FROM pg_namespace 6 | WHERE ((pg_namespace.nspname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name, 'pg_toast'::name, 'public'::name])) AND (pg_namespace.nspname !~ '^tn_'::text)) 7 | ; 8 | -------------------------------------------------------------------------------- /fixtures/samples/owner_to.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE public.tbl OWNER TO tchen; 2 | 
-------------------------------------------------------------------------------- /fixtures/samples/pk.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE ONLY auth.users 2 | ADD CONSTRAINT users_pkey PRIMARY KEY (id); 3 | -------------------------------------------------------------------------------- /fixtures/samples/policy.sql: -------------------------------------------------------------------------------- 1 | CREATE POLICY "Can only view own audit data." ON audit.logged_actions FOR SELECT USING ((auth.uid() = ((user_info ->> 'sub'::text))::uuid)); 2 | -------------------------------------------------------------------------------- /fixtures/samples/revoke.sql: -------------------------------------------------------------------------------- 1 | REVOKE USAGE ON SCHEMA auth FROM anon; 2 | -------------------------------------------------------------------------------- /fixtures/samples/rls.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE audit.logged_actions ENABLE ROW LEVEL SECURITY; 2 | -------------------------------------------------------------------------------- /fixtures/samples/sequence.sql: -------------------------------------------------------------------------------- 1 | CREATE SEQUENCE test.message_id_seq 2 | AS integer 3 | START WITH 1 4 | INCREMENT BY 1 5 | NO MINVALUE 6 | NO MAXVALUE 7 | CACHE 1; 8 | -------------------------------------------------------------------------------- /fixtures/samples/set_default.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE ONLY test.message ALTER COLUMN id SET DEFAULT nextval('test.message_id_seq'::regclass); 2 | -------------------------------------------------------------------------------- /fixtures/samples/table.sql: -------------------------------------------------------------------------------- 1 | -- CREATE TABLE tenant.tenants ( 2 | -- name character varying(64) NOT NULL, 3 | -- owner_id uuid, 4 | -- status tenant.tenant_status DEFAULT 'free'::tenant.tenant_status NOT NULL, 5 | -- next_billing_at timestamp with time zone, 6 | -- created_at timestamp with time zone DEFAULT now() NOT NULL, 7 | -- updated_at timestamp with time zone DEFAULT now() NOT NULL, 8 | -- CONSTRAINT tenants_name_check CHECK (((name)::text ~* '^[a-z][a-z0-9]{5,}$'::text)) 9 | -- ); 10 | -- CREATE TABLE foo (id serial not null primary key, name text default random_name(), CHECK (check_name(name))) 11 | 12 | CREATE TABLE public.foo ( 13 | id integer NOT NULL, 14 | name text DEFAULT 'hello'::text NOT NULL 15 | ); 16 | -------------------------------------------------------------------------------- /fixtures/samples/trigger.sql: -------------------------------------------------------------------------------- 1 | CREATE TRIGGER tenant_instance_modified_time_trigger BEFORE UPDATE ON tenant.instances FOR EACH ROW EXECUTE FUNCTION extensions.moddatetime('updated_at'); 2 | -------------------------------------------------------------------------------- /fixtures/samples/type.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE full_address AS ( 2 | city VARCHAR(90), 3 | street VARCHAR(90) 4 | ); 5 | -------------------------------------------------------------------------------- /fixtures/samples/view.sql: -------------------------------------------------------------------------------- 1 | CREATE VIEW api_catalog.schemas AS 2 | SELECT (pg_namespace.oid)::integer AS 
id, 3 | (pg_namespace.nspname)::character varying AS name, 4 | (pg_namespace.nspowner)::integer AS owner 5 | FROM pg_namespace 6 | WHERE ((pg_namespace.nspname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name, 'pg_toast'::name, 'public'::name])) AND (pg_namespace.nspname !~ '^tn_'::text)); 7 | -------------------------------------------------------------------------------- /fixtures/simple.sql: -------------------------------------------------------------------------------- 1 | CREATE SCHEMA tenant; 2 | ALTER SCHEMA tenant OWNER TO superadmin; 3 | 4 | CREATE TABLE tenant.tenants ( 5 | name character varying(64) NOT NULL, 6 | owner_id uuid, 7 | status tenant.tenant_status DEFAULT 'free'::tenant.tenant_status NOT NULL, 8 | next_billing_at timestamp with time zone, 9 | created_at timestamp with time zone DEFAULT now() NOT NULL, 10 | updated_at timestamp with time zone DEFAULT now() NOT NULL, 11 | CONSTRAINT tenants_name_check CHECK (((name)::text ~* '^[a-z][a-z0-9]{5,}$'::text)) 12 | ); 13 | CREATE TABLE tenant.instances ( 14 | name character varying(64) NOT NULL, 15 | env character varying(16) DEFAULT 'dev'::character varying NOT NULL, 16 | status tenant.instance_status DEFAULT 'ready'::tenant.instance_status NOT NULL, 17 | created_at timestamp with time zone DEFAULT now() NOT NULL, 18 | updated_at timestamp with time zone DEFAULT now() NOT NULL, 19 | CONSTRAINT instances_env_check CHECK (((env)::text ~* '^[a-z][a-z0-9]{2,}$'::text)) 20 | ); 21 | 22 | CREATE SCHEMA auth; 23 | ALTER SCHEMA auth OWNER TO superadmin; 24 | CREATE TABLE auth.users ( 25 | id uuid DEFAULT gen_random_uuid() NOT NULL, 26 | aud character varying(255), 27 | role character varying(64), 28 | email character varying(255), 29 | encrypted_password character varying(255), 30 | confirmed_at timestamp with time zone, 31 | invited_at timestamp with time zone, 32 | confirmation_token character varying(255), 33 | confirmation_sent_at timestamp with time zone, 34 | recovery_token character varying(255), 35 | recovery_sent_at timestamp with time zone, 36 | email_change_token character varying(255), 37 | email_change character varying(255), 38 | email_change_sent_at timestamp with time zone, 39 | last_sign_in_at timestamp with time zone, 40 | raw_app_meta_data jsonb, 41 | raw_user_meta_data jsonb, 42 | is_super_admin boolean, 43 | created_at timestamp with time zone, 44 | updated_at timestamp with time zone 45 | ); 46 | 47 | ALTER TABLE auth.users OWNER TO autoapi_auth_admin; 48 | COMMENT ON TABLE auth.users IS 'Auth: Stores user login data within a secure schema.'; 49 | 50 | ALTER TABLE ONLY auth.users 51 | ADD CONSTRAINT users_email_key UNIQUE (email); 52 | 53 | ALTER TABLE ONLY auth.users 54 | ADD CONSTRAINT users_pkey PRIMARY KEY (id); 55 | 56 | ALTER TABLE ONLY tenant.instances 57 | ADD CONSTRAINT instances_name_fkey FOREIGN KEY (name) REFERENCES tenant.tenants(name) ON DELETE CASCADE; 58 | 59 | ALTER TABLE ONLY tenant.tenants 60 | ADD CONSTRAINT tenants_owner_id_fkey FOREIGN KEY (owner_id) REFERENCES auth.users(id) ON DELETE RESTRICT; 61 | -------------------------------------------------------------------------------- /rfcs/0001-sql-migration.md: -------------------------------------------------------------------------------- 1 | # SQL migration 2 | 3 | - Feature Name: sql-migration 4 | - Proposal Date: 2022-09-30 18:38:20 5 | - Start Date: (date) 6 | 7 | ## Summary 8 | 9 | DB migration should be as easy as possible. 
Users should only need to change their db schema the same way they change code, and the migration system should take care of the rest, just like terraform.
10 |
11 | ## Motivation
12 |
13 | Existing solutions:
14 |
15 | 1. Use normal database migrations. This is a pretty bad experience since engineers need to keep track of what the database looks like at any given point, and reconstructing that from a list of migration files is super hard.
16 | 2. Projects like [atlas](https://github.com/ariga/atlas). It tries to mimic the terraform experience, but given that SQL itself is a declarative language, why bother creating a new one that developers/DBAs need to learn?
17 |
18 | Since the existing solutions are not good enough, we need to create a new one.
19 |
20 | ## Guide-level explanation
21 |
22 | Users could use the tool like this:
23 |
24 | ```bash
25 | # dump all the schemas into a folder
26 | $ renovate schema init --url postgres://user@localhost:5432/hello
27 | Database schema has successfully dumped into ./hello.
28 |
29 | # if the schema already exists, it is always good practice to fetch the latest schema before modifying it. Fetch will fail if the current folder is not under git or is not up to date with the remote repository.
30 | $ renovate schema fetch
31 |
32 | # do whatever schema changes you want
33 |
34 | # then run plan to see what changes will be applied. When redirected to a file, it will just print all the SQL statements for the migration.
35 | $ renovate schema plan
36 | Table auth.users changed:
37 |
38 | create table auth.users(
39 |   id uuid primary key,
40 |   name text not null,
41 |   email text not null,
42 |   password text not null,
43 | - created_at timestamptz not null,
44 | + created_at timestamptz not null default now(),
45 | + updated_at timestamptz not null
46 | );
47 |
48 | The following SQLs will be applied:
49 |
50 | alter table auth.users add column updated_at timestamptz not null;
51 | alter table auth.users alter column created_at set default now();
52 |
53 | # then apply the changes
54 | $ renovate apply
55 | Your repo is dirty. Please commit the changes before applying.
56 |
57 | $ git commit -a -m "add updated_at column and set default value for created_at"
58 |
59 | # now you can directly apply
60 | # apply can use -p to run a previously saved plan or a manually edited plan
61 | # the remote schema and the plan being executed will be saved in _meta/plans/202109301022/.
62 | $ renovate apply
63 |
64 | The following SQLs will be applied:
65 |
66 | alter table auth.users add column updated_at timestamptz not null;
67 | alter table auth.users alter column created_at set default now();
68 |
69 | Continue (y/n)? y
70 | Successfully applied migration to postgres://user@localhost:5432/hello.
71 | Your repo is updated with the latest schema. See `git diff HEAD~1` for details.
72 | ```
73 |
74 | Note that not all changes can generate proper migration SQLs. Currently we only support generating the following migration SQLs:
75 |
76 | - create table
77 | - alter table add/drop column
78 | - alter table alter column set default
79 | - alter table add/drop constraint
80 | - grant/revoke privilege
81 |
82 | We will gradually support more and more migrations. If certain schema changes are not supported (e.g. a table is completely removed or a column type is changed), we will print a warning and ask the user to manually write the migration SQLs.
83 |
84 | Once the migration is applied, the local schema files will be updated to the latest schema automatically.
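
To make the supported cases concrete, here is a small hypothetical example (the `todos` table and its columns are made up for illustration): adding a nullable column to a table file in the local repo should plan to a single additive statement.

```sql
-- local edit: a new column appended to the table definition
CREATE TABLE public.todos (
    id uuid PRIMARY KEY,
    title text NOT NULL,
    completed_at timestamptz  -- newly added column
);

-- `renovate schema plan` should then emit:
ALTER TABLE public.todos ADD COLUMN completed_at timestamptz;
```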
85 |
86 | ## Reference-level explanation
87 |
88 | Postgres supports `pg_get_xxx` functions to retrieve the DDL for views and functions.
89 |
90 | When loading the remote schema into the local directories, we will create a subdirectory for each schema, with types/tables/views/functions/triggers directories under the schema directory if they exist. Each type/table/view/function/trigger will be stored in a separate file. The file name will be the type/table/view/function/trigger name. The file content will be the SQL to create the type/table/view/function/trigger.
91 |
92 | Upon `renovate plan`, we will compare the local schema with the remote schema. The comparison algorithm looks like this:
93 |
94 | 1. use pg_dump to dump the remote schema into a temporary file, then load it into a DatabaseSchema struct. The DatabaseSchema and corresponding structs are parsed from each SQL statement. We use [sqlparser](https://github.com/sqlparser-rs/sqlparser-rs) to do the parsing work and the `From` trait to convert each SqlStatement into our own structs.
95 | 2. load the local repo into a DatabaseSchema struct.
96 | 3. Compare each data structure to find out what is: 1) newly added, 2) removed, 3) changed. (A minimal sketch of this step appears after the Drawbacks section below.)
97 | 4. Based on the comparison result, generate the SQL statements to apply the changes.
98 |
99 | ```rust
100 | pub struct DatabaseSchema {
101 |     pub schemas: BTreeMap<String, Schema>,
102 | }
103 |
104 | pub struct Schema {
105 |     pub types: BTreeMap<String, DataType>,
106 |     pub tables: BTreeMap<String, Table>,
107 |     pub views: BTreeMap<String, View>,
108 |     pub functions: BTreeMap<String, Function>,
109 |     pub triggers: BTreeMap<String, Trigger>,
110 | }
111 |
112 | pub struct DataType {
113 |
114 | }
115 |
116 | pub struct Table {
117 |     pub columns: BTreeMap<String, Column>,
118 |     pub constraints: BTreeMap<String, Constraint>,
119 |     pub privileges: BTreeMap<String, Privilege>,
120 | }
121 |
122 | pub struct View {
123 |     // for view definition, if it changed we will just drop and recreate it
124 |     // shall we verify if the SQL is valid?
125 |     pub sql: String,
126 |     pub constraints: BTreeMap<String, Constraint>,
127 |     pub privileges: BTreeMap<String, Privilege>,
128 | }
129 |
130 | pub struct Function {
131 |     // for function definition, if it changed we will just drop and recreate it
132 |     // shall we verify if the SQL is valid?
133 |     pub sql: String,
134 |     pub privileges: BTreeMap<String, Privilege>,
135 | }
136 |
137 | pub struct Trigger {
138 |     // for trigger definition, if it changed we will just drop and recreate it
139 |     // shall we verify if the SQL is valid?
140 |     pub sql: String,
141 | }
142 | ```
143 |
144 | For each entity, we will implement the following traits:
145 |
146 | ```rust
147 | pub trait Differ {
148 |     fn text_diff(&self, remote: &Self) -> Vec<Diff>;
149 |     fn ast_diff(&self, remote: &Self) -> Vec<Diff>;
150 | }
151 | ```
152 |
153 | ```rust
154 | pub trait Planner {
155 |     fn diff(&self, remote: &Self) -> Vec<Diff>;
156 |     fn plan(&self, diff: &[Diff]) -> Vec<String>;
157 | }
158 | ```
159 |
160 | When applying the migration, we will first check if the local schema is up to date with the remote schema. If not, we will print a warning and ask the user to run `renovate init` to update the local schema. Then we will apply the migration SQLs to the remote database.
161 |
162 | ## Drawbacks
163 |
164 | This is not a universal solution to all databases. It is only for Postgres. I don't have the time or intention to support other databases.
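
Returning to step 3 of the comparison algorithm: the sketch below illustrates the key-set diff over two `BTreeMap`s. This is a minimal illustration only; `Delta` and `diff_maps` are hypothetical names rather than the actual implementation, and plain strings stand in for the real entity structs.

```rust
use std::collections::BTreeMap;

/// What happened to a named item between local and remote state
/// (a hypothetical stand-in for the `Diff` type above).
#[derive(Debug)]
enum Delta {
    Added(String),   // exists locally only: emit a CREATE
    Removed(String), // exists remotely only: emit a DROP
    Changed(String), // exists in both but differs: emit an ALTER (or drop/recreate)
}

/// Compare two name -> definition maps and report added/removed/changed keys.
/// `V: PartialEq` stands in for comparing the full entity structs.
fn diff_maps<V: PartialEq>(
    local: &BTreeMap<String, V>,
    remote: &BTreeMap<String, V>,
) -> Vec<Delta> {
    let mut deltas = Vec::new();
    for (name, local_def) in local {
        match remote.get(name) {
            None => deltas.push(Delta::Added(name.clone())),
            Some(remote_def) if remote_def != local_def => {
                deltas.push(Delta::Changed(name.clone()))
            }
            _ => {} // identical: nothing to do
        }
    }
    for name in remote.keys() {
        if !local.contains_key(name) {
            deltas.push(Delta::Removed(name.clone()));
        }
    }
    deltas
}

fn main() {
    let local = BTreeMap::from([
        ("users".to_string(), "id uuid, email text"),
        ("todos".to_string(), "id uuid, title text"),
    ]);
    let remote = BTreeMap::from([
        ("users".to_string(), "id uuid"),
        ("sessions".to_string(), "id uuid"),
    ]);
    // prints: [Added("todos"), Changed("users"), Removed("sessions")]
    println!("{:?}", diff_maps(&local, &remote));
}
```

Using `BTreeMap` rather than `HashMap` keeps iteration order deterministic, so the generated plan is stable across runs.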
165 |
166 | ## Rationale and alternatives
167 |
168 | ## Prior art
169 |
170 | ## Unresolved questions
171 |
172 | ## Future possibilities
173 |
--------------------------------------------------------------------------------
/rfcs/0002-rewrite-create-table.md:
--------------------------------------------------------------------------------
1 | # Rewrite `create table`
2 |
3 | - Feature Name: rewrite-create-table
4 | - Proposal Date: 2022-10-04 21:40:31
5 | - Start Date: (date)
6 |
7 | ## Summary
8 |
9 | We shall support `renovate format` to rewrite `create table` statements to a compatible format that `pg_dump` uses. For example:
10 |
11 | ```sql
12 | CREATE TABLE foo (
13 |     id1 int generated always as identity,
14 |     id2 serial not null primary key check((id2>5)),
15 |     name text default 'tyrchen',
16 |     CHECK (name ~* '^[a-z][a-z0-9]{5,}$')
17 | );
18 | ```
19 |
20 | shall be rewritten to:
21 |
22 | ```sql
23 | CREATE TABLE public.foo (
24 |     id1 integer NOT NULL,
25 |     id2 integer NOT NULL,
26 |     name text DEFAULT 'tyrchen'::text,
27 |     CONSTRAINT foo_id2_check CHECK ((id2 > 5)),
28 |     CONSTRAINT foo_name_check CHECK ((name ~* '^[a-z][a-z0-9]{5,}$'::text))
29 | );
30 | ALTER TABLE public.foo OWNER TO CURRENT_USER;
31 |
32 | ALTER TABLE public.foo ALTER COLUMN id1 ADD GENERATED ALWAYS AS IDENTITY (
33 |     SEQUENCE NAME public.foo_id1_seq
34 |     START WITH 1
35 |     INCREMENT BY 1
36 |     NO MINVALUE
37 |     NO MAXVALUE
38 |     CACHE 1
39 | );
40 |
41 | CREATE SEQUENCE public.foo_id2_seq
42 |     AS integer
43 |     START WITH 1
44 |     INCREMENT BY 1
45 |     NO MINVALUE
46 |     NO MAXVALUE
47 |     CACHE 1;
48 |
49 | ALTER TABLE public.foo_id2_seq OWNER TO CURRENT_USER;
50 | ALTER SEQUENCE public.foo_id2_seq OWNED BY public.foo.id2;
51 | ALTER TABLE ONLY public.foo ALTER COLUMN id2 SET DEFAULT nextval('public.foo_id2_seq'::regclass);
52 | ALTER TABLE ONLY public.foo ADD CONSTRAINT foo_pkey PRIMARY KEY (id2);
53 | ```
54 |
55 | ## Motivation
56 |
57 | Better compatibility with `pg_dump`.
58 |
59 | ## Guide-level explanation
60 |
61 | To achieve this, we need to alter the AST of `create table`. High-level thoughts:
62 |
63 | 1. if the column level has constraints other than Not Null and Default, we move those constraints to the table level and give them a name. For example, `check((id>5))` becomes `CONSTRAINT foo_id_check CHECK ((id > 5))`.
64 | 2. if the table level has constraints other than Check, we move those constraints to `alter table` statements. For example, `primary key (id)` becomes `ALTER TABLE ONLY public.foo ADD CONSTRAINT foo_pkey PRIMARY KEY (id);`
65 |
66 | Note we won't support constraints like `GENERATED`.
67 | ## Reference-level explanation
68 |
69 | ## Drawbacks
70 |
71 | ## Rationale and alternatives
72 |
73 | ## Prior art
74 |
75 | ## Unresolved questions
76 |
77 | ## Future possibilities
78 |
--------------------------------------------------------------------------------
/rfcs/template.md:
--------------------------------------------------------------------------------
1 | # Title
2 |
3 | - Feature Name: (name)
4 | - Proposal Date: (date)
5 | - Start Date: (date)
6 |
7 | ## Summary
8 |
9 | One paragraph explanation of the feature.
10 |
11 | ## Motivation
12 |
13 | Why are we doing this? What use cases does it support? What is the expected outcome?
14 |
15 | ## Guide-level explanation
16 |
17 | Explain the proposal as if it was already included in the language and you were teaching it to another Rust programmer. That generally means:
18 |
19 | - Introducing new named concepts.
20 | - Explaining the feature largely in terms of examples. 21 | - Explaining how Rust programmers should *think* about the feature, and how it should impact the way they use Rust. It should explain the impact as concretely as possible. 22 | - If applicable, provide sample error messages, deprecation warnings, or migration guidance. 23 | - If applicable, describe the differences between teaching this to existing Rust programmers and new Rust programmers. 24 | 25 | For implementation-oriented RFCs (e.g. for compiler internals), this section should focus on how compiler contributors should think about the change, and give examples of its concrete impact. For policy RFCs, this section should provide an example-driven introduction to the policy, and explain its impact in concrete terms. 26 | 27 | ## Reference-level explanation 28 | 29 | This is the technical portion of the RFC. Explain the design in sufficient detail that: 30 | 31 | - Its interaction with other features is clear. 32 | - It is reasonably clear how the feature would be implemented. 33 | - Corner cases are dissected by example. 34 | 35 | The section should return to the examples given in the previous section, and explain more fully how the detailed proposal makes those examples work. 36 | 37 | ## Drawbacks 38 | 39 | Why should we *not* do this? 40 | 41 | ## Rationale and alternatives 42 | 43 | - Why is this design the best in the space of possible designs? 44 | - What other designs have been considered and what is the rationale for not choosing them? 45 | - What is the impact of not doing this? 46 | 47 | ## Prior art 48 | 49 | Discuss prior art, both the good and the bad, in relation to this proposal. 50 | A few examples of what this can include are: 51 | 52 | - For language, library, cargo, tools, and compiler proposals: Does this feature exist in other programming languages and what experience have their community had? 53 | - For community proposals: Is this done by some other community and what were their experiences with it? 54 | - For other teams: What lessons can we learn from what other communities have done here? 55 | - Papers: Are there any published papers or great posts that discuss this? If you have some relevant papers to refer to, this can serve as a more detailed theoretical background. 56 | 57 | This section is intended to encourage you as an author to think about the lessons from other languages, provide readers of your RFC with a fuller picture. 58 | If there is no prior art, that is fine - your ideas are interesting to us whether they are brand new or if it is an adaptation from other languages. 59 | 60 | Note that while precedent set by other languages is some motivation, it does not on its own motivate an RFC. 61 | Please also take into consideration that rust sometimes intentionally diverges from common language features. 62 | 63 | ## Unresolved questions 64 | 65 | - What parts of the design do you expect to resolve through the RFC process before this gets merged? 66 | - What parts of the design do you expect to resolve through the implementation of this feature before stabilization? 67 | - What related issues do you consider out of scope for this RFC that could be addressed in the future independently of the solution that comes out of this RFC? 68 | 69 | ## Future possibilities 70 | 71 | Think about what the natural extension and evolution of your proposal would 72 | be and how it would affect the language and project as a whole in a holistic 73 | way. 
Try to use this section as a tool to more fully consider all possible 74 | interactions with the project and language in your proposal. 75 | Also consider how this all fits into the roadmap for the project 76 | and of the relevant sub-team. 77 | 78 | This is also a good place to "dump ideas", if they are out of scope for the 79 | RFC you are writing but otherwise related. 80 | 81 | If you have tried and cannot think of any future possibilities, 82 | you may simply state that you cannot think of anything. 83 | 84 | Note that having something written down in the future-possibilities section 85 | is not a reason to accept the current or a future RFC; such notes should be 86 | in the section on motivation or rationale in this or subsequent RFCs. 87 | The section merely provides additional information. 88 | -------------------------------------------------------------------------------- /src/commands/generate/completion.rs: -------------------------------------------------------------------------------- 1 | use super::{Args, CommandExecutor}; 2 | use clap::{CommandFactory, Parser}; 3 | use clap_utils::prelude::*; 4 | 5 | #[derive(Parser, Debug, Clone)] 6 | pub struct GenerateCompletionCommand { 7 | /// the type of the shell 8 | #[clap(default_value = "fish")] 9 | pub shell_type: ShellType, 10 | } 11 | 12 | #[async_trait] 13 | impl CommandExecutor for GenerateCompletionCommand { 14 | async fn execute(&self, _args: &Args) -> Result<(), Error> { 15 | self.shell_type 16 | .generate_completion("renovate", &mut Args::command()); 17 | 18 | Ok(()) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/commands/generate/mod.rs: -------------------------------------------------------------------------------- 1 | mod_pub_use!(completion); 2 | 3 | use super::{Args, CommandExecutor}; 4 | use clap_utils::prelude::*; 5 | 6 | #[derive(Parser, Debug, Clone)] 7 | pub struct ActionGenerateCommand { 8 | #[clap(subcommand)] 9 | pub cmd: Generate, 10 | } 11 | 12 | #[async_trait] 13 | impl CommandExecutor for ActionGenerateCommand { 14 | async fn execute(&self, args: &Args) -> Result<(), Error> { 15 | self.cmd.execute(args).await 16 | } 17 | } 18 | 19 | subcmd!(Generate, [Completion = "generate shell completion"]); 20 | -------------------------------------------------------------------------------- /src/commands/mod.rs: -------------------------------------------------------------------------------- 1 | mod generate; 2 | mod schema; 3 | 4 | use clap_utils::prelude::*; 5 | use generate::*; 6 | use schema::*; 7 | 8 | /// Dispatch and execute the command. Make sure to add the new command enum into the enum_dispatch macro below. 
9 | #[async_trait] 10 | #[enum_dispatch(Action, Generate, Schema)] // <- [new group] put the new group enum here 11 | pub trait CommandExecutor { 12 | async fn execute(&self, args: &Args) -> Result<(), Error>; 13 | } 14 | 15 | /// Renovate database migration tool 16 | #[derive(Parser, Debug, Clone)] 17 | #[clap(version, author, about, long_about = None)] 18 | pub struct Args { 19 | /// subcommand to execute 20 | #[clap(subcommand)] 21 | pub action: Action, 22 | 23 | #[cfg(feature = "cli-test")] 24 | /// drop database on exit (for testing purpose only) 25 | #[clap(long, global = true, value_parser, default_value = "false")] 26 | pub drop_on_exit: bool, 27 | } 28 | 29 | subcmd!( 30 | Action, 31 | // [new group] add the new command enum here 32 | [Generate = "generate something", Schema = "Schema migration"] 33 | ); 34 | -------------------------------------------------------------------------------- /src/commands/schema/apply.rs: -------------------------------------------------------------------------------- 1 | use super::{generate_plan, git_commit, git_dirty, Args, CommandExecutor}; 2 | use crate::{utils::load_config, DatabaseRepo}; 3 | use clap_utils::{ 4 | dialoguer::{theme::ColorfulTheme, Confirm}, 5 | prelude::*, 6 | }; 7 | 8 | #[derive(Parser, Debug, Clone)] 9 | pub struct SchemaApplyCommand { 10 | #[clap(long, value_parser, default_value = "false")] 11 | remote: bool, 12 | } 13 | 14 | #[async_trait] 15 | impl CommandExecutor for SchemaApplyCommand { 16 | async fn execute(&self, _args: &Args) -> Result<(), Error> { 17 | let plan = generate_plan(self.remote).await?; 18 | if plan.is_empty() { 19 | return Ok(()); 20 | } 21 | let config = load_config().await?; 22 | let db_repo = DatabaseRepo::new(&config); 23 | 24 | if git_dirty()? { 25 | if confirm("\nYour repo is dirty. Do you want to commit it first?") { 26 | git_commit("automatically commit the schema changes before applying the plan")?; 27 | } else { 28 | bail!("Your repo is dirty. Please commit the changes before applying."); 29 | } 30 | } 31 | 32 | if confirm("Do you want to perform this update?") { 33 | db_repo.apply(plan, self.remote).await?; 34 | git_commit("automatically commit the changes applied to remote server")?; 35 | let url = if self.remote { 36 | &config.remote_url 37 | } else { 38 | &config.url 39 | }; 40 | println!( 41 | "Successfully applied migration to {}.\nYour repo is updated with the latest schema. See `git diff HEAD~1` for details.", 42 | url 43 | ); 44 | } else { 45 | println!("Database schema update has been cancelled."); 46 | } 47 | 48 | Ok(()) 49 | } 50 | } 51 | 52 | pub(crate) fn confirm(prompt: &'static str) -> bool { 53 | Confirm::with_theme(&ColorfulTheme::default()) 54 | .with_prompt(prompt) 55 | .interact() 56 | .expect("confirm UI should work") 57 | } 58 | -------------------------------------------------------------------------------- /src/commands/schema/fetch.rs: -------------------------------------------------------------------------------- 1 | use super::{confirm, git_commit, Args, CommandExecutor}; 2 | use crate::{utils::load_config, DatabaseRepo}; 3 | use clap_utils::prelude::*; 4 | 5 | #[derive(Parser, Debug, Clone)] 6 | pub struct SchemaFetchCommand {} 7 | 8 | #[async_trait] 9 | impl CommandExecutor for SchemaFetchCommand { 10 | async fn execute(&self, _args: &Args) -> Result<(), Error> { 11 | let config = load_config().await?; 12 | let repo = DatabaseRepo::new(&config); 13 | 14 | if confirm("This will overwrite the local schema files. 
Continue?") { 15 | git_commit("commit schema changes before fetching")?; 16 | repo.fetch().await?; 17 | } 18 | Ok(()) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/commands/schema/init.rs: -------------------------------------------------------------------------------- 1 | use super::{git_commit, Args, CommandExecutor}; 2 | use crate::{DatabaseRepo, RenovateConfig}; 3 | use clap_utils::prelude::*; 4 | use std::{env::set_current_dir, fs, path::PathBuf}; 5 | use url::Url; 6 | 7 | #[derive(Parser, Debug, Clone)] 8 | pub struct SchemaInitCommand { 9 | #[clap(value_parser = parse_url)] 10 | pub url: Url, 11 | } 12 | 13 | #[async_trait] 14 | impl CommandExecutor for SchemaInitCommand { 15 | async fn execute(&self, _args: &Args) -> Result<(), Error> { 16 | let path = PathBuf::from(format!(".{}", self.url.path())); 17 | if path.exists() && path.read_dir()?.next().is_some() { 18 | bail!("directory {} already exists and not empty", path.display()); 19 | } else { 20 | fs::create_dir(&path)?; 21 | } 22 | 23 | set_current_dir(&path)?; 24 | let config = RenovateConfig::new(self.url.clone()); 25 | config.save("renovate.yml").await?; 26 | 27 | let db_repo = DatabaseRepo::new(&config); 28 | db_repo.init_local_database().await?; 29 | 30 | db_repo.fetch().await?; 31 | 32 | git_commit(format!("init schema migration repo for {}", self.url))?; 33 | 34 | println!( 35 | "Database schema for {} has successfully dumped into {}.", 36 | self.url, 37 | path.display() 38 | ); 39 | Ok(()) 40 | } 41 | } 42 | 43 | fn parse_url(s: &str) -> Result { 44 | let url = Url::parse(s)?; 45 | if url.scheme() != "postgres" { 46 | bail!("only postgres url is supported"); 47 | } 48 | if url.path().is_empty() { 49 | bail!("database name is required in the url"); 50 | } 51 | Ok(url) 52 | } 53 | 54 | #[allow(dead_code)] 55 | async fn fetch_and_save(url: &Url) -> Result<()> { 56 | let config = RenovateConfig::new(url.clone()); 57 | config.save("renovate.yml").await?; 58 | 59 | let db_repo = DatabaseRepo::new(&config); 60 | db_repo.init_local_database().await?; 61 | 62 | db_repo.fetch().await?; 63 | 64 | git_commit(format!("init schema migration repo for {}", url))?; 65 | Ok(()) 66 | } 67 | -------------------------------------------------------------------------------- /src/commands/schema/mod.rs: -------------------------------------------------------------------------------- 1 | mod_pub_use!(apply, fetch, init, normalize, plan); 2 | 3 | use super::{Args, CommandExecutor}; 4 | use clap_utils::prelude::*; 5 | 6 | #[derive(Parser, Debug, Clone)] 7 | pub struct ActionSchemaCommand { 8 | #[clap(subcommand)] 9 | pub cmd: Schema, 10 | } 11 | 12 | #[async_trait] 13 | impl CommandExecutor for ActionSchemaCommand { 14 | async fn execute(&self, args: &Args) -> Result<(), Error> { 15 | self.cmd.execute(args).await 16 | } 17 | } 18 | 19 | subcmd!( 20 | Schema, 21 | [ 22 | Apply = "apply the migration plan to the remote database server", 23 | Fetch = "fetch the most recent schema from the remote database server", 24 | Init = "init a database migration repo", 25 | Normalize = "normalize local schema via a temp local database", 26 | Plan = "diff the local change and remote state, then make a migration plan" 27 | ] 28 | ); 29 | 30 | #[cfg(feature = "cli-test")] 31 | fn git_commit(_msg: impl AsRef) -> Result<()> { 32 | Ok(()) 33 | } 34 | 35 | #[cfg(not(feature = "cli-test"))] 36 | fn git_commit(msg: impl AsRef) -> Result<()> { 37 | use crate::GitRepo; 38 | let repo = if 
39 | GitRepo::open(".")?
40 | } else {
41 | GitRepo::init(".")?
42 | };
43 | if repo.is_dirty() {
44 | repo.commit(msg)?;
45 | }
46 |
47 | Ok(())
48 | }
49 | #[cfg(feature = "cli-test")]
50 | fn git_dirty() -> Result<bool> {
51 | Ok(false)
52 | }
53 |
54 | #[cfg(not(feature = "cli-test"))]
55 | fn git_dirty() -> Result<bool> {
56 | let repo = crate::GitRepo::open(".")?;
57 | Ok(repo.is_dirty())
58 | }
59 |
--------------------------------------------------------------------------------
/src/commands/schema/normalize.rs:
--------------------------------------------------------------------------------
1 | use super::{git_commit, Args, CommandExecutor};
2 | use crate::{utils::load_config, DatabaseRepo, LocalRepo, SchemaLoader, SqlSaver};
3 | use clap_utils::prelude::*;
4 |
5 | #[derive(Parser, Debug, Clone)]
6 | pub struct SchemaNormalizeCommand {}
7 |
8 | #[async_trait]
9 | impl CommandExecutor for SchemaNormalizeCommand {
10 | async fn execute(&self, _args: &Args) -> Result<(), Error> {
11 | let config = load_config().await?;
12 |
13 | git_commit("commit schema changes before normalization")?;
14 |
15 | let local_repo = LocalRepo::new(&config.output.path);
16 | let schema = local_repo.load().await?;
17 | let sql = schema.sql(true);
18 |
19 | let repo = DatabaseRepo::new(&config);
20 | let schema = repo.normalize(&sql).await?;
21 | schema.save(&config.output).await?;
22 |
23 | git_commit("commit schema changes after normalization")?;
24 |
25 | Ok(())
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/src/commands/schema/plan.rs:
--------------------------------------------------------------------------------
1 | use super::{Args, CommandExecutor};
2 | use crate::{utils::load_config, DatabaseRepo, LocalRepo, SchemaLoader, SqlLoader};
3 | use clap_utils::{highlight_text, prelude::*};
4 |
5 | #[derive(Parser, Debug, Clone)]
6 | pub struct SchemaPlanCommand {}
7 |
8 | #[async_trait]
9 | impl CommandExecutor for SchemaPlanCommand {
10 | async fn execute(&self, _args: &Args) -> Result<(), Error> {
11 | generate_plan(false).await?;
12 | Ok(())
13 | }
14 | }
15 |
16 | pub(super) async fn generate_plan(remote: bool) -> Result<Vec<String>> {
17 | let config = load_config().await?;
18 | let db_repo = DatabaseRepo::new(&config);
19 |
20 | let local_schema = if !remote {
21 | let sql = LocalRepo::new(&config.output.path).load_sql().await?;
22 | db_repo.normalize(&sql).await?
23 | } else {
24 | db_repo.load().await?
25 | };
26 | let remote_schema = if !remote {
27 | db_repo.load().await?
28 | } else {
29 | let sql = db_repo.load_sql_string(remote).await?;
30 | SqlLoader::new(&sql).load().await?
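// Note: both arms of the two `if !remote` expressions above evaluate to a
// parsed `DatabaseSchema`, so the `plan` call below always compares two
// normalized schemas: local files vs. the live database by default, or the
// local database vs. the remote one when `remote` is set.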
31 | };
32 | let plan = local_schema.plan(&remote_schema, true)?;
33 |
34 | if plan.is_empty() {
35 | println!("No changes detected.");
36 | return Ok(vec![]);
37 | }
38 |
39 | println!("The following SQLs will be applied:\n");
40 | for item in plan.iter() {
41 | let formatted = sqlformat::format(
42 | item,
43 | &Default::default(),
44 | config.output.format.unwrap_or_default().into(),
45 | );
46 | if atty::is(atty::Stream::Stdout) {
47 | println!("{};", highlight_text(&formatted, "sql", None)?);
48 | } else {
49 | println!("{};", formatted);
50 | }
51 | }
52 | Ok(plan)
53 | }
54 |
--------------------------------------------------------------------------------
/src/config.rs:
--------------------------------------------------------------------------------
1 | use anyhow::{Context, Result};
2 | use serde::{Deserialize, Serialize};
3 | use sqlformat::{FormatOptions, Indent};
4 | use std::path::{Path, PathBuf};
5 | use tokio::fs;
6 | use url::{Host, Url};
7 |
8 | #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
9 | #[serde(rename_all = "snake_case")]
10 | pub struct RenovateConfig {
11 | /// The local postgres url of the database
12 | pub url: String,
13 | /// The remote (actual) postgres url of the database
14 | pub remote_url: String,
15 | /// The output config
16 | #[serde(default)]
17 | pub output: RenovateOutputConfig,
18 | }
19 |
20 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
21 | #[serde(rename_all = "snake_case")]
22 | pub struct RenovateOutputConfig {
23 | #[serde(default)]
24 | pub(crate) layout: Layout,
25 | #[serde(default = "default_path")]
26 | pub(crate) path: PathBuf,
27 | #[serde(default = "default_format")]
28 | pub(crate) format: Option<RenovateFormatConfig>,
29 | }
30 |
31 | /// Layout of the output files when saving the schema
32 | #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
33 | #[serde(rename_all = "snake_case")]
34 | pub enum Layout {
35 | /// Default layout. Each schema has its own directory, with one file per type of object.
36 | #[default]
37 | Normal,
38 | /// All objects are in a single file.
39 | Flat,
40 | /// Each type has its own directory under the schema directory.
41 | Nested,
42 | }
43 |
44 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
45 | #[serde(rename_all = "snake_case")]
46 | pub struct RenovateFormatConfig {
47 | /// Controls the type and length of indentation to use. Default 4.
48 | #[serde(default = "default_indent")]
49 | indent: u8,
50 | /// When set, changes reserved keywords to ALL CAPS. Defaults to true.
51 | #[serde(default = "default_uppercase")]
52 | uppercase: bool,
53 | /// Controls the number of line breaks after a query. Default 2.
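/// For orientation, a full `renovate.yml` spelling out these formatting knobs
/// might look like the following sketch (the values shown are the illustrative
/// defaults, and both urls are placeholders):
///
/// ```yaml
/// url: postgres://127.0.0.1:5432/_renovate_mydb
/// remote_url: postgres://user:pass@db.example.com/mydb
/// output:
///   layout: normal
///   path: .
///   format:
///     indent: 4
///     uppercase: true
///     lines_between_queries: 2
/// ```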
54 | #[serde(default = "default_lines")] 55 | lines_between_queries: u8, 56 | } 57 | 58 | impl Default for RenovateFormatConfig { 59 | fn default() -> Self { 60 | Self { 61 | indent: default_indent(), 62 | uppercase: default_uppercase(), 63 | lines_between_queries: default_lines(), 64 | } 65 | } 66 | } 67 | 68 | impl From for FormatOptions { 69 | fn from(config: RenovateFormatConfig) -> Self { 70 | Self { 71 | indent: Indent::Spaces(config.indent), 72 | uppercase: config.uppercase, 73 | lines_between_queries: config.lines_between_queries, 74 | } 75 | } 76 | } 77 | 78 | impl RenovateConfig { 79 | pub fn new(url: Url) -> Self { 80 | let local_url = match url.host() { 81 | Some(Host::Domain(domain)) => { 82 | if domain == "localhost" { 83 | Some(url.clone()) 84 | } else { 85 | None 86 | } 87 | } 88 | Some(Host::Ipv4(ip)) => { 89 | if ip.is_loopback() { 90 | Some(url.clone()) 91 | } else { 92 | None 93 | } 94 | } 95 | Some(Host::Ipv6(ip)) => { 96 | if ip.is_loopback() { 97 | Some(url.clone()) 98 | } else { 99 | None 100 | } 101 | } 102 | _ => panic!("Invalid host: {}", url), 103 | }; 104 | 105 | let local_url = local_url.unwrap_or_else(|| { 106 | format!( 107 | "postgres://127.0.0.1:5432/_renovate_{}", 108 | url.path().trim_start_matches('/') 109 | ) 110 | .parse() 111 | .unwrap() 112 | }); 113 | 114 | Self { 115 | url: local_url.into(), 116 | remote_url: url.into(), 117 | output: RenovateOutputConfig::default(), 118 | } 119 | } 120 | 121 | pub async fn load(path: impl AsRef) -> Result { 122 | let path = path.as_ref(); 123 | let content = fs::read_to_string(path) 124 | .await 125 | .with_context(|| format!("Failed to read configuration: {}", path.display()))?; 126 | let config = serde_yaml::from_str(&content) 127 | .with_context(|| format!("Failed to parse configuration:\n{}", content))?; 128 | Ok(config) 129 | } 130 | 131 | pub async fn save(&self, path: impl AsRef) -> Result<()> { 132 | let path = path.as_ref(); 133 | let content = serde_yaml::to_string(&self) 134 | .with_context(|| format!("Failed to serialize configuration: {:?}", self))?; 135 | fs::write(path, content) 136 | .await 137 | .with_context(|| format!("Failed to write configuration: {}", path.display()))?; 138 | Ok(()) 139 | } 140 | } 141 | 142 | impl RenovateOutputConfig { 143 | pub fn new(path: impl Into) -> Self { 144 | Self { 145 | path: path.into(), 146 | ..Default::default() 147 | } 148 | } 149 | } 150 | 151 | impl Default for RenovateOutputConfig { 152 | fn default() -> Self { 153 | Self { 154 | layout: Layout::default(), 155 | path: default_path(), 156 | format: default_format(), 157 | } 158 | } 159 | } 160 | 161 | fn default_format() -> Option { 162 | Some(RenovateFormatConfig::default()) 163 | } 164 | 165 | fn default_path() -> PathBuf { 166 | PathBuf::from(".") 167 | } 168 | 169 | fn default_indent() -> u8 { 170 | 4 171 | } 172 | 173 | fn default_uppercase() -> bool { 174 | true 175 | } 176 | 177 | fn default_lines() -> u8 { 178 | 2 179 | } 180 | 181 | #[cfg(test)] 182 | mod tests { 183 | use super::*; 184 | 185 | #[test] 186 | fn remote_url_should_generate_equivalent_local_url() { 187 | let url = 188 | Url::parse("postgres://tyrchen:password@awseome.cloud.neon.tech/test-db").unwrap(); 189 | let config = RenovateConfig::new(url); 190 | assert_eq!(config.url, "postgres://127.0.0.1:5432/_renovate_test-db"); 191 | } 192 | } 193 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature 
= "cli")] 2 | pub mod commands; 3 | mod config; 4 | mod macros; 5 | mod parser; 6 | mod repo; 7 | mod schema; 8 | mod types; 9 | mod utils; 10 | 11 | use anyhow::Result; 12 | use async_trait::async_trait; 13 | use config::RenovateOutputConfig; 14 | use pg_query::NodeEnum; 15 | use std::{collections::BTreeSet, path::PathBuf}; 16 | 17 | pub use config::RenovateConfig; 18 | pub use parser::DatabaseSchema; 19 | pub use repo::git::{BumpVersion, GitRepo}; 20 | 21 | #[async_trait] 22 | pub trait SchemaLoader { 23 | /// Load the sql file(s) to a DatabaseSchema 24 | async fn load(&self) -> Result; 25 | async fn load_sql(&self) -> Result; 26 | } 27 | 28 | #[async_trait] 29 | pub trait SqlSaver { 30 | /// store data to sql files in the given directory 31 | async fn save(&self, config: &RenovateOutputConfig) -> Result<()>; 32 | } 33 | 34 | /// Object for Differ must satisfy NodeItem trait 35 | pub trait NodeItem: ToString { 36 | type Inner; 37 | /// Unique id for the object 38 | fn id(&self) -> String; 39 | /// database type name for the object 40 | fn type_name(&self) -> &'static str; 41 | /// get node for the item 42 | fn node(&self) -> &NodeEnum; 43 | /// get the inner item 44 | fn inner(&self) -> Result<&Self::Inner>; 45 | /// convert by mapping inner item with optional data and generate a new node 46 | fn map(&self, f: F, data: Option) -> Result 47 | where 48 | F: Fn(&Self::Inner, Option) -> Result, 49 | { 50 | f(self.inner()?, data) 51 | } 52 | /// revert the node. For example, a `GRANT xxx` will become `REVOKE xxx` 53 | fn revert(&self) -> Result; 54 | } 55 | 56 | /// Record the old/new for a schema object 57 | #[derive(Debug, Clone, PartialEq, Eq)] 58 | pub struct NodeDiff { 59 | pub old: Option, 60 | pub new: Option, 61 | pub diff: String, 62 | } 63 | 64 | /// Record the changes for a schema object 65 | #[derive(Debug, Clone, PartialEq, Eq)] 66 | pub struct NodeDelta { 67 | pub added: BTreeSet, 68 | pub removed: BTreeSet, 69 | pub changed: BTreeSet<(T, T)>, 70 | } 71 | 72 | /// Diffing two objects to get deltas 73 | pub trait Differ { 74 | type Diff: MigrationPlanner; 75 | /// find the schema change 76 | fn diff(&self, remote: &Self) -> Result>; 77 | } 78 | 79 | pub type MigrationResult = Result>; 80 | pub trait MigrationPlanner { 81 | type Migration: ToString; 82 | 83 | /// generate drop planner 84 | fn drop(&self) -> MigrationResult; 85 | /// generate create planner 86 | fn create(&self) -> MigrationResult; 87 | /// generate alter planner 88 | fn alter(&self) -> MigrationResult; 89 | 90 | /// if alter return Some, use the result for migration directly; otherwise, use drop/create for migration 91 | fn plan(&self) -> Result> { 92 | // if alter result is available, use that for migration 93 | let items = self.alter()?; 94 | if !items.is_empty() { 95 | return Ok(items); 96 | } 97 | 98 | // otherwise, we do drop/create for migration 99 | let drop = self.drop()?; 100 | let create = self.create()?; 101 | Ok([drop, create].into_iter().flatten().collect()) 102 | } 103 | } 104 | 105 | /// A trait for the diff object to generate proper migration sql 106 | pub trait DeltaItem: ToString { 107 | /// The node which will be used to generated the final SQL 108 | type SqlNode: NodeItem; 109 | /// generate sql for drop 110 | fn drop(self, node: &Self::SqlNode) -> Result>; 111 | /// generate sql for create 112 | fn create(self, node: &Self::SqlNode) -> Result>; 113 | /// generate rename SQL 114 | fn rename(self, node: &Self::SqlNode, new: Self) -> Result>; 115 | /// generate sql for alter 116 | fn alter(self, 
117 | }
118 |
119 | pub trait SqlFormatter {
120 | fn format(&self) -> Result<String>;
121 | }
122 |
123 | #[async_trait]
124 | pub trait MigrationExecutor {
125 | /// execute the migration
126 | async fn execute(&self) -> Result<()>;
127 | }
128 |
129 | /// Local repository
130 | #[derive(Debug, Clone)]
131 | pub struct LocalRepo {
132 | pub path: PathBuf,
133 | }
134 |
135 | /// Remote repository
136 | #[derive(Debug, Clone)]
137 | pub struct DatabaseRepo {
138 | url: String,
139 | remote_url: String,
140 | }
141 |
142 | /// intermediate representation for local and remote repo
143 | #[derive(Debug, Clone)]
144 | pub struct SqlLoader(String);
145 |
--------------------------------------------------------------------------------
/src/macros.rs:
--------------------------------------------------------------------------------
1 | #[macro_export]
2 | macro_rules! map_insert_schema {
3 | ($map:expr, $item:ident) => {
4 | $map.entry($item.id.schema.clone())
5 | .or_insert(Default::default())
6 | .insert($item.id.name.clone(), $item);
7 | };
8 | }
9 |
10 | #[macro_export]
11 | macro_rules! map_insert_relation {
12 | ($map:expr, $item:ident) => {
13 | $map.entry($item.id.schema_id.clone())
14 | .or_insert(Default::default())
15 | .insert($item.id.name.clone(), $item);
16 | };
17 | }
18 |
--------------------------------------------------------------------------------
/src/main.rs:
--------------------------------------------------------------------------------
1 | use clap_utils::prelude::*;
2 | use renovate::commands::{Args, CommandExecutor};
3 |
4 | #[tokio::main]
5 | async fn main() -> Result<()> {
6 | let args = Args::parse();
7 | let action = &args.action;
8 | action.execute(&args).await?;
9 |
10 | #[cfg(feature = "cli-test")]
11 | if args.drop_on_exit {
12 | use renovate::{DatabaseRepo, RenovateConfig};
13 | let config = RenovateConfig::load("renovate.yml").await?;
14 | let repo = DatabaseRepo::new(&config);
15 | repo.drop_database().await.ok();
16 | }
17 | Ok(())
18 | }
19 |
--------------------------------------------------------------------------------
/src/parser/composite_type.rs:
--------------------------------------------------------------------------------
1 | use super::CompositeType;
2 | use crate::NodeItem;
3 | use pg_query::{protobuf::CompositeTypeStmt, NodeEnum, NodeRef};
4 |
5 | impl NodeItem for CompositeType {
6 | type Inner = CompositeTypeStmt;
7 | fn id(&self) -> String {
8 | self.id.to_string()
9 | }
10 |
11 | fn type_name(&self) -> &'static str {
12 | "composite type"
13 | }
14 |
15 | fn node(&self) -> &NodeEnum {
16 | &self.node
17 | }
18 |
19 | fn inner(&self) -> anyhow::Result<&Self::Inner> {
20 | match &self.node {
21 | NodeEnum::CompositeTypeStmt(stmt) => Ok(stmt),
22 | _ => anyhow::bail!("not a create composite type statement"),
23 | }
24 | }
25 |
26 | fn revert(&self) -> anyhow::Result<NodeEnum> {
27 | let sql = format!("DROP TYPE {}", self.id);
28 | let parsed = pg_query::parse(&sql)?;
29 | let node = parsed.protobuf.nodes()[0].0;
30 | match node {
31 | NodeRef::DropStmt(stmt) => Ok(NodeEnum::DropStmt(stmt.clone())),
32 | _ => anyhow::bail!("not a drop type statement"),
33 | }
34 | }
35 | }
36 |
37 | impl TryFrom<&CompositeTypeStmt> for CompositeType {
38 | type Error = anyhow::Error;
39 | fn try_from(stmt: &CompositeTypeStmt) -> Result<Self, Self::Error> {
40 | let id = stmt.typevar.as_ref().into();
41 | let node = NodeEnum::CompositeTypeStmt(stmt.clone());
42 | Ok(Self { id, node })
43 | }
44 | }
45 |
46 | #[cfg(test)]
47 | mod tests {
48 | use super::*;
49 | use crate::{Differ, MigrationPlanner};
50 |
51 | #[test]
52 | fn composite_type_should_parse() {
53 | let sql = "CREATE TYPE foo AS (a int, b text)";
54 | let composite_type: CompositeType = sql.parse().unwrap();
55 | assert_eq!(composite_type.id.to_string(), "public.foo");
56 | }
57 |
58 | #[test]
59 | fn composite_type_should_generate_drop_create_plan() {
60 | let sql1 = "CREATE TYPE foo AS (a int, b text)";
61 | let sql2 = "CREATE TYPE foo AS (a int, b text, c text)";
62 | let old: CompositeType = sql1.parse().unwrap();
63 | let new: CompositeType = sql2.parse().unwrap();
64 | let diff = old.diff(&new).unwrap().unwrap();
65 | let plan = diff.plan().unwrap();
66 | assert_eq!(plan.len(), 2);
67 | assert_eq!(plan[0].to_string(), "DROP TYPE public.foo");
68 | assert_eq!(
69 | plan[1].to_string(),
70 | "CREATE TYPE foo AS (a int, b text, c text)"
71 | );
72 | }
73 | }
74 |
--------------------------------------------------------------------------------
/src/parser/enum_type.rs:
--------------------------------------------------------------------------------
1 | use super::{utils::node_to_string, EnumType};
2 | use crate::{MigrationPlanner, MigrationResult, NodeDiff, NodeItem};
3 | use itertools::Itertools;
4 | use pg_query::{protobuf::CreateEnumStmt, NodeEnum, NodeRef};
5 |
6 | impl NodeItem for EnumType {
7 | type Inner = CreateEnumStmt;
8 | fn id(&self) -> String {
9 | self.id.to_string()
10 | }
11 |
12 | fn type_name(&self) -> &'static str {
13 | "enum"
14 | }
15 |
16 | fn node(&self) -> &NodeEnum {
17 | &self.node
18 | }
19 |
20 | fn inner(&self) -> anyhow::Result<&Self::Inner> {
21 | match &self.node {
22 | NodeEnum::CreateEnumStmt(stmt) => Ok(stmt),
23 | _ => anyhow::bail!("not a create enum statement"),
24 | }
25 | }
26 |
27 | fn revert(&self) -> anyhow::Result<NodeEnum> {
28 | let sql = format!("DROP TYPE {}", self.id);
29 | let parsed = pg_query::parse(&sql)?;
30 | let node = parsed.protobuf.nodes()[0].0;
31 | match node {
32 | NodeRef::DropStmt(stmt) => Ok(NodeEnum::DropStmt(stmt.clone())),
33 | _ => anyhow::bail!("not a drop type statement"),
34 | }
35 | }
36 | }
37 |
38 | impl TryFrom<&CreateEnumStmt> for EnumType {
39 | type Error = anyhow::Error;
40 | fn try_from(stmt: &CreateEnumStmt) -> Result<Self, Self::Error> {
41 | let id = stmt
42 | .type_name
43 | .iter()
44 | .filter_map(node_to_string)
45 | .join(".")
46 | .parse()?;
47 | let node = NodeEnum::CreateEnumStmt(stmt.clone());
48 | let items = stmt.vals.iter().filter_map(node_to_string).collect();
49 | Ok(Self { id, items, node })
50 | }
51 | }
52 |
53 | impl MigrationPlanner for NodeDiff<EnumType> {
54 | type Migration = String;
55 |
56 | fn drop(&self) -> MigrationResult {
57 | if let Some(old) = &self.old {
58 | let sqls = vec![old.revert()?.deparse()?];
59 | Ok(sqls)
60 | } else {
61 | Ok(vec![])
62 | }
63 | }
64 |
65 | fn create(&self) -> MigrationResult {
66 | if let Some(new) = &self.new {
67 | let sqls = vec![new.node.deparse()?];
68 | Ok(sqls)
69 | } else {
70 | Ok(vec![])
71 | }
72 | }
73 |
74 | fn alter(&self) -> MigrationResult {
75 | match (&self.old, &self.new) {
76 | (Some(old), Some(new)) => {
77 | let added = new.items.difference(&old.items).collect::<Vec<_>>();
78 | let removed = old.items.difference(&new.items).collect::<Vec<_>>();
79 | if removed.is_empty() {
80 | let migrations = added
81 | .iter()
82 | .map(|s| format!("ALTER TYPE {} ADD VALUE '{}'", old.id, s))
83 | .collect();
84 | return Ok(migrations);
85 | }
86 |
87 | if removed.len() == added.len() && removed.len() == 1 {
88 | let sql = format!(
89 | "ALTER TYPE {} RENAME VALUE '{}' TO '{}'",
90 | old.id,
91 | removed.first().unwrap(),
92 | added.first().unwrap()
93 | );
94 | return Ok(vec![sql]);
95 | }
96 |
97 | if atty::is(atty::Stream::Stdout) {
98 | println!("WARNING: recreating enum type {} because of incompatible changes. Be CAUTIOUS: this migration might fail if you referenced the type in other places.", old.id);
99 | }
100 | Ok(vec![])
101 | }
102 | _ => Ok(vec![]),
103 | }
104 | }
105 | }
106 |
107 | #[cfg(test)]
108 | mod tests {
109 | use super::*;
110 | use crate::{Differ, MigrationPlanner};
111 |
112 | #[test]
113 | fn enum_type_should_parse() {
114 | let sql = "CREATE TYPE enum_type AS ENUM ('a', 'b', 'c')";
115 | let enum_type: EnumType = sql.parse().unwrap();
116 | assert_eq!(enum_type.id.to_string(), "public.enum_type");
117 | }
118 |
119 | #[test]
120 | fn enum_type_should_generate_add_value_plan() {
121 | let sql1 = "CREATE TYPE enum_type AS ENUM ('a', 'b', 'c')";
122 | let sql2 = "CREATE TYPE enum_type AS ENUM ('a', 'b', 'c', 'd', 'e')";
123 | let old: EnumType = sql1.parse().unwrap();
124 | let new: EnumType = sql2.parse().unwrap();
125 | let diff = old.diff(&new).unwrap().unwrap();
126 | let plan = diff.plan().unwrap();
127 | assert_eq!(plan.len(), 2);
128 | assert_eq!(plan[0], "ALTER TYPE public.enum_type ADD VALUE 'd'");
129 | assert_eq!(plan[1], "ALTER TYPE public.enum_type ADD VALUE 'e'");
130 | }
131 | }
132 |
--------------------------------------------------------------------------------
/src/parser/function.rs:
--------------------------------------------------------------------------------
1 | use super::{
2 | utils::{node_to_string, type_name_to_string},
3 | Function, FunctionArg, SchemaId,
4 | };
5 | use crate::{MigrationPlanner, MigrationResult, NodeDiff, NodeItem};
6 | use itertools::Itertools;
7 | use pg_query::{protobuf::CreateFunctionStmt, Node, NodeEnum, NodeRef};
8 |
9 | impl NodeItem for Function {
10 | type Inner = CreateFunctionStmt;
11 |
12 | fn id(&self) -> String {
13 | self.id.to_string()
14 | }
15 |
16 | fn type_name(&self) -> &'static str {
17 | "function"
18 | }
19 |
20 | fn node(&self) -> &NodeEnum {
21 | &self.node
22 | }
23 |
24 | fn inner(&self) -> anyhow::Result<&Self::Inner> {
25 | match &self.node {
26 | NodeEnum::CreateFunctionStmt(stmt) => Ok(stmt),
27 | _ => anyhow::bail!("not a create function statement"),
28 | }
29 | }
30 |
31 | fn revert(&self) -> anyhow::Result<NodeEnum> {
32 | let sql = format!("DROP FUNCTION {}", self.signature());
33 | let parsed = pg_query::parse(&sql)?;
34 | let node = parsed.protobuf.nodes()[0].0;
35 | match node {
36 | NodeRef::DropStmt(stmt) => Ok(NodeEnum::DropStmt(stmt.clone())),
37 | _ => anyhow::bail!("not a drop statement"),
38 | }
39 | }
40 | }
41 |
42 | impl TryFrom<&CreateFunctionStmt> for Function {
43 | type Error = anyhow::Error;
44 | fn try_from(stmt: &CreateFunctionStmt) -> Result<Self, Self::Error> {
45 | let args = parse_args(&stmt.parameters);
46 |
47 | let id = stmt
48 | .funcname
49 | .iter()
50 | .filter_map(node_to_string)
51 | .join(".")
52 | .parse()?;
53 |
54 | let returns = type_name_to_string(stmt.return_type.as_ref().unwrap());
55 |
56 | let node = NodeEnum::CreateFunctionStmt(stmt.clone());
57 | Ok(Self {
58 | id,
59 | args,
60 | returns,
61 | node,
62 | })
63 | }
64 | }
65 |
66 | impl MigrationPlanner for NodeDiff<Function> {
67 | type Migration = String;
68 |
69 | fn drop(&self) -> MigrationResult {
70 | if let Some(old) = &self.old {
71 | let sqls = vec![old.revert()?.deparse()?];
72 | Ok(sqls)
73 | } else {
74 | Ok(vec![])
75 | }
76 | }
77 |
78 | fn create(&self) -> MigrationResult {
79 | if let
Some(new) = &self.new { 80 | let sqls = vec![new.node.deparse()?]; 81 | Ok(sqls) 82 | } else { 83 | Ok(vec![]) 84 | } 85 | } 86 | 87 | fn alter(&self) -> MigrationResult { 88 | match (&self.old, &self.new) { 89 | (Some(old), Some(new)) => { 90 | // if args or return type changed, drop and create 91 | if old.args != new.args || old.returns != new.returns { 92 | return Ok(vec![]); 93 | } 94 | 95 | let sql = new.node.deparse()?; 96 | let sql = sql.replace("CREATE FUNCTION", "CREATE OR REPLACE FUNCTION"); 97 | Ok(vec![sql]) 98 | } 99 | _ => Ok(vec![]), 100 | } 101 | } 102 | } 103 | 104 | impl Function { 105 | pub fn signature(&self) -> String { 106 | format!( 107 | "{}({})", 108 | self.id, 109 | self.args.iter().map(|a| &a.data_type).join(", ") 110 | ) 111 | } 112 | } 113 | 114 | #[allow(dead_code)] 115 | fn parse_id(nodes: &[Node], args: &[FunctionArg]) -> SchemaId { 116 | let mut names = nodes.iter().filter_map(node_to_string).collect::>(); 117 | assert!(!names.is_empty() && names.len() <= 2); 118 | let name = names.pop().unwrap(); 119 | let func_name = format!("{}({})", name, args.iter().map(|a| &a.data_type).join(", ")); 120 | names.push(func_name); 121 | SchemaId::new_with(&names.iter().map(|v| v.as_str()).collect::>()) 122 | } 123 | 124 | fn parse_args(args: &[Node]) -> Vec { 125 | args.iter() 126 | .map(|n| match n.node.as_ref() { 127 | Some(NodeEnum::FunctionParameter(param)) => FunctionArg { 128 | name: param.name.clone(), 129 | data_type: type_name_to_string(param.arg_type.as_ref().unwrap()), 130 | }, 131 | _ => panic!("not a function parameter"), 132 | }) 133 | .collect::>() 134 | } 135 | 136 | #[cfg(test)] 137 | mod tests { 138 | use crate::{Differ, MigrationPlanner}; 139 | 140 | use super::*; 141 | 142 | #[test] 143 | fn valid_create_function_sql_should_parse() { 144 | let f1 = "CREATE FUNCTION test(name text, value integer) RETURNS text LANGUAGE sql STABLE AS $$ select 1 $$"; 145 | let fun: Function = f1.parse().unwrap(); 146 | assert_eq!(fun.id, SchemaId::new("public", "test")); 147 | assert_eq!( 148 | fun.args, 149 | vec![ 150 | FunctionArg { 151 | name: "name".to_string(), 152 | data_type: "text".to_string() 153 | }, 154 | FunctionArg { 155 | name: "value".to_string(), 156 | data_type: "pg_catalog.int4".to_string() 157 | }, 158 | ] 159 | ); 160 | assert_eq!(fun.returns, "text"); 161 | } 162 | 163 | #[test] 164 | fn unchanged_function_should_return_none() { 165 | let f1 = "CREATE FUNCTION public.test() RETURNS text LANGUAGE sql STABLE AS $$ select 1 $$"; 166 | let f2 = "CREATE FUNCTION public.test() RETURNS text LANGUAGE sql STABLE AS $$ select 1 $$"; 167 | let old: Function = f1.parse().unwrap(); 168 | let new: Function = f2.parse().unwrap(); 169 | let diff = old.diff(&new).unwrap(); 170 | assert!(diff.is_none()); 171 | } 172 | 173 | #[test] 174 | fn function_add_new_args_should_be_treated_as_new_function() { 175 | let f1 = "CREATE FUNCTION test() RETURNS text LANGUAGE SQL stable AS $$ select 1 $$"; 176 | let f2 = "CREATE FUNCTION test(name1 text) RETURNS text LANGUAGE sql STABLE AS $$ select name1 $$"; 177 | let old: Function = f1.parse().unwrap(); 178 | let new: Function = f2.parse().unwrap(); 179 | let diff = old.diff(&new).unwrap().unwrap(); 180 | let plan = diff.plan().unwrap(); 181 | assert_eq!(plan.len(), 2); 182 | assert_eq!(plan[0], "DROP FUNCTION public.test()"); 183 | assert_eq!(plan[1], f2); 184 | } 185 | 186 | #[test] 187 | fn function_change_arg_type_should_generate_migration() { 188 | let f1 = "CREATE FUNCTION test(name1 text) RETURNS text LANGUAGE sql STABLE AS 
$$ select name1 $$"; 189 | let f2 = "CREATE FUNCTION test(name1 int4) RETURNS int4 LANGUAGE sql STABLE AS $$ select name1 $$"; 190 | let old: Function = f1.parse().unwrap(); 191 | let new: Function = f2.parse().unwrap(); 192 | let diff = old.diff(&new).unwrap().unwrap(); 193 | let plan = diff.plan().unwrap(); 194 | assert_eq!(plan.len(), 2); 195 | assert_eq!(plan[0], "DROP FUNCTION public.test(text)"); 196 | assert_eq!(plan[1], f2); 197 | } 198 | 199 | #[test] 200 | fn function_change_content_should_generate_migration() { 201 | let f1 = "CREATE FUNCTION test(name1 text) RETURNS text LANGUAGE sql STABLE AS $$ select name1 $$"; 202 | let f2 = "CREATE FUNCTION test(name2 text) RETURNS text LANGUAGE sql IMMUTABLE AS $$ select name2 $$"; 203 | let old: Function = f1.parse().unwrap(); 204 | let new: Function = f2.parse().unwrap(); 205 | let diff = old.diff(&new).unwrap().unwrap(); 206 | let plan = diff.plan().unwrap(); 207 | assert_eq!(plan.len(), 1); 208 | assert_eq!(plan[0], "CREATE OR REPLACE FUNCTION test(name2 text) RETURNS text LANGUAGE sql IMMUTABLE AS $$ select name2 $$"); 209 | } 210 | } 211 | -------------------------------------------------------------------------------- /src/parser/mod.rs: -------------------------------------------------------------------------------- 1 | mod composite_type; 2 | mod enum_type; 3 | mod function; 4 | mod mview; 5 | mod privilege; 6 | mod sequence; 7 | mod table; 8 | mod utils; 9 | mod view; 10 | 11 | use derivative::Derivative; 12 | use indexmap::IndexMap; 13 | use pg_query::{ 14 | protobuf::{ConstrType, GrantTargetType, ObjectType}, 15 | NodeEnum, 16 | }; 17 | use std::collections::{BTreeMap, BTreeSet}; 18 | 19 | #[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] 20 | pub struct SchemaId { 21 | pub schema: String, 22 | pub name: String, 23 | } 24 | #[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord)] 25 | pub struct RelationId { 26 | pub schema_id: SchemaId, 27 | pub name: String, 28 | } 29 | 30 | /// All the parsed information about a database 31 | #[derive(Debug, Default, Clone, PartialEq, Eq)] 32 | pub struct DatabaseSchema { 33 | pub schemas: BTreeSet, 34 | 35 | // schema level objects 36 | pub extensions: BTreeMap>, 37 | pub composite_types: BTreeMap>, 38 | pub enum_types: BTreeMap>, 39 | pub sequences: BTreeMap>, 40 | pub tables: BTreeMap>, 41 | pub views: BTreeMap>, 42 | pub mviews: BTreeMap>, 43 | pub functions: BTreeMap>, 44 | 45 | // database level objects 46 | pub privileges: BTreeMap>, 47 | 48 | // table level objects 49 | pub table_indexes: BTreeMap>, 50 | pub table_constraints: BTreeMap>, 51 | pub table_sequences: BTreeMap>, 52 | pub table_triggers: BTreeMap>, 53 | pub table_policies: BTreeMap>, 54 | pub table_rls: BTreeMap, 55 | pub table_owners: BTreeMap, 56 | 57 | // internal data structures 58 | _table_sequences: BTreeMap, 59 | } 60 | 61 | /// Postgres schema 62 | #[derive(Debug, Clone, PartialEq, Eq)] 63 | pub struct Schema { 64 | pub name: String, 65 | pub types: BTreeMap, 66 | pub tables: BTreeMap, 67 | pub views: BTreeMap, 68 | pub functions: BTreeMap, 69 | } 70 | 71 | /// Trigger defined in the database 72 | #[derive(Derivative, Clone)] 73 | #[derivative(Debug, PartialEq, Eq, PartialOrd, Ord)] 74 | pub struct Trigger { 75 | pub id: RelationId, 76 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 77 | pub node: NodeEnum, 78 | } 79 | 80 | /// Composite type defined in the schema 81 | #[derive(Derivative, Debug, Clone)] 82 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 83 | 
pub struct CompositeType { 84 | pub id: SchemaId, 85 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 86 | pub node: NodeEnum, 87 | } 88 | 89 | /// Enum type defined in the schema 90 | #[derive(Derivative, Debug, Clone)] 91 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 92 | pub struct EnumType { 93 | pub id: SchemaId, 94 | pub items: BTreeSet, 95 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 96 | pub node: NodeEnum, 97 | } 98 | 99 | /// Table defined in the schema 100 | #[derive(Derivative, Debug, Clone)] 101 | #[derivative(PartialEq, Eq)] 102 | pub struct Table { 103 | pub id: SchemaId, 104 | pub columns: IndexMap, 105 | pub constraints: IndexMap, 106 | 107 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 108 | pub node: NodeEnum, 109 | } 110 | 111 | /// View defined in the schema 112 | #[derive(Derivative, Debug, Clone)] 113 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 114 | pub struct View { 115 | pub id: SchemaId, 116 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 117 | pub node: NodeEnum, 118 | } 119 | 120 | /// Materialized View defined in the schema 121 | #[derive(Derivative, Debug, Clone)] 122 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 123 | pub struct MatView { 124 | pub id: SchemaId, 125 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 126 | pub node: NodeEnum, 127 | } 128 | 129 | /// Function defined in the schema 130 | #[derive(Derivative, Debug, Clone)] 131 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 132 | pub struct Function { 133 | pub id: SchemaId, 134 | pub args: Vec, 135 | pub returns: String, 136 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 137 | pub node: NodeEnum, 138 | } 139 | 140 | /// Function defined in the schema 141 | #[derive(Derivative, Debug, Clone, PartialOrd, Ord)] 142 | #[derivative(PartialEq, Eq)] 143 | pub struct FunctionArg { 144 | #[derivative(PartialEq = "ignore")] 145 | pub name: String, 146 | pub data_type: String, 147 | } 148 | 149 | #[derive(Derivative, Debug, Clone)] 150 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 151 | pub struct Column { 152 | pub id: RelationId, 153 | pub type_name: String, 154 | pub nullable: bool, 155 | pub default: Option, 156 | pub constraints: BTreeSet, 157 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 158 | pub node: NodeEnum, 159 | } 160 | 161 | #[derive(Derivative, Debug, Clone)] 162 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 163 | pub struct Sequence { 164 | pub id: SchemaId, 165 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 166 | pub node: NodeEnum, 167 | } 168 | 169 | #[derive(Derivative, Debug, Clone)] 170 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 171 | pub struct TableSequence { 172 | pub id: RelationId, 173 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 174 | pub node: NodeEnum, 175 | } 176 | 177 | #[derive(Derivative, Debug, Clone)] 178 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 179 | pub struct SequenceInfo { 180 | pub column: String, 181 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 182 | pub node: NodeEnum, 183 | } 184 | 185 | #[derive(Derivative, Debug, Clone)] 186 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 187 | pub struct ConstraintInfo { 188 | pub name: String, 189 | pub con_type: ConstrType, 190 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 191 | pub node: NodeEnum, 192 | } 193 | 194 | 
#[derive(Derivative, Debug, Clone)] 195 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 196 | pub struct TableConstraint { 197 | pub id: RelationId, 198 | pub info: ConstraintInfo, 199 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 200 | pub node: NodeEnum, 201 | } 202 | 203 | #[derive(Derivative, Debug, Clone)] 204 | #[derivative(PartialEq, Eq, PartialOrd, Ord, Hash)] 205 | pub struct Privilege { 206 | pub id: String, 207 | pub target_type: GrantTargetType, 208 | pub object_type: ObjectType, 209 | pub privileges: BTreeMap, 210 | pub grantee: String, 211 | pub grant: bool, 212 | #[derivative( 213 | Debug = "ignore", 214 | PartialOrd = "ignore", 215 | Ord = "ignore", 216 | Hash = "ignore" 217 | )] 218 | pub node: NodeEnum, 219 | } 220 | 221 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] 222 | pub struct SinglePriv { 223 | pub name: String, 224 | pub cols: BTreeSet, 225 | } 226 | 227 | /// Index for table or material view 228 | #[derive(Derivative, Debug, Clone)] 229 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 230 | pub struct TableIndex { 231 | pub id: RelationId, 232 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 233 | pub node: NodeEnum, 234 | } 235 | 236 | #[derive(Derivative, Debug, Clone)] 237 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 238 | pub struct Extension { 239 | pub id: SchemaId, 240 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 241 | pub node: NodeEnum, 242 | } 243 | 244 | #[derive(Derivative, Debug, Clone)] 245 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 246 | pub struct TablePolicy { 247 | pub id: RelationId, 248 | pub cmd_name: String, 249 | pub permissive: bool, 250 | pub roles: BTreeSet, 251 | pub qual: Option, 252 | pub with_check: Option, 253 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 254 | pub node: NodeEnum, 255 | } 256 | 257 | /// Struct to capture all alter table statements 258 | #[derive(Derivative, Debug, Clone)] 259 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 260 | pub struct AlterTable { 261 | pub id: SchemaId, 262 | // for sql from pg_dump, only one action is used 263 | pub action: AlterTableAction, 264 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 265 | pub node: NodeEnum, 266 | } 267 | 268 | /// Supported alter table actions 269 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] 270 | pub enum AlterTableAction { 271 | Constraint(Box), 272 | Rls, 273 | Owner(String), 274 | Sequence(Box), 275 | Unsupported, 276 | } 277 | 278 | /// Struct to capture `ALTER TABLE ENABLE ROW LEVEL SECURITY;` 279 | #[derive(Derivative, Debug, Clone)] 280 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 281 | pub struct TableRls { 282 | pub id: SchemaId, 283 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 284 | pub node: NodeEnum, 285 | } 286 | 287 | /// Struct to capture `ALTER TABLE OWNER TO new_owner;` 288 | #[derive(Derivative, Debug, Clone)] 289 | #[derivative(PartialEq, Eq, PartialOrd, Ord)] 290 | pub struct TableOwner { 291 | pub id: SchemaId, 292 | pub owner: String, 293 | #[derivative(Debug = "ignore", PartialOrd = "ignore", Ord = "ignore")] 294 | pub node: NodeEnum, 295 | } 296 | -------------------------------------------------------------------------------- /src/parser/mview.rs: -------------------------------------------------------------------------------- 1 | use super::{MatView, SchemaId}; 2 | use crate::NodeItem; 3 | use pg_query::{protobuf::CreateTableAsStmt, 
NodeEnum, NodeRef}; 4 | 5 | impl NodeItem for MatView { 6 | type Inner = CreateTableAsStmt; 7 | 8 | fn id(&self) -> String { 9 | self.id.to_string() 10 | } 11 | 12 | fn type_name(&self) -> &'static str { 13 | "materialized view" 14 | } 15 | 16 | fn node(&self) -> &NodeEnum { 17 | &self.node 18 | } 19 | 20 | fn inner(&self) -> anyhow::Result<&Self::Inner> { 21 | match &self.node { 22 | NodeEnum::CreateTableAsStmt(stmt) => Ok(stmt), 23 | _ => anyhow::bail!("not a create materialized view statement"), 24 | } 25 | } 26 | 27 | fn revert(&self) -> anyhow::Result { 28 | let sql = format!("DROP MATERIALIZED VIEW {}", self.id); 29 | let parsed = pg_query::parse(&sql)?; 30 | let node = parsed.protobuf.nodes()[0].0; 31 | match node { 32 | NodeRef::DropStmt(stmt) => Ok(NodeEnum::DropStmt(stmt.clone())), 33 | _ => anyhow::bail!("not a drop statement"), 34 | } 35 | } 36 | } 37 | 38 | impl TryFrom<&CreateTableAsStmt> for MatView { 39 | type Error = anyhow::Error; 40 | fn try_from(stmt: &CreateTableAsStmt) -> Result { 41 | let id = get_mview_id(stmt); 42 | let node = NodeEnum::CreateTableAsStmt(Box::new(stmt.clone())); 43 | Ok(Self { id, node }) 44 | } 45 | } 46 | 47 | fn get_mview_id(stmt: &CreateTableAsStmt) -> SchemaId { 48 | assert!(stmt.into.is_some()); 49 | let into = stmt.into.as_ref().unwrap(); 50 | assert!(into.rel.is_some()); 51 | into.rel.as_ref().unwrap().into() 52 | } 53 | 54 | #[cfg(test)] 55 | mod tests { 56 | use super::*; 57 | use crate::{Differ, MigrationPlanner}; 58 | 59 | #[test] 60 | fn mview_should_parse() { 61 | let sql = "CREATE MATERIALIZED VIEW foo.bar AS SELECT 1"; 62 | let view: MatView = sql.parse().unwrap(); 63 | assert_eq!(view.id.to_string(), "foo.bar"); 64 | } 65 | 66 | #[test] 67 | fn test_mview_migration() { 68 | let sql1 = "CREATE MATERIALIZED VIEW foo AS SELECT 1"; 69 | let sql2 = "CREATE MATERIALIZED VIEW foo AS SELECT 2"; 70 | let old: MatView = sql1.parse().unwrap(); 71 | let new: MatView = sql2.parse().unwrap(); 72 | let diff = old.diff(&new).unwrap().unwrap(); 73 | let migrations = diff.plan().unwrap(); 74 | assert_eq!(migrations.len(), 2); 75 | assert_eq!(migrations[0], "DROP MATERIALIZED VIEW public.foo"); 76 | assert_eq!(migrations[1], "CREATE MATERIALIZED VIEW foo AS SELECT 2"); 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /src/parser/privilege/mod.rs: -------------------------------------------------------------------------------- 1 | mod single_priv; 2 | 3 | use super::{Privilege, SinglePriv}; 4 | use crate::{parser::SchemaId, MigrationPlanner, MigrationResult, NodeDelta, NodeDiff, NodeItem}; 5 | use pg_query::{ 6 | protobuf::{GrantStmt, GrantTargetType, ObjectType}, 7 | NodeEnum, 8 | }; 9 | use std::collections::BTreeMap; 10 | 11 | impl NodeItem for Privilege { 12 | type Inner = GrantStmt; 13 | 14 | fn id(&self) -> String { 15 | format!("{}:{}", self.id, self.grantee) 16 | } 17 | 18 | fn type_name(&self) -> &'static str { 19 | "privilege" 20 | } 21 | 22 | fn node(&self) -> &NodeEnum { 23 | &self.node 24 | } 25 | 26 | fn inner(&self) -> anyhow::Result<&Self::Inner> { 27 | match &self.node { 28 | NodeEnum::GrantStmt(stmt) => Ok(stmt), 29 | _ => anyhow::bail!("not a grant statement"), 30 | } 31 | } 32 | 33 | fn revert(&self) -> anyhow::Result { 34 | let mut stmt = self.inner()?.clone(); 35 | stmt.is_grant = !stmt.is_grant; 36 | Ok(NodeEnum::GrantStmt(stmt)) 37 | } 38 | } 39 | 40 | impl TryFrom<&GrantStmt> for Privilege { 41 | type Error = anyhow::Error; 42 | 43 | fn try_from(stmt: &GrantStmt) -> Result { 
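// Illustrative walkthrough (derived from the tests at the bottom of this
// file): `GRANT SELECT(id, name) ON TABLE public.test TO test` is extracted
// below as target_type = AclTargetObject, object_type = ObjectTable,
// id = "public.test", grantee = "test", and one SinglePriv named "select"
// with cols {id, name}.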
44 | let target_type = get_target_type(stmt); 45 | let object_type = get_object_type(stmt)?; 46 | let id = get_id(stmt)?; 47 | let privileges = get_privileges(stmt); 48 | let grantee = get_grantee(stmt); 49 | let node = NodeEnum::GrantStmt(stmt.clone()); 50 | Ok(Self { 51 | target_type, 52 | object_type, 53 | id, 54 | privileges, 55 | grantee, 56 | node, 57 | grant: stmt.is_grant, 58 | }) 59 | } 60 | } 61 | 62 | impl MigrationPlanner for NodeDiff { 63 | type Migration = String; 64 | 65 | fn drop(&self) -> MigrationResult { 66 | if let Some(old) = &self.old { 67 | let sql = old.revert()?.deparse()?; 68 | Ok(vec![sql]) 69 | } else { 70 | Ok(vec![]) 71 | } 72 | } 73 | 74 | fn create(&self) -> MigrationResult { 75 | if let Some(new) = &self.new { 76 | let sqls = vec![new.node.deparse()?]; 77 | Ok(sqls) 78 | } else { 79 | Ok(vec![]) 80 | } 81 | } 82 | 83 | fn alter(&self) -> MigrationResult { 84 | match (&self.old, &self.new) { 85 | (Some(old), Some(new)) => { 86 | if old.grant != new.grant 87 | || old.target_type != new.target_type 88 | || old.grantee != new.grantee 89 | || old.privileges.is_empty() 90 | || new.privileges.is_empty() 91 | { 92 | // we can't alter these privilege changes, so we need to drop and recreate it 93 | return Ok(vec![]); 94 | } 95 | let delta = NodeDelta::create( 96 | old.privileges.iter().collect(), 97 | new.privileges.iter().collect(), 98 | ); 99 | delta.plan(old) 100 | } 101 | _ => Ok(vec![]), 102 | } 103 | } 104 | } 105 | 106 | fn get_target_type(stmt: &GrantStmt) -> GrantTargetType { 107 | let target_type = GrantTargetType::from_i32(stmt.targtype); 108 | assert!(target_type.is_some()); 109 | target_type.unwrap() 110 | } 111 | 112 | fn get_object_type(stmt: &GrantStmt) -> anyhow::Result { 113 | let object_type = ObjectType::from_i32(stmt.objtype); 114 | assert!(object_type.is_some()); 115 | match object_type.unwrap() { 116 | ObjectType::ObjectTable => Ok(ObjectType::ObjectTable), 117 | ObjectType::ObjectSchema => Ok(ObjectType::ObjectSchema), 118 | v => anyhow::bail!("unsupported grant/revoke object type: {:?}", v), 119 | } 120 | } 121 | 122 | fn get_id(stmt: &GrantStmt) -> anyhow::Result { 123 | // pg_dump generated grant would only have one object 124 | assert!(stmt.objects.len() == 1); 125 | let node = &stmt.objects[0].node; 126 | assert!(node.is_some()); 127 | let name = match node.as_ref().unwrap() { 128 | NodeEnum::String(s) => s.str.clone(), 129 | NodeEnum::RangeVar(v) => SchemaId::from(v).to_string(), 130 | _ => anyhow::bail!("unsupported grant/revoke object name: {:?}", node), 131 | }; 132 | 133 | Ok(name) 134 | } 135 | 136 | fn get_privileges(stmt: &GrantStmt) -> BTreeMap { 137 | stmt.privileges 138 | .iter() 139 | .filter_map(|p| { 140 | p.node.as_ref().and_then(|v| match v { 141 | NodeEnum::AccessPriv(p) => { 142 | let p = SinglePriv::from(p.clone()); 143 | Some((p.name.clone(), p)) 144 | } 145 | _ => None, 146 | }) 147 | }) 148 | .collect() 149 | } 150 | 151 | fn get_grantee(stmt: &GrantStmt) -> String { 152 | let name = stmt.grantees.first().and_then(|n| match n.node.as_ref() { 153 | Some(NodeEnum::RoleSpec(r)) => Some(r.rolename.clone()), 154 | _ => None, 155 | }); 156 | assert!(name.is_some()); 157 | name.unwrap() 158 | } 159 | 160 | #[cfg(test)] 161 | mod tests { 162 | use crate::Differ; 163 | 164 | use super::*; 165 | 166 | #[test] 167 | fn grant_all_should_parse() { 168 | let s = "GRANT ALL ON TABLE public.test TO test"; 169 | let p: Privilege = s.parse().unwrap(); 170 | assert!(p.grant); 171 | assert_eq!(p.target_type, 
GrantTargetType::AclTargetObject); 172 | assert_eq!(p.object_type, ObjectType::ObjectTable); 173 | assert_eq!(p.id, "public.test"); 174 | assert_eq!(p.grantee, "test"); 175 | assert_eq!(p.privileges.len(), 0); 176 | } 177 | 178 | #[test] 179 | fn grant_partial_should_parse() { 180 | let s = "GRANT SELECT(id, name), UPDATE(name) ON TABLE public.test TO test"; 181 | let p: Privilege = s.parse().unwrap(); 182 | assert!(p.grant); 183 | assert_eq!(p.target_type, GrantTargetType::AclTargetObject); 184 | assert_eq!(p.object_type, ObjectType::ObjectTable); 185 | assert_eq!(p.id, "public.test"); 186 | assert_eq!(p.grantee, "test"); 187 | assert_eq!(p.privileges.len(), 2); 188 | assert_eq!( 189 | p.privileges["select"].cols, 190 | vec!["id".into(), "name".into()].into_iter().collect() 191 | ); 192 | assert_eq!( 193 | p.privileges["update"].cols, 194 | vec!["name".into(),].into_iter().collect() 195 | ); 196 | } 197 | 198 | #[test] 199 | fn grand_diff_change_to_all_should_work() { 200 | let s1 = "GRANT SELECT(id, name) ON TABLE public.test TO test"; 201 | let s2 = "GRANT ALL ON TABLE public.test TO test"; 202 | let p1: Privilege = s1.parse().unwrap(); 203 | let p2: Privilege = s2.parse().unwrap(); 204 | let diff = p1.diff(&p2).unwrap().unwrap(); 205 | let plan = diff.plan().unwrap(); 206 | assert_eq!(plan.len(), 2); 207 | assert_eq!(plan[0], "REVOKE select (id, name) ON public.test FROM test"); 208 | assert_eq!(plan[1], "GRANT ALL ON public.test TO test"); 209 | } 210 | 211 | #[test] 212 | fn grand_diff_change_owner_should_work() { 213 | let s1 = "GRANT SELECT(id, name) ON TABLE public.test TO test"; 214 | let s2 = "GRANT SELECT(id, name) ON TABLE public.test TO test1"; 215 | let p1: Privilege = s1.parse().unwrap(); 216 | let p2: Privilege = s2.parse().unwrap(); 217 | let diff = p1.diff(&p2).unwrap_err(); 218 | 219 | assert_eq!( 220 | diff.to_string(), 221 | "can't diff public.test:test and public.test:test1" 222 | ); 223 | } 224 | 225 | #[test] 226 | fn grant_diff_create_should_work() { 227 | let s1 = "GRANT SELECT(id, name) ON TABLE public.test TO test"; 228 | let s2 = "GRANT SELECT(id, name), UPDATE(name) ON TABLE public.test TO test"; 229 | let p1: Privilege = s1.parse().unwrap(); 230 | let p2: Privilege = s2.parse().unwrap(); 231 | let diff = p1.diff(&p2).unwrap().unwrap(); 232 | let plan = diff.plan().unwrap(); 233 | assert_eq!(plan.len(), 1); 234 | assert_eq!(plan[0], "GRANT update (name) ON public.test TO test"); 235 | } 236 | 237 | #[test] 238 | fn grant_diff_drop_should_work() { 239 | let s1 = "GRANT SELECT(id, name), DELETE(name) ON TABLE public.test TO test"; 240 | let s2 = "GRANT SELECT(id, name) ON TABLE public.test TO test"; 241 | let p1: Privilege = s1.parse().unwrap(); 242 | let p2: Privilege = s2.parse().unwrap(); 243 | let diff = p1.diff(&p2).unwrap().unwrap(); 244 | let plan = diff.plan().unwrap(); 245 | assert_eq!(plan.len(), 1); 246 | assert_eq!(plan[0], "REVOKE delete (name) ON public.test FROM test"); 247 | } 248 | 249 | #[test] 250 | fn grant_diff_alter_should_work() { 251 | let s1 = "GRANT SELECT(id, name), DELETE(name) ON TABLE public.test TO test"; 252 | let s2 = "GRANT SELECT(id, temp), UPDATE(name) ON TABLE public.test TO test"; 253 | let p1: Privilege = s1.parse().unwrap(); 254 | let p2: Privilege = s2.parse().unwrap(); 255 | let diff = p1.diff(&p2).unwrap().unwrap(); 256 | let plan = diff.plan().unwrap(); 257 | assert_eq!(plan.len(), 4); 258 | assert_eq!(plan[0], "REVOKE delete (name) ON public.test FROM test"); 259 | assert_eq!(plan[1], "GRANT update (name) ON 
public.test TO test"); 260 | assert_eq!(plan[2], "REVOKE select (id, name) ON public.test FROM test"); 261 | assert_eq!(plan[3], "GRANT select (id, temp) ON public.test TO test"); 262 | } 263 | } 264 | -------------------------------------------------------------------------------- /src/parser/privilege/single_priv.rs: -------------------------------------------------------------------------------- 1 | use super::SinglePriv; 2 | use crate::{ 3 | parser::{utils::parsec::parse_single_priv, Privilege}, 4 | DeltaItem, NodeItem, 5 | }; 6 | use itertools::Itertools; 7 | use pg_query::{protobuf::AccessPriv, Node, NodeEnum}; 8 | use std::{collections::BTreeSet, fmt, str::FromStr}; 9 | 10 | impl FromStr for SinglePriv { 11 | type Err = anyhow::Error; 12 | 13 | fn from_str(s: &str) -> anyhow::Result { 14 | let (_, p) = 15 | parse_single_priv(s).map_err(|_| anyhow::anyhow!("invalid single priv: {}", s))?; 16 | Ok(p) 17 | } 18 | } 19 | 20 | impl From for AccessPriv { 21 | fn from(p: SinglePriv) -> Self { 22 | let cols = p 23 | .cols 24 | .into_iter() 25 | .map(|s| NodeEnum::String(pg_query::protobuf::String { str: s })) 26 | .map(|n| Node { node: Some(n) }) 27 | .collect::>(); 28 | AccessPriv { 29 | priv_name: p.name, 30 | cols, 31 | } 32 | } 33 | } 34 | 35 | impl SinglePriv { 36 | fn generate_change(self, item: &Privilege, is_grant: bool) -> anyhow::Result { 37 | let mut stmt = item.inner()?.clone(); 38 | stmt.is_grant = is_grant; 39 | stmt.privileges = vec![self.into()]; 40 | Ok(NodeEnum::GrantStmt(stmt)) 41 | } 42 | } 43 | 44 | impl DeltaItem for SinglePriv { 45 | type SqlNode = Privilege; 46 | fn drop(self, item: &Self::SqlNode) -> anyhow::Result> { 47 | let node = self.generate_change(item, false)?; 48 | Ok(vec![node.deparse()?]) 49 | } 50 | 51 | fn create(self, item: &Self::SqlNode) -> anyhow::Result> { 52 | let node = self.generate_change(item, true)?; 53 | Ok(vec![node.deparse()?]) 54 | } 55 | 56 | fn rename(self, _item: &Self::SqlNode, _new: Self) -> anyhow::Result> { 57 | Ok(vec![]) 58 | } 59 | 60 | fn alter(self, item: &Self::SqlNode, new: Self) -> anyhow::Result> { 61 | let mut migrations = vec![]; 62 | let sql = self.drop(item)?; 63 | migrations.extend(sql); 64 | let sql = new.create(item)?; 65 | migrations.extend(sql); 66 | Ok(migrations) 67 | } 68 | } 69 | 70 | impl From for Node { 71 | fn from(p: SinglePriv) -> Self { 72 | Node { 73 | node: Some(NodeEnum::AccessPriv(p.into())), 74 | } 75 | } 76 | } 77 | 78 | impl From for SinglePriv { 79 | fn from(p: AccessPriv) -> Self { 80 | let name = p.priv_name; 81 | let cols: BTreeSet = p 82 | .cols 83 | .into_iter() 84 | .filter_map(|n| { 85 | n.node.and_then(|c| match c { 86 | NodeEnum::String(s) => Some(s.str), 87 | _ => None, 88 | }) 89 | }) 90 | .collect(); 91 | Self { name, cols } 92 | } 93 | } 94 | 95 | impl fmt::Display for SinglePriv { 96 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 97 | write!(f, "{}", self.name)?; 98 | if !self.cols.is_empty() { 99 | write!(f, "({})", self.cols.iter().join(", "))?; 100 | } 101 | Ok(()) 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/parser/sequence.rs: -------------------------------------------------------------------------------- 1 | use super::{SchemaId, Sequence}; 2 | use crate::NodeItem; 3 | use pg_query::{protobuf::CreateSeqStmt, NodeEnum, NodeRef}; 4 | 5 | impl NodeItem for Sequence { 6 | type Inner = CreateSeqStmt; 7 | 8 | fn id(&self) -> String { 9 | self.id.to_string() 10 | } 11 | 12 | fn type_name(&self) -> &'static 
--------------------------------------------------------------------------------
/src/parser/sequence.rs:
--------------------------------------------------------------------------------
1 | use super::{SchemaId, Sequence};
2 | use crate::NodeItem;
3 | use pg_query::{protobuf::CreateSeqStmt, NodeEnum, NodeRef};
4 |
5 | impl NodeItem for Sequence {
6 |     type Inner = CreateSeqStmt;
7 |
8 |     fn id(&self) -> String {
9 |         self.id.to_string()
10 |     }
11 |
12 |     fn type_name(&self) -> &'static str {
13 |         "sequence"
14 |     }
15 |
16 |     fn node(&self) -> &NodeEnum {
17 |         &self.node
18 |     }
19 |
20 |     fn inner(&self) -> anyhow::Result<&Self::Inner> {
21 |         match &self.node {
22 |             NodeEnum::CreateSeqStmt(stmt) => Ok(stmt),
23 |             _ => anyhow::bail!("not a create sequence statement"),
24 |         }
25 |     }
26 |
27 |     fn revert(&self) -> anyhow::Result<NodeEnum> {
28 |         let sql = format!("DROP SEQUENCE {}", self.id);
29 |         let parsed = pg_query::parse(&sql)?;
30 |         let node = parsed.protobuf.nodes()[0].0;
31 |         match node {
32 |             NodeRef::DropStmt(stmt) => Ok(NodeEnum::DropStmt(stmt.clone())),
33 |             _ => anyhow::bail!("not a drop statement"),
34 |         }
35 |     }
36 | }
37 |
38 | impl TryFrom<&CreateSeqStmt> for Sequence {
39 |     type Error = anyhow::Error;
40 |     fn try_from(stmt: &CreateSeqStmt) -> Result<Self, Self::Error> {
41 |         let id = SchemaId::from(stmt.sequence.as_ref());
42 |         let node = NodeEnum::CreateSeqStmt(stmt.clone());
43 |         Ok(Self { id, node })
44 |     }
45 | }
46 |
47 | #[cfg(test)]
48 | mod tests {
49 |     use super::*;
50 |     use crate::{Differ, MigrationPlanner};
51 |
52 |     #[test]
53 |     fn sequence_should_parse() {
54 |         let sql = "CREATE SEQUENCE public.todos_id_seq
55 |             START WITH 1
56 |             INCREMENT BY 1
57 |             NO MINVALUE
58 |             NO MAXVALUE
59 |             CACHE 1;";
60 |         let seq: Sequence = sql.parse().unwrap();
61 |         assert_eq!(seq.id.to_string(), "public.todos_id_seq");
62 |     }
63 |
64 |     #[test]
65 |     fn test_sequence_migration() {
66 |         let sql1 = "CREATE SEQUENCE public.todos_id_seq START WITH 1 INCREMENT BY 1 NO MINVALUE NO MAXVALUE CACHE 1";
67 |         let sql2 = "CREATE SEQUENCE public.todos_id_seq START 1 INCREMENT 2 NO MINVALUE NO MAXVALUE CACHE 1";
68 |         let old: Sequence = sql1.parse().unwrap();
69 |         let new: Sequence = sql2.parse().unwrap();
70 |         let diff = old.diff(&new).unwrap().unwrap();
71 |         let migrations = diff.plan().unwrap();
72 |         assert_eq!(migrations.len(), 2);
73 |         assert_eq!(migrations[0], "DROP SEQUENCE public.todos_id_seq");
74 |         assert_eq!(migrations[1], sql2);
75 |     }
76 | }
77 |
--------------------------------------------------------------------------------
/src/parser/table/alter_table.rs:
--------------------------------------------------------------------------------
1 | use crate::parser::{AlterTable, AlterTableAction, SchemaId};
2 | use crate::parser::{ConstraintInfo, SequenceInfo};
3 | use anyhow::{anyhow, Context};
4 | use pg_query::{
5 |     protobuf::{AlterTableCmd, AlterTableStmt, AlterTableType},
6 |     NodeEnum,
7 | };
8 | use tracing::warn;
9 |
10 | impl TryFrom<&AlterTableStmt> for AlterTable {
11 |     type Error = anyhow::Error;
12 |     fn try_from(alter: &AlterTableStmt) -> Result<Self, Self::Error> {
13 |         let id = SchemaId::from(alter.relation.as_ref());
14 |         let cmd = alter
15 |             .cmds
16 |             .iter()
17 |             .filter_map(|n| n.node.as_ref())
18 |             .next()
19 |             .ok_or_else(|| anyhow!("no commands"))?;
20 |
21 |         let action = match cmd {
22 |             NodeEnum::AlterTableCmd(ref cmd) => AlterTableAction::try_from(cmd.as_ref())?,
23 |             _ => anyhow::bail!("not an alter table command"),
24 |         };
25 |
26 |         let node = NodeEnum::AlterTableStmt(alter.clone());
27 |
28 |         Ok(Self { id, action, node })
29 |     }
30 | }
31 |
32 | impl TryFrom<&AlterTableCmd> for AlterTableAction {
33 |     type Error = anyhow::Error;
34 |     fn try_from(cmd: &AlterTableCmd) -> Result<Self, Self::Error> {
35 |         let node = cmd.def.as_ref().and_then(|n| n.node.as_ref());
36 |         let node_type = cmd.subtype();
37 |
38 |         match (node_type, node) {
39 |             (AlterTableType::AtAddConstraint, Some(NodeEnum::Constraint(constraint))) => {
40 |                 let item = ConstraintInfo::try_from(constraint.as_ref()).with_context(|| {
41 |                     let sql = NodeEnum::Constraint(constraint.clone()).deparse();
42 |                     format!("Failed to convert: {:?}", sql)
43 |                 })?;
44 |                 Ok(Self::Constraint(Box::new(item)))
45 |             }
46 |             (AlterTableType::AtChangeOwner, None) => {
47 |                 let owner = cmd.newowner.as_ref().ok_or_else(|| anyhow!("no owner"))?;
48 |                 Ok(Self::Owner(owner.rolename.clone()))
49 |             }
50 |             (AlterTableType::AtEnableRowSecurity, None) => Ok(Self::Rls),
51 |             (AlterTableType::AtColumnDefault, Some(n)) => {
52 |                 let info = SequenceInfo {
53 |                     column: cmd.name.clone(),
54 |                     node: n.clone(),
55 |                 };
56 |                 Ok(Self::Sequence(Box::new(info)))
57 |             }
58 |             (ty, node) => {
59 |                 warn!("unhandled alter table action: {:?} {:?}", ty, node);
60 |                 Ok(Self::Unsupported)
61 |             }
62 |         }
63 |     }
64 | }
65 |
--------------------------------------------------------------------------------
/src/parser/table/column/constraint_info.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 |     parser::{utils::node_to_string, ConstraintInfo, Table},
3 |     DeltaItem,
4 | };
5 | use pg_query::{protobuf::ConstrType, NodeEnum};
6 | use std::fmt;
7 |
8 | impl ConstraintInfo {}
9 |
10 | impl DeltaItem for ConstraintInfo {
11 |     type SqlNode = Table;
12 |     fn drop(self, item: &Self::SqlNode) -> anyhow::Result<Vec<String>> {
13 |         let sql = format!("ALTER TABLE ONLY {} DROP CONSTRAINT {}", item.id, self.name);
14 |
15 |         Ok(vec![sql])
16 |     }
17 |
18 |     fn create(self, item: &Self::SqlNode) -> anyhow::Result<Vec<String>> {
19 |         let sql = format!("ALTER TABLE ONLY {} ADD {}", item.id, self);
20 |         Ok(vec![sql])
21 |     }
22 |
23 |     fn rename(self, item: &Self::SqlNode, new: Self) -> anyhow::Result<Vec<String>> {
24 |         let sql1 = self.to_string().replace(&self.name, &new.name);
25 |         let sql2 = new.to_string();
26 |         if sql1 == sql2 {
27 |             return Ok(vec![format!(
28 |                 "ALTER TABLE ONLY {} RENAME CONSTRAINT {} TO {}",
29 |                 item.id, self.name, new.name
30 |             )]);
31 |         }
32 |         Ok(vec![])
33 |     }
34 |
35 |     fn alter(self, item: &Self::SqlNode, new: Self) -> anyhow::Result<Vec<String>> {
36 |         let mut migrations = vec![];
37 |         let sql = self.drop(item)?;
38 |         migrations.extend(sql);
39 |         let sql = new.create(item)?;
40 |         migrations.extend(sql);
41 |         Ok(migrations)
42 |     }
43 | }
44 |
45 | impl fmt::Display for ConstraintInfo {
46 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
47 |         let s = match self.node {
48 |             NodeEnum::Constraint(ref constraint)
49 |                 if constraint.contype() == ConstrType::ConstrDefault =>
50 |             {
51 |                 let expr = constraint.raw_expr.as_deref().unwrap();
52 |                 format!("DEFAULT {}", node_to_string(expr).unwrap())
53 |             }
54 |             NodeEnum::Constraint(ref constraint)
55 |                 if constraint.contype() == ConstrType::ConstrCheck =>
56 |             {
57 |                 let expr = constraint.raw_expr.as_deref().unwrap();
58 |                 format!(
59 |                     "CONSTRAINT {} CHECK ({})",
60 |                     self.name,
61 |                     node_to_string(expr).unwrap()
62 |                 )
63 |             }
64 |             // TODO: support other constraints (primary key / unique will be normalized to a separate SQL).
65 |             NodeEnum::Constraint(ref _constraint) => "".to_owned(),
66 |             ref v => unreachable!(
67 |                 "ConstraintInfo::generate_sql: node should only be constraint, got {:?}",
68 |                 v
69 |             ),
70 |         };
71 |         write!(f, "{}", s)
72 |     }
73 | }
74 |
75 | #[cfg(test)]
76 | mod tests {
77 |     use crate::{parser::Table, Differ, MigrationPlanner};
78 |     use anyhow::Result;
79 |
80 |     #[test]
81 |     fn test_constraint_info_for_default_function() -> Result<()> {
82 |         let sql = "CREATE TABLE foo (name text DEFAULT random_name(1))";
83 |         let table: Table = sql.parse()?;
84 |         let constraint = table
85 |             .columns
86 |             .get("name")
87 |             .as_ref()
88 |             .unwrap()
89 |             .default
90 |             .as_ref()
91 |             .unwrap();
92 |         let sql = constraint.to_string();
93 |         assert_eq!(sql, "DEFAULT random_name(1)");
94 |         Ok(())
95 |     }
96 |
97 |     #[test]
98 |     fn test_constraint_info_for_default_value() -> Result<()> {
99 |         let sql = "CREATE TABLE foo (name text DEFAULT 'abcd')";
100 |         let table: Table = sql.parse()?;
101 |         let constraint = table
102 |             .columns
103 |             .get("name")
104 |             .as_ref()
105 |             .unwrap()
106 |             .default
107 |             .as_ref()
108 |             .unwrap();
109 |         let sql = constraint.to_string();
110 |         assert_eq!(sql, "DEFAULT 'abcd'");
111 |         Ok(())
112 |     }
113 |
114 |     #[test]
115 |     fn table_rename_constraint_should_work() {
116 |         let s1 = "CREATE TABLE foo (name text, constraint c1 CHECK (length(name) > 5))";
117 |         let s2 = "CREATE TABLE foo (name text, constraint c2 CHECK (length(name) > 5))";
118 |         let old: Table = s1.parse().unwrap();
119 |         let new: Table = s2.parse().unwrap();
120 |         let diff = old.diff(&new).unwrap().unwrap();
121 |         let plan = diff.plan().unwrap();
122 |         assert_eq!(plan.len(), 1);
123 |         assert_eq!(
124 |             plan[0],
125 |             "ALTER TABLE ONLY public.foo RENAME CONSTRAINT c1 TO c2"
126 |         );
127 |     }
128 | }
129 |
--------------------------------------------------------------------------------
/src/parser/table/column/mod.rs:
--------------------------------------------------------------------------------
1 | mod constraint_info;
2 |
3 | use crate::{
4 |     parser::{
5 |         utils::{node_to_embed_constraint, type_name_to_string},
6 |         Column, RelationId, SchemaId, Table,
7 |     },
8 |     DeltaItem,
9 | };
10 | use pg_query::{
11 |     protobuf::{ColumnDef, ConstrType},
12 |     NodeEnum,
13 | };
14 | use std::{collections::BTreeSet, fmt};
15 |
16 | impl TryFrom<(SchemaId, ColumnDef)> for Column {
17 |     type Error = anyhow::Error;
18 |     fn try_from((id, column): (SchemaId, ColumnDef)) -> Result<Self, Self::Error> {
19 |         let name = column.colname.clone();
20 |
21 |         let type_name = type_name_to_string(column.type_name.as_ref().unwrap());
22 |
23 |         let mut constraints = BTreeSet::new();
24 |
25 |         let all_constraints: Vec<_> = column
26 |             .constraints
27 |             .iter()
28 |             .filter_map(node_to_embed_constraint)
29 |             .collect();
30 |
31 |         let mut nullable = true;
32 |         let mut default = None;
33 |         for constraint in all_constraints {
34 |             match constraint.con_type {
35 |                 ConstrType::ConstrNotnull => {
36 |                     nullable = false;
37 |                 }
38 |                 ConstrType::ConstrDefault => {
39 |                     default = Some(constraint);
40 |                 }
41 |                 _ => {
42 |                     constraints.insert(constraint);
43 |                 }
44 |             }
45 |         }
46 |
47 |         Ok(Self {
48 |             id: RelationId::new_with(id, name),
49 |             type_name,
50 |             nullable,
51 |             constraints,
52 |             default,
53 |             node: NodeEnum::ColumnDef(Box::new(column)),
54 |         })
55 |     }
56 | }
57 |
58 | impl Column {
59 |     pub(super) fn generate_add_sql(self) -> anyhow::Result<String> {
60 |         let sql = format!("ALTER TABLE ONLY {} ADD COLUMN {}", self.id.schema_id, self);
61 |         Ok(sql)
62 |     }
63 |
64 |     fn default_str(&self) -> Option<String> {
65 |         self.default.as_ref().map(|v| v.to_string())
66 |     }
67 | }
68 |
69 | impl DeltaItem for Column {
70 |     type SqlNode = Table;
71 |     fn drop(self, item: &Self::SqlNode) -> anyhow::Result<Vec<String>> {
72 |         let sql = format!("ALTER TABLE {} DROP COLUMN {}", item.id, self.id.name);
73 |
74 |         Ok(vec![sql])
75 |     }
76 |
77 |     fn create(self, _item: &Self::SqlNode) -> anyhow::Result<Vec<String>> {
78 |         let sql = self.generate_add_sql()?;
79 |         Ok(vec![sql])
80 |     }
81 |
82 |     fn rename(self, item: &Self::SqlNode, new: Self) -> anyhow::Result<Vec<String>> {
83 |         if self.type_name == new.type_name
84 |             && self.nullable == new.nullable
85 |             && self.default == new.default
86 |             && self.constraints == new.constraints
87 |         {
88 |             return Ok(vec![format!(
89 |                 "ALTER TABLE ONLY {} RENAME COLUMN {} TO {}",
90 |                 item.id, self.id.name, new.id.name
91 |             )]);
92 |         }
93 |         Ok(vec![])
94 |     }
95 |
96 |     fn alter(self, item: &Self::SqlNode, new: Self) -> anyhow::Result<Vec<String>> {
97 |         assert_eq!(self.id, new.id);
98 |         let mut migrations = vec![];
99 |         let mut commands = vec![];
100 |
101 |         if self.type_name != new.type_name {
102 |             commands.push(format!(
103 |                 "ALTER COLUMN {} TYPE {}",
104 |                 new.id.name, new.type_name
105 |             ));
106 |         }
107 |
108 |         if self.nullable != new.nullable {
109 |             let nullable = format!(
110 |                 "ALTER COLUMN {} {}",
111 |                 new.id.name,
112 |                 if new.nullable {
113 |                     "DROP NOT NULL"
114 |                 } else {
115 |                     "SET NOT NULL"
116 |                 }
117 |             );
118 |             commands.push(nullable);
119 |         }
120 |
121 |         if self.default != new.default {
122 |             let default = format!(
123 |                 "ALTER COLUMN {} {}",
124 |                 new.id.name,
125 |                 if let Some(v) = new.default {
126 |                     format!("SET {}", v)
127 |                 } else {
128 |                     "DROP DEFAULT".to_string()
129 |                 }
130 |             );
131 |             commands.push(default);
132 |         }
133 |
134 |         if !commands.is_empty() {
135 |             let sql = format!("ALTER TABLE {} {}", item.id, commands.join(", "));
136 |             migrations.push(sql);
137 |         }
138 |
139 |         Ok(migrations)
140 |     }
141 | }
142 |
143 | impl fmt::Display for Column {
144 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
145 |         let mut fragments = vec![self.id.name.clone(), self.type_name.clone()];
146 |         if !self.nullable {
147 |             fragments.push("NOT NULL".to_owned());
148 |         }
149 |         if let Some(default) = self.default_str() {
150 |             fragments.push(default);
151 |         }
152 |         for constraint in &self.constraints {
153 |             fragments.push(constraint.to_string());
154 |         }
155 |
156 |         write!(f, "{}", fragments.join(" "))
157 |     }
158 | }
159 |
160 | #[cfg(test)]
161 | mod tests {
162 |     use crate::{Differ, MigrationPlanner};
163 |
164 |     use super::*;
165 |
166 |     #[test]
167 |     fn table_add_column_with_default_function_should_work() {
168 |         let s1 = "CREATE TABLE foo (name text)";
169 |         let s2 = "CREATE TABLE foo (name text default random_name())";
170 |         let old: Table = s1.parse().unwrap();
171 |         let new: Table = s2.parse().unwrap();
172 |         let diff = old.diff(&new).unwrap().unwrap();
173 |         let plan = diff.plan().unwrap();
174 |         assert_eq!(plan.len(), 1);
175 |         assert_eq!(
176 |             plan[0],
177 |             "ALTER TABLE public.foo ALTER COLUMN name SET DEFAULT random_name()"
178 |         );
179 |     }
180 |
181 |     #[test]
182 |     fn table_add_column_with_default_value_should_work() {
183 |         let s1 = "CREATE TABLE foo (name text)";
184 |         let s2 = "CREATE TABLE foo (name text default '')";
185 |         let old: Table = s1.parse().unwrap();
186 |         let new: Table = s2.parse().unwrap();
187 |         let diff = old.diff(&new).unwrap().unwrap();
188 |         let plan = diff.plan().unwrap();
189 |         assert_eq!(plan.len(), 1);
190 |         assert_eq!(
191 |             plan[0],
192 |             "ALTER TABLE public.foo ALTER COLUMN name SET DEFAULT ''"
193 |         );
194 |     }
195 |
196 |     #[test]
197 |     fn table_change_column_type_should_work() {
198 |         let s1 = "CREATE TABLE foo (name varchar(128))";
199 |         let s2 = "CREATE TABLE foo (name varchar(256))";
200 |         let old: Table = s1.parse().unwrap();
201 |         let new: Table = s2.parse().unwrap();
202 |         let diff = old.diff(&new).unwrap().unwrap();
203 |         let plan = diff.plan().unwrap();
204 |         assert_eq!(plan.len(), 1);
205 |         assert_eq!(
206 |             plan[0],
207 |             "ALTER TABLE public.foo ALTER COLUMN name TYPE pg_catalog.varchar(256)"
208 |         );
209 |     }
210 |
211 |     #[test]
212 |     fn table_change_column_array_type_should_work() {
213 |         let s1 = "CREATE TABLE foo (name text[][4])";
214 |         let s2 = "CREATE TABLE foo (name varchar(256)[][5])";
215 |         let old: Table = s1.parse().unwrap();
216 |         let new: Table = s2.parse().unwrap();
217 |         let diff = old.diff(&new).unwrap().unwrap();
218 |         let plan = diff.plan().unwrap();
219 |         assert_eq!(plan.len(), 1);
220 |         assert_eq!(
221 |             plan[0],
222 |             "ALTER TABLE public.foo ALTER COLUMN name TYPE pg_catalog.varchar(256)[][5]"
223 |         );
224 |     }
225 |
226 |     #[test]
227 |     fn table_add_column_array_type_should_work() {
228 |         let s1 = "CREATE TABLE foo (name varchar(256))";
229 |         let s2 = "CREATE TABLE foo (name varchar(256), tags text [])";
230 |         let old: Table = s1.parse().unwrap();
231 |         let new: Table = s2.parse().unwrap();
232 |         let diff = old.diff(&new).unwrap().unwrap();
233 |         let plan = diff.plan().unwrap();
234 |         assert_eq!(plan.len(), 1);
235 |         assert_eq!(
236 |             plan[0],
237 |             "ALTER TABLE ONLY public.foo ADD COLUMN tags text[]"
238 |         );
239 |     }
240 |
241 |     #[test]
242 |     fn simple_table_rename_column_should_work() {
243 |         let s1 = "CREATE TABLE foo (name varchar(256))";
244 |         let s2 = "CREATE TABLE foo (name1 varchar(256))";
245 |         let old: Table = s1.parse().unwrap();
246 |         let new: Table = s2.parse().unwrap();
247 |         let diff = old.diff(&new).unwrap().unwrap();
248 |         let plan = diff.plan().unwrap();
249 |         assert_eq!(plan.len(), 1);
250 |         assert_eq!(
251 |             plan[0],
252 |             "ALTER TABLE ONLY public.foo RENAME COLUMN name TO name1"
253 |         );
254 |     }
255 |
256 |     #[test]
257 |     fn table_rename_column_should_work() {
258 |         let s1 = "CREATE TABLE public.todos (
259 |             title text NOT NULL,
260 |             completed boolean,
261 |             id bigint DEFAULT nextval('public.todos_id_seq'::regclass) NOT NULL,
262 |             CONSTRAINT todos_title_check1 CHECK (length(title) > 5)
263 |         )";
264 |         let s2 = "CREATE TABLE public.todos (
265 |             title text NOT NULL,
266 |             completed1 boolean,
267 |             id bigint DEFAULT nextval('public.todos_id_seq'::regclass) NOT NULL,
268 |             CONSTRAINT todos_title_check1 CHECK (length(title) > 5)
269 |         )";
270 |         let t1: Table = s1.parse().unwrap();
271 |         let t2: Table = s2.parse().unwrap();
272 |         let diff = t1.diff(&t2).unwrap().unwrap();
273 |         let plan = diff.plan().unwrap();
274 |         assert_eq!(plan.len(), 1);
275 |         assert_eq!(
276 |             plan[0],
277 |             "ALTER TABLE ONLY public.todos RENAME COLUMN completed TO completed1"
278 |         );
279 |     }
280 | }
281 |
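To see how `Column::alter` batches several changes into one statement, here is a minimal sketch (not part of the repo); it assumes the same public `Differ`/`MigrationPlanner` API the tests above use and a hypothetical `renovate` import path:

    use renovate::{parser::Table, Differ, MigrationPlanner}; // hypothetical path

    fn main() -> anyhow::Result<()> {
        let old: Table = "CREATE TABLE foo (name text)".parse()?;
        let new: Table = "CREATE TABLE foo (name varchar(64) NOT NULL DEFAULT 'x')".parse()?;
        let plan = old.diff(&new)?.unwrap().plan()?;
        // type, nullability and default changed together, so alter() should join the
        // three ALTER COLUMN commands into a single comma-separated ALTER TABLE
        for sql in plan {
            println!("{}", sql);
        }
        Ok(())
    }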
--------------------------------------------------------------------------------
/src/parser/table/mod.rs:
--------------------------------------------------------------------------------
1 | mod alter_table;
2 | mod column;
3 | mod table_constraint;
4 | mod table_index;
5 | mod table_owner;
6 | mod table_policy;
7 | mod table_rls;
8 | mod table_sequence;
9 | mod table_trigger;
10 |
11 | use super::{Column, ConstraintInfo, SchemaId, Table};
12 | use crate::{MigrationPlanner, MigrationResult, NodeDelta, NodeDiff, NodeItem};
13 | use indexmap::IndexMap;
14 | use pg_query::{protobuf::CreateStmt, NodeEnum, NodeRef};
15 |
16 | impl NodeItem for Table {
17 |     type Inner = CreateStmt;
18 |
19 |     fn id(&self) -> String {
20 |         self.id.to_string()
21 |     }
22 |
23 |     fn type_name(&self) -> &'static str {
24 |         "table"
25 |     }
26 |
27 |     fn node(&self) -> &NodeEnum {
28 |         &self.node
29 |     }
30 |
31 |     fn inner(&self) -> anyhow::Result<&Self::Inner> {
32 |         match &self.node {
33 |             NodeEnum::CreateStmt(stmt) => Ok(stmt),
34 |             _ => anyhow::bail!("not a create table statement"),
35 |         }
36 |     }
37 |
38 |     fn revert(&self) -> anyhow::Result<NodeEnum> {
39 |         let sql = format!("DROP TABLE {}", self.id);
40 |         let parsed = pg_query::parse(&sql)?;
41 |         let node = parsed.protobuf.nodes()[0].0;
42 |         match node {
43 |             NodeRef::DropStmt(stmt) => Ok(NodeEnum::DropStmt(stmt.clone())),
44 |             _ => anyhow::bail!("not a drop statement"),
45 |         }
46 |     }
47 | }
48 |
49 | impl TryFrom<&CreateStmt> for Table {
50 |     type Error = anyhow::Error;
51 |     fn try_from(stmt: &CreateStmt) -> Result<Self, Self::Error> {
52 |         let id = SchemaId::from(stmt.relation.as_ref());
53 |         let (columns, constraints) = parse_nodes(id.clone(), stmt)?;
54 |         let node = NodeEnum::CreateStmt(stmt.clone());
55 |         Ok(Self {
56 |             id,
57 |             columns,
58 |             constraints,
59 |             node,
60 |         })
61 |     }
62 | }
63 |
64 | impl MigrationPlanner for NodeDiff<Table> {
65 |     type Migration = String;
66 |
67 |     fn drop(&self) -> MigrationResult<Self::Migration> {
68 |         if let Some(old) = &self.old {
69 |             let sqls = vec![old.revert()?.deparse()?];
70 |             Ok(sqls)
71 |         } else {
72 |             Ok(vec![])
73 |         }
74 |     }
75 |
76 |     fn create(&self) -> MigrationResult<Self::Migration> {
77 |         if let Some(new) = &self.new {
78 |             let sqls = vec![new.node.deparse()?];
79 |             Ok(sqls)
80 |         } else {
81 |             Ok(vec![])
82 |         }
83 |     }
84 |
85 |     fn alter(&self) -> MigrationResult<Self::Migration> {
86 |         match (&self.old, &self.new) {
87 |             (Some(old), Some(new)) => {
88 |                 let delta =
89 |                     NodeDelta::create(old.columns.iter().collect(), new.columns.iter().collect());
90 |                 let mut migrations = delta.plan(old)?;
91 |                 let delta = NodeDelta::create(
92 |                     old.constraints.iter().collect(),
93 |                     new.constraints.iter().collect(),
94 |                 );
95 |                 migrations.extend(delta.plan(old)?);
96 |                 Ok(migrations)
97 |             }
98 |             _ => Ok(vec![]),
99 |         }
100 |     }
101 | }
102 |
103 | fn parse_nodes(
104 |     id: SchemaId,
105 |     stmt: &CreateStmt,
106 | ) -> anyhow::Result<(IndexMap<String, Column>, IndexMap<String, ConstraintInfo>)> {
107 |     let mut columns = IndexMap::new();
108 |     let mut constraints = IndexMap::new();
109 |
110 |     for node in stmt.table_elts.iter().filter_map(|n| n.node.as_ref()) {
111 |         match node {
112 |             NodeEnum::ColumnDef(col) => {
113 |                 let column = Column::try_from((id.clone(), col.as_ref().clone()))?;
114 |                 columns.insert(column.id.name.to_string(), column);
115 |             }
116 |             NodeEnum::Constraint(constraint) => {
117 |                 let constraint = ConstraintInfo::try_from(constraint.as_ref())?;
118 |                 constraints.insert(constraint.name.clone(), constraint);
119 |             }
120 |             _ => {}
121 |         }
122 |     }
123 |     Ok((columns, constraints))
124 | }
125 |
126 | #[cfg(test)]
127 | mod tests {
128 |     use pg_query::protobuf::ConstrType;
129 |
130 |     use crate::Differ;
131 |
132 |     use super::*;
133 |
134 |     #[test]
135 |     fn test_parse_and_to_string() {
136 |         let sql = "CREATE TABLE foo (id int PRIMARY KEY, name text NOT NULL UNIQUE)";
137 |         let table: Table = sql.parse().unwrap();
138 |         let sql1 = table.node.deparse().unwrap();
139 |         assert_eq!(sql, sql1);
140 |     }
141 |
142 |     #[test]
143 |     fn table_should_be_parsed_correctly() {
144 |         let sql =
145 |             "CREATE TABLE foo (id serial not null primary key, name text default random_name(), CHECK (check_name(name)))";
146 |         let table: Table = sql.parse().unwrap();
147 |         assert_eq!(table.id.to_string(), "public.foo");
148 |         assert_eq!(table.columns.len(), 2);
149 |         let col = table.columns.get("id").unwrap();
150 |         assert_eq!(col.id.name, "id");
151 |         assert_eq!(col.type_name, "serial");
152 |
153 |         assert_eq!(col.constraints.len(), 1);
154 |         let constraints: Vec<_> = col.constraints.iter().collect();
155 |         let cons = constraints.get(0).unwrap();
156 |         assert_eq!(cons.con_type, ConstrType::ConstrPrimary);
157 |         assert!(!col.nullable);
158 |
159 |         let col = table.columns.get("name").unwrap();
160 |         assert_eq!(col.id.name, "name");
161 |         assert_eq!(col.type_name, "text");
162 |         assert!(col.nullable);
163 |         assert_eq!(col.constraints.len(), 0);
164 |
165 |         let cons = col.default.as_ref().unwrap();
166 |         assert_eq!(cons.con_type, ConstrType::ConstrDefault);
167 |
168 |         let constraints: Vec<_> = table.constraints.iter().collect();
169 |         assert_eq!(constraints.len(), 1);
170 |         let (_, cons) = constraints.get(0).unwrap();
171 |         assert_eq!(cons.con_type, ConstrType::ConstrCheck);
172 |     }
173 |
174 |     #[test]
175 |     fn table_should_generate_valid_plan() {
176 |         let s1 =
177 |             "CREATE TABLE foo (id serial not null primary key, name text default random_name())";
178 |         let s2 = "CREATE TABLE foo (id serial not null primary key, name text default random_name(), email text, constraint c1 CHECK (check_name(name)))";
179 |         let old: Table = s1.parse().unwrap();
180 |         let new: Table = s2.parse().unwrap();
181 |         let diff = old.diff(&new).unwrap().unwrap();
182 |         let plan = diff.plan().unwrap();
183 |         assert_eq!(plan.len(), 2);
184 |         assert_eq!(plan[0], "ALTER TABLE ONLY public.foo ADD COLUMN email text");
185 |         assert_eq!(
186 |             plan[1],
187 |             "ALTER TABLE ONLY public.foo ADD CONSTRAINT c1 CHECK (check_name(name))"
188 |         );
189 |     }
190 |
191 |     #[test]
192 |     fn same_table_should_generate_valid_plan() {
193 |         let s1 = "CREATE TABLE public.todos (title text, completed boolean)";
194 |         let s2 = "CREATE TABLE public.todos (title text, completed boolean)";
195 |         let old: Table = s1.parse().unwrap();
196 |         let new: Table = s2.parse().unwrap();
197 |         let diff = old.diff(&new).unwrap();
198 |         assert!(diff.is_none());
199 |     }
200 |
201 |     #[test]
202 |     fn table_level_constraint_should_generate_correct_migration() {
203 |         let s1 = "CREATE TABLE users (
204 |             name TEXT NOT NULL, constraint c1 CHECK (length(name) > 4)
205 |         )";
206 |         let s2 = "CREATE TABLE users (
207 |             name TEXT NOT NULL, constraint c1 CHECK (length(name) > 5)
208 |         )";
209 |         let old: Table = s1.parse().unwrap();
210 |         let new: Table = s2.parse().unwrap();
211 |         let diff = Differ::diff(&old, &new).unwrap().unwrap();
212 |         let plan = diff.plan().unwrap();
213 |         assert_eq!(plan.len(), 2);
214 |         assert_eq!(plan[0], "ALTER TABLE ONLY public.users DROP CONSTRAINT c1");
215 |         assert_eq!(
216 |             plan[1],
217 |             "ALTER TABLE ONLY public.users ADD CONSTRAINT c1 CHECK (length(name) > 5)"
218 |         );
219 |     }
220 | }
221 |
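The `alter` branch of the planner above runs two delta passes — columns first, then table-level constraints — and concatenates their plans. A small sketch of the resulting ordering (assumed public API, mirroring the tests in this file):

    use renovate::{parser::Table, Differ, MigrationPlanner}; // hypothetical path

    fn main() -> anyhow::Result<()> {
        // one column added and one constraint changed in the same edit
        let old: Table = "CREATE TABLE t (id int, CONSTRAINT c1 CHECK (id > 0))".parse()?;
        let new: Table = "CREATE TABLE t (id int, tag text, CONSTRAINT c1 CHECK (id > 1))".parse()?;
        let plan = old.diff(&new)?.unwrap().plan()?;
        // column migrations come first (ADD COLUMN tag), then constraint
        // migrations (DROP CONSTRAINT c1, ADD CONSTRAINT c1)
        for sql in plan {
            println!("{}", sql);
        }
        Ok(())
    }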
--------------------------------------------------------------------------------
/src/parser/table/table_constraint.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 |     parser::{AlterTable, AlterTableAction, ConstraintInfo, RelationId, SchemaId, TableConstraint},
3 |     NodeItem,
4 | };
5 | use pg_query::{
6 |     protobuf::{AlterTableStmt, Constraint as PgConstraint},
7 |     NodeEnum, NodeRef,
8 | };
9 |
10 | impl NodeItem for TableConstraint {
11 |     type Inner = AlterTableStmt;
12 |
13 |     fn id(&self) -> String {
14 |         self.id.name.clone()
15 |     }
16 |
17 |     fn type_name(&self) -> &'static str {
18 |         "constraint"
19 |     }
20 |
21 |     fn node(&self) -> &NodeEnum {
22 |         &self.node
23 |     }
24 |
25 |     fn inner(&self) -> anyhow::Result<&Self::Inner> {
26 |         match &self.node {
27 |             NodeEnum::AlterTableStmt(stmt) => Ok(stmt),
28 |             _ => anyhow::bail!("not an alter table statement"),
29 |         }
30 |     }
31 |
32 |     fn revert(&self) -> anyhow::Result<NodeEnum> {
33 |         let sql = format!(
34 |             "ALTER TABLE ONLY {} DROP CONSTRAINT {}",
35 |             self.id.schema_id, self.id.name
36 |         );
37 |         let parsed = pg_query::parse(&sql)?;
38 |         let node = parsed.protobuf.nodes()[0].0;
39 |         match node {
40 |             NodeRef::AlterTableStmt(stmt) => Ok(NodeEnum::AlterTableStmt(stmt.clone())),
41 |             _ => anyhow::bail!("not an alter table drop constraint statement"),
42 |         }
43 |     }
44 | }
45 |
46 | impl TryFrom<AlterTable> for TableConstraint {
47 |     type Error = anyhow::Error;
48 |     fn try_from(AlterTable { id, action, node }: AlterTable) -> Result<Self, Self::Error> {
49 |         match action {
50 |             AlterTableAction::Constraint(info) => Ok(TableConstraint::new(id, *info, node)),
51 |             _ => anyhow::bail!("not an add constraint"),
52 |         }
53 |     }
54 | }
55 |
56 | impl TableConstraint {
57 |     fn new(id: SchemaId, info: ConstraintInfo, node: NodeEnum) -> Self {
58 |         let id = RelationId::new_with(id, info.name.clone());
59 |         Self { id, info, node }
60 |     }
61 | }
62 |
63 | impl TryFrom<&PgConstraint> for ConstraintInfo {
64 |     type Error = anyhow::Error;
65 |     fn try_from(constraint: &PgConstraint) -> Result<Self, Self::Error> {
66 |         let con_type = constraint.contype();
67 |         let node = NodeEnum::Constraint(Box::new(constraint.clone()));
68 |         let name = constraint.conname.clone();
69 |         Ok(Self {
70 |             name,
71 |             con_type,
72 |             node,
73 |         })
74 |     }
75 | }
76 |
77 | #[cfg(test)]
78 | mod tests {
79 |     use pg_query::protobuf::ConstrType;
80 |
81 |     use super::*;
82 |     use crate::{Differ, MigrationPlanner};
83 |
84 |     #[test]
85 |     fn alter_table_constraint_should_parse() {
86 |         let sql = "ALTER TABLE ONLY users ADD CONSTRAINT users_pkey PRIMARY KEY (id)";
87 |         let parsed: TableConstraint = sql.parse().unwrap();
88 |         assert_eq!(parsed.id.name, "users_pkey");
89 |         assert_eq!(parsed.id.schema_id.to_string(), "public.users");
90 |         assert_eq!(parsed.info.name, "users_pkey");
91 |         assert_eq!(parsed.info.con_type, ConstrType::ConstrPrimary);
92 |     }
93 |
94 |     #[test]
95 |     fn alter_table_constraint_should_revert() {
96 |         let sql = "ALTER TABLE ONLY users ADD CONSTRAINT users_pkey PRIMARY KEY (id)";
97 |         let parsed: TableConstraint = sql.parse().unwrap();
98 |         let reverted = parsed.revert().unwrap().deparse().unwrap();
99 |         assert_eq!(
100 |             reverted,
101 |             "ALTER TABLE ONLY public.users DROP CONSTRAINT users_pkey"
102 |         );
103 |     }
104 |
105 |     #[test]
106 |     fn alter_table_constraint_migration_should_drop_and_create() {
107 |         let sql1 = "ALTER TABLE ONLY users ADD CONSTRAINT users_pkey PRIMARY KEY (id)";
108 |         let sql2 = "ALTER TABLE ONLY users ADD CONSTRAINT users_pkey PRIMARY KEY (id, name)";
109 |         let old: TableConstraint = sql1.parse().unwrap();
110 |         let new: TableConstraint = sql2.parse().unwrap();
111 |         let diff = Differ::diff(&old, &new).unwrap().unwrap();
112 |         let plan = diff.plan().unwrap();
113 |         assert_eq!(plan.len(), 2);
114 |         assert_eq!(
115 |             plan[0],
116 |             "ALTER TABLE ONLY public.users DROP CONSTRAINT users_pkey"
117 |         );
118 |         assert_eq!(
119 |             plan[1],
120 |             "ALTER TABLE ONLY users ADD CONSTRAINT users_pkey PRIMARY KEY (id, name)"
121 |         );
122 |     }
123 |
124 |     #[test]
125 |     fn alter_table_unique_constraint_migration_should_drop_and_create() {
126 |         let sql1 = "ALTER TABLE ONLY users ADD CONSTRAINT c1 UNIQUE (name)";
127 |         let sql2 = "ALTER TABLE ONLY users ADD CONSTRAINT c1 UNIQUE (id, name)";
128 |         let old: TableConstraint = sql1.parse().unwrap();
129 |         let new: TableConstraint = sql2.parse().unwrap();
130 |         let diff = Differ::diff(&old, &new).unwrap().unwrap();
131 |         let plan = diff.plan().unwrap();
132 |         assert_eq!(plan.len(), 2);
133 |         assert_eq!(plan[0], "ALTER TABLE ONLY public.users DROP CONSTRAINT c1");
134 |         assert_eq!(
135 |             plan[1],
136 |             "ALTER TABLE ONLY users ADD CONSTRAINT c1 UNIQUE (id, name)"
137 |         );
138 |     }
139 | }
140 |
--------------------------------------------------------------------------------
/src/parser/table/table_index.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 |     parser::{RelationId, TableIndex},
3 |     NodeItem,
4 | };
5 | use pg_query::{protobuf::IndexStmt, NodeEnum, NodeRef};
6 |
7 | impl NodeItem for TableIndex {
8 |     type Inner = IndexStmt;
9 |     fn id(&self) -> String {
10 |         self.id.name.clone()
11 |     }
12 |
13 |     fn type_name(&self) -> &'static str {
14 |         "index"
15 |     }
16 |
17 |     fn node(&self) -> &NodeEnum {
18 |         &self.node
19 |     }
20 |
21 |     fn inner(&self) -> anyhow::Result<&Self::Inner> {
22 |         match &self.node {
23 |             NodeEnum::IndexStmt(stmt) => Ok(stmt),
24 |             _ => anyhow::bail!("not a create index statement"),
25 |         }
26 |     }
27 |
28 |     fn revert(&self) -> anyhow::Result<NodeEnum> {
29 |         let sql = format!("DROP INDEX {}", self.id.name);
30 |         let parsed = pg_query::parse(&sql)?;
31 |         let node = parsed.protobuf.nodes()[0].0;
32 |         match node {
33 |             NodeRef::DropStmt(stmt) => Ok(NodeEnum::DropStmt(stmt.clone())),
34 |             _ => anyhow::bail!("not a drop index statement"),
35 |         }
36 |     }
37 | }
38 |
39 | impl TryFrom<&IndexStmt> for TableIndex {
40 |     type Error = anyhow::Error;
41 |     fn try_from(stmt: &IndexStmt) -> Result<Self, Self::Error> {
42 |         let id = get_id(stmt);
43 |         let node = pg_query::NodeEnum::IndexStmt(Box::new(stmt.clone()));
44 |         Ok(Self { id, node })
45 |     }
46 | }
47 |
48 | fn get_id(stmt: &IndexStmt) -> RelationId {
49 |     let name = stmt.idxname.clone();
50 |     assert!(stmt.relation.is_some());
51 |     let schema_id = stmt.relation.as_ref().unwrap().into();
52 |     RelationId { name, schema_id }
53 | }
54 |
55 | #[cfg(test)]
56 | mod tests {
57 |     use super::*;
58 |     use crate::{Differ, MigrationPlanner};
59 |
60 |     #[test]
61 |     fn index_should_parse() {
62 |         let sql = "CREATE INDEX foo ON bar (baz)";
63 |         let index: TableIndex = sql.parse().unwrap();
64 |         assert_eq!(index.id.name, "foo");
65 |         assert_eq!(index.id.schema_id.schema, "public");
66 |         assert_eq!(index.id.schema_id.name, "bar");
67 |     }
68 |
69 |     #[test]
70 |     fn unchanged_index_should_return_none() {
71 |         let sql1 = "CREATE INDEX foo ON bar (baz)";
72 |         let sql2 = "CREATE INDEX foo ON bar (baz)";
73 |         let old: TableIndex = sql1.parse().unwrap();
74 |         let new: TableIndex = sql2.parse().unwrap();
75 |         let diff = old.diff(&new).unwrap();
76 |         assert!(diff.is_none());
77 |     }
78 |
79 |     #[test]
80 |     fn changed_index_should_generate_migration() {
81 |         let sql1 = "CREATE INDEX foo ON bar (baz)";
82 |         let sql2 = "CREATE INDEX foo ON bar (ooo)";
83 |         let old: TableIndex = sql1.parse().unwrap();
84 |         let new: TableIndex = sql2.parse().unwrap();
85 |         let diff = old.diff(&new).unwrap().unwrap();
86 |         let migrations = diff.plan().unwrap();
87 |         assert_eq!(migrations[0], "DROP INDEX foo");
88 |         assert_eq!(migrations[1], "CREATE INDEX foo ON bar USING btree (ooo)");
89 |     }
90 | }
91 |
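A short sketch of the `revert` contract used by the drop-and-create strategy above (hypothetical import paths; `NodeItem` must be in scope for the method call):

    use renovate::{parser::TableIndex, NodeItem}; // hypothetical path

    fn main() -> anyhow::Result<()> {
        let idx: TableIndex = "CREATE INDEX foo ON bar (baz)".parse()?;
        // revert() builds the inverse statement; deparse() turns it back into SQL
        assert_eq!(idx.revert()?.deparse()?, "DROP INDEX foo");
        Ok(())
    }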
(ooo)"); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/parser/table/table_owner.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | parser::{AlterTable, AlterTableAction, SchemaId, TableOwner}, 3 | NodeItem, 4 | }; 5 | use pg_query::{protobuf::AlterTableStmt, NodeEnum, NodeRef}; 6 | 7 | impl NodeItem for TableOwner { 8 | type Inner = AlterTableStmt; 9 | 10 | fn id(&self) -> String { 11 | self.id.to_string() 12 | } 13 | 14 | fn type_name(&self) -> &'static str { 15 | "table owner" 16 | } 17 | 18 | fn node(&self) -> &NodeEnum { 19 | &self.node 20 | } 21 | 22 | fn inner(&self) -> anyhow::Result<&Self::Inner> { 23 | match &self.node { 24 | NodeEnum::AlterTableStmt(stmt) => Ok(stmt), 25 | _ => anyhow::bail!("not a alter table statement"), 26 | } 27 | } 28 | 29 | /// we don't know what the old owner is, so we can only revert to session_user 30 | fn revert(&self) -> anyhow::Result { 31 | let sql = format!("ALTER TABLE {} OWNER TO session_user", self.id); 32 | let parsed = pg_query::parse(&sql)?; 33 | let node = parsed.protobuf.nodes()[0].0; 34 | match node { 35 | NodeRef::AlterTableStmt(stmt) => Ok(NodeEnum::AlterTableStmt(stmt.clone())), 36 | _ => anyhow::bail!("not a alter table owner to statement"), 37 | } 38 | } 39 | } 40 | 41 | impl TryFrom for TableOwner { 42 | type Error = anyhow::Error; 43 | fn try_from(AlterTable { id, action, node }: AlterTable) -> Result { 44 | match action { 45 | AlterTableAction::Owner(owner) => Ok(TableOwner::new(id, owner, node)), 46 | _ => anyhow::bail!("not an owner change"), 47 | } 48 | } 49 | } 50 | 51 | impl TableOwner { 52 | fn new(id: SchemaId, owner: String, node: NodeEnum) -> Self { 53 | Self { id, owner, node } 54 | } 55 | } 56 | 57 | #[cfg(test)] 58 | mod tests { 59 | use super::*; 60 | use crate::{Differ, MigrationPlanner}; 61 | 62 | #[test] 63 | fn table_owner_to_should_parse() { 64 | let sql = "ALTER TABLE foo OWNER TO bar"; 65 | let parsed: TableOwner = sql.parse().unwrap(); 66 | assert_eq!(parsed.id.name, "foo"); 67 | assert_eq!(parsed.owner, "bar"); 68 | } 69 | 70 | #[test] 71 | fn table_owner_to_should_revert() { 72 | let sql = "ALTER TABLE foo OWNER TO bar"; 73 | let parsed: TableOwner = sql.parse().unwrap(); 74 | let reverted = parsed.revert().unwrap().deparse().unwrap(); 75 | assert_eq!(reverted, "ALTER TABLE public.foo OWNER TO SESSION_USER"); 76 | } 77 | 78 | #[test] 79 | fn table_owner_to_should_generate_drop_create_migration() { 80 | let sql1 = "ALTER TABLE foo OWNER TO bar"; 81 | let sql2 = "ALTER TABLE foo OWNER TO baz"; 82 | let old: TableOwner = sql1.parse().unwrap(); 83 | let new: TableOwner = sql2.parse().unwrap(); 84 | let diff = old.diff(&new).unwrap().unwrap(); 85 | let plan = diff.plan().unwrap(); 86 | assert_eq!(plan.len(), 2); 87 | assert_eq!(plan[0], "ALTER TABLE public.foo OWNER TO SESSION_USER"); 88 | assert_eq!(plan[1], sql2); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/parser/table/table_policy.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | parser::{utils::node_to_string, RelationId, TablePolicy}, 3 | NodeItem, 4 | }; 5 | use pg_query::{protobuf::CreatePolicyStmt, NodeEnum, NodeRef}; 6 | 7 | impl NodeItem for TablePolicy { 8 | type Inner = CreatePolicyStmt; 9 | fn id(&self) -> String { 10 | self.id.name.clone() 11 | } 12 | 13 | fn type_name(&self) -> &'static str { 14 | "policy" 15 | } 16 | 17 | 
17 |     fn node(&self) -> &NodeEnum {
18 |         &self.node
19 |     }
20 |
21 |     fn inner(&self) -> anyhow::Result<&Self::Inner> {
22 |         match &self.node {
23 |             NodeEnum::CreatePolicyStmt(stmt) => Ok(stmt),
24 |             _ => anyhow::bail!("not a create policy statement"),
25 |         }
26 |     }
27 |
28 |     fn revert(&self) -> anyhow::Result<NodeEnum> {
29 |         let sql = format!("DROP POLICY {} ON {}", self.id.name, self.id.schema_id);
30 |         let parsed = pg_query::parse(&sql)?;
31 |         let node = parsed.protobuf.nodes()[0].0;
32 |         match node {
33 |             NodeRef::DropStmt(stmt) => Ok(NodeEnum::DropStmt(stmt.clone())),
34 |             _ => anyhow::bail!("not a drop policy statement"),
35 |         }
36 |     }
37 | }
38 |
39 | impl TryFrom<&CreatePolicyStmt> for TablePolicy {
40 |     type Error = anyhow::Error;
41 |     fn try_from(stmt: &CreatePolicyStmt) -> Result<Self, Self::Error> {
42 |         let id = get_id(stmt);
43 |         let cmd_name = stmt.cmd_name.clone();
44 |         let permissive = stmt.permissive;
45 |         let roles = stmt.roles.iter().filter_map(node_to_string).collect();
46 |         let qual = stmt.qual.as_deref().and_then(node_to_string);
47 |         let with_check = stmt.with_check.as_deref().and_then(node_to_string);
48 |         let node = NodeEnum::CreatePolicyStmt(Box::new(stmt.clone()));
49 |         Ok(Self {
50 |             id,
51 |             cmd_name,
52 |             permissive,
53 |             roles,
54 |             qual,
55 |             with_check,
56 |             node,
57 |         })
58 |     }
59 | }
60 |
61 | fn get_id(stmt: &CreatePolicyStmt) -> RelationId {
62 |     let name = stmt.policy_name.clone();
63 |     assert!(stmt.table.is_some());
64 |     let schema_id = stmt.table.as_ref().unwrap().into();
65 |     RelationId { name, schema_id }
66 | }
67 |
68 | #[cfg(test)]
69 | mod tests {
70 |     use super::*;
71 |     use crate::{Differ, MigrationPlanner};
72 |
73 |     #[test]
74 |     fn policy_should_parse() {
75 |         let sql = "CREATE POLICY baz ON foo.bar FOR ALL USING(username = CURRENT_USER) WITH CHECK (username = CURRENT_USER)";
76 |         let p: TablePolicy = sql.parse().unwrap();
77 |         assert_eq!(p.id.name, "baz");
78 |         assert_eq!(p.id.schema_id.schema, "foo");
79 |         assert_eq!(p.id.schema_id.name, "bar");
80 |     }
81 |
82 |     #[test]
83 |     fn unchanged_policy_should_return_none() {
84 |         let sql1 = "CREATE POLICY foo ON bar FOR ALL TO postgres USING(true)";
85 |         let sql2 = "CREATE POLICY foo ON bar FOR ALL TO postgres USING(true)";
86 |         let old: TablePolicy = sql1.parse().unwrap();
87 |         let new: TablePolicy = sql2.parse().unwrap();
88 |         let diff = old.diff(&new).unwrap();
89 |         assert!(diff.is_none());
90 |     }
91 |
92 |     #[test]
93 |     fn changed_policy_should_generate_migration() {
94 |         let sql1 = "CREATE POLICY foo ON bar FOR ALL TO postgres USING(true)";
95 |         let sql2 = "CREATE POLICY foo ON bar FOR SELECT TO postgres USING(true)";
96 |         let old: TablePolicy = sql1.parse().unwrap();
97 |         let new: TablePolicy = sql2.parse().unwrap();
98 |         let diff = old.diff(&new).unwrap().unwrap();
99 |         let migrations = diff.plan().unwrap();
100 |         assert_eq!(migrations[0], "DROP POLICY foo ON public.bar");
101 |         assert_eq!(
102 |             migrations[1],
103 |             "CREATE POLICY foo ON bar FOR SELECT TO postgres USING (true) "
104 |         );
105 |     }
106 | }
107 |
--------------------------------------------------------------------------------
/src/parser/table/table_rls.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 |     parser::{AlterTable, AlterTableAction, SchemaId, TableRls},
3 |     NodeItem,
4 | };
5 | use pg_query::{protobuf::AlterTableStmt, NodeEnum, NodeRef};
6 |
7 | impl NodeItem for TableRls {
8 |     type Inner = AlterTableStmt;
9 |
10 |     fn id(&self) -> String {
11 |         self.id.to_string()
12 |     }
13 |
14 |     fn type_name(&self) -> &'static str {
15 |         "table RLS"
16 |     }
17 |
18 |     fn node(&self) -> &NodeEnum {
19 |         &self.node
20 |     }
21 |
22 |     fn inner(&self) -> anyhow::Result<&Self::Inner> {
23 |         match &self.node {
24 |             NodeEnum::AlterTableStmt(stmt) => Ok(stmt),
25 |             _ => anyhow::bail!("not an alter table statement"),
26 |         }
27 |     }
28 |
29 |     /// reverting "enable row level security" simply disables it again
30 |     fn revert(&self) -> anyhow::Result<NodeEnum> {
31 |         let sql = format!("ALTER TABLE {} DISABLE ROW LEVEL SECURITY", self.id);
32 |         let parsed = pg_query::parse(&sql)?;
33 |         let node = parsed.protobuf.nodes()[0].0;
34 |         match node {
35 |             NodeRef::AlterTableStmt(stmt) => Ok(NodeEnum::AlterTableStmt(stmt.clone())),
36 |             _ => anyhow::bail!("not an alter table RLS statement"),
37 |         }
38 |     }
39 | }
40 |
41 | impl TryFrom<AlterTable> for TableRls {
42 |     type Error = anyhow::Error;
43 |     fn try_from(AlterTable { id, action, node }: AlterTable) -> Result<Self, Self::Error> {
44 |         match action {
45 |             AlterTableAction::Rls => Ok(TableRls::new(id, node)),
46 |             _ => anyhow::bail!("not an RLS change"),
47 |         }
48 |     }
49 | }
50 |
51 | impl TableRls {
52 |     fn new(id: SchemaId, node: NodeEnum) -> Self {
53 |         Self { id, node }
54 |     }
55 | }
56 |
57 | #[cfg(test)]
58 | mod tests {
59 |     use super::*;
60 |     use crate::{MigrationPlanner, NodeDiff};
61 |
62 |     #[test]
63 |     fn table_rls_should_parse() {
64 |         let sql = "ALTER TABLE foo ENABLE ROW LEVEL SECURITY";
65 |         let parsed: TableRls = sql.parse().unwrap();
66 |         assert_eq!(parsed.id, SchemaId::new("public", "foo"));
67 |     }
68 |
69 |     #[test]
70 |     fn table_rls_should_revert() {
71 |         let sql = "ALTER TABLE foo ENABLE ROW LEVEL SECURITY";
72 |         let parsed: TableRls = sql.parse().unwrap();
73 |         let reverted = parsed.revert().unwrap().deparse().unwrap();
74 |         assert_eq!(
75 |             reverted,
76 |             "ALTER TABLE public.foo DISABLE ROW LEVEL SECURITY"
77 |         );
78 |     }
79 |
80 |     #[test]
81 |     fn table_rls_should_generate_drop_create_migration() {
82 |         let sql1 = "ALTER TABLE foo ENABLE ROW LEVEL SECURITY";
83 |
84 |         let diff: NodeDiff<TableRls> = NodeDiff::with_old(sql1.parse().unwrap());
85 |         let plan = diff.plan().unwrap();
86 |         assert_eq!(plan, &["ALTER TABLE public.foo DISABLE ROW LEVEL SECURITY"]);
87 |     }
88 | }
89 |
--------------------------------------------------------------------------------
/src/parser/table/table_sequence.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 |     parser::{AlterTable, AlterTableAction, RelationId, SchemaId, SequenceInfo, TableSequence},
3 |     NodeItem,
4 | };
5 | use pg_query::{protobuf::AlterTableStmt, NodeEnum, NodeRef};
6 |
7 | impl NodeItem for TableSequence {
8 |     type Inner = AlterTableStmt;
9 |     fn id(&self) -> String {
10 |         self.id.name.clone()
11 |     }
12 |
13 |     fn type_name(&self) -> &'static str {
14 |         "table sequence"
15 |     }
16 |
17 |     fn node(&self) -> &NodeEnum {
18 |         &self.node
19 |     }
20 |
21 |     fn inner(&self) -> anyhow::Result<&Self::Inner> {
22 |         match self.node() {
23 |             NodeEnum::AlterTableStmt(stmt) => Ok(stmt),
24 |             _ => anyhow::bail!("not an alter table statement"),
25 |         }
26 |     }
27 |
28 |     fn revert(&self) -> anyhow::Result<NodeEnum> {
29 |         let sql = format!(
30 |             "ALTER TABLE {} ALTER COLUMN {} DROP DEFAULT",
31 |             self.id.schema_id, self.id.name
32 |         );
33 |         let parsed = pg_query::parse(&sql)?;
34 |         let node = parsed.protobuf.nodes()[0].0;
35 |         match node {
36 |             NodeRef::AlterTableStmt(stmt) => Ok(NodeEnum::AlterTableStmt(stmt.clone())),
37 |             _ => anyhow::bail!("not an alter table statement"),
38 |         }
39 |     }
40 | }
41 |
42 | impl TryFrom<AlterTable> for TableSequence {
43 |     type Error = anyhow::Error;
44 |     fn try_from(AlterTable { id, action, node }: AlterTable) -> Result<Self, Self::Error> {
45 |         match action {
46 |             AlterTableAction::Sequence(info) => Ok(TableSequence::new(id, *info, node)),
47 |             _ => anyhow::bail!("not a sequence default change"),
48 |         }
49 |     }
50 | }
51 |
52 | impl TableSequence {
53 |     fn new(id: SchemaId, info: SequenceInfo, node: NodeEnum) -> Self {
54 |         let id = RelationId::new_with(id, info.column);
55 |         Self { id, node }
56 |     }
57 | }
58 |
59 | #[cfg(test)]
60 | mod tests {
61 |     use super::*;
62 |     use crate::{Differ, MigrationPlanner};
63 |
64 |     #[test]
65 |     fn alter_table_set_default_sequence_should_parse() {
66 |         let sql = "ALTER TABLE ONLY public.users ALTER COLUMN id SET DEFAULT nextval('public.users_id_seq'::regclass)";
67 |         let parsed: TableSequence = sql.parse().unwrap();
68 |         assert_eq!(parsed.id.schema_id.to_string(), "public.users");
69 |         assert_eq!(parsed.id.name, "id");
70 |     }
71 |
72 |     #[test]
73 |     fn alter_table_set_default_sequence_should_revert() {
74 |         let sql = "ALTER TABLE ONLY public.users ALTER COLUMN id SET DEFAULT nextval('public.users_id_seq'::regclass)";
75 |         let parsed: TableSequence = sql.parse().unwrap();
76 |         let reverted = parsed.revert().unwrap().deparse().unwrap();
77 |         assert_eq!(
78 |             reverted,
79 |             "ALTER TABLE public.users ALTER COLUMN id DROP DEFAULT"
80 |         );
81 |     }
82 |
83 |     #[test]
84 |     fn alter_table_set_default_sequence_migration_should_drop_and_create() {
85 |         let sql1 = "ALTER TABLE ONLY public.users ALTER COLUMN id SET DEFAULT nextval('public.users_id_seq'::regclass)";
86 |         let sql2 = "ALTER TABLE ONLY public.users ALTER COLUMN id SET DEFAULT nextval('public.users_id_seq1'::regclass)";
87 |         let old: TableSequence = sql1.parse().unwrap();
88 |         let new: TableSequence = sql2.parse().unwrap();
89 |         let diff = Differ::diff(&old, &new).unwrap().unwrap();
90 |         let plan = diff.plan().unwrap();
91 |         assert_eq!(plan.len(), 2);
92 |         assert_eq!(
93 |             plan[0],
94 |             "ALTER TABLE public.users ALTER COLUMN id DROP DEFAULT"
95 |         );
96 |         assert_eq!(
97 |             plan[1],
98 |             "ALTER TABLE ONLY public.users ALTER COLUMN id SET DEFAULT nextval('public.users_id_seq1'::regclass)"
99 |         );
100 |     }
101 | }
102 |
--------------------------------------------------------------------------------
/src/parser/table/table_trigger.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 |     parser::{RelationId, Trigger},
3 |     NodeItem,
4 | };
5 | use pg_query::{protobuf::CreateTrigStmt, NodeEnum, NodeRef};
6 |
7 | impl NodeItem for Trigger {
8 |     type Inner = CreateTrigStmt;
9 |
10 |     fn id(&self) -> String {
11 |         self.id.name.clone()
12 |     }
13 |
14 |     fn type_name(&self) -> &'static str {
15 |         "trigger"
16 |     }
17 |
18 |     fn node(&self) -> &NodeEnum {
19 |         &self.node
20 |     }
21 |
22 |     fn inner(&self) -> anyhow::Result<&Self::Inner> {
23 |         match &self.node {
24 |             NodeEnum::CreateTrigStmt(stmt) => Ok(stmt),
25 |             _ => anyhow::bail!("not a create trigger statement"),
26 |         }
27 |     }
28 |
29 |     fn revert(&self) -> anyhow::Result<NodeEnum> {
30 |         let sql = format!("DROP TRIGGER {} ON {}", self.id.name, self.id.schema_id);
31 |         let parsed = pg_query::parse(&sql)?;
32 |         let node = parsed.protobuf.nodes()[0].0;
33 |         match node {
34 |             NodeRef::DropStmt(stmt) => Ok(NodeEnum::DropStmt(stmt.clone())),
35 |             _ => anyhow::bail!("not a drop trigger statement"),
36 |         }
37 |     }
38 | }
39 |
40 | impl TryFrom<&CreateTrigStmt> for Trigger {
41 |     type Error = anyhow::Error;
42 |     fn try_from(stmt: &CreateTrigStmt) -> Result<Self, Self::Error> {
43 |         let name = stmt.trigname.clone();
44 |         let schema_id = stmt.relation.as_ref().into();
45 |         let id = RelationId::new_with(schema_id, name);
46 |         let node = NodeEnum::CreateTrigStmt(Box::new(stmt.clone()));
47 |         Ok(Self { id, node })
48 |     }
49 | }
50 |
51 | #[allow(dead_code)]
52 | fn get_id(stmt: &CreateTrigStmt) -> RelationId {
53 |     let name = stmt.trigname.clone();
54 |     assert!(stmt.relation.is_some());
55 |     let schema_id = stmt.relation.as_ref().unwrap().into();
56 |
57 |     RelationId::new_with(schema_id, name)
58 | }
59 |
60 | #[cfg(test)]
61 | mod tests {
62 |     use crate::{Differ, MigrationPlanner};
63 |
64 |     use super::*;
65 |
66 |     #[test]
67 |     fn trigger_should_parse() {
68 |         let sql = "CREATE TRIGGER test_trigger BEFORE INSERT ON test_table FOR EACH ROW EXECUTE FUNCTION test_function()";
69 |         let trigger: Trigger = sql.parse().unwrap();
70 |         assert_eq!(trigger.id.name, "test_trigger");
71 |         assert_eq!(trigger.id.schema_id.to_string(), "public.test_table");
72 |     }
73 |
74 |     #[test]
75 |     fn trigger_diff_should_work() {
76 |         let sql1 = "CREATE TRIGGER test_trigger BEFORE INSERT ON test_table FOR EACH ROW EXECUTE FUNCTION test_function()";
77 |         let sql2 = "CREATE TRIGGER test_trigger AFTER INSERT ON test_table FOR EACH ROW EXECUTE FUNCTION test_function()";
78 |         let trigger1: Trigger = sql1.parse().unwrap();
79 |         let trigger2: Trigger = sql2.parse().unwrap();
80 |         let diff = trigger1.diff(&trigger2).unwrap().unwrap();
81 |         let plan = diff.plan().unwrap();
82 |         assert_eq!(plan.len(), 2);
83 |         assert_eq!(plan[0], "DROP TRIGGER test_trigger ON public.test_table");
84 |         assert_eq!(plan[1], sql2);
85 |     }
86 | }
87 |
--------------------------------------------------------------------------------
/src/parser/utils/macros.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 |     parser::{
3 |         AlterTable, CompositeType, EnumType, Function, MatView, Privilege, Sequence, Table,
4 |         TableConstraint, TableIndex, TableOwner, TablePolicy, TableRls, TableSequence, Trigger,
5 |         View,
6 |     },
7 |     MigrationPlanner, MigrationResult, NodeDiff, NodeItem,
8 | };
9 | use anyhow::Context;
10 | use pg_query::NodeRef;
11 | use std::str::FromStr;
12 |
13 | macro_rules! def_display {
14 |     ($($name:ident),*) => {
15 |         $(impl std::fmt::Display for $name {
16 |             fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
17 |                 let sql = self.node().deparse().map_err(|_| std::fmt::Error)?;
18 |                 write!(f, "{}", sql)
19 |             }
20 |         })*
21 |     };
22 | }
23 |
24 | macro_rules! def_simple_planner {
25 |     ($($name:ident),*) => {
26 |         $(impl MigrationPlanner for NodeDiff<$name> {
27 |             type Migration = String;
28 |
29 |             fn drop(&self) -> MigrationResult<Self::Migration> {
30 |                 if let Some(old) = &self.old {
31 |                     let sql = old.revert()?.deparse()?;
32 |                     Ok(vec![sql])
33 |                 } else {
34 |                     Ok(vec![])
35 |                 }
36 |             }
37 |
38 |             fn create(&self) -> MigrationResult<Self::Migration> {
39 |                 if let Some(new) = &self.new {
40 |                     let sql = new.to_string();
41 |                     Ok(vec![sql])
42 |                 } else {
43 |                     Ok(vec![])
44 |                 }
45 |             }
46 |
47 |             fn alter(&self) -> MigrationResult<Self::Migration> {
48 |                 Ok(vec![])
49 |             }
50 |         })*
51 |     };
52 | }
53 |
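For one concrete `$name`, `def_simple_planner!` expands to roughly the following (a sketch of the expansion, written out by hand rather than taken from compiler output):

    impl MigrationPlanner for NodeDiff<View> {
        type Migration = String;

        // old node present: the plan is its revert (e.g. DROP VIEW ...)
        fn drop(&self) -> MigrationResult<Self::Migration> {
            if let Some(old) = &self.old {
                Ok(vec![old.revert()?.deparse()?])
            } else {
                Ok(vec![])
            }
        }

        // new node present: the plan is its own SQL, via the def_display! impl
        fn create(&self) -> MigrationResult<Self::Migration> {
            if let Some(new) = &self.new {
                Ok(vec![new.to_string()])
            } else {
                Ok(vec![])
            }
        }

        // "simple" planners never alter in place; plan() falls back to drop + create
        fn alter(&self) -> MigrationResult<Self::Migration> {
            Ok(vec![])
        }
    }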
54 | macro_rules! def_from_str {
55 |     ($name:ident, $node_name:ident) => {
56 |         impl FromStr for $name {
57 |             type Err = anyhow::Error;
58 |
59 |             fn from_str(s: &str) -> anyhow::Result<Self> {
60 |                 let parsed = pg_query::parse(s)
61 |                     .with_context(|| format!("Failed to parse {}: {}", stringify!($name), s))?;
62 |                 let node = parsed.protobuf.nodes()[0].0;
63 |                 match node {
64 |                     NodeRef::$node_name(stmt) => Self::try_from(stmt),
65 |                     _ => anyhow::bail!("not a {}: {}", stringify!($name), s),
66 |                 }
67 |             }
68 |         }
69 |     };
70 |     ($name:ident) => {
71 |         impl FromStr for $name {
72 |             type Err = anyhow::Error;
73 |
74 |             fn from_str(s: &str) -> anyhow::Result<Self> {
75 |                 let parsed = pg_query::parse(s).with_context(|| {
76 |                     format!(
77 |                         "Failed to parse {} for alter table: {}",
78 |                         stringify!($name),
79 |                         s
80 |                     )
81 |                 })?;
82 |                 let node = parsed.protobuf.nodes()[0].0;
83 |                 match node {
84 |                     NodeRef::AlterTableStmt(stmt) => AlterTable::try_from(stmt)?.try_into(),
85 |                     _ => anyhow::bail!("not a {}: {}", stringify!($name), s),
86 |                 }
87 |             }
88 |         }
89 |     };
90 | }
91 |
92 | def_display!(
93 |     CompositeType,
94 |     EnumType,
95 |     Function,
96 |     MatView,
97 |     Privilege,
98 |     Sequence,
99 |     Table,
100 |     TableConstraint,
101 |     TableIndex,
102 |     TableOwner,
103 |     TablePolicy,
104 |     TableRls,
105 |     TableSequence,
106 |     Trigger,
107 |     View
108 | );
109 |
110 | def_simple_planner!(
111 |     CompositeType,
112 |     MatView,
113 |     Sequence,
114 |     TableConstraint,
115 |     TableIndex,
116 |     TableOwner,
117 |     TablePolicy,
118 |     TableRls,
119 |     TableSequence,
120 |     Trigger,
121 |     View
122 | );
123 |
124 | def_from_str!(CompositeType, CompositeTypeStmt);
125 | def_from_str!(EnumType, CreateEnumStmt);
126 | def_from_str!(Function, CreateFunctionStmt);
127 | def_from_str!(MatView, CreateTableAsStmt);
128 | def_from_str!(Sequence, CreateSeqStmt);
129 | def_from_str!(Table, CreateStmt);
130 | def_from_str!(TableConstraint);
131 | def_from_str!(TableIndex, IndexStmt);
132 | def_from_str!(TableOwner);
133 | def_from_str!(TablePolicy, CreatePolicyStmt);
134 | def_from_str!(TableRls);
135 | def_from_str!(TableSequence);
136 | def_from_str!(Trigger, CreateTrigStmt);
137 | def_from_str!(Privilege, GrantStmt);
138 | def_from_str!(View, ViewStmt);
139 |
--------------------------------------------------------------------------------
/src/parser/utils/mod.rs:
--------------------------------------------------------------------------------
1 | mod macros;
2 | mod node;
3 | pub mod parsec;
4 |
5 | pub use node::{
6 |     node_enum_to_string, node_to_embed_constraint, node_to_string, type_name_to_string,
7 | };
8 |
9 | #[allow(dead_code)]
10 | pub fn drain_where<T, Pred: Fn(&T) -> bool>(source: Vec<T>, pred: Pred) -> (Vec<T>, Vec<T>) {
11 |     let mut orig: Vec<T> = Vec::with_capacity(source.len());
12 |     let mut drained: Vec<T> = Vec::new();
13 |
14 |     for v in source.into_iter() {
15 |         if pred(&v) {
16 |             drained.push(v);
17 |         } else {
18 |             orig.push(v);
19 |         }
20 |     }
21 |     (orig, drained)
22 | }
23 |
--------------------------------------------------------------------------------
/src/parser/utils/node.rs:
--------------------------------------------------------------------------------
1 | use crate::parser::ConstraintInfo;
2 | use itertools::Itertools;
3 | use pg_query::{
4 |     protobuf::{AExprKind, RoleSpecType, SqlValueFunctionOp, TypeName},
5 |     Node, NodeEnum,
6 | };
7 |
8 | pub fn node_to_embed_constraint(node: &Node) -> Option<ConstraintInfo> {
9 |     match &node.node {
10 |         Some(NodeEnum::Constraint(v)) => ConstraintInfo::try_from(v.as_ref()).ok(),
11 |         _ => None,
12 |     }
13 | }
14 |
15 | pub fn type_name_to_string(n: &TypeName) -> String {
16 |     let typname = n.names.iter().filter_map(node_to_string).join(".");
17 |     let typmod = n.typmods.iter().filter_map(node_to_string).join("");
18 |     let array_bounds = array_bounds_to_string(&n.array_bounds);
19 |
20 |     match (typmod.as_str(), array_bounds.as_str()) {
21 |         ("", "") => typname,
22 |         ("", b) => format!("{}{}", typname, b),
23 |         (m, "") => format!("{}({})", typname, m),
24 |         (m, b) => format!("{}({}){}", typname, m, b),
25 |     }
26 | }
27 |
28 | pub fn array_bounds_to_string(bounds: &[Node]) -> String {
29 |     bounds
30 |         .iter()
31 |         .filter_map(node_to_string)
32 |         .map(|s| {
33 |             if s == "-1" {
34 |                 "[]".to_owned()
35 |             } else {
36 |                 format!("[{}]", s)
37 |             }
38 |         })
39 |         .join("")
40 | }
41 |
42 | pub fn node_to_string(node: &Node) -> Option<String> {
43 |     match &node.node {
44 |         Some(n) => node_enum_to_string(n),
45 |         _ => None,
46 |     }
47 | }
48 |
49 | pub fn node_enum_to_string(node: &NodeEnum) -> Option<String> {
50 |     match node {
51 |         NodeEnum::String(s) => Some(s.str.clone()),
52 |         NodeEnum::Integer(i) => Some(i.ival.to_string()),
53 |         NodeEnum::AConst(a) => a.val.as_ref().and_then(|v| match &v.node {
54 |             Some(NodeEnum::String(s)) => Some(format!("'{}'", s.str)),
55 |             Some(NodeEnum::Integer(i)) => Some(i.ival.to_string()),
56 |             _ => None,
57 |         }),
58 |         NodeEnum::FuncCall(f) => {
59 |             let fname = f.funcname.iter().filter_map(node_to_string).join(".");
60 |             let args = f.args.iter().filter_map(node_to_string).join(", ");
61 |             Some(format!("{}({})", fname, args))
62 |         }
63 |         NodeEnum::AExpr(e) => {
64 |             let left = e.lexpr.as_deref().and_then(node_to_string);
65 |             let right = e.rexpr.as_deref().and_then(node_to_string);
66 |             let op = e.name.iter().filter_map(node_to_string).join(".");
67 |             let op_kind = e.kind();
68 |             match (left, right) {
69 |                 (Some(l), Some(r)) => match e.kind() {
70 |                     AExprKind::AexprOp => Some(format!("{} {} {}", l, op, r)),
71 |                     AExprKind::AexprOpAll => Some(format!("{} {} ALL ({})", l, op, r)),
72 |                     AExprKind::AexprOpAny => Some(format!("{} {} ANY ({})", l, op, r)),
73 |                     _ => panic!("Unsupported AExprKind: {:?}", op_kind),
74 |                 },
75 |                 (l, r) => panic!("Expected left and right to exist. Got {:?} and {:?}", l, r),
76 |             }
77 |         }
78 |         NodeEnum::TypeCast(c) => {
79 |             let arg = c.arg.as_deref().and_then(node_to_string);
80 |             let typname = c
81 |                 .type_name
82 |                 .as_ref()
83 |                 .map(|t| t.names.iter().filter_map(node_to_string).join("."));
84 |             match (arg, typname) {
85 |                 (Some(a), Some(t)) => Some(format!("{}::{}", a, t)),
86 |                 _ => None,
87 |             }
88 |         }
89 |         NodeEnum::TypeName(t) => Some(type_name_to_string(t)),
90 |         NodeEnum::ColumnRef(c) => {
91 |             let fields = c.fields.iter().filter_map(node_to_string).join(",");
92 |             Some(fields)
93 |         }
94 |         NodeEnum::SqlvalueFunction(f) => match f.op() {
95 |             SqlValueFunctionOp::SvfopCurrentUser => Some("CURRENT_USER".to_owned()),
96 |             SqlValueFunctionOp::SvfopCurrentRole => Some("CURRENT_ROLE".to_owned()),
97 |             op => unimplemented!("Unsupported SqlValueFunctionOp: {:?}", op),
98 |         },
99 |         NodeEnum::AArrayExpr(a) => {
100 |             let elements = a.elements.iter().filter_map(node_to_string).join(",");
101 |             Some(format!("ARRAY [{}]", elements))
102 |         }
103 |         NodeEnum::RoleSpec(r) => match r.roletype() {
104 |             RoleSpecType::RolespecCstring => Some(r.rolename.clone()),
105 |             RoleSpecType::RolespecCurrentUser => Some("CURRENT_USER".to_owned()),
106 |             RoleSpecType::RolespecSessionUser => Some("SESSION_USER".to_owned()),
107 |             RoleSpecType::RolespecPublic => None,
108 |             RoleSpecType::Undefined => None,
109 |         },
110 |         _ => None,
111 |     }
112 | }
113 |
--------------------------------------------------------------------------------
/src/parser/utils/parsec.rs:
--------------------------------------------------------------------------------
1 | use std::collections::BTreeSet;
2 |
3 | use nom::{
4 |     branch::alt,
5 |     bytes::complete::tag,
6 |     character::complete::{alpha1, alphanumeric1, multispace0},
7 |     combinator::{opt, recognize},
8 |     error::ParseError,
9 |     multi::{many0_count, separated_list1},
10 |     sequence::{delimited, pair, tuple},
11 |     AsChar, Compare, IResult, InputLength, InputTake, InputTakeAtPosition, Parser,
12 | };
13 |
14 | use crate::parser::SinglePriv;
15 |
16 | pub fn parse_single_priv(input: &str) -> IResult<&str, SinglePriv> {
17 |     tuple((identifier, opt(parse_fields)))(input).map(|(remaining, (name, fields))| {
18 |         let cols: BTreeSet<String> = fields
19 |             .unwrap_or_default()
20 |             .into_iter()
21 |             .map(Into::into)
22 |             .collect();
23 |
24 |         (
25 |             remaining,
26 |             SinglePriv {
27 |                 name: name.into(),
28 |                 cols,
29 |             },
30 |         )
31 |     })
32 | }
33 |
34 | /// parse "(a, b, c)" to vec!["a", "b", "c"]
35 | pub fn parse_fields(input: &str) -> IResult<&str, Vec<&str>> {
36 |     delb("(", ")", separated_list1(dels(tag(",")), identifier))(input)
37 | }
38 |
39 | /// delimited by spaces
40 | pub fn dels<I, O, E>(parser: impl Parser<I, O, E>) -> impl FnMut(I) -> IResult<I, O, E>
41 | where
42 |     E: ParseError<I>,
43 |     I: InputTakeAtPosition,
44 |     <I as InputTakeAtPosition>::Item: AsChar + Clone,
45 | {
46 |     delimited(multispace0, parser, multispace0)
47 | }
48 |
49 | /// delimited by brackets
50 | pub fn delb<I, T, O, E>(
51 |     skip1: T,
52 |     skip2: T,
53 |     parser: impl Parser<I, O, E>,
54 | ) -> impl FnMut(I) -> IResult<I, O, E>
55 | where
56 |     E: ParseError<I>,
57 |     I: InputTake + InputTakeAtPosition + Compare<T>,
58 |     T: InputLength + Clone,
59 |     <I as InputTakeAtPosition>::Item: AsChar + Clone,
60 | {
61 |     delimited(dels(tag(skip1)), parser, dels(tag(skip2)))
62 | }
63 |
64 | /// parse identifier
65 | pub fn identifier(input: &str) -> IResult<&str, &str> {
66 |     recognize(pair(
67 |         alt((alpha1, tag("_"))),
68 |         many0_count(alt((alphanumeric1, tag("_")))),
69 |     ))(input)
70 | }
71 |
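A quick check of the combinators above — a sketch, assuming the module is reachable at the path shown (it is declared `pub mod parsec` in utils/mod.rs):

    use renovate::parser::utils::parsec::{identifier, parse_fields}; // hypothetical path

    fn main() {
        // identifier stops at the first character outside [A-Za-z0-9_]
        assert_eq!(identifier("user_id,rest"), Ok((",rest", "user_id")));
        // parse_fields strips the brackets and splits on commas, tolerating spaces
        assert_eq!(parse_fields("( a, b ,c )"), Ok(("", vec!["a", "b", "c"])));
    }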
/src/parser/view.rs: -------------------------------------------------------------------------------- 1 | use super::{SchemaId, View}; 2 | use crate::NodeItem; 3 | use pg_query::{protobuf::ViewStmt, NodeEnum, NodeRef}; 4 | 5 | impl NodeItem for View { 6 | type Inner = ViewStmt; 7 | fn id(&self) -> String { 8 | self.id.to_string() 9 | } 10 | 11 | fn type_name(&self) -> &'static str { 12 | "view" 13 | } 14 | 15 | fn node(&self) -> &NodeEnum { 16 | &self.node 17 | } 18 | 19 | fn inner(&self) -> anyhow::Result<&Self::Inner> { 20 | match &self.node { 21 | NodeEnum::ViewStmt(stmt) => Ok(stmt), 22 | _ => anyhow::bail!("not a create view statement"), 23 | } 24 | } 25 | 26 | fn revert(&self) -> anyhow::Result<NodeEnum> { 27 | let sql = format!("DROP VIEW {}", self.id); 28 | let parsed = pg_query::parse(&sql)?; 29 | let node = parsed.protobuf.nodes()[0].0; 30 | match node { 31 | NodeRef::DropStmt(stmt) => Ok(NodeEnum::DropStmt(stmt.clone())), 32 | _ => anyhow::bail!("not a drop view statement"), 33 | } 34 | } 35 | } 36 | 37 | impl TryFrom<&ViewStmt> for View { 38 | type Error = anyhow::Error; 39 | fn try_from(stmt: &ViewStmt) -> Result<Self, Self::Error> { 40 | let id = get_view_id(stmt); 41 | let node = NodeEnum::ViewStmt(Box::new(stmt.clone())); 42 | Ok(Self { id, node }) 43 | } 44 | } 45 | 46 | fn get_view_id(stmt: &ViewStmt) -> SchemaId { 47 | assert!(stmt.view.is_some()); 48 | stmt.view.as_ref().unwrap().into() 49 | } 50 | 51 | #[cfg(test)] 52 | mod tests { 53 | use super::*; 54 | use crate::{Differ, MigrationPlanner}; 55 | 56 | #[test] 57 | fn view_should_parse() { 58 | let sql = "CREATE VIEW foo AS SELECT 1"; 59 | let view: View = sql.parse().unwrap(); 60 | assert_eq!(view.id.to_string(), "public.foo"); 61 | } 62 | 63 | #[test] 64 | fn test_view_migration() { 65 | let sql1 = "CREATE VIEW foo AS SELECT 1"; 66 | let sql2 = "CREATE VIEW foo AS SELECT 2"; 67 | let old: View = sql1.parse().unwrap(); 68 | let new: View = sql2.parse().unwrap(); 69 | let diff = old.diff(&new).unwrap().unwrap(); 70 | let migrations = diff.plan().unwrap(); 71 | assert_eq!(migrations.len(), 2); 72 | assert_eq!(migrations[0], "DROP VIEW public.foo"); 73 | assert_eq!(migrations[1], "CREATE VIEW foo AS SELECT 2"); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/repo/applier.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | 3 | use crate::{utils::load_config, DatabaseRepo, DatabaseSchema, SchemaLoader, SqlSaver}; 4 | use anyhow::{bail, Result}; 5 | use sqlx::{Connection, Executor, PgConnection}; 6 | use tokio::runtime::Runtime; 7 | use url::Url; 8 | use uuid::Uuid; 9 | 10 | impl DatabaseRepo { 11 | pub async fn load_sql_string(&self, remote: bool) -> Result<String> { 12 | let url = if remote { &self.remote_url } else { &self.url }; 13 | 14 | let output = async_process::Command::new("pg_dump") 15 | .arg("-s") 16 | .arg(url) 17 | .output() 18 | .await?; 19 | 20 | if !output.status.success() { 21 | bail!("{}", String::from_utf8(output.stderr)?); 22 | } 23 | 24 | let sql = String::from_utf8(output.stdout)?; 25 | Ok(sql) 26 | } 27 | pub async fn normalize(&self, sql: &str) -> Result<DatabaseSchema> { 28 | let tdb = TmpDb::new(self.server_url()?, sql).await?; 29 | let repo = DatabaseRepo::new_with(tdb.url()); 30 | repo.load().await 31 | } 32 | 33 | /// Apply the migration plan to the remote database server.
34 | pub async fn apply(&self, plan: Vec<String>, remote: bool) -> Result<()> { 35 | if !remote { 36 | self.do_apply(&plan, &self.url).await?; 37 | } else if self.url != self.remote_url { 38 | self.do_apply(&plan, &self.remote_url).await?; 39 | } 40 | Ok(()) 41 | } 42 | 43 | /// Fetch the most recent schema from the remote database server. 44 | pub async fn fetch(&self) -> Result<DatabaseSchema> { 45 | let schema = self.load().await?; 46 | let config = load_config().await?; 47 | schema.save(&config.output).await?; 48 | Ok(schema) 49 | } 50 | 51 | /// create & init local database if not exists 52 | pub async fn init_local_database(&self) -> Result<()> { 53 | let ret = PgConnection::connect(&self.url).await; 54 | match ret { 55 | Ok(_) => Ok(()), 56 | Err(_) => { 57 | let server_url = self.server_url()?; 58 | let sql = if self.url != self.remote_url { 59 | self.load_sql_string(true).await? 60 | } else { 61 | "".to_owned() 62 | }; 63 | init_database(&server_url, &self.db_name()?, &sql).await?; 64 | 65 | Ok(()) 66 | } 67 | } 68 | } 69 | 70 | /// drop database 71 | pub async fn drop_database(&self) -> Result<()> { 72 | drop_database(&self.server_url()?, &self.db_name()?).await 73 | } 74 | 75 | async fn do_apply(&self, plan: &[String], url: &str) -> Result<()> { 76 | let mut conn = PgConnection::connect(url).await?; 77 | let mut tx = conn.begin().await?; 78 | 79 | for sql in plan { 80 | tx.execute(sql.as_str()).await?; 81 | } 82 | tx.commit().await?; 83 | 84 | self.fetch().await?; 85 | Ok(()) 86 | } 87 | 88 | fn server_url(&self) -> Result<String> { 89 | let mut url = Url::parse(&self.url)?; 90 | url.set_path(""); 91 | Ok(url.to_string()) 92 | } 93 | 94 | fn db_name(&self) -> Result<String> { 95 | let url = Url::parse(&self.url)?; 96 | let path = url.path(); 97 | let db_name = path.trim_start_matches('/'); 98 | Ok(db_name.to_string()) 99 | } 100 | } 101 | 102 | #[derive(Debug)] 103 | pub struct TmpDb { 104 | pub server_url: String, 105 | pub dbname: String, 106 | } 107 | 108 | impl TmpDb { 109 | pub async fn new(server_url: String, sql: &str) -> Result<Self> { 110 | let dbname = format!("tmpdb_{}", Uuid::new_v4()); 111 | init_database(&server_url, &dbname, sql).await?; 112 | Ok(Self { server_url, dbname }) 113 | } 114 | 115 | pub fn server_url(&self) -> String { 116 | self.server_url.clone() 117 | } 118 | 119 | pub fn url(&self) -> String { 120 | format!("{}/{}", self.server_url, self.dbname) 121 | } 122 | } 123 | 124 | impl Drop for TmpDb { 125 | fn drop(&mut self) { 126 | let server_url = self.server_url(); 127 | let dbname = self.dbname.clone(); 128 | thread::spawn(move || { 129 | let rt = Runtime::new().unwrap(); 130 | rt.block_on(async move { 131 | drop_database(&server_url, &dbname).await.unwrap(); 132 | }); 133 | }) 134 | .join() 135 | .expect("failed to drop database"); 136 | } 137 | } 138 | 139 | async fn init_database(server_url: &str, dbname: &str, sql: &str) -> Result<()> { 140 | // create database dbname 141 | // use server url to create database 142 | let mut conn = PgConnection::connect(server_url).await?; 143 | conn.execute(format!(r#"CREATE DATABASE "{}""#, dbname).as_str()) 144 | .await?; 145 | 146 | // now connect to test database for migration 147 | let url = format!("{}/{}", server_url, dbname); 148 | let mut conn = PgConnection::connect(&url).await?; 149 | let mut tx = conn.begin().await?; 150 | tx.execute(sql).await?; 151 | tx.commit().await?; 152 | Ok(()) 153 | } 154 | 155 | async fn drop_database(server_url: &str, dbname: &str) -> Result<()> { 156 | let mut conn = PgConnection::connect(server_url).await?; 157 | // terminate existing connections 158 | sqlx::query(&format!(r#"SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE pid <> pg_backend_pid() AND datname = '{}'"#, dbname)) 159 | .execute(&mut conn) 160 | .await 161 | .expect("Terminate all other connections"); 162 | conn.execute(format!(r#"DROP DATABASE "{}""#, dbname).as_str()) 163 | .await?; 164 | 165 | Ok(()) 166 | } 167 | --------------------------------------------------------------------------------
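
A hedged sketch of the TmpDb lifecycle above; the server URL is illustrative, and the `SchemaLoader` trait must be in scope for `load`:

    async fn demo() -> anyhow::Result<()> {
        // create a throwaway database seeded with some schema
        let tdb = TmpDb::new(
            "postgres://postgres:postgres@localhost:5432".to_owned(),
            "CREATE TABLE t (id int)",
        )
        .await?;

        // point a repo at it and read the normalized schema back out
        let repo = DatabaseRepo::new_with(tdb.url());
        let _schema = repo.load().await?;

        // when `tdb` goes out of scope, Drop removes the temporary database
        Ok(())
    }
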
/src/repo/git.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::unwrap_used)] 2 | use git2::{Error, IndexAddOption, Object, ObjectType, Oid, Repository, Signature}; 3 | use std::{ 4 | env, fmt, fs, 5 | path::{Path, PathBuf}, 6 | sync::Arc, 7 | }; 8 | 9 | pub struct GitRepo(Arc<Repository>); 10 | 11 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 12 | pub enum BumpVersion { 13 | Major, 14 | Minor, 15 | Patch, 16 | } 17 | 18 | const IGNORE_RULES: &str = "dist\nnode_modules\n"; 19 | 20 | impl GitRepo { 21 | pub fn init(path: impl AsRef<Path>) -> Result<Self, Error> { 22 | let path = path.as_ref(); 23 | let repo = if let Ok(repo) = Repository::discover(path) { 24 | repo 25 | } else { 26 | Repository::init(path)? 27 | }; 28 | 29 | let ignore = path.join(".gitignore"); 30 | if !ignore.exists() { 31 | fs::write(ignore, IGNORE_RULES).expect("should write"); 32 | } 33 | Ok(GitRepo(Arc::new(repo))) 34 | } 35 | 36 | pub fn open(path: impl AsRef<Path>) -> Result<Self, Error> { 37 | let repo = Repository::discover(path)?; 38 | Ok(GitRepo(Arc::new(repo))) 39 | } 40 | 41 | pub fn is_current_dir(&self) -> bool { 42 | let path = env::current_dir().unwrap(); 43 | self.get_root_path() == path 44 | } 45 | 46 | pub fn get_root_path(&self) -> &Path { 47 | self.0.path().parent().unwrap() 48 | } 49 | 50 | pub fn get_relative_dir(&self) -> PathBuf { 51 | let path = env::current_dir().unwrap(); 52 | let repo_path = self.get_root_path(); 53 | path.strip_prefix(repo_path).unwrap().to_path_buf() 54 | } 55 | 56 | pub fn is_dirty(&self) -> bool { 57 | // let mut status = Default::default(); 58 | let statuses = self.0.statuses(None).unwrap(); 59 | let mut filtered = statuses 60 | .iter() 61 | .filter(|s| !s.status().is_ignored()) 62 | .peekable(); 63 | 64 | filtered.peek().is_some() 65 | } 66 | 67 | pub fn commit(&self, message: impl AsRef<str>) -> Result<Oid, Error> { 68 | let mut index = self.0.index()?; 69 | index.add_all(["*"].iter(), IndexAddOption::DEFAULT, None)?; 70 | let oid = index.write_tree()?; 71 | index.write()?; 72 | let sig = Signature::now("Bot", "bot@renovate.tools")?; 73 | let parent_commit = self 74 | .find_last_commit() 75 | .ok() 76 | .and_then(|o| o.into_commit().ok()); 77 | 78 | let parents = if let Some(c) = parent_commit.as_ref() { 79 | vec![c] 80 | } else { 81 | vec![] 82 | }; 83 | 84 | let tree = self.0.find_tree(oid)?; 85 | let msg = format!("[renovate bot] {}", message.as_ref()); 86 | self.0 87 | .commit(Some("HEAD"), &sig, &sig, &msg, &tree, &parents) 88 | } 89 | 90 | pub fn tag(&self, name: impl AsRef<str>, message: impl AsRef<str>) -> Result<Oid, Error> { 91 | let sig = Signature::now("Bot", "bot@renovate.tools")?; 92 | let head_object = self.find_last_commit()?; 93 | self.0 94 | .tag(name.as_ref(), &head_object, &sig, message.as_ref(), false) 95 | } 96 | 97 | pub fn get_last_commit_id(&self) -> Result<String, Error> { 98 | let commit = self.find_last_commit()?; 99 | let sid = commit.short_id()?.as_str().unwrap().to_owned(); 100 | Ok(sid) 101 | } 102 | 103 | pub fn checkout(&self, refname: &str) -> Result<String, Error> { 104 | let old_ref = self.0.head()?.shorthand().unwrap().to_owned(); 105 | let (object, reference) = self.0.revparse_ext(refname)?;
106 | self.0.checkout_tree(&object, None)?; 107 | 108 | match reference { 109 | // gref is an actual reference like branches or tags 110 | Some(gref) => self.0.set_head(gref.name().unwrap())?, 111 | // this is a commit, not a reference 112 | None => self.0.set_head_detached(object.id())?, 113 | }; 114 | 115 | Ok(old_ref) 116 | } 117 | 118 | pub fn find_last_commit(&self) -> Result<Object, Error> { 119 | self.0.head()?.resolve()?.peel(ObjectType::Commit) 120 | } 121 | 122 | pub fn list_tags(&self, n: usize, prefix: Option<String>) -> Result<Vec<String>, Error> { 123 | let tags = self 124 | .0 125 | .tag_names(None)? 126 | .into_iter() 127 | .rev() 128 | .filter(|t| t.is_some()) 129 | .map(|t| t.unwrap().to_owned()) 130 | .filter(|t| { 131 | if let Some(p) = prefix.as_ref() { 132 | t.starts_with(p) 133 | } else { 134 | true 135 | } 136 | }) 137 | .take(n) 138 | .collect(); 139 | 140 | Ok(tags) 141 | } 142 | 143 | pub fn get_prefix_name(&self) -> Option<String> { 144 | if !self.is_current_dir() { 145 | let path = env::current_dir().ok(); 146 | path.as_ref() 147 | .and_then(|p| p.file_name()) 148 | .and_then(|s| s.to_str()) 149 | .map(|s| s.to_owned()) 150 | } else { 151 | None 152 | } 153 | } 154 | } 155 | 156 | impl fmt::Debug for GitRepo { 157 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 158 | write!(f, "GitRepo({:?})", self.get_root_path()) 159 | } 160 | } 161 | 162 | #[cfg(test)] 163 | mod tests { 164 | use super::*; 165 | use tokio::fs; 166 | 167 | #[tokio::test] 168 | async fn git_repo_should_work() { 169 | let root = tempfile::tempdir().unwrap(); 170 | let root = root.path(); 171 | let repo = GitRepo::init(root).unwrap(); 172 | fs::write(root.join("file.txt"), "Hello World") 173 | .await 174 | .unwrap(); 175 | repo.commit("Initial commit").unwrap(); 176 | repo.tag("v1.0.0", "Initial tag").unwrap(); 177 | let id = repo.get_last_commit_id().unwrap(); 178 | assert_eq!(id.len(), 7); 179 | fs::write(root.join("file.txt"), "Hello Tyr").await.unwrap(); 180 | repo.commit("2nd commit").unwrap(); 181 | repo.tag("v2.0.0", "2nd tag").unwrap(); 182 | let old_ref = repo.checkout("v1.0.0").unwrap(); 183 | assert_eq!(old_ref, "master"); 184 | repo.checkout(&old_ref).unwrap(); 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /src/repo/loader.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | map_insert_relation, map_insert_schema, 3 | parser::{ 4 | AlterTable, AlterTableAction, CompositeType, EnumType, Function, MatView, Privilege, 5 | Sequence, Table, TableConstraint, TableIndex, TableOwner, TablePolicy, TableRls, 6 | TableSequence, Trigger, View, 7 | }, 8 | utils::ignore_file, 9 | DatabaseRepo, DatabaseSchema, LocalRepo, SchemaLoader, SqlLoader, 10 | }; 11 | use anyhow::{Context, Result}; 12 | use async_trait::async_trait; 13 | use glob::glob; 14 | use pg_query::NodeRef; 15 | use std::path::PathBuf; 16 | use tokio::fs; 17 | use tracing::info; 18 | 19 | #[async_trait] 20 | impl SchemaLoader for LocalRepo { 21 | async fn load(&self) -> Result<DatabaseSchema> { 22 | let sql = self.load_sql().await?; 23 | SqlLoader(sql).load().await 24 | } 25 | 26 | async fn load_sql(&self) -> Result<String> { 27 | let files = self.files()?; 28 | // concatenate all the sql files into one string 29 | let mut sql = String::with_capacity(16 * 1024); 30 | for file in files { 31 | let content = fs::read_to_string(file.as_path()) 32 | .await 33 | .with_context(|| format!("Failed to read file: {:?}", file))?; 34 | sql.push_str(&content); 35 | } 36 | 37 | // parse the sql to see if the syntax is correct 38 | let ret = pg_query::parse(&sql)?; 39 | let sql = ret.deparse()?; 40 | Ok(sql) 41 | } 42 | } 43 | 44 | #[async_trait] 45 | impl SchemaLoader for DatabaseRepo { 46 | /// run pg_dump as an async process and get the output sql 47 | async fn load(&self) -> anyhow::Result<DatabaseSchema> { 48 | let sql = self.load_sql().await?; 49 | SqlLoader(sql).load().await 50 | } 51 | 52 | async fn load_sql(&self) -> anyhow::Result<String> { 53 | #[cfg(feature = "cli-test")] 54 | self.init_local_database().await?; 55 | self.load_sql_string(false).await 56 | } 57 | } 58 | 59 | #[async_trait] 60 | impl SchemaLoader for SqlLoader { 61 | async fn load(&self) -> Result<DatabaseSchema> { 62 | let result = pg_query::parse(&self.0).with_context(|| "Failed to parse SQL statements")?; 63 | let nodes = result.protobuf.nodes(); 64 | let mut data = DatabaseSchema::default(); 65 | 66 | for (node, _, _) in nodes { 67 | match node { 68 | NodeRef::CompositeTypeStmt(stmt) => { 69 | let item: CompositeType = stmt.try_into()?; 70 | map_insert_schema!(data.composite_types, item); 71 | } 72 | NodeRef::CreateEnumStmt(stmt) => { 73 | let item: EnumType = stmt.try_into()?; 74 | map_insert_schema!(data.enum_types, item); 75 | } 76 | NodeRef::CreateStmt(stmt) => { 77 | let item: Table = stmt.try_into()?; 78 | map_insert_schema!(data.tables, item); 79 | } 80 | NodeRef::ViewStmt(stmt) => { 81 | let item: View = stmt.try_into()?; 82 | map_insert_schema!(data.views, item); 83 | } 84 | NodeRef::CreateTableAsStmt(stmt) => { 85 | let item: MatView = stmt.try_into()?; 86 | map_insert_schema!(data.mviews, item); 87 | } 88 | NodeRef::CreateFunctionStmt(stmt) => { 89 | let item: Function = stmt.try_into()?; 90 | map_insert_schema!(data.functions, item); 91 | } 92 | NodeRef::CreateTrigStmt(stmt) => { 93 | let item: Trigger = stmt.try_into()?; 94 | map_insert_relation!(data.table_triggers, item); 95 | } 96 | NodeRef::AlterTableStmt(stmt) => { 97 | let item: AlterTable = stmt.try_into()?; 98 | match &item.action { 99 | AlterTableAction::Constraint(_) => { 100 | let constraint: TableConstraint = item.try_into()?; 101 | map_insert_relation!(data.table_constraints, constraint); 102 | } 103 | AlterTableAction::Sequence(_) => { 104 | let sequence: TableSequence = item.try_into()?; 105 | map_insert_relation!(data.table_sequences, sequence); 106 | } 107 | AlterTableAction::Rls => { 108 | let rls: TableRls = item.try_into()?; 109 | data.table_rls.insert(rls.id.clone(), rls); 110 | } 111 | AlterTableAction::Owner(_) => { 112 | let owner: TableOwner = item.try_into()?; 113 | data.table_owners.insert(owner.id.clone(), owner); 114 | } 115 | _ => { 116 | info!("ignore alter table action: {:?}", item.action); 117 | } 118 | } 119 | } 120 | NodeRef::IndexStmt(index) => { 121 | let item: TableIndex = index.try_into()?; 122 | map_insert_relation!(data.table_indexes, item); 123 | } 124 | NodeRef::GrantStmt(grant) => { 125 | let item: Privilege = grant.try_into()?; 126 | data.privileges 127 | .entry(item.id.clone()) 128 | .or_default() 129 | .insert(item); 130 | } 131 | NodeRef::CommentStmt(_comment) => { 132 | info!("ignore comment"); 133 | } 134 | NodeRef::CreateExtensionStmt(_ext) => { 135 | info!("TODO: extension"); 136 | } 137 | NodeRef::CreateSchemaStmt(_schema) => { 138 | info!("ignore schema creation statement since we already have the schema name"); 139 | } 140 | NodeRef::CreateSeqStmt(seq) => { 141 | let item: Sequence = seq.try_into()?; 142 | map_insert_schema!(data.sequences, item); 143 | } 144 | NodeRef::CreateForeignTableStmt(_table) => { 145 | info!("TODO: foreign table"); 146 | } 147 | NodeRef::CreateForeignServerStmt(_server) => { 148 | info!("TODO: foreign server"); 149 | } 150 | NodeRef::CreateFdwStmt(_fdw) => { 151 | info!("TODO: fdw"); 152 | } 153 | NodeRef::CreatePolicyStmt(policy) => { 154 | let item: TablePolicy = policy.try_into()?; 155 | map_insert_relation!(data.table_policies, item); 156 | } 157 | _ => { 158 | info!("unhandled node: {:?}", node.deparse()); 159 | } 160 | } 161 | } 162 | data.update_schema_names(); 163 | Ok(data) 164 | } 165 | 166 | async fn load_sql(&self) -> anyhow::Result<String> { 167 | Ok(self.0.clone()) 168 | } 169 | } 170 | 171 | impl LocalRepo { 172 | // load all the .sql files in subdirectories except the "_meta" directory 173 | pub fn files(&self) -> Result<Vec<PathBuf>> { 174 | let glob_path = self.path.join("**/*.sql"); 175 | let mut files = glob(glob_path.as_os_str().to_str().unwrap())? 176 | .filter_map(Result::ok) 177 | .filter(|p| ignore_file(p, "_")) 178 | .collect::<Vec<_>>(); 179 | 180 | files.sort(); 181 | Ok(files) 182 | } 183 | } 184 | --------------------------------------------------------------------------------
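
A sketch of driving SqlLoader directly — the SQL is illustrative, `SqlLoader::new` is defined in src/repo/mod.rs below, and the DatabaseSchema fields are assumed public, as their use in the loader above suggests:

    async fn demo() -> anyhow::Result<()> {
        let loader = SqlLoader::new("CREATE TABLE public.todos (id serial PRIMARY KEY, title text)");
        let schema = loader.load().await?;
        // the CREATE TABLE statement above lands in `schema.tables`
        assert!(!schema.tables.is_empty());
        Ok(())
    }
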
/src/repo/mod.rs: -------------------------------------------------------------------------------- 1 | mod applier; 2 | pub mod git; 3 | mod loader; 4 | mod saver; 5 | 6 | use crate::{DatabaseRepo, LocalRepo, RenovateConfig, SqlLoader}; 7 | use std::path::PathBuf; 8 | 9 | impl LocalRepo { 10 | pub fn new(path: impl Into<PathBuf>) -> Self { 11 | Self { path: path.into() } 12 | } 13 | } 14 | 15 | impl DatabaseRepo { 16 | pub fn new(config: &RenovateConfig) -> Self { 17 | Self { 18 | url: config.url.clone(), 19 | remote_url: config.remote_url.clone(), 20 | } 21 | } 22 | 23 | pub fn new_with(url: String) -> Self { 24 | Self { 25 | url: url.clone(), 26 | remote_url: url, 27 | } 28 | } 29 | } 30 | 31 | impl Default for LocalRepo { 32 | fn default() -> Self { 33 | Self::new(".") 34 | } 35 | } 36 | 37 | impl SqlLoader { 38 | pub fn new(sql: impl Into<String>) -> Self { 39 | Self(sql.into()) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/types/differ.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | utils::{create_diff, create_diff_added, create_diff_removed}, 3 | Differ, MigrationPlanner, NodeDiff, NodeItem, 4 | }; 5 | 6 | impl<T> Differ for T 7 | where 8 | T: PartialEq + Clone + NodeItem + ToString, 9 | NodeDiff<T>: MigrationPlanner, 10 | { 11 | type Diff = NodeDiff<T>; 12 | fn diff(&self, new: &Self) -> anyhow::Result<Option<Self::Diff>> { 13 | let old_id = self.id(); 14 | let new_id = new.id(); 15 | if old_id != new_id { 16 | anyhow::bail!("can't diff {} and {}", old_id, new_id); 17 | } 18 | 19 | let self_str = self.to_string(); 20 | let new_str = new.to_string(); 21 | if self_str != new_str { 22 | let diff = create_diff(self, new)?; 23 | Ok(Some(NodeDiff { 24 | old: Some(self.clone()), 25 | new: Some(new.clone()), 26 | diff, 27 | })) 28 | } else { 29 | Ok(None) 30 | } 31 | } 32 | } 33 | 34 | impl<T> NodeDiff<T> 35 | where 36 | T: NodeItem, 37 | { 38 | pub fn with_old(old: T) -> Self { 39 | let diff = create_diff_removed(&old).unwrap(); 40 | Self { 41 | old: Some(old), 42 | new: None, 43 | diff, 44 | } 45 | } 46 | 47 | pub fn with_new(new: T) -> Self { 48 | let diff = create_diff_added(&new).unwrap(); 49 | Self { 50 | old: None, 51 | new: Some(new), 52 | diff, 53 | } 54 | } 55 | } 56 | --------------------------------------------------------------------------------
/src/types/mod.rs: -------------------------------------------------------------------------------- 1 | mod differ; 2 | mod node_delta; 3 | mod relation_id; 4 | mod schema_id; 5 | -------------------------------------------------------------------------------- /src/types/node_delta.rs: -------------------------------------------------------------------------------- 1 | use crate::{DeltaItem, NodeDelta}; 2 | use std::{ 3 | collections::{BTreeMap, BTreeSet}, 4 | fmt, 5 | }; 6 | 7 | impl<T, Item> NodeDelta<T> 8 | where 9 | T: Clone + Ord + DeltaItem<Item = Item> + fmt::Debug, 10 | { 11 | pub fn create(old: BTreeMap<&String, &T>, new: BTreeMap<&String, &T>) -> NodeDelta<T> { 12 | let mut delta = NodeDelta::default(); 13 | 14 | let old_keys: BTreeSet<_> = old.keys().collect(); 15 | let new_keys: BTreeSet<_> = new.keys().collect(); 16 | let added = new_keys.difference(&old_keys); 17 | let removed = old_keys.difference(&new_keys); 18 | let might_changed = old_keys.intersection(&new_keys); 19 | 20 | for key in added { 21 | delta.added.insert(new[*key].clone()); 22 | } 23 | 24 | for key in removed { 25 | delta.removed.insert(old[*key].clone()); 26 | } 27 | 28 | for key in might_changed { 29 | let old_priv = old[*key]; 30 | let new_priv = new[*key]; 31 | if old_priv.to_string() != new_priv.to_string() { 32 | delta.changed.insert((old_priv.clone(), new_priv.clone())); 33 | } 34 | } 35 | 36 | delta 37 | } 38 | 39 | pub fn plan(self, item: &Item) -> anyhow::Result<Vec<String>> { 40 | let mut migrations = Vec::new(); 41 | 42 | let mut is_rename = false; 43 | // check if it is a case for rename 44 | if self.added.len() == 1 && self.removed.len() == 1 { 45 | let added = self.added.iter().next().unwrap(); 46 | let removed = self.removed.iter().next().unwrap(); 47 | let result = removed.to_owned().rename(item, added.to_owned())?; 48 | if !result.is_empty() { 49 | migrations.extend(result); 50 | is_rename = true; 51 | } 52 | } 53 | 54 | if !is_rename { 55 | for removed in self.removed { 56 | let sqls = removed.drop(item)?; 57 | migrations.extend(sqls); 58 | } 59 | 60 | for added in self.added { 61 | let sqls = added.create(item)?; 62 | migrations.extend(sqls); 63 | } 64 | } 65 | 66 | for (v1, v2) in self.changed { 67 | let sqls = v1.alter(item, v2)?; 68 | migrations.extend(sqls); 69 | } 70 | Ok(migrations) 71 | } 72 | } 73 | 74 | impl<T> Default for NodeDelta<T> { 75 | fn default() -> Self { 76 | Self { 77 | added: BTreeSet::new(), 78 | removed: BTreeSet::new(), 79 | changed: BTreeSet::new(), 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /src/types/relation_id.rs: -------------------------------------------------------------------------------- 1 | use crate::parser::{RelationId, SchemaId}; 2 | 3 | impl RelationId { 4 | pub fn new( 5 | schema: impl Into<String>, 6 | relation: impl Into<String>, 7 | name: impl Into<String>, 8 | ) -> Self { 9 | Self { 10 | schema_id: SchemaId::new(schema, relation), 11 | name: name.into(), 12 | } 13 | } 14 | 15 | pub fn new_with(schema_id: SchemaId, name: impl Into<String>) -> Self { 16 | Self { 17 | schema_id, 18 | name: name.into(), 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /src/types/schema_id.rs: -------------------------------------------------------------------------------- 1 | use crate::parser::SchemaId; 2 | use pg_query::protobuf::RangeVar; 3 | use std::{fmt, str::FromStr}; 4 | 5 | impl SchemaId { 6 | pub fn new(schema: impl Into<String>, name: impl Into<String>) -> Self { 7 | Self { 8 | schema: schema.into(), 9 | name: name.into(), 10 | } 11 | } 12 | 13 | pub fn new_with(names: &[&str]) -> Self { 14 | if names.len() >= 2 { 15 | Self { 16 | schema: names[0].to_string(), 17 | name: names[1..].join("."), 18 | } 19 | } else { 20 | Self { 21 | schema: "public".to_string(), 22 | name: names[0].to_string(), 23 | } 24 | } 25 | } 26 | } 27 | 28 | impl From<&RangeVar> for SchemaId { 29 | fn from(v: &RangeVar) -> Self { 30 | let schema_name = if v.schemaname.is_empty() { 31 | "public" 32 | } else { 33 | v.schemaname.as_str() 34 | }; 35 | Self::new(schema_name, &v.relname) 36 | } 37 | } 38 | 39 | impl From<Option<&RangeVar>> for SchemaId { 40 | fn from(v: Option<&RangeVar>) -> Self { 41 | assert!(v.is_some()); 42 | v.unwrap().into() 43 | } 44 | } 45 | 46 | impl FromStr for SchemaId { 47 | type Err = anyhow::Error; 48 | fn from_str(s: &str) -> Result<Self, Self::Err> { 49 | let parts: Vec<_> = s.split('.').collect(); 50 | Ok(Self::new_with(&parts)) 51 | } 52 | } 53 | 54 | impl fmt::Display for SchemaId { 55 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 56 | write!(f, "{}.{}", self.schema, self.name) 57 | } 58 | } 59 | --------------------------------------------------------------------------------
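
The FromStr/Display pair gives SchemaId a simple round-trip; for instance:

    let id: SchemaId = "auth.users".parse().unwrap();
    assert_eq!(id.to_string(), "auth.users");

    // unqualified names default to the `public` schema
    let id: SchemaId = "users".parse().unwrap();
    assert_eq!(id.to_string(), "public.users");
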
/src/utils.rs: -------------------------------------------------------------------------------- 1 | use crate::{config::RenovateFormatConfig, NodeItem, RenovateConfig}; 2 | use anyhow::{bail, Result}; 3 | use console::{style, Style}; 4 | use similar::{ChangeTag, TextDiff}; 5 | use std::{ 6 | fmt::{self, Write}, 7 | path::Path, 8 | }; 9 | 10 | struct Line(Option<usize>); 11 | 12 | impl fmt::Display for Line { 13 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 14 | match self.0 { 15 | None => write!(f, " "), 16 | Some(idx) => write!(f, "{:<4}", idx + 1), 17 | } 18 | } 19 | } 20 | 21 | pub fn ignore_file(p: &Path, pat: &str) -> bool { 22 | p.components().all(|c| { 23 | c.as_os_str() 24 | .to_str() 25 | .map(|s| !s.starts_with(pat)) 26 | .unwrap_or(true) 27 | }) 28 | } 29 | 30 | pub fn create_diff<T: NodeItem>(old: &T, new: &T) -> Result<String> { 31 | let format = RenovateFormatConfig::default().into(); 32 | 33 | let old = sqlformat::format(&old.to_string(), &Default::default(), format); 34 | let new = sqlformat::format(&new.to_string(), &Default::default(), format); 35 | 36 | diff_text(&old, &new) 37 | } 38 | 39 | pub fn create_diff_added<T: NodeItem>(new: &T) -> Result<String> { 40 | let format = RenovateFormatConfig::default().into(); 41 | 42 | let old = "".to_string(); 43 | let new = sqlformat::format(&new.to_string(), &Default::default(), format); 44 | 45 | diff_text(&old, &new) 46 | } 47 | 48 | pub fn create_diff_removed<T: NodeItem>(old: &T) -> Result<String> { 49 | let format = RenovateFormatConfig::default().into(); 50 | 51 | let old = sqlformat::format(&old.to_string(), &Default::default(), format); 52 | let new = "".to_string(); 53 | 54 | diff_text(&old, &new) 55 | } 56 | 57 | pub(crate) async fn load_config() -> Result<RenovateConfig> { 58 | let config_file = Path::new("renovate.yml"); 59 | if !config_file.exists() { 60 | bail!("config file renovate.yml not found in current directory"); 61 | } 62 | let config = RenovateConfig::load(config_file).await?; 63 | Ok(config) 64 | } 65 | 66 | /// generate the diff between two strings.
TODO: this is just for console output for now 67 | pub(crate) fn diff_text(text1: &str, text2: &str) -> Result<String> { 68 | let mut output = String::new(); 69 | let diff = TextDiff::from_lines(text1, text2); 70 | 71 | for (idx, group) in diff.grouped_ops(3).iter().enumerate() { 72 | if idx > 0 { 73 | writeln!(&mut output, "{:-^1$}", "-", 80)?; 74 | } 75 | for op in group { 76 | for change in diff.iter_inline_changes(op) { 77 | let (sign, s) = match change.tag() { 78 | ChangeTag::Delete => ("-", Style::new().red()), 79 | ChangeTag::Insert => ("+", Style::new().green()), 80 | ChangeTag::Equal => (" ", Style::new().dim()), 81 | }; 82 | write!( 83 | &mut output, 84 | "{}{} |{}", 85 | style(Line(change.old_index())).dim(), 86 | style(Line(change.new_index())).dim(), 87 | s.apply_to(sign).bold(), 88 | )?; 89 | for (emphasized, value) in change.iter_strings_lossy() { 90 | if emphasized { 91 | write!(&mut output, "{}", s.apply_to(value).underlined().on_black())?; 92 | } else { 93 | write!(&mut output, "{}", s.apply_to(value))?; 94 | } 95 | } 96 | if change.missing_newline() { 97 | writeln!(&mut output)?; 98 | } 99 | } 100 | } 101 | } 102 | 103 | Ok(output) 104 | } 105 | -------------------------------------------------------------------------------- /tests/cli_tests.rs: -------------------------------------------------------------------------------- 1 | #[test] 2 | fn cli_tests() { 3 | trycmd::TestCases::new() 4 | .case("tests/cmd/*.toml") 5 | .case("README.md"); 6 | } 7 | -------------------------------------------------------------------------------- /tests/cmd/help.toml: -------------------------------------------------------------------------------- 1 | bin.name = "renovate" 2 | args = ["help"] 3 | status.code = 0 4 | stdout = """ 5 | renovate [..] 6 | A new way to handle Postgres schema migration. 7 | 8 | USAGE: 9 | renovate [OPTIONS] <SUBCOMMAND> 10 | 11 | OPTIONS: 12 | --drop-on-exit drop database on exit (for testing purpose only) 13 | -h, --help Print help information 14 | -V, --version Print version information 15 | 16 | SUBCOMMANDS: 17 | generate generate something 18 | help Print this message or the help of the given subcommand(s) 19 | schema Schema migration 20 | """ 21 | stderr = "" 22 | -------------------------------------------------------------------------------- /tests/cmd/help_schema.toml: -------------------------------------------------------------------------------- 1 | bin.name = "renovate" 2 | args = ["schema", "help"] 3 | status.code = 0 4 | stdout = """ 5 | renovate-schema[..]
6 | Schema migration 7 | 8 | USAGE: 9 | renovate schema [OPTIONS] <SUBCOMMAND> 10 | 11 | OPTIONS: 12 | --drop-on-exit drop database on exit (for testing purpose only) 13 | -h, --help Print help information 14 | 15 | SUBCOMMANDS: 16 | apply apply the migration plan to the remote database server 17 | fetch fetch the most recent schema from the remote database server 18 | help Print this message or the help of the given subcommand(s) 19 | init init a database migration repo 20 | normalize normalize local schema via a temp local database 21 | plan diff the local change and remote state, then make a migration plan 22 | """ 23 | stderr = "" 24 | -------------------------------------------------------------------------------- /tests/cmd/init.in/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tyrchen/renovate/26c122c47ede75f10cd0c469ac3e089f1d6eb67d/tests/cmd/init.in/.keep -------------------------------------------------------------------------------- /tests/cmd/init.toml: -------------------------------------------------------------------------------- 1 | bin.name = "renovate" 2 | fs.cwd = "init.in" 3 | fs.sandbox = true 4 | args = ["schema", "init", "postgres://postgres:postgres@localhost:5432/_test1234", "--drop-on-exit"] 5 | stdout = """ 6 | Database schema for postgres://postgres:postgres@localhost:5432/_test1234 has successfully dumped into ./_test1234. 7 | """ 8 | stderr = "" 9 | -------------------------------------------------------------------------------- /tests/cmd/normalize.in/renovate.yml: -------------------------------------------------------------------------------- 1 | url: postgres://postgres:postgres@localhost:5432/_test1234 2 | remote_url: postgres://postgres:postgres@localhost:5432/_test1234 3 | output: 4 | layout: normal 5 | path: . 6 | format: 7 | indent: 4 8 | uppercase: true 9 | lines_between_queries: 2 10 | -------------------------------------------------------------------------------- /tests/cmd/normalize.in/test.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE todos ( 2 | id int, 3 | title text, 4 | completed boolean 5 | ); 6 | -------------------------------------------------------------------------------- /tests/cmd/normalize.out/public/04_tables.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE public.todos (id int, title text, completed boolean); 2 | 3 | ALTER TABLE 4 | public.todos OWNER TO postgres; 5 | -------------------------------------------------------------------------------- /tests/cmd/normalize.out/renovate.yml: -------------------------------------------------------------------------------- 1 | url: postgres://postgres:postgres@localhost:5432/_test1234 2 | remote_url: postgres://postgres:postgres@localhost:5432/_test1234 3 | output: 4 | layout: normal 5 | path: .
6 | format: 7 | indent: 4 8 | uppercase: true 9 | lines_between_queries: 2 10 | -------------------------------------------------------------------------------- /tests/cmd/normalize.toml: -------------------------------------------------------------------------------- 1 | bin.name = "renovate" 2 | fs.cwd = "normalize.in" 3 | args = ["schema", "normalize", "--drop-on-exit"] 4 | stdout = "" 5 | stderr = "" 6 | -------------------------------------------------------------------------------- /tests/cmd/plan.in/renovate.yml: -------------------------------------------------------------------------------- 1 | url: postgres://postgres:postgres@localhost:5432/_test1234 2 | remote_url: postgres://postgres:postgres@localhost:5432/_test1234 3 | output: 4 | layout: normal 5 | path: . 6 | format: 7 | indent: 4 8 | uppercase: true 9 | lines_between_queries: 2 10 | -------------------------------------------------------------------------------- /tests/cmd/plan.in/test.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE todos ( 2 | id bigserial PRIMARY KEY NOT NULL, 3 | title text, 4 | completed boolean 5 | ); 6 | -------------------------------------------------------------------------------- /tests/cmd/plan.toml: -------------------------------------------------------------------------------- 1 | bin.name = "renovate" 2 | fs.cwd = "plan.in" 3 | args = ["schema", "plan"] 4 | stdout = """ 5 | The following SQLs will be applied: 6 | 7 | CREATE SCHEMA IF NOT EXISTS public; 8 | CREATE SEQUENCE public.todos_id_seq START 1 INCREMENT 1 NO MINVALUE NO MAXVALUE CACHE 1; 9 | CREATE TABLE public.todos ( 10 | id bigint NOT NULL, 11 | title text, 12 | completed boolean 13 | ); 14 | ALTER TABLE 15 | ONLY public.todos 16 | ALTER COLUMN 17 | id 18 | SET 19 | DEFAULT nextval('public.todos_id_seq' :: regclass); 20 | ALTER TABLE 21 | ONLY public.todos 22 | ADD 23 | CONSTRAINT todos_pkey PRIMARY KEY (id); 24 | ALTER TABLE 25 | public.todos OWNER TO postgres; 26 | ALTER TABLE 27 | public.todos_id_seq OWNER TO postgres; 28 | """ 29 | stderr = "" 30 | --------------------------------------------------------------------------------
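
Tying the pieces together: the plan flow exercised by plan.toml above can also be driven from Rust. A hedged sketch — it assumes `Table` implements `FromStr` and `Differ` the same way `View` does in src/parser/view.rs, and that the paths below are exported from the crate root:

    use renovate::{parser::Table, Differ, MigrationPlanner};

    fn demo() -> anyhow::Result<()> {
        let old: Table = "CREATE TABLE todos (id int)".parse()?;
        let new: Table = "CREATE TABLE todos (id int, title text)".parse()?;

        // `diff` yields None when the two definitions are identical
        if let Some(diff) = old.diff(&new)? {
            for sql in diff.plan()? {
                println!("{}", sql);
            }
        }
        Ok(())
    }
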