├── .github └── workflows │ ├── build.yml │ ├── deploy_page.yml │ └── publish.yml ├── .gitignore ├── .rustfmt.toml ├── Cargo.toml ├── LICENSE ├── Makefile.toml ├── README.MD ├── cliff.toml ├── examples ├── mysql │ ├── main.rs │ └── migrations │ │ ├── m0001_simple.rs │ │ ├── m0002_with_parents.rs │ │ ├── m0003_use_macros.rs │ │ ├── m0004_complex_operation.rs │ │ ├── m0005_reference_complex.rs │ │ └── mod.rs ├── postgres │ ├── main.rs │ └── migrations │ │ ├── m0001_simple.rs │ │ ├── m0002_with_parents.rs │ │ ├── m0003_use_macros.rs │ │ ├── m0004_complex_operation.rs │ │ ├── m0005_reference_complex.rs │ │ └── mod.rs └── sqlite │ ├── main.rs │ └── migrations │ ├── m0001_simple.rs │ ├── m0002_with_parents.rs │ ├── m0003_use_macros.rs │ ├── m0004_complex_operation.rs │ ├── m0005_reference_complex.rs │ └── mod.rs └── src ├── cli.rs ├── error.rs ├── lib.rs ├── macros.rs ├── migration.rs ├── migrator ├── any.rs ├── mod.rs ├── mysql.rs ├── postgres.rs ├── sqlite.rs └── tests.rs └── operation.rs /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | 9 | jobs: 10 | build: 11 | name: Build 12 | strategy: 13 | matrix: 14 | include: 15 | - os: "ubuntu-latest" 16 | rust-version: "stable" 17 | - os: "macos-latest" 18 | rust-version: "stable" 19 | - os: "windows-latest" 20 | rust-version: "stable" 21 | - os: "ubuntu-latest" 22 | rust-version: "beta" 23 | - os: "ubuntu-latest" 24 | rust-version: "nightly" 25 | runs-on: ${{ matrix.os }} 26 | env: 27 | MAKE_FEATURES_FLAG: "--all-features" 28 | defaults: 29 | run: 30 | shell: bash 31 | 32 | steps: 33 | - uses: actions/checkout@v4 34 | - name: Setup rust toolchain 35 | uses: dtolnay/rust-toolchain@master 36 | with: 37 | toolchain: ${{ matrix.rust-version }} 38 | components: rustfmt, clippy 39 | - name: Install cargo make 40 | uses: davidB/rust-cargo-make@v1.10.0 41 | - name: Create env file 42 | uses: iamsauravsharma/create-dotenv@v3.0.0 43 | with: 44 | input-prefix: "MAKE_" 45 | - name: Run tests 46 | run: | 47 | cargo make --env-file=.env full 48 | 49 | run_example: 50 | name: Run Example 51 | runs-on: "ubuntu-latest" 52 | needs: build 53 | env: 54 | SQLITE_DATABASE_URL: "db.sqlite3" 55 | POSTGRES_DATABASE_URL: postgresql://postgres:postgres@127.0.0.1:5432/postgres 56 | MYSQL_DATABASE_URL: mysql://root:mysql@127.0.0.1:3306/default_db 57 | services: 58 | postgres: 59 | image: postgres 60 | env: 61 | POSTGRES_PASSWORD: postgres 62 | ports: 63 | - 5432:5432 64 | options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 65 | mysql: 66 | image: mysql 67 | env: 68 | MYSQL_ROOT_PASSWORD: mysql 69 | MYSQL_DATABASE: default_db 70 | ports: 71 | - 3306:3306 72 | options: --health-cmd="mysqladmin ping" --health-interval=10s --health-timeout=5s --health-retries=3 73 | 74 | steps: 75 | - uses: actions/checkout@v4 76 | - name: Setup rust toolchain 77 | uses: dtolnay/rust-toolchain@nightly 78 | - name: Install cargo make 79 | uses: davidB/rust-cargo-make@v1 80 | - name: Print cli help message 81 | run: | 82 | cargo make run_postgres_example --help 83 | cargo make run_postgres_example apply --help 84 | cargo make run_postgres_example drop --help 85 | cargo make run_postgres_example list --help 86 | cargo make run_postgres_example revert --help 87 | - name: Run postgres example 88 | run: | 89 | cargo make run_postgres_example apply 90 | cargo make run_postgres_example list 91 | cargo make 
run_postgres_example revert --all --force 92 | cargo make run_postgres_example list 93 | cargo make run_postgres_example drop 94 | - name: Run sqlite example 95 | run: | 96 | touch db.sqlite3 97 | cargo make run_sqlite_example apply 98 | cargo make run_sqlite_example list 99 | cargo make run_sqlite_example revert --all --force 100 | cargo make run_sqlite_example list 101 | cargo make run_sqlite_example drop 102 | - name: Run mysql example 103 | run: | 104 | cargo make run_mysql_example apply 105 | cargo make run_mysql_example list 106 | cargo make run_mysql_example revert --all --force 107 | cargo make run_mysql_example list 108 | cargo make run_mysql_example drop 109 | -------------------------------------------------------------------------------- /.github/workflows/deploy_page.yml: -------------------------------------------------------------------------------- 1 | name: Deploy github pages 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | permissions: 9 | contents: write 10 | 11 | jobs: 12 | deploy_page: 13 | name: Deploy Github page 14 | runs-on: ubuntu-latest 15 | env: 16 | MAKE_FEATURES_FLAG: "--all-features" 17 | steps: 18 | - uses: actions/checkout@v4 19 | - name: Setup rust toolchain 20 | uses: dtolnay/rust-toolchain@nightly 21 | - name: Install cargo make 22 | uses: davidB/rust-cargo-make@v1 23 | - name: Create env file 24 | uses: iamsauravsharma/create-dotenv@v3.0.0 25 | with: 26 | input-prefix: "MAKE_" 27 | - name: Generate documentation 28 | run: | 29 | cargo make --env-file=.env rustdoc 30 | - name: Generate index page 31 | run: | 32 | echo "" > target/doc/index.html 33 | - name: Deploy GitHub Page 34 | uses: JamesIves/github-pages-deploy-action@v4 35 | with: 36 | folder: target/doc 37 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | on: 3 | push: 4 | tags: 5 | - "v*" 6 | 7 | permissions: 8 | contents: write 9 | 10 | jobs: 11 | publish_crate: 12 | name: Publish to crates.io 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v4 17 | - name: Setup rust toolchain 18 | uses: dtolnay/rust-toolchain@stable 19 | - name: Publish to crates.io 20 | env: 21 | CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} 22 | run: | 23 | cargo publish --all-features 24 | 25 | publish_release_note: 26 | name: Publish release note 27 | runs-on: ubuntu-latest 28 | needs: publish_crate 29 | 30 | steps: 31 | - uses: actions/checkout@v4 32 | with: 33 | fetch-depth: 0 34 | - name: Generate a changelog 35 | uses: orhun/git-cliff-action@v3 36 | id: git-cliff 37 | with: 38 | config: cliff.toml 39 | args: -vv --current --strip header 40 | env: 41 | OUTPUT: CHANGELOG.md 42 | - name: Create GitHub release 43 | uses: softprops/action-gh-release@v2 44 | with: 45 | body_path: ${{ steps.git-cliff.outputs.changelog }} 46 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Rust related 2 | /target 3 | /Cargo.lock 4 | 5 | # MacOS related 6 | .DS_Store 7 | 8 | # VS code 9 | .vscode 10 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | combine_control_expr = false 2 | condense_wildcard_suffixes = true 3 | edition = "2024" 4 | error_on_line_overflow = true 5 
| error_on_unformatted = true 6 | force_multiline_blocks = true 7 | format_code_in_doc_comments = true 8 | format_macro_matchers = true 9 | format_strings = true 10 | group_imports = "StdExternalCrate" 11 | imports_granularity = "Module" 12 | normalize_comments = true 13 | normalize_doc_attributes = true 14 | reorder_impl_items = true 15 | style_edition = "2024" 16 | unstable_features = true 17 | use_field_init_shorthand = true 18 | use_try_shorthand = true 19 | wrap_comments = true 20 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sqlx_migrator" 3 | version = "0.17.0" 4 | edition = "2024" 5 | authors = ["Saurav Sharma "] 6 | homepage = "https://github.com/iamsauravsharma/sqlx_migrator" 7 | repository = "https://github.com/iamsauravsharma/sqlx_migrator" 8 | description = "Migrator for writing sqlx migration using Rust instead of SQL" 9 | license = "MIT" 10 | readme = "README.MD" 11 | keywords = ["sqlx", "sqlx_migrations", "rust_migrations"] 12 | categories = ["database"] 13 | 14 | [dependencies] 15 | sqlx = { version = "0.8.0", default-features = false, features = ["macros"] } 16 | async-trait = "0.1.70" 17 | tracing = { version = "0.1.37" } 18 | thiserror = "2.0.0" 19 | clap = { version = "4.3.10", features = ["derive"], optional = true } 20 | crc32fast = { version = "1.3.2", optional = true } 21 | 22 | [dev-dependencies] 23 | tokio = { version = "1.34.0", features = ["rt-multi-thread", "macros"] } 24 | sqlx = { version = "0.8.0", features = ["runtime-tokio", "tls-rustls"] } 25 | 26 | [features] 27 | default = ["cli"] 28 | cli = ["dep:clap"] 29 | postgres = ["sqlx/postgres", "dep:crc32fast"] 30 | sqlite = ["sqlx/sqlite"] 31 | mysql = ["sqlx/mysql", "dep:crc32fast"] 32 | any = ["sqlx/any"] 33 | 34 | [[example]] 35 | name = "postgres" 36 | path = "examples/postgres/main.rs" 37 | required-features = ["postgres", "cli"] 38 | 39 | [[example]] 40 | name = "sqlite" 41 | path = "examples/sqlite/main.rs" 42 | required-features = ["sqlite", "cli"] 43 | 44 | [[example]] 45 | name = "mysql" 46 | path = "examples/mysql/main.rs" 47 | required-features = ["mysql", "cli"] 48 | 49 | [package.metadata.docs.rs] 50 | all-features = true 51 | rustdoc-args = ["--cfg", "docsrs"] 52 | 53 | [lints.rust] 54 | missing_docs = "warn" 55 | unreachable_pub = "warn" 56 | unused_crate_dependencies = "warn" 57 | unsafe_code = "deny" 58 | 59 | [lints.clippy] 60 | all = "deny" 61 | pedantic = "warn" 62 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Saurav Sharma 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/Makefile.toml:
--------------------------------------------------------------------------------
1 | [env]
2 | RUST_BACKTRACE = "full"
3 | 
4 | [tasks.clean]
5 | description = "Clean target directory"
6 | command = "cargo"
7 | args = ["clean"]
8 | 
9 | [tasks.build]
10 | description = "Run cargo build"
11 | command = "cargo"
12 | args = ["build", "--workspace", "@@split(FEATURES_FLAG, )"]
13 | 
14 | [tasks.fmt]
15 | condition = { channels = ["nightly"] }
16 | description = "Check whether rust code is properly formatted or not"
17 | command = "cargo"
18 | args = ["fmt", "--", "--check"]
19 | 
20 | [tasks.clippy]
21 | condition = { channels = ["nightly"] }
22 | description = "Check if clippy returns any warnings or errors"
23 | command = "cargo"
24 | args = [
25 |     "clippy",
26 |     "--workspace",
27 |     "@@split(FEATURES_FLAG, )",
28 |     "--",
29 |     "-D",
30 |     "warnings",
31 | ]
32 | 
33 | [tasks.test]
34 | description = "Run test"
35 | command = "cargo"
36 | args = ["test", "--workspace", "@@split(FEATURES_FLAG, )"]
37 | 
38 | [tasks.doc]
39 | description = "Run doc"
40 | command = "cargo"
41 | args = ["doc", "--workspace", "--no-deps", "@@split(FEATURES_FLAG, )"]
42 | 
43 | [tasks.rustdoc]
44 | description = "Run rustdoc"
45 | command = "cargo"
46 | args = ["rustdoc", "--all-features", "--", "--cfg", "docsrs"]
47 | 
48 | [tasks.local]
49 | dependencies = ["fmt", "build", "clippy", "doc", "test"]
50 | 
51 | [tasks.full]
52 | dependencies = ["clean", "local"]
53 | 
54 | [tasks.run_postgres_example]
55 | description = "run postgres example"
56 | command = "cargo"
57 | args = ["run", "--example", "postgres", "--features", "postgres", "--", "${@}"]
58 | 
59 | [tasks.run_sqlite_example]
60 | description = "run sqlite example"
61 | command = "cargo"
62 | args = ["run", "--example", "sqlite", "--features", "sqlite", "--", "${@}"]
63 | 
64 | [tasks.run_mysql_example]
65 | description = "run mysql example"
66 | command = "cargo"
67 | args = ["run", "--example", "mysql", "--features", "mysql", "--", "${@}"]
68 | 
--------------------------------------------------------------------------------
/README.MD:
--------------------------------------------------------------------------------
1 | # SQLX migrator
2 | 
3 | A Rust library for writing SQLX migrations using Rust instead of SQL.
4 | 
5 | | License | Crates Version | Docs |
6 | | :--------------------------------------------: | :---------------------------------------: | :----------------------------------: |
7 | | [![License: MIT][license_badge]][license_link] | [![Crate][cratesio_badge]][cratesio_link] | [![Docs][docsrs_badge]][docsrs_link] |
8 | 
9 | Supported Databases:
10 | 
11 | - [x] PostgreSQL
12 | - [x] SQLite
13 | - [x] MySQL
14 | - [x] Any
15 | 
16 | ## Installation
17 | 
18 | Add `sqlx_migrator` to your `Cargo.toml` with the appropriate database feature:
19 | 
20 | ```toml
21 | sqlx_migrator = { version = "0.17.0", features=["postgres"] }
22 | ```
23 | 
24 | OR
25 | 
26 | ```toml
27 | sqlx_migrator = { version = "0.17.0", features=["mysql"] }
28 | ```
29 | 
30 | OR
31 | 
32 | ```toml
33 | sqlx_migrator = { version = "0.17.0", features=["sqlite"] }
34 | ```
35 | 
36 | OR
37 | 
38 | ```toml
39 | sqlx_migrator = { version = "0.17.0", features=[
40 |     "any",
41 |     # Plus any one of the above database drivers
42 | ] }
43 | ```
44 | 
45 | # Usage
46 | 
47 | To use `sqlx_migrator`, implement the `Operation` trait to define your migration logic. Here's an example using PostgreSQL:
48 | 
49 | ```rust
50 | use sqlx_migrator::error::Error;
51 | use sqlx_migrator::operation::Operation;
52 | 
53 | pub(crate) struct FirstOperation;
54 | 
55 | #[async_trait::async_trait]
56 | impl Operation<sqlx::Postgres> for FirstOperation {
57 |     // The up function runs when the migration is applied
58 |     async fn up(&self, connection: &mut sqlx::PgConnection) -> Result<(), Error> {
59 |         sqlx::query("CREATE TABLE sample (id INTEGER PRIMARY KEY, name TEXT)")
60 |             // NOTE: if you want to use the connection multiple times, pass `&mut *connection`
61 |             // as a parameter instead of `connection`
62 |             .execute(connection)
63 |             .await?;
64 |         Ok(())
65 |     }
66 | 
67 |     // The down function runs when the migration is reverted
68 |     async fn down(&self, connection: &mut sqlx::PgConnection) -> Result<(), Error> {
69 |         sqlx::query("DROP TABLE sample").execute(connection).await?;
70 |         Ok(())
71 |     }
72 | }
73 | ```
74 | After defining your operations, you can create a migration:
75 | 
76 | ```rust
77 | use sqlx_migrator::error::Error;
78 | use sqlx_migrator::migration::Migration;
79 | use sqlx_migrator::operation::Operation;
80 | 
81 | pub(crate) struct FirstMigration;
82 | 
83 | impl Migration<sqlx::Postgres> for FirstMigration {
84 |     // App name under which the migration lies; it can be any value
85 |     fn app(&self) -> &str {
86 |         "main"
87 |     }
88 | 
89 |     // Name of the migration
90 |     // The combination of migration app and name must be unique to work properly, except for virtual migrations
91 |     fn name(&self) -> &str {
92 |         "first_migration"
93 |     }
94 | 
95 |     // Use the parents function to add parents of a migration.
96 |     // If you cannot access or create the parent migration easily, you can also use
97 |     // `(A, N) where A: AsRef<str>, N: AsRef<str>`, where A is the app name
98 |     // and N is the name of the migration.
99 |     fn parents(&self) -> Vec<Box<dyn Migration<sqlx::Postgres>>> {
100 |         vec![]
101 |         // vec![Box::new(("main", "initial_migration")), Box::new(AnotherInitialMigration)]
102 |     }
103 | 
104 |     // Use the operations function to add the operations that make up the migration
105 |     fn operations(&self) -> Vec<Box<dyn Operation<sqlx::Postgres>>> {
106 |         vec![Box::new(FirstOperation)]
107 |     }
108 | 
109 |     // The Migration trait also has multiple other functions; see the docs for usage
110 | }
111 | ```
112 | 
113 | This migration can be represented in a simpler form using macros:
114 | ```rust
115 | use sqlx_migrator::vec_box;
116 | sqlx_migrator::migration!(
117 |     sqlx::Postgres,
118 |     FirstMigration,
119 |     "main",
120 |     "first_migration",
121 |     vec_box![],
122 |     vec_box![FirstOperation]
123 | );
124 | // OR
125 | sqlx_migrator::postgres_migration!(
126 |     FirstMigration,
127 |     "main",
128 |     "first_migration",
129 |     vec_box![],
130 |     vec_box![FirstOperation]
131 | );
132 | ```
133 | 
134 | If your up and down queries are simple strings, you can simplify the implementation:
135 | ```rust
136 | sqlx_migrator::postgres_migration!(
137 |     FirstMigration,
138 |     "main",
139 |     "first_migration",
140 |     sqlx_migrator::vec_box![],
141 |     sqlx_migrator::vec_box![
142 |         (
143 |             "CREATE TABLE sample (id INTEGER PRIMARY KEY, name TEXT)",
144 |             "DROP TABLE sample"
145 |         )
146 |     ]
147 | );
148 | ```
149 | 
150 | Finally, create a migrator to run your migrations:
151 | 
152 | ```rust
153 | use sqlx_migrator::migrator::{Info, Migrate, Migrator};
154 | use sqlx::Postgres;
155 | 
156 | #[tokio::main]
157 | async fn main() {
158 |     let uri = std::env::var("DATABASE_URL").unwrap();
159 |     let pool = sqlx::Pool::<Postgres>::connect(&uri).await.unwrap();
160 |     let mut migrator = Migrator::default();
161 |     // Adding a migration can fail if another migration with the same app and name but different values was already added
162 |     // Adding a migration also adds its parents, replaces and run-before migrations
163 |     migrator.add_migration(Box::new(FirstMigration)).unwrap();
164 | }
165 | ```
166 | 
167 | # Running Migrations
168 | 
169 | You can run migrations directly or integrate them into a CLI:
170 | ## Programmatic Execution
171 | ```rust
172 | use sqlx_migrator::migrator::Plan;
173 | let mut conn = pool.acquire().await?;
174 | // use apply_all to apply all pending migrations
175 | migrator.run(&mut *conn, &Plan::apply_all()).await.unwrap();
176 | // or use revert_all to revert all applied migrations
177 | migrator.run(&mut *conn, &Plan::revert_all()).await.unwrap();
178 | // If you need to apply or revert to a certain stage, see the `Plan` docs
179 | ```
180 | 
181 | ## CLI Integration
182 | To integrate sqlx_migrator into your CLI, you can either use the built-in
183 | `MigrationCommand` or extend your own CLI with migrator support. Below are
184 | examples of both approaches:
185 | 
186 | #### Built-in Migration Command
187 | 
188 | ```rust
189 | use sqlx_migrator::cli::MigrationCommand;
190 | 
191 | MigrationCommand::parse_and_run(&mut *conn, Box::new(migrator)).await.unwrap();
192 | ```
193 | 
194 | #### Extending Your Own CLI with Migrator Support
195 | 
196 | ```rust
197 | #[derive(clap::Parser)]
198 | struct Cli {
199 |     #[command(subcommand)]
200 |     sub_command: CliSubcommand,
201 | }
202 | 
203 | #[derive(clap::Subcommand)]
204 | enum CliSubcommand {
205 |     #[command()]
206 |     Migrator(sqlx_migrator::cli::MigrationCommand),
207 | }
208 | 
209 | impl Cli {
210 |     async fn run() {
211 |         let cli = Self::parse();
212 |         // create connection
213 |         match cli.sub_command {
214 |             CliSubcommand::Migrator(m) => {
215 |                 m.run(&mut conn, Box::new(migrator)).await.unwrap()
216 |             }
217 |         }
218 |     }
219 | }
220 | ```
221 | 
222 | # Migrate from sqlx default SQL-based migrations
223 | 
224 | To migrate from sqlx's default SQL-based migrations to Rust migrations, the recommended approach
225 | is to rewrite your SQL migrations as Rust operations and migrations, as explained above.
226 | After rewriting your SQL migrations, you need to mark them as applied without re-executing them.
227 | This step ensures that the migration state aligns with the existing database.
228 | There are two ways to perform a fake apply:
229 | 
230 | #### Programmatic Fake Apply
231 | 
232 | Use the fake option with the `Plan::apply_all()` function:
233 | ```rust
234 | use sqlx_migrator::migrator::Plan;
235 | 
236 | migrator.run(&mut *conn, &Plan::apply_all().fake(true)).await.unwrap();
237 | ```
238 | 
239 | #### CLI-Based Fake Apply
240 | If you're using a CLI, use the `--fake` flag with the apply command: ` apply --fake`
241 | 
242 | ### Note: Before writing any other migrations
243 | 
244 | Before adding new migrations for future updates, ensure you complete the above steps to mark existing migrations as applied. Run the fake apply only once to align the migration state. After this, remove the `fake(true)` option or the `--fake` flag so that new migrations execute normally.
245 | 
246 | By following these steps, you can seamlessly transition from sqlx's SQL-based migrations to Rust migrations while maintaining an accurate migration state and ensuring compatibility for future updates.
247 | 
248 | [license_badge]: https://img.shields.io/github/license/iamsauravsharma/sqlx_migrator.svg?style=for-the-badge
249 | [license_link]: LICENSE
250 | [cratesio_badge]: https://img.shields.io/crates/v/sqlx_migrator.svg?style=for-the-badge
251 | [cratesio_link]: https://crates.io/crates/sqlx_migrator
252 | [docsrs_badge]: https://img.shields.io/docsrs/sqlx_migrator/latest?style=for-the-badge
253 | [docsrs_link]: https://docs.rs/sqlx_migrator
254 | 
--------------------------------------------------------------------------------
/cliff.toml:
--------------------------------------------------------------------------------
1 | # git-cliff ~ default configuration file
2 | # https://git-cliff.org/docs/configuration
3 | #
4 | # Lines starting with "#" are comments.
5 | # Configuration options are organized into tables and keys.
6 | # See documentation for more information on available options.
7 | 8 | [changelog] 9 | # changelog header 10 | header = """ 11 | # Changelog\n 12 | All notable changes to this project will be documented in this file.\n 13 | """ 14 | # template for the changelog body 15 | # https://tera.netlify.app/docs 16 | body = """ 17 | {% if version %}\ 18 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} 19 | {% else %}\ 20 | ## [unreleased] 21 | {% endif %}\ 22 | {% for group, commits in commits | group_by(attribute="group") %} 23 | ### {{ group | upper_first }} 24 | {% for commit in commits %} 25 | - {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | upper_first }}\ 26 | {% endfor %} 27 | {% endfor %}\n 28 | """ 29 | # remove the leading and trailing whitespace from the template 30 | trim = true 31 | # changelog footer 32 | footer = """ 33 | 34 | """ 35 | 36 | [git] 37 | # parse the commits based on https://www.conventionalcommits.org 38 | conventional_commits = true 39 | # filter out the commits that are not conventional 40 | filter_unconventional = true 41 | # process each line of a commit as an individual commit 42 | split_commits = false 43 | # regex for preprocessing the commit messages 44 | commit_preprocessors = [ 45 | # { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](https://github.com/orhun/git-cliff/issues/${2}))"}, # replace issue numbers 46 | ] 47 | # regex for parsing and grouping commits 48 | commit_parsers = [ 49 | { message = "^feat", group = "Features" }, 50 | { message = "^fix", group = "Bug Fixes" }, 51 | { message = "^doc", group = "Documentation" }, 52 | { message = "^perf", group = "Performance" }, 53 | { message = "^refactor", group = "Refactor" }, 54 | { message = "^style", group = "Styling" }, 55 | { message = "^test", group = "Testing" }, 56 | { message = "^chore\\(release\\): prepare for", skip = true }, 57 | { message = "^chore", group = "Miscellaneous Tasks" }, 58 | { body = ".*security", group = "Security" }, 59 | ] 60 | # protect breaking changes from being skipped due to matching a skipping commit_parser 61 | protect_breaking_commits = false 62 | # filter out the commits that are not matched by commit parsers 63 | filter_commits = false 64 | # glob pattern for matching git tags 65 | tag_pattern = "v[0-9]*" 66 | # regex for skipping tags 67 | skip_tags = "v0.1.0-beta.1" 68 | # regex for ignoring tags 69 | ignore_tags = "" 70 | # sort the tags topologically 71 | topo_order = false 72 | # sort the commits inside sections by oldest/newest order 73 | sort_commits = "oldest" 74 | # limit the number of commits included in the changelog. 75 | # limit_commits = 42 76 | -------------------------------------------------------------------------------- /examples/mysql/main.rs: -------------------------------------------------------------------------------- 1 | #![expect(unused_crate_dependencies)] 2 | //! Example crate for mysql 3 | use sqlx::MySql; 4 | use sqlx_migrator::cli::MigrationCommand; 5 | use sqlx_migrator::migrator::{Info, Migrator}; 6 | 7 | mod migrations; 8 | 9 | #[tokio::main] 10 | async fn main() { 11 | let uri = std::env::var("MYSQL_DATABASE_URL").unwrap(); 12 | let pool = sqlx::Pool::::connect(&uri).await.unwrap(); 13 | let mut migrator = Migrator::default(); 14 | migrator.add_migrations(migrations::migrations()).unwrap(); 15 | // There are two way to run migration. 
Either you can create cli as shown below 16 | let mut conn = pool.acquire().await.unwrap(); 17 | MigrationCommand::parse_and_run(&mut *conn, Box::new(migrator)) 18 | .await 19 | .unwrap(); 20 | // Or you can directly use migrator run function instead of creating 21 | // cli 22 | // migrator 23 | // .run(&mut *conn, sqlx_migrator::migrator::Plan::apply_all()) 24 | // .await 25 | // .unwrap(); 26 | conn.close().await.unwrap(); 27 | } 28 | -------------------------------------------------------------------------------- /examples/mysql/migrations/m0001_simple.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{MySql, MySqlConnection}; 2 | use sqlx_migrator::error::Error; 3 | use sqlx_migrator::migration::Migration; 4 | use sqlx_migrator::operation::Operation; 5 | 6 | pub(crate) struct M0001Operation; 7 | 8 | #[async_trait::async_trait] 9 | impl Operation for M0001Operation { 10 | async fn up(&self, connection: &mut MySqlConnection) -> Result<(), Error> { 11 | sqlx::query("CREATE TABLE sample (id INTEGER PRIMARY KEY, name TEXT)") 12 | .execute(&mut *connection) 13 | .await?; 14 | Ok(()) 15 | } 16 | 17 | async fn down(&self, connection: &mut MySqlConnection) -> Result<(), Error> { 18 | sqlx::query("DROP TABLE sample").execute(connection).await?; 19 | Ok(()) 20 | } 21 | } 22 | 23 | pub(crate) struct M0001Migration; 24 | 25 | impl Migration for M0001Migration { 26 | fn app(&self) -> &'static str { 27 | "main" 28 | } 29 | 30 | fn name(&self) -> &'static str { 31 | "m0001_simple" 32 | } 33 | 34 | fn parents(&self) -> Vec>> { 35 | vec![] 36 | } 37 | 38 | fn operations(&self) -> Vec>> { 39 | vec![Box::new(M0001Operation)] 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /examples/mysql/migrations/m0002_with_parents.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{MySql, MySqlConnection}; 2 | use sqlx_migrator::error::Error; 3 | use sqlx_migrator::migration::Migration; 4 | use sqlx_migrator::operation::Operation; 5 | 6 | pub(crate) struct M0002Operation; 7 | 8 | #[async_trait::async_trait] 9 | impl Operation for M0002Operation { 10 | async fn up(&self, connection: &mut MySqlConnection) -> Result<(), Error> { 11 | sqlx::query("INSERT INTO sample (id, name) VALUES (99, 'Some text')") 12 | .execute(connection) 13 | .await?; 14 | Ok(()) 15 | } 16 | 17 | async fn down(&self, connection: &mut MySqlConnection) -> Result<(), Error> { 18 | sqlx::query("DELETE FROM sample WHERE id = 99") 19 | .execute(connection) 20 | .await?; 21 | Ok(()) 22 | } 23 | } 24 | 25 | pub(crate) struct M0002Migration; 26 | 27 | impl Migration for M0002Migration { 28 | fn app(&self) -> &'static str { 29 | "main" 30 | } 31 | 32 | fn name(&self) -> &'static str { 33 | "m0002_with_parents" 34 | } 35 | 36 | fn parents(&self) -> Vec>> { 37 | vec![Box::new(crate::migrations::m0001_simple::M0001Migration)] 38 | } 39 | 40 | fn operations(&self) -> Vec>> { 41 | vec![Box::new(M0002Operation)] 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /examples/mysql/migrations/m0003_use_macros.rs: -------------------------------------------------------------------------------- 1 | pub(crate) struct M0003Migration; 2 | 3 | sqlx_migrator::mysql_migration!( 4 | M0003Migration, 5 | "main", 6 | "m0003_use_macros", 7 | sqlx_migrator::vec_box![("main", "m0002_with_parents")], 8 | sqlx_migrator::vec_box![( 9 | "INSERT INTO sample (id, name) VALUES (999, 'Another 
text')", 10 | "DELETE FROM sample WHERE id = 999" 11 | )] 12 | ); 13 | -------------------------------------------------------------------------------- /examples/mysql/migrations/m0004_complex_operation.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{MySql, MySqlConnection}; 2 | use sqlx_migrator::error::Error; 3 | use sqlx_migrator::migration::Migration; 4 | use sqlx_migrator::operation::Operation; 5 | 6 | pub(crate) struct M0004Operation { 7 | id: i32, 8 | message: String, 9 | } 10 | 11 | #[async_trait::async_trait] 12 | impl Operation for M0004Operation { 13 | async fn up(&self, connection: &mut MySqlConnection) -> Result<(), Error> { 14 | sqlx::query("INSERT INTO sample (id, name) VALUES (?, ?)") 15 | .bind(self.id) 16 | .bind(&self.message) 17 | .execute(connection) 18 | .await?; 19 | Ok(()) 20 | } 21 | 22 | async fn down(&self, connection: &mut MySqlConnection) -> Result<(), Error> { 23 | sqlx::query("DELETE FROM sample WHERE id = ?") 24 | .bind(self.id) 25 | .execute(connection) 26 | .await?; 27 | Ok(()) 28 | } 29 | } 30 | 31 | pub(crate) struct M0004Migration { 32 | pub(crate) id: i32, 33 | pub(crate) message: String, 34 | } 35 | 36 | impl Migration for M0004Migration { 37 | fn app(&self) -> &'static str { 38 | "main" 39 | } 40 | 41 | fn name(&self) -> &'static str { 42 | "m0004_complex_operation" 43 | } 44 | 45 | fn parents(&self) -> Vec>> { 46 | vec![Box::new( 47 | crate::migrations::m0003_use_macros::M0003Migration, 48 | )] 49 | } 50 | 51 | fn operations(&self) -> Vec>> { 52 | vec![Box::new(M0004Operation { 53 | id: self.id, 54 | message: self.message.clone(), 55 | })] 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /examples/mysql/migrations/m0005_reference_complex.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{MySql, MySqlConnection}; 2 | use sqlx_migrator::error::Error; 3 | use sqlx_migrator::migration::Migration; 4 | use sqlx_migrator::operation::Operation; 5 | 6 | pub(crate) struct M0005Operation; 7 | 8 | #[async_trait::async_trait] 9 | impl Operation for M0005Operation { 10 | async fn up(&self, connection: &mut MySqlConnection) -> Result<(), Error> { 11 | sqlx::query("INSERT INTO sample (id, name) VALUES (888, 'complex')") 12 | .execute(connection) 13 | .await?; 14 | Ok(()) 15 | } 16 | 17 | async fn down(&self, connection: &mut MySqlConnection) -> Result<(), Error> { 18 | sqlx::query("DELETE FROM sample WHERE id = 888") 19 | .execute(connection) 20 | .await?; 21 | Ok(()) 22 | } 23 | } 24 | 25 | pub(crate) struct M0005Migration; 26 | 27 | impl Migration for M0005Migration { 28 | fn app(&self) -> &'static str { 29 | "main" 30 | } 31 | 32 | fn name(&self) -> &'static str { 33 | "m0005_reference_complex" 34 | } 35 | 36 | fn parents(&self) -> Vec>> { 37 | vec![Box::new(("main", "m0004_complex_operation"))] 38 | } 39 | 40 | fn operations(&self) -> Vec>> { 41 | vec![Box::new(M0005Operation)] 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /examples/mysql/migrations/mod.rs: -------------------------------------------------------------------------------- 1 | use sqlx::MySql; 2 | use sqlx_migrator::migration::Migration; 3 | use sqlx_migrator::vec_box; 4 | 5 | pub(crate) mod m0001_simple; 6 | pub(crate) mod m0002_with_parents; 7 | pub(crate) mod m0003_use_macros; 8 | pub(crate) mod m0004_complex_operation; 9 | pub(crate) mod m0005_reference_complex; 10 | 11 | pub(crate) fn 
migrations() -> Vec>> { 12 | vec_box![ 13 | m0001_simple::M0001Migration, 14 | m0002_with_parents::M0002Migration, 15 | m0003_use_macros::M0003Migration, 16 | m0004_complex_operation::M0004Migration { 17 | id: 23, 18 | message: "Custom String".to_string() 19 | }, 20 | m0005_reference_complex::M0005Migration, 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /examples/postgres/main.rs: -------------------------------------------------------------------------------- 1 | #![expect(unused_crate_dependencies)] 2 | //! Example crate for postgres 3 | use sqlx::Postgres; 4 | use sqlx_migrator::cli::MigrationCommand; 5 | use sqlx_migrator::migrator::{Info, Migrator}; 6 | 7 | mod migrations; 8 | #[tokio::main] 9 | async fn main() { 10 | let uri = std::env::var("POSTGRES_DATABASE_URL").unwrap(); 11 | let pool = sqlx::Pool::::connect(&uri).await.unwrap(); 12 | sqlx::query("CREATE SCHEMA IF NOT EXISTS random_schema_name") 13 | .execute(&pool) 14 | .await 15 | .unwrap(); 16 | let mut migrator = Migrator::default() 17 | .set_table_prefix("prefix") 18 | .unwrap() 19 | .set_schema("random_schema_name") 20 | .unwrap(); 21 | migrator.add_migrations(migrations::migrations()).unwrap(); 22 | // There are two way to run migration. Either you can create cli as shown below 23 | let mut conn = pool.acquire().await.unwrap(); 24 | MigrationCommand::parse_and_run(&mut *conn, Box::new(migrator)) 25 | .await 26 | .unwrap(); 27 | // Or you can directly use migrator run function instead of creating 28 | // cli 29 | // migrator 30 | // .run(&mut *conn, sqlx_migrator::migrator::Plan::apply_all()) 31 | // .await 32 | // .unwrap(); 33 | conn.close().await.unwrap(); 34 | } 35 | -------------------------------------------------------------------------------- /examples/postgres/migrations/m0001_simple.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{PgConnection, Postgres}; 2 | use sqlx_migrator::error::Error; 3 | use sqlx_migrator::migration::Migration; 4 | use sqlx_migrator::operation::Operation; 5 | 6 | pub(crate) struct M0001Operation; 7 | 8 | #[async_trait::async_trait] 9 | impl Operation for M0001Operation { 10 | async fn up(&self, connection: &mut PgConnection) -> Result<(), Error> { 11 | sqlx::query("CREATE TABLE sample (id INTEGER PRIMARY KEY, name TEXT)") 12 | .execute(connection) 13 | .await?; 14 | Ok(()) 15 | } 16 | 17 | async fn down(&self, connection: &mut PgConnection) -> Result<(), Error> { 18 | sqlx::query("DROP TABLE sample").execute(connection).await?; 19 | Ok(()) 20 | } 21 | } 22 | 23 | pub(crate) struct M0001Migration; 24 | 25 | impl Migration for M0001Migration { 26 | fn app(&self) -> &'static str { 27 | "main" 28 | } 29 | 30 | fn name(&self) -> &'static str { 31 | "m0001_simple" 32 | } 33 | 34 | fn parents(&self) -> Vec>> { 35 | vec![] 36 | } 37 | 38 | fn operations(&self) -> Vec>> { 39 | vec![Box::new(M0001Operation)] 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /examples/postgres/migrations/m0002_with_parents.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{PgConnection, Postgres}; 2 | use sqlx_migrator::error::Error; 3 | use sqlx_migrator::migration::Migration; 4 | use sqlx_migrator::operation::Operation; 5 | 6 | pub(crate) struct M0002Operation; 7 | 8 | #[async_trait::async_trait] 9 | impl Operation for M0002Operation { 10 | async fn up(&self, connection: &mut PgConnection) -> Result<(), Error> { 11 
| sqlx::query("INSERT INTO sample (id, name) VALUES (99, 'Some text')") 12 | .execute(connection) 13 | .await?; 14 | Ok(()) 15 | } 16 | 17 | async fn down(&self, connection: &mut PgConnection) -> Result<(), Error> { 18 | sqlx::query("DELETE FROM sample WHERE id = 99") 19 | .execute(connection) 20 | .await?; 21 | Ok(()) 22 | } 23 | } 24 | 25 | pub(crate) struct M0002Migration; 26 | 27 | impl Migration for M0002Migration { 28 | fn app(&self) -> &'static str { 29 | "main" 30 | } 31 | 32 | fn name(&self) -> &'static str { 33 | "m0002_with_parents" 34 | } 35 | 36 | fn parents(&self) -> Vec>> { 37 | vec![Box::new(crate::migrations::m0001_simple::M0001Migration)] 38 | } 39 | 40 | fn operations(&self) -> Vec>> { 41 | vec![Box::new(M0002Operation)] 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /examples/postgres/migrations/m0003_use_macros.rs: -------------------------------------------------------------------------------- 1 | pub(crate) struct M0003Migration; 2 | 3 | sqlx_migrator::postgres_migration!( 4 | M0003Migration, 5 | "main", 6 | "m0003_use_macros", 7 | sqlx_migrator::vec_box![("main", "m0002_with_parents")], 8 | sqlx_migrator::vec_box![( 9 | "INSERT INTO sample (id, name) VALUES (999, 'Another text')", 10 | "DELETE FROM sample WHERE id = 999" 11 | )] 12 | ); 13 | -------------------------------------------------------------------------------- /examples/postgres/migrations/m0004_complex_operation.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{PgConnection, Postgres}; 2 | use sqlx_migrator::error::Error; 3 | use sqlx_migrator::migration::Migration; 4 | use sqlx_migrator::operation::Operation; 5 | 6 | pub(crate) struct M0004Operation { 7 | id: i32, 8 | message: String, 9 | } 10 | 11 | #[async_trait::async_trait] 12 | impl Operation for M0004Operation { 13 | async fn up(&self, connection: &mut PgConnection) -> Result<(), Error> { 14 | sqlx::query("INSERT INTO sample (id, name) VALUES ($1, $2)") 15 | .bind(self.id) 16 | .bind(&self.message) 17 | .execute(connection) 18 | .await?; 19 | Ok(()) 20 | } 21 | 22 | async fn down(&self, connection: &mut PgConnection) -> Result<(), Error> { 23 | sqlx::query("DELETE FROM sample WHERE id = $1") 24 | .bind(self.id) 25 | .execute(connection) 26 | .await?; 27 | Ok(()) 28 | } 29 | } 30 | 31 | pub(crate) struct M0004Migration { 32 | pub(crate) id: i32, 33 | pub(crate) message: String, 34 | } 35 | 36 | impl Migration for M0004Migration { 37 | fn app(&self) -> &'static str { 38 | "main" 39 | } 40 | 41 | fn name(&self) -> &'static str { 42 | "m0004_complex_operation" 43 | } 44 | 45 | fn parents(&self) -> Vec>> { 46 | vec![Box::new( 47 | crate::migrations::m0003_use_macros::M0003Migration, 48 | )] 49 | } 50 | 51 | fn operations(&self) -> Vec>> { 52 | vec![Box::new(M0004Operation { 53 | id: self.id, 54 | message: self.message.clone(), 55 | })] 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /examples/postgres/migrations/m0005_reference_complex.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{PgConnection, Postgres}; 2 | use sqlx_migrator::error::Error; 3 | use sqlx_migrator::migration::Migration; 4 | use sqlx_migrator::operation::Operation; 5 | 6 | pub(crate) struct M0005Operation; 7 | 8 | #[async_trait::async_trait] 9 | impl Operation for M0005Operation { 10 | async fn up(&self, connection: &mut PgConnection) -> Result<(), Error> { 11 | sqlx::query("INSERT 
INTO sample (id, name) VALUES (888, 'complex')") 12 | .execute(connection) 13 | .await?; 14 | Ok(()) 15 | } 16 | 17 | async fn down(&self, connection: &mut PgConnection) -> Result<(), Error> { 18 | sqlx::query("DELETE FROM sample WHERE id = 888") 19 | .execute(connection) 20 | .await?; 21 | Ok(()) 22 | } 23 | } 24 | 25 | pub(crate) struct M0005Migration; 26 | 27 | impl Migration for M0005Migration { 28 | fn app(&self) -> &'static str { 29 | "main" 30 | } 31 | 32 | fn name(&self) -> &'static str { 33 | "m0005_reference_complex" 34 | } 35 | 36 | fn parents(&self) -> Vec>> { 37 | vec![Box::new(("main", "m0004_complex_operation"))] 38 | } 39 | 40 | fn operations(&self) -> Vec>> { 41 | vec![Box::new(M0005Operation)] 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /examples/postgres/migrations/mod.rs: -------------------------------------------------------------------------------- 1 | use sqlx::Postgres; 2 | use sqlx_migrator::migration::Migration; 3 | use sqlx_migrator::vec_box; 4 | 5 | pub(crate) mod m0001_simple; 6 | pub(crate) mod m0002_with_parents; 7 | pub(crate) mod m0003_use_macros; 8 | pub(crate) mod m0004_complex_operation; 9 | pub(crate) mod m0005_reference_complex; 10 | 11 | pub(crate) fn migrations() -> Vec>> { 12 | vec_box![ 13 | m0001_simple::M0001Migration, 14 | m0002_with_parents::M0002Migration, 15 | m0003_use_macros::M0003Migration, 16 | m0004_complex_operation::M0004Migration { 17 | id: 23, 18 | message: "Custom String".to_string() 19 | }, 20 | m0005_reference_complex::M0005Migration, 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /examples/sqlite/main.rs: -------------------------------------------------------------------------------- 1 | #![expect(unused_crate_dependencies)] 2 | //! Example crate for sqlite 3 | use sqlx::Sqlite; 4 | use sqlx_migrator::cli::MigrationCommand; 5 | use sqlx_migrator::migrator::{Info, Migrator}; 6 | 7 | mod migrations; 8 | #[tokio::main] 9 | async fn main() { 10 | let uri = std::env::var("SQLITE_DATABASE_URL").unwrap(); 11 | let pool = sqlx::Pool::::connect(&uri).await.unwrap(); 12 | let mut migrator = Migrator::default(); 13 | migrator.add_migrations(migrations::migrations()).unwrap(); 14 | // There are two way to run migration. 
Either you can create cli as shown below 15 | let mut conn = pool.acquire().await.unwrap(); 16 | MigrationCommand::parse_and_run(&mut *conn, Box::new(migrator)) 17 | .await 18 | .unwrap(); 19 | // Or you can directly use migrator run function instead of creating 20 | // cli 21 | // migrator 22 | // .run(&mut *conn, sqlx_migrator::migrator::Plan::apply_all()) 23 | // .await 24 | // .unwrap(); 25 | conn.close().await.unwrap(); 26 | } 27 | -------------------------------------------------------------------------------- /examples/sqlite/migrations/m0001_simple.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{Sqlite, SqliteConnection}; 2 | use sqlx_migrator::error::Error; 3 | use sqlx_migrator::migration::Migration; 4 | use sqlx_migrator::operation::Operation; 5 | 6 | pub(crate) struct M0001Operation; 7 | 8 | #[async_trait::async_trait] 9 | impl Operation for M0001Operation { 10 | async fn up(&self, connection: &mut SqliteConnection) -> Result<(), Error> { 11 | sqlx::query("CREATE TABLE sample (id INTEGER PRIMARY KEY, name TEXT)") 12 | .execute(connection) 13 | .await?; 14 | Ok(()) 15 | } 16 | 17 | async fn down(&self, connection: &mut SqliteConnection) -> Result<(), Error> { 18 | sqlx::query("DROP TABLE sample").execute(connection).await?; 19 | Ok(()) 20 | } 21 | } 22 | 23 | pub(crate) struct M0001Migration; 24 | 25 | impl Migration for M0001Migration { 26 | fn app(&self) -> &'static str { 27 | "main" 28 | } 29 | 30 | fn name(&self) -> &'static str { 31 | "m0001_simple" 32 | } 33 | 34 | fn parents(&self) -> Vec>> { 35 | vec![] 36 | } 37 | 38 | fn operations(&self) -> Vec>> { 39 | vec![Box::new(M0001Operation)] 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /examples/sqlite/migrations/m0002_with_parents.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{Sqlite, SqliteConnection}; 2 | use sqlx_migrator::error::Error; 3 | use sqlx_migrator::migration::Migration; 4 | use sqlx_migrator::operation::Operation; 5 | 6 | pub(crate) struct M0002Operation; 7 | 8 | #[async_trait::async_trait] 9 | impl Operation for M0002Operation { 10 | async fn up(&self, connection: &mut SqliteConnection) -> Result<(), Error> { 11 | sqlx::query("INSERT INTO sample (id, name) VALUES (99, 'Some text')") 12 | .execute(connection) 13 | .await?; 14 | Ok(()) 15 | } 16 | 17 | async fn down(&self, connection: &mut SqliteConnection) -> Result<(), Error> { 18 | sqlx::query("DELETE FROM sample WHERE id = 99") 19 | .execute(connection) 20 | .await?; 21 | Ok(()) 22 | } 23 | } 24 | 25 | pub(crate) struct M0002Migration; 26 | 27 | impl Migration for M0002Migration { 28 | fn app(&self) -> &'static str { 29 | "main" 30 | } 31 | 32 | fn name(&self) -> &'static str { 33 | "m0002_with_parents" 34 | } 35 | 36 | fn parents(&self) -> Vec>> { 37 | vec![Box::new(crate::migrations::m0001_simple::M0001Migration)] 38 | } 39 | 40 | fn operations(&self) -> Vec>> { 41 | vec![Box::new(M0002Operation)] 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /examples/sqlite/migrations/m0003_use_macros.rs: -------------------------------------------------------------------------------- 1 | pub(crate) struct M0003Migration; 2 | 3 | sqlx_migrator::sqlite_migration!( 4 | M0003Migration, 5 | "main", 6 | "m0003_use_macros", 7 | sqlx_migrator::vec_box![("main", "m0002_with_parents")], 8 | sqlx_migrator::vec_box![( 9 | "INSERT INTO sample (id, name) VALUES (999, 
'Another text')", 10 | "DELETE FROM sample WHERE id = 999" 11 | )] 12 | ); 13 | -------------------------------------------------------------------------------- /examples/sqlite/migrations/m0004_complex_operation.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{Sqlite, SqliteConnection}; 2 | use sqlx_migrator::error::Error; 3 | use sqlx_migrator::migration::Migration; 4 | use sqlx_migrator::operation::Operation; 5 | 6 | pub(crate) struct M0004Operation { 7 | id: i32, 8 | message: String, 9 | } 10 | 11 | #[async_trait::async_trait] 12 | impl Operation for M0004Operation { 13 | async fn up(&self, connection: &mut SqliteConnection) -> Result<(), Error> { 14 | sqlx::query("INSERT INTO sample (id, name) VALUES (?, ?)") 15 | .bind(self.id) 16 | .bind(&self.message) 17 | .execute(connection) 18 | .await?; 19 | Ok(()) 20 | } 21 | 22 | async fn down(&self, connection: &mut SqliteConnection) -> Result<(), Error> { 23 | sqlx::query("DELETE FROM sample WHERE id = ?") 24 | .bind(self.id) 25 | .execute(connection) 26 | .await?; 27 | Ok(()) 28 | } 29 | } 30 | 31 | pub(crate) struct M0004Migration { 32 | pub(crate) id: i32, 33 | pub(crate) message: String, 34 | } 35 | 36 | impl Migration for M0004Migration { 37 | fn app(&self) -> &'static str { 38 | "main" 39 | } 40 | 41 | fn name(&self) -> &'static str { 42 | "m0004_complex_operation" 43 | } 44 | 45 | fn parents(&self) -> Vec>> { 46 | vec![Box::new( 47 | crate::migrations::m0003_use_macros::M0003Migration, 48 | )] 49 | } 50 | 51 | fn operations(&self) -> Vec>> { 52 | vec![Box::new(M0004Operation { 53 | id: self.id, 54 | message: self.message.clone(), 55 | })] 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /examples/sqlite/migrations/m0005_reference_complex.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{Sqlite, SqliteConnection}; 2 | use sqlx_migrator::error::Error; 3 | use sqlx_migrator::migration::Migration; 4 | use sqlx_migrator::operation::Operation; 5 | 6 | pub(crate) struct M0005Operation; 7 | 8 | #[async_trait::async_trait] 9 | impl Operation for M0005Operation { 10 | async fn up(&self, connection: &mut SqliteConnection) -> Result<(), Error> { 11 | sqlx::query("INSERT INTO sample (id, name) VALUES (888, 'complex')") 12 | .execute(connection) 13 | .await?; 14 | Ok(()) 15 | } 16 | 17 | async fn down(&self, connection: &mut SqliteConnection) -> Result<(), Error> { 18 | sqlx::query("DELETE FROM sample WHERE id = 888") 19 | .execute(connection) 20 | .await?; 21 | Ok(()) 22 | } 23 | } 24 | 25 | pub(crate) struct M0005Migration; 26 | 27 | impl Migration for M0005Migration { 28 | fn app(&self) -> &'static str { 29 | "main" 30 | } 31 | 32 | fn name(&self) -> &'static str { 33 | "m0005_reference_complex" 34 | } 35 | 36 | fn parents(&self) -> Vec>> { 37 | vec![Box::new(("main", "m0004_complex_operation"))] 38 | } 39 | 40 | fn operations(&self) -> Vec>> { 41 | vec![Box::new(M0005Operation)] 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /examples/sqlite/migrations/mod.rs: -------------------------------------------------------------------------------- 1 | use sqlx::Sqlite; 2 | use sqlx_migrator::migration::Migration; 3 | use sqlx_migrator::vec_box; 4 | 5 | pub(crate) mod m0001_simple; 6 | pub(crate) mod m0002_with_parents; 7 | pub(crate) mod m0003_use_macros; 8 | pub(crate) mod m0004_complex_operation; 9 | pub(crate) mod m0005_reference_complex; 10 | 11 
| pub(crate) fn migrations() -> Vec>> { 12 | vec_box![ 13 | m0001_simple::M0001Migration, 14 | m0002_with_parents::M0002Migration, 15 | m0003_use_macros::M0003Migration, 16 | m0004_complex_operation::M0004Migration { 17 | id: 23, 18 | message: "Custom String".to_string() 19 | }, 20 | m0005_reference_complex::M0005Migration, 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /src/cli.rs: -------------------------------------------------------------------------------- 1 | //! Module for creating and running cli with help of migrator 2 | //! 3 | //! CLI Command can directly used or extended 4 | //! 5 | //! For direct usage you can run `parse_and_run` function for `MigrationCommand` 6 | //! 7 | //! OR 8 | //! 9 | //! If you want to extend your own clap based cli then you can add migrator to 10 | //! sub command enum and then run migrator 11 | //! ```rust,no_run 12 | //! #[derive(clap::Parser)] 13 | //! struct Cli { 14 | //! #[command(subcommand)] 15 | //! sub_command: CliSubcommand, 16 | //! } 17 | //! 18 | //! #[derive(clap::Subcommand)] 19 | //! enum CliSubcommand { 20 | //! #[command()] 21 | //! Migrator(sqlx_migrator::cli::MigrationCommand), 22 | //! } 23 | //! ``` 24 | use std::io::Write; 25 | 26 | use clap::{Parser, Subcommand}; 27 | use sqlx::Database; 28 | 29 | use crate::error::Error; 30 | use crate::migrator::{Migrate, Plan}; 31 | 32 | /// Migration command for performing rust based sqlx migrations 33 | #[derive(Parser, Debug)] 34 | pub struct MigrationCommand { 35 | #[command(subcommand)] 36 | sub_command: SubCommand, 37 | } 38 | 39 | impl MigrationCommand { 40 | /// Parse [`MigrationCommand`] and run migration command line interface 41 | /// 42 | /// # Errors 43 | /// If migration command fails to complete and raise some issue 44 | pub async fn parse_and_run( 45 | connection: &mut ::Connection, 46 | migrator: Box>, 47 | ) -> Result<(), Error> 48 | where 49 | DB: Database, 50 | { 51 | let migration_command = Self::parse(); 52 | migration_command.run(connection, migrator).await 53 | } 54 | 55 | /// Run migration command line interface 56 | /// 57 | /// # Errors 58 | /// If migration command fails to complete and raise some issue 59 | pub async fn run( 60 | &self, 61 | connection: &mut ::Connection, 62 | migrator: Box>, 63 | ) -> Result<(), Error> 64 | where 65 | DB: Database, 66 | { 67 | self.sub_command 68 | .handle_subcommand(migrator, connection) 69 | .await?; 70 | Ok(()) 71 | } 72 | } 73 | 74 | #[derive(Subcommand, Debug)] 75 | enum SubCommand { 76 | /// Apply migrations 77 | #[command()] 78 | Apply(Apply), 79 | /// Drop migration information table. 
Needs all migrations to be 80 | /// reverted else raises error 81 | #[command()] 82 | Drop, 83 | /// List migrations along with their status and time applied if migrations 84 | /// is already applied 85 | #[command()] 86 | List, 87 | /// Revert migrations 88 | #[command()] 89 | Revert(Revert), 90 | } 91 | 92 | impl SubCommand { 93 | async fn handle_subcommand( 94 | &self, 95 | migrator: Box>, 96 | connection: &mut ::Connection, 97 | ) -> Result<(), Error> 98 | where 99 | DB: Database, 100 | { 101 | match self { 102 | SubCommand::Apply(apply) => apply.run(connection, migrator).await?, 103 | SubCommand::Drop => drop_migrations(connection, migrator).await?, 104 | SubCommand::List => list_migrations(connection, migrator).await?, 105 | SubCommand::Revert(revert) => revert.run(connection, migrator).await?, 106 | } 107 | Ok(()) 108 | } 109 | } 110 | 111 | async fn drop_migrations( 112 | connection: &mut ::Connection, 113 | migrator: Box>, 114 | ) -> Result<(), Error> 115 | where 116 | DB: Database, 117 | { 118 | migrator.ensure_migration_table_exists(connection).await?; 119 | if !migrator 120 | .fetch_applied_migration_from_db(connection) 121 | .await? 122 | .is_empty() 123 | { 124 | return Err(Error::AppliedMigrationExists); 125 | } 126 | migrator.drop_migration_table_if_exists(connection).await?; 127 | println!("Dropped migrations table"); 128 | Ok(()) 129 | } 130 | 131 | async fn list_migrations( 132 | connection: &mut ::Connection, 133 | migrator: Box>, 134 | ) -> Result<(), Error> 135 | where 136 | DB: Database, 137 | { 138 | let migration_plan = migrator.generate_migration_plan(connection, None).await?; 139 | 140 | let apply_plan = migrator 141 | .generate_migration_plan(connection, Some(&Plan::apply_all())) 142 | .await?; 143 | let applied_migrations = migrator.fetch_applied_migration_from_db(connection).await?; 144 | 145 | let widths = [5, 10, 50, 10, 40]; 146 | let full_width = widths.iter().sum::() + widths.len() * 3; 147 | 148 | let first_width = widths[0]; 149 | let second_width = widths[1]; 150 | let third_width = widths[2]; 151 | let fourth_width = widths[3]; 152 | let fifth_width = widths[4]; 153 | 154 | println!( 155 | "{:^first_width$} | {:^second_width$} | {:^third_width$} | {:^fourth_width$} | \ 156 | {:^fifth_width$}", 157 | "ID", "App", "Name", "Status", "Applied time" 158 | ); 159 | 160 | println!("{:^full_width$}", "-".repeat(full_width)); 161 | for migration in migration_plan { 162 | let mut id = String::from("N/A"); 163 | let mut status = "\u{2717}"; 164 | let mut applied_time = String::from("N/A"); 165 | 166 | let find_applied_migrations = applied_migrations 167 | .iter() 168 | .find(|&applied_migration| applied_migration == migration); 169 | 170 | if let Some(sqlx_migration) = find_applied_migrations { 171 | id = sqlx_migration.id().to_string(); 172 | status = "\u{2713}"; 173 | applied_time = sqlx_migration.applied_time().to_string(); 174 | } else if !apply_plan.contains(&migration) { 175 | status = "\u{2194}"; 176 | } 177 | 178 | println!( 179 | "{:^first_width$} | {:^second_width$} | {:^third_width$} | {:^fourth_width$} | \ 180 | {:^fifth_width$}", 181 | id, 182 | migration.app(), 183 | migration.name(), 184 | status, 185 | applied_time 186 | ); 187 | } 188 | Ok(()) 189 | } 190 | 191 | #[derive(Parser, Debug)] 192 | #[expect(clippy::struct_excessive_bools)] 193 | struct Apply { 194 | /// App name up to which migration needs to be applied. 
If migration option 195 | /// is also present than only till migration is applied 196 | #[arg(long)] 197 | app: Option, 198 | /// Check for pending migration 199 | #[arg(long)] 200 | check: bool, 201 | /// Number of migration to apply. Conflicts with app args 202 | #[arg(long, conflicts_with = "app")] 203 | count: Option, 204 | /// Make migration applied without running migration operations 205 | #[arg(long)] 206 | fake: bool, 207 | /// Force run apply operation without asking question if migration is 208 | /// destructible 209 | #[arg(long)] 210 | force: bool, 211 | /// Apply migration till provided migration. Requires app options to be 212 | /// present 213 | #[arg(long, requires = "app")] 214 | migration: Option, 215 | /// Show plan 216 | #[arg(long)] 217 | plan: bool, 218 | } 219 | impl Apply { 220 | async fn run( 221 | &self, 222 | connection: &mut ::Connection, 223 | migrator: Box>, 224 | ) -> Result<(), Error> 225 | where 226 | DB: Database, 227 | { 228 | let plan; 229 | if let Some(count) = self.count { 230 | plan = Plan::apply_count(count); 231 | } else if let Some(app) = &self.app { 232 | plan = Plan::apply_name(app, &self.migration); 233 | } else { 234 | plan = Plan::apply_all(); 235 | } 236 | let plan = plan.fake(self.fake); 237 | let migrations = migrator 238 | .generate_migration_plan(connection, Some(&plan)) 239 | .await?; 240 | if self.check && !migrations.is_empty() { 241 | return Err(Error::PendingMigrationPresent); 242 | } 243 | if self.plan { 244 | if migrations.is_empty() { 245 | println!("No migration exists for applying"); 246 | } else { 247 | let first_width = 10; 248 | let second_width = 50; 249 | let full_width = first_width + second_width + 3; 250 | println!("{:^first_width$} | {:^second_width$}", "App", "Name"); 251 | println!("{:^full_width$}", "-".repeat(full_width)); 252 | for migration in migrations { 253 | println!( 254 | "{:^first_width$} | {:^second_width$}", 255 | migration.app(), 256 | migration.name(), 257 | ); 258 | } 259 | } 260 | } else { 261 | let destructible_migrations = migrations 262 | .iter() 263 | .filter(|m| m.operations().iter().any(|o| o.is_destructible())) 264 | .collect::>(); 265 | if !self.force && !destructible_migrations.is_empty() && !self.fake { 266 | let mut input = String::new(); 267 | println!( 268 | "Do you want to apply destructible migrations {} (y/N)", 269 | destructible_migrations.len() 270 | ); 271 | for (position, migration) in destructible_migrations.iter().enumerate() { 272 | println!("{position}. {} : {}", migration.app(), migration.name()); 273 | } 274 | std::io::stdout().flush()?; 275 | std::io::stdin().read_line(&mut input)?; 276 | let input_trimmed = input.trim().to_ascii_lowercase(); 277 | // If answer is not y or yes then return 278 | if !["y", "yes"].contains(&input_trimmed.as_str()) { 279 | return Ok(()); 280 | } 281 | } 282 | migrator.run(connection, &plan).await?; 283 | println!("Successfully applied migrations according to plan"); 284 | } 285 | Ok(()) 286 | } 287 | } 288 | 289 | #[derive(Parser, Debug)] 290 | #[expect(clippy::struct_excessive_bools)] 291 | struct Revert { 292 | /// Revert all migration. Conflicts with app args 293 | #[arg(long, conflicts_with = "app")] 294 | all: bool, 295 | /// Revert migration till app migrations is reverted. If it is present 296 | /// alongside migration options than only till migration is reverted 297 | #[arg(long)] 298 | app: Option, 299 | /// Number of migration to revert. 
Conflicts with all and app args 300 | #[arg(long, conflicts_with_all = ["all", "app"])] 301 | count: Option, 302 | /// Make migration reverted without running revert operation 303 | #[arg(long)] 304 | fake: bool, 305 | /// Force run revert operation without asking question 306 | #[arg(long)] 307 | force: bool, 308 | /// Revert migration till provided migration. Requires app options to be 309 | /// present 310 | #[arg(long, requires = "app")] 311 | migration: Option, 312 | /// Show plan 313 | #[arg(long)] 314 | plan: bool, 315 | } 316 | impl Revert { 317 | async fn run( 318 | &self, 319 | connection: &mut ::Connection, 320 | migrator: Box>, 321 | ) -> Result<(), Error> 322 | where 323 | DB: Database, 324 | { 325 | let plan; 326 | if let Some(count) = self.count { 327 | plan = Plan::revert_count(count); 328 | } else if let Some(app) = &self.app { 329 | plan = Plan::revert_name(app, &self.migration); 330 | } else if self.all { 331 | plan = Plan::revert_all(); 332 | } else { 333 | plan = Plan::revert_count(1); 334 | } 335 | let plan = plan.fake(self.fake); 336 | let revert_migrations = migrator 337 | .generate_migration_plan(connection, Some(&plan)) 338 | .await?; 339 | 340 | if self.plan { 341 | if revert_migrations.is_empty() { 342 | println!("No migration exists for reverting"); 343 | } else { 344 | let first_width = 10; 345 | let second_width = 50; 346 | let full_width = first_width + second_width + 3; 347 | println!("{:^first_width$} | {:^second_width$}", "App", "Name"); 348 | println!("{:^full_width$}", "-".repeat(full_width)); 349 | for migration in revert_migrations { 350 | println!( 351 | "{:^first_width$} | {:^second_width$}", 352 | migration.app(), 353 | migration.name(), 354 | ); 355 | } 356 | } 357 | } else { 358 | if !self.force && !revert_migrations.is_empty() && !self.fake { 359 | let mut input = String::new(); 360 | println!( 361 | "Do you want to revert {} migrations (y/N)", 362 | revert_migrations.len() 363 | ); 364 | for (position, migration) in revert_migrations.iter().enumerate() { 365 | println!("{position}. {} : {}", migration.app(), migration.name()); 366 | } 367 | std::io::stdout().flush()?; 368 | std::io::stdin().read_line(&mut input)?; 369 | let input_trimmed = input.trim().to_ascii_lowercase(); 370 | // If answer is not y or yes then return 371 | if !["y", "yes"].contains(&input_trimmed.as_str()) { 372 | return Ok(()); 373 | } 374 | } 375 | migrator.run(connection, &plan).await?; 376 | println!("Successfully reverted migrations according to plan"); 377 | } 378 | Ok(()) 379 | } 380 | } 381 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | //! 
Module for library error 2 | 3 | /// Error enum to store different types of error 4 | #[derive(Debug, thiserror::Error)] 5 | #[non_exhaustive] 6 | pub enum Error { 7 | /// Error type created from error raised by sqlx 8 | #[error(transparent)] 9 | Sqlx(#[from] sqlx::Error), 10 | /// Error type created from error raised by box error 11 | #[error(transparent)] 12 | Box(#[from] Box), 13 | /// Error type created from error raised by std input output 14 | #[cfg(feature = "cli")] 15 | #[error(transparent)] 16 | StdIo(#[from] std::io::Error), 17 | /// Error generated during planning state 18 | #[error("plan error: {message}")] 19 | PlanError { 20 | /// Message for error 21 | message: String, 22 | }, 23 | /// Error for irreversible operation 24 | #[error("operation is irreversible")] 25 | IrreversibleOperation, 26 | /// Error for pending migration present 27 | #[cfg(feature = "cli")] 28 | #[error("pending migration present")] 29 | PendingMigrationPresent, 30 | /// Error when applied migrations exists 31 | #[cfg(feature = "cli")] 32 | #[error("applied migrations exists. Revert all using revert subcommand")] 33 | AppliedMigrationExists, 34 | /// Error when unsupported database is used as any database 35 | #[error("database not supported")] 36 | UnsupportedDatabase, 37 | /// Error when table prefix is invalid 38 | #[error("table prefix name can only contain [a-z0-9_]")] 39 | InvalidTablePrefix, 40 | /// Error when passed schema name is invalid 41 | #[error("schema name can only contain [a-z0-9_] and begin with [a-z_]")] 42 | InvalidSchema, 43 | /// Error raised when two migration with same name are added and there value 44 | /// is not consistent 45 | #[error("migration for app: {app} with name: {name} consists of inconsistent values")] 46 | InconsistentMigration { 47 | /// Migration application name 48 | app: String, 49 | /// Migration name 50 | name: String, 51 | }, 52 | /// Error raised when virtual migration is invalid virtual migration is 53 | /// invalid if it have any fields present expect app name and migration name 54 | #[error("invalid virtual migration")] 55 | InvalidVirtualMigration, 56 | } 57 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(docsrs, feature(doc_auto_cfg))] 2 | 3 | //! Library to create sqlx migration using rust code instead of sql. 4 | //! 5 | //! Check `README.MD` for more detailed information of how to use a crate 6 | //! and visit [`Operation`], [`Migration`] and [`Migrator`] 7 | 8 | #[cfg(feature = "cli")] 9 | pub use crate::cli::MigrationCommand; 10 | pub use crate::error::Error; 11 | pub use crate::migration::Migration; 12 | pub use crate::migrator::{Info, Migrate, Migrator, Plan}; 13 | pub use crate::operation::Operation; 14 | 15 | #[cfg(feature = "cli")] 16 | pub mod cli; 17 | pub mod error; 18 | mod macros; 19 | pub mod migration; 20 | pub mod migrator; 21 | pub mod operation; 22 | -------------------------------------------------------------------------------- /src/macros.rs: -------------------------------------------------------------------------------- 1 | /// Macro for vector of [`Box`] 2 | #[macro_export] 3 | macro_rules! 
vec_box { 4 | ($elem:expr; $n:expr) => (vec![Box::new($elem); $n]); 5 | ($($x:expr),*) => (vec![$(Box::new($x)),*]); 6 | ($($x:expr,)*) => (vec![$(Box::new($x)),*]); 7 | ($($x:expr,)*) => (sqlx_migrator::vec_box![$($x),*]); 8 | } 9 | 10 | /// Macro for implementing the [Migration](crate::migration::Migration) trait 11 | /// for the provided database. 12 | /// 13 | /// This macro will use current file name as name for migration 14 | /// 15 | /// This macro expects the following arguments: 16 | /// - `$db:ty`: the type of database 17 | /// - `$op:ty`: The type for which the migration is being implemented 18 | /// - `$app_name:literal`: Name of app to be used for app variable 19 | /// - `$migration_name:literal`: Name of app to be used for app variable 20 | /// - `$parents:expr`: List of parents migration. 21 | /// - `$operations:expr`: List of operations 22 | #[macro_export] 23 | macro_rules! migration { 24 | ( 25 | $db:ty, $op:ty, $app_name:literal, $migration_name:literal, $parents:expr, $operations:expr 26 | ) => { 27 | impl sqlx_migrator::migration::Migration<$db> for $op { 28 | fn app(&self) -> &str { 29 | $app_name 30 | } 31 | 32 | fn name(&self) -> &str { 33 | $migration_name 34 | } 35 | 36 | fn parents(&self) -> Vec>> { 37 | $parents 38 | } 39 | 40 | fn operations(&self) -> Vec>> { 41 | $operations 42 | } 43 | } 44 | }; 45 | } 46 | 47 | /// Macro for implementing the [`migration`] macro for the `Any`. 48 | /// 49 | /// This macro calls [`migration`] macro with db value already set as 50 | /// `sqlx::Any` 51 | #[macro_export] 52 | #[cfg(all( 53 | any(feature = "postgres", feature = "mysql", feature = "sqlite"), 54 | feature = "any" 55 | ))] 56 | macro_rules! any_migration { 57 | ($op:ty, $app_name:expr, $migration_name:expr, $parents:expr, $operations:expr) => { 58 | sqlx_migrator::migration!( 59 | sqlx::Any, 60 | $op, 61 | $app_name, 62 | $migration_name, 63 | $parents, 64 | $operations 65 | ); 66 | }; 67 | } 68 | 69 | /// Macro for implementing the [`migration`] macro for the `MySql`. 70 | /// 71 | /// This macro calls [`migration`] macro with db value already set as 72 | /// `sqlx::MySql` 73 | #[macro_export] 74 | #[cfg(feature = "mysql")] 75 | macro_rules! mysql_migration { 76 | ($op:ty, $app_name:expr, $migration_name:expr, $parents:expr, $operations:expr) => { 77 | sqlx_migrator::migration!( 78 | sqlx::MySql, 79 | $op, 80 | $app_name, 81 | $migration_name, 82 | $parents, 83 | $operations 84 | ); 85 | }; 86 | } 87 | 88 | /// Macro for implementing the [`migration`] macro for the `Postgres`. 89 | /// 90 | /// This macro calls [`migration`] macro with db value already set as 91 | /// `sqlx::Postgres` 92 | #[macro_export] 93 | #[cfg(feature = "postgres")] 94 | macro_rules! postgres_migration { 95 | ($op:ty, $app_name:expr, $migration_name:expr, $parents:expr, $operations:expr) => { 96 | sqlx_migrator::migration!( 97 | sqlx::Postgres, 98 | $op, 99 | $app_name, 100 | $migration_name, 101 | $parents, 102 | $operations 103 | ); 104 | }; 105 | } 106 | 107 | /// Macro for implementing the [`migration`] macro for the `Sqlite`. 108 | /// 109 | /// This macro calls [`migration`] macro with db value already set as 110 | /// `sqlx::Sqlite` 111 | #[macro_export] 112 | #[cfg(feature = "sqlite")] 113 | macro_rules! 
sqlite_migration { 114 | ($op:ty, $app_name:expr, $migration_name:expr, $parents:expr, $operations:expr) => { 115 | sqlx_migrator::migration!( 116 | sqlx::Sqlite, 117 | $op, 118 | $app_name, 119 | $migration_name, 120 | $parents, 121 | $operations 122 | ); 123 | }; 124 | } 125 | -------------------------------------------------------------------------------- /src/migration.rs: -------------------------------------------------------------------------------- 1 | //! Module for defining the [`Migration`] trait, which represents a database 2 | //! migration. 3 | //! 4 | //! This module provides the necessary abstractions for defining migrations 5 | #![cfg_attr( 6 | feature = "sqlite", 7 | doc = r#" 8 | To create own implement migration trait for type 9 | 10 | ### Example 11 | ```rust,no_run 12 | use sqlx_migrator::error::Error; 13 | use sqlx_migrator::migration::Migration; 14 | use sqlx_migrator::operation::Operation; 15 | use sqlx::Sqlite; 16 | 17 | struct ExampleMigration; 18 | 19 | impl Migration for ExampleMigration { 20 | fn app(&self) -> &str { 21 | "example" 22 | } 23 | 24 | fn name(&self) -> &str { 25 | "first_migration" 26 | } 27 | 28 | fn parents(&self) -> Vec>> { 29 | vec![] 30 | } 31 | 32 | fn operations(&self) -> Vec>> { 33 | vec![] 34 | } 35 | 36 | fn replaces(&self) -> Vec>> { 37 | vec![] 38 | } 39 | 40 | fn run_before(&self) -> Vec>> { 41 | vec![] 42 | } 43 | 44 | fn is_atomic(&self) -> bool { 45 | true 46 | } 47 | 48 | fn is_virtual(&self) -> bool { 49 | false 50 | } 51 | } 52 | ``` 53 | "# 54 | )] 55 | 56 | use std::hash::Hash; 57 | 58 | use crate::operation::Operation; 59 | 60 | /// Trait for defining database migration 61 | /// 62 | /// A migration represents a set of operations that can be applied to or 63 | /// reverted from a database. Each migration has an associated application name, 64 | /// migration name, and may depend on other migrations. 65 | /// 66 | /// Migrations can also replace existing migrations, enforce ordering with 67 | /// run before and parents, and control atomicity and virtualization. 68 | /// 69 | /// Migration trait is implemented for `(A,N) where A: AsRef, N: 70 | /// AsRef` where A is the app name and N is the name of the migration. You 71 | /// can use migration in this form in `parents`, `replaces` and `run_before` if 72 | /// you cannot reference migration or create migration easily 73 | pub trait Migration: Send + Sync { 74 | /// Returns the application name associated with the migration. 75 | /// This can be the name of the folder or library where the migration is 76 | /// located. 77 | /// 78 | /// This value is used in combination with the migration name to uniquely 79 | /// identify a migration. 80 | fn app(&self) -> &str; 81 | 82 | /// Returns the migration name, typically the file name without the 83 | /// extension. 84 | /// 85 | /// This value, together with the application name, is used to uniquely 86 | /// identify a migration and determine equality between migrations. 87 | fn name(&self) -> &str; 88 | 89 | /// Returns the list of parent migrations. 90 | /// 91 | /// Parent migrations must be applied before this migration can be applied. 92 | /// If no parent migrations are required, return an empty vector. 93 | fn parents(&self) -> Vec>>; 94 | 95 | /// Returns the operations associated with this migration. 96 | /// 97 | /// A migration can include multiple operations (e.g., create, drop) that 98 | /// are related. 99 | fn operations(&self) -> Vec>>; 100 | 101 | /// Returns the list of migrations that this migration replaces. 
102 | /// 103 | /// If any of these migrations have been applied, this migration will not be 104 | /// applied. If not, it will either be applied or reverted in place of 105 | /// those migrations. 106 | /// 107 | /// The default implementation returns an empty vector. 108 | fn replaces(&self) -> Vec>> { 109 | vec![] 110 | } 111 | 112 | /// Returns the list of migrations that this migration must run before(when 113 | /// applying) or after (when reverting). 114 | /// 115 | /// This can be useful when a migration from another library needs to be 116 | /// applied after this migration or reverted before this migration. 117 | /// 118 | /// The default implementation returns an empty vector. 119 | fn run_before(&self) -> Vec>> { 120 | vec![] 121 | } 122 | 123 | /// Indicates whether the migration is atomic. 124 | /// By default, this function returns `true`, meaning the migration is 125 | /// atomic. 126 | /// 127 | /// If the migration is non-atomic, all its operations will be non-atomic as 128 | /// well. For migrations requiring mixed atomicity, it's recommended to 129 | /// split them into separate migrations, each handling atomic and 130 | /// non-atomic operations respectively. 131 | fn is_atomic(&self) -> bool { 132 | true 133 | } 134 | 135 | /// Indicates whether the migration is virtual. 136 | /// By default, this function returns `false`, meaning the migration is not 137 | /// virtual. 138 | /// 139 | /// A virtual migration serves as a reference to another migration with the 140 | /// same app and name. If the migration is virtual, all other methods 141 | /// are ignored expect its application name and its own name to check with 142 | /// non virtual migration so such non virtual migration can be used in its 143 | /// place. 144 | fn is_virtual(&self) -> bool { 145 | false 146 | } 147 | } 148 | 149 | impl PartialEq for dyn Migration { 150 | fn eq(&self, other: &Self) -> bool { 151 | self.app() == other.app() && self.name() == other.name() 152 | } 153 | } 154 | 155 | impl Eq for dyn Migration {} 156 | 157 | impl Hash for dyn Migration { 158 | fn hash(&self, state: &mut H) { 159 | self.app().hash(state); 160 | self.name().hash(state); 161 | } 162 | } 163 | 164 | impl Migration for (A, N) 165 | where 166 | A: AsRef + Send + Sync, 167 | N: AsRef + Send + Sync, 168 | { 169 | fn app(&self) -> &str { 170 | self.0.as_ref() 171 | } 172 | 173 | fn name(&self) -> &str { 174 | self.1.as_ref() 175 | } 176 | 177 | fn parents(&self) -> Vec>> { 178 | vec![] 179 | } 180 | 181 | fn operations(&self) -> Vec>> { 182 | vec![] 183 | } 184 | 185 | fn is_virtual(&self) -> bool { 186 | true 187 | } 188 | } 189 | 190 | /// Struct representing a migration row from the database. 191 | /// 192 | /// This struct corresponds to the id, app, name, and applied time fields in the 193 | /// database. It is used to list the migrations that have been applied. 
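///
/// Equality between a row and a migration is decided by the `app`/`name`
/// pair, so fetched rows can be matched against in-code migrations. A
/// minimal sketch, assuming `applied_rows` comes from
/// `fetch_applied_migration_from_db` and `migration` is a boxed migration:
/// ```rust,ignore
/// let already_applied = applied_rows.iter().any(|row| row == &migration);
/// ```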
194 | #[derive(sqlx::FromRow, Clone)] 195 | pub struct AppliedMigrationSqlRow { 196 | id: i32, 197 | app: String, 198 | name: String, 199 | applied_time: String, 200 | } 201 | 202 | impl AppliedMigrationSqlRow { 203 | #[cfg(test)] 204 | pub(crate) fn new(id: i32, app: &str, name: &str) -> Self { 205 | Self { 206 | id, 207 | app: app.to_string(), 208 | name: name.to_string(), 209 | applied_time: String::new(), 210 | } 211 | } 212 | } 213 | 214 | impl AppliedMigrationSqlRow { 215 | /// Return id value present on database 216 | #[must_use] 217 | pub fn id(&self) -> i32 { 218 | self.id 219 | } 220 | 221 | /// Return migration applied time 222 | #[must_use] 223 | pub fn applied_time(&self) -> &str { 224 | &self.applied_time 225 | } 226 | } 227 | 228 | impl PartialEq>> for AppliedMigrationSqlRow { 229 | fn eq(&self, other: &Box>) -> bool { 230 | self.app == other.app() && self.name == other.name() 231 | } 232 | } 233 | -------------------------------------------------------------------------------- /src/migrator/any.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "mysql")] 2 | use sqlx::MySql; 3 | #[cfg(feature = "postgres")] 4 | use sqlx::Postgres; 5 | #[cfg(feature = "sqlite")] 6 | use sqlx::Sqlite; 7 | use sqlx::any::AnyArguments; 8 | use sqlx::{Any, Arguments, Database}; 9 | 10 | #[cfg(feature = "mysql")] 11 | use super::mysql; 12 | #[cfg(feature = "postgres")] 13 | use super::postgres; 14 | #[cfg(feature = "sqlite")] 15 | use super::sqlite; 16 | use super::{DatabaseOperation, Migrator}; 17 | use crate::error::Error; 18 | use crate::migration::{AppliedMigrationSqlRow, Migration}; 19 | 20 | /// get database name 21 | async fn get_database_name( 22 | connection: &mut ::Connection, 23 | ) -> Result, Error> { 24 | let backend_name = connection.backend_name(); 25 | let database_name_query = match backend_name { 26 | #[cfg(feature = "postgres")] 27 | ::NAME => Some(postgres::current_database_query()), 28 | #[cfg(feature = "sqlite")] 29 | ::NAME => None, 30 | #[cfg(feature = "mysql")] 31 | ::NAME => Some(mysql::current_database_query()), 32 | _ => return Err(Error::UnsupportedDatabase), 33 | }; 34 | if let Some(sql) = database_name_query { 35 | let (database_name,) = sqlx::query_as::<_, (String,)>(sql) 36 | .fetch_one(connection) 37 | .await?; 38 | return Ok(Some(database_name)); 39 | } 40 | Ok(None) 41 | } 42 | 43 | #[async_trait::async_trait] 44 | impl DatabaseOperation for Migrator { 45 | async fn ensure_migration_table_exists( 46 | &self, 47 | connection: &mut ::Connection, 48 | ) -> Result<(), Error> { 49 | let sql_query = match connection.backend_name() { 50 | #[cfg(feature = "postgres")] 51 | ::NAME => { 52 | postgres::create_migrator_table_query(&self.table_name()) 53 | } 54 | #[cfg(feature = "sqlite")] 55 | ::NAME => sqlite::create_migrator_table_query(&self.table_name()), 56 | #[cfg(feature = "mysql")] 57 | ::NAME => mysql::create_migrator_table_query(&self.table_name()), 58 | _ => return Err(Error::UnsupportedDatabase), 59 | }; 60 | sqlx::query(&sql_query).execute(connection).await?; 61 | Ok(()) 62 | } 63 | 64 | async fn drop_migration_table_if_exists( 65 | &self, 66 | connection: &mut ::Connection, 67 | ) -> Result<(), Error> { 68 | let sql_query = match connection.backend_name() { 69 | #[cfg(feature = "postgres")] 70 | ::NAME => postgres::drop_table_query(&self.table_name()), 71 | #[cfg(feature = "sqlite")] 72 | ::NAME => sqlite::drop_table_query(&self.table_name()), 73 | #[cfg(feature = "mysql")] 74 | ::NAME => 
mysql::drop_table_query(&self.table_name()), 75 | _ => return Err(Error::UnsupportedDatabase), 76 | }; 77 | sqlx::query(&sql_query).execute(connection).await?; 78 | Ok(()) 79 | } 80 | 81 | async fn add_migration_to_db_table( 82 | &self, 83 | connection: &mut ::Connection, 84 | migration: &Box>, 85 | ) -> Result<(), Error> { 86 | let sql_query = match connection.backend_name() { 87 | #[cfg(feature = "postgres")] 88 | ::NAME => postgres::add_migration_query(&self.table_name()), 89 | #[cfg(feature = "sqlite")] 90 | ::NAME => sqlite::add_migration_query(&self.table_name()), 91 | #[cfg(feature = "mysql")] 92 | ::NAME => mysql::add_migration_query(&self.table_name()), 93 | _ => return Err(Error::UnsupportedDatabase), 94 | }; 95 | sqlx::query(&sql_query) 96 | .bind(migration.app()) 97 | .bind(migration.name()) 98 | .execute(connection) 99 | .await?; 100 | Ok(()) 101 | } 102 | 103 | async fn delete_migration_from_db_table( 104 | &self, 105 | connection: &mut ::Connection, 106 | migration: &Box>, 107 | ) -> Result<(), Error> { 108 | let sql_query = match connection.backend_name() { 109 | #[cfg(feature = "postgres")] 110 | ::NAME => postgres::delete_migration_query(&self.table_name()), 111 | #[cfg(feature = "sqlite")] 112 | ::NAME => sqlite::delete_migration_query(&self.table_name()), 113 | #[cfg(feature = "mysql")] 114 | ::NAME => mysql::delete_migration_query(&self.table_name()), 115 | _ => return Err(Error::UnsupportedDatabase), 116 | }; 117 | sqlx::query(&sql_query) 118 | .bind(migration.app()) 119 | .bind(migration.name()) 120 | .execute(connection) 121 | .await?; 122 | Ok(()) 123 | } 124 | 125 | async fn fetch_applied_migration_from_db( 126 | &self, 127 | connection: &mut ::Connection, 128 | ) -> Result, Error> { 129 | let backend_name = connection.backend_name(); 130 | let query = match backend_name { 131 | #[cfg(feature = "postgres")] 132 | ::NAME => postgres::fetch_rows_query(&self.table_name()), 133 | #[cfg(feature = "sqlite")] 134 | ::NAME => sqlite::fetch_rows_query(&self.table_name()), 135 | #[cfg(feature = "mysql")] 136 | ::NAME => mysql::fetch_rows_query(&self.table_name()), 137 | _ => return Err(Error::UnsupportedDatabase), 138 | }; 139 | Ok(sqlx::query_as::<_, AppliedMigrationSqlRow>(&query) 140 | .fetch_all(connection) 141 | .await?) 
142 | } 143 | 144 | async fn lock(&self, connection: &mut ::Connection) -> Result<(), Error> { 145 | let database_name = get_database_name(connection).await?; 146 | if let Some(name) = database_name { 147 | let mut arguments = AnyArguments::default(); 148 | let query = match connection.backend_name() { 149 | #[cfg(feature = "postgres")] 150 | ::NAME => { 151 | arguments.add(postgres::get_lock_id(&name, &self.table_name()))?; 152 | postgres::lock_database_query() 153 | } 154 | #[cfg(feature = "sqlite")] 155 | ::NAME => return Ok(()), 156 | #[cfg(feature = "mysql")] 157 | ::NAME => { 158 | arguments.add(mysql::get_lock_id(&name, &self.table_name()))?; 159 | mysql::lock_database_query() 160 | } 161 | _ => return Err(Error::UnsupportedDatabase), 162 | }; 163 | sqlx::query_with(query, arguments) 164 | .execute(connection) 165 | .await?; 166 | } 167 | Ok(()) 168 | } 169 | 170 | async fn unlock(&self, connection: &mut ::Connection) -> Result<(), Error> { 171 | let database_name = get_database_name(connection).await?; 172 | if let Some(name) = database_name { 173 | let mut arguments = AnyArguments::default(); 174 | let query = match connection.backend_name() { 175 | #[cfg(feature = "postgres")] 176 | ::NAME => { 177 | arguments.add(postgres::get_lock_id(&name, &self.table_name()))?; 178 | postgres::unlock_database_query() 179 | } 180 | #[cfg(feature = "sqlite")] 181 | ::NAME => return Ok(()), 182 | #[cfg(feature = "mysql")] 183 | ::NAME => { 184 | arguments.add(mysql::get_lock_id(&name, &self.table_name()))?; 185 | mysql::unlock_database_query() 186 | } 187 | _ => return Err(Error::UnsupportedDatabase), 188 | }; 189 | sqlx::query_with(query, arguments) 190 | .execute(connection) 191 | .await?; 192 | } 193 | Ok(()) 194 | } 195 | } 196 | -------------------------------------------------------------------------------- /src/migrator/mod.rs: -------------------------------------------------------------------------------- 1 | //! Migrator module 2 | //! 3 | //! It contains common enum and trait for implementing migrator for sqlx 4 | //! supported database 5 | //! 6 | //! It also provides its own struct [`Migrator`] which supports 7 | //! [`Any`](sqlx::Any), [`Postgres`](sqlx::Postgres), [`Sqlite`](sqlx::Sqlite) 8 | //! 
and [`MySql`](sqlx::MySql) database 9 | #![cfg_attr( 10 | feature = "postgres", 11 | doc = r#" 12 | # Example 13 | Create own custom Migrator which only supports postgres and uses own unique 14 | table name instead of default table name 15 | 16 | ```rust,no_run 17 | use sqlx::{Database, Pool, Postgres}; 18 | use sqlx_migrator::error::Error; 19 | use sqlx_migrator::migration::{AppliedMigrationSqlRow, Migration}; 20 | use sqlx_migrator::migrator::{DatabaseOperation, Info, Migrate}; 21 | 22 | #[derive(Default)] 23 | pub struct CustomMigrator { 24 | migrations: Vec>>, 25 | } 26 | 27 | impl Info for CustomMigrator { 28 | fn migrations(&self) -> &Vec>> { 29 | &self.migrations 30 | } 31 | 32 | fn migrations_mut(&mut self) -> &mut Vec>> { 33 | &mut self.migrations 34 | } 35 | } 36 | 37 | #[async_trait::async_trait] 38 | impl DatabaseOperation for CustomMigrator { 39 | async fn ensure_migration_table_exists( 40 | &self, 41 | connection: &mut ::Connection, 42 | ) -> Result<(), Error> { 43 | sqlx::query( 44 | "CREATE TABLE IF NOT EXISTS _custom_table_name ( 45 | id INT PRIMARY KEY NOT NULL GENERATED ALWAYS AS IDENTITY, 46 | app TEXT NOT NULL, 47 | name TEXT NOT NULL, 48 | applied_time TIMESTAMPTZ NOT NULL DEFAULT now(), 49 | UNIQUE (app, name) 50 | )", 51 | ) 52 | .execute(connection) 53 | .await?; 54 | Ok(()) 55 | } 56 | 57 | async fn drop_migration_table_if_exists( 58 | &self, 59 | connection: &mut ::Connection, 60 | ) -> Result<(), Error> { 61 | sqlx::query("DROP TABLE IF EXISTS _custom_table_name") 62 | .execute(connection) 63 | .await?; 64 | Ok(()) 65 | } 66 | 67 | async fn add_migration_to_db_table( 68 | &self, 69 | connection: &mut ::Connection, 70 | migration: &Box>, 71 | ) -> Result<(), Error> { 72 | sqlx::query("INSERT INTO _custom_table_name(app, name) VALUES ($1, $2)") 73 | .bind(migration.app()) 74 | .bind(migration.name()) 75 | .execute(connection) 76 | .await?; 77 | Ok(()) 78 | } 79 | 80 | async fn delete_migration_from_db_table( 81 | &self, 82 | connection: &mut ::Connection, 83 | migration: &Box>, 84 | ) -> Result<(), Error> { 85 | sqlx::query("DELETE FROM _custom_table_name WHERE app = $1 AND name = $2") 86 | .bind(migration.app()) 87 | .bind(migration.name()) 88 | .execute(connection) 89 | .await?; 90 | Ok(()) 91 | } 92 | 93 | async fn fetch_applied_migration_from_db( 94 | &self, 95 | connection: &mut ::Connection, 96 | ) -> Result, Error> { 97 | Ok(sqlx::query_as::<_, AppliedMigrationSqlRow>( 98 | "SELECT id, app, name, applied_time FROM _custom_table_name", 99 | ) 100 | .fetch_all(connection) 101 | .await?) 
102 | } 103 | 104 | async fn lock( 105 | &self, 106 | connection: &mut ::Connection, 107 | ) -> Result<(), Error> { 108 | let (database_name,): (String,) = sqlx::query_as("SELECT CURRENT_DATABASE()") 109 | .fetch_one(&mut *connection) 110 | .await?; 111 | let lock_id = i64::from(crc32fast::hash(database_name.as_bytes())); 112 | sqlx::query("SELECT pg_advisory_lock($1)") 113 | .bind(lock_id) 114 | .execute(connection) 115 | .await?; 116 | Ok(()) 117 | } 118 | 119 | async fn unlock( 120 | &self, 121 | connection: &mut ::Connection, 122 | ) -> Result<(), Error> { 123 | let (database_name,): (String,) = sqlx::query_as("SELECT CURRENT_DATABASE()") 124 | .fetch_one(&mut *connection) 125 | .await?; 126 | let lock_id = i64::from(crc32fast::hash(database_name.as_bytes())); 127 | sqlx::query("SELECT pg_advisory_unlock($1)") 128 | .bind(lock_id) 129 | .execute(connection) 130 | .await?; 131 | Ok(()) 132 | } 133 | } 134 | impl Migrate for CustomMigrator {} 135 | ``` 136 | "# 137 | )] 138 | 139 | use std::collections::HashMap; 140 | 141 | use sqlx::{Connection, Database}; 142 | 143 | use crate::error::Error; 144 | use crate::migration::{AppliedMigrationSqlRow, Migration}; 145 | 146 | /// Any database module which support mysql, sqlite and postgres by default 147 | #[cfg(all( 148 | any(feature = "postgres", feature = "mysql", feature = "sqlite"), 149 | feature = "any" 150 | ))] 151 | mod any; 152 | 153 | /// Module for mysql 154 | #[cfg(feature = "mysql")] 155 | mod mysql; 156 | 157 | /// Module for sqlite 158 | #[cfg(feature = "sqlite")] 159 | mod sqlite; 160 | 161 | /// Module for postgres 162 | #[cfg(feature = "postgres")] 163 | mod postgres; 164 | 165 | /// Module for testing 166 | #[cfg(all(test, feature = "sqlite"))] 167 | mod tests; 168 | 169 | type BoxMigration = Box>; 170 | type MigrationVec<'migration, DB> = Vec<&'migration BoxMigration>; 171 | type MigrationVecResult<'migration, DB> = Result, Error>; 172 | 173 | #[derive(Debug)] 174 | enum PlanType { 175 | Apply, 176 | Revert, 177 | } 178 | 179 | /// Struct that determines the type of migration plan to execute. 180 | /// 181 | /// A [`Plan`] can specify whether to apply or revert migrations, and may target 182 | /// all migrations, specific migrations, or a limited number of migrations. 183 | #[derive(Debug)] 184 | pub struct Plan { 185 | #[expect( 186 | clippy::struct_field_names, 187 | reason = "type is a keyword so it cannot be used" 188 | )] 189 | plan_type: PlanType, 190 | app_migration: Option<(String, Option)>, 191 | count: Option, 192 | fake: bool, 193 | } 194 | 195 | impl Plan { 196 | fn new( 197 | plan_type: PlanType, 198 | app_migration: Option<(String, Option)>, 199 | count: Option, 200 | ) -> Self { 201 | Self { 202 | plan_type, 203 | app_migration, 204 | count, 205 | fake: false, 206 | } 207 | } 208 | 209 | /// Sets the plan as a "fake" plan. 210 | /// 211 | /// When the plan is marked as fake, the migration status is updated to 212 | /// either "applied" or "reverted" without actually performing any 213 | /// migration operations. This is useful for scenarios where you want to 214 | /// simulate the effect of applying or reverting a migration, but 215 | /// without making changes to the database. 216 | /// 217 | /// By default, the `fake` flag is set to `false`, and the migration 218 | /// operations are executed as expected. 219 | #[must_use] 220 | pub fn fake(self, fake: bool) -> Self { 221 | let mut plan = self; 222 | plan.fake = fake; 223 | plan 224 | } 225 | 226 | /// Creates a new plan to apply all migrations. 
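///
/// A minimal sketch of feeding a plan to a migrator (the `migrator` and
/// `conn` values are assumed to be set up elsewhere):
/// ```rust,ignore
/// use sqlx_migrator::migrator::Plan;
///
/// let pending = migrator
///     .generate_migration_plan(&mut conn, Some(&Plan::apply_all()))
///     .await?;
/// ```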
227 | #[must_use] 228 | pub fn apply_all() -> Self { 229 | Self::new(PlanType::Apply, None, None) 230 | } 231 | 232 | /// Creates a new plan to apply a specific migration by name. If migration 233 | /// name is not provided it will apply app all migrations 234 | #[must_use] 235 | pub fn apply_name(app: &str, name: &Option) -> Self { 236 | Self::new(PlanType::Apply, Some((app.to_string(), name.clone())), None) 237 | } 238 | 239 | /// Creates a new plan to apply a limited number of migrations. 240 | #[must_use] 241 | pub fn apply_count(count: usize) -> Self { 242 | Self::new(PlanType::Apply, None, Some(count)) 243 | } 244 | 245 | /// Creates a new plan to revert all migrations. 246 | #[must_use] 247 | pub fn revert_all() -> Self { 248 | Self::new(PlanType::Revert, None, None) 249 | } 250 | 251 | /// Creates a new plan to revert a specific migration by name. If migration 252 | /// name is not provided it will revert app all migrations 253 | #[must_use] 254 | pub fn revert_name(app: &str, name: &Option) -> Self { 255 | Self::new( 256 | PlanType::Revert, 257 | Some((app.to_string(), name.clone())), 258 | None, 259 | ) 260 | } 261 | 262 | /// Creates a new plan to revert a limited number of migrations. 263 | #[must_use] 264 | pub fn revert_count(count: usize) -> Self { 265 | Self::new(PlanType::Revert, None, Some(count)) 266 | } 267 | } 268 | 269 | /// The [`Info`] trait provides database-agnostic methods for managing 270 | /// migrations and interacting with migration states. 271 | pub trait Info { 272 | /// Returns a reference to the list of migrations. 273 | fn migrations(&self) -> &Vec>; 274 | 275 | /// Returns a mutable reference to the list of migrations. 276 | fn migrations_mut(&mut self) -> &mut Vec>; 277 | 278 | /// Adds a list of migrations to the migrator. 279 | /// 280 | /// This method accepts a vector of migrations and adds each one 281 | /// individually to ensure proper handling of migration relationships 282 | /// and duplicates. 283 | /// 284 | /// # Errors 285 | /// If migration is added with same app and name but inconsistent value i.e 286 | /// its parents, run before, replaces and is atomic differ and do not have 287 | /// same number of operation 288 | fn add_migrations(&mut self, migrations: Vec>) -> Result<(), Error> { 289 | for migration in migrations { 290 | self.add_migration(migration)?; 291 | } 292 | Ok(()) 293 | } 294 | 295 | /// Adds a single migration to the migrator. 
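///
/// A minimal sketch, assuming `ExampleMigration` is a type implementing the
/// [`Migration`] trait for the chosen database:
/// ```rust,ignore
/// let mut migrator = sqlx_migrator::Migrator::<sqlx::Sqlite>::new();
/// migrator.add_migration(Box::new(ExampleMigration))?;
/// ```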
296 | /// 297 | /// # Errors 298 | /// If migration is added with same app and name but inconsistent value i.e 299 | /// its parents, run before, replaces and is atomic differ and do not have 300 | /// same number of operation 301 | fn add_migration(&mut self, migration: BoxMigration) -> Result<(), Error> { 302 | // only check old value if provided migration for adding is not virtual 303 | if migration.is_virtual() { 304 | if !migration.parents().is_empty() 305 | || !migration.operations().is_empty() 306 | || !migration.replaces().is_empty() 307 | || !migration.run_before().is_empty() 308 | { 309 | return Err(Error::InvalidVirtualMigration); 310 | } 311 | } else if let Some((migration_index, found_migration)) = self 312 | .migrations() 313 | .iter() 314 | .enumerate() 315 | .find(|(_, elem)| elem == &&migration) 316 | { 317 | // if virtual migration is present in list with same app and name than remove 318 | // virtual migration from list first 319 | if found_migration.is_virtual() { 320 | self.migrations_mut().remove(migration_index); 321 | } 322 | // if found migrations value are not consistent to current provided migration then 323 | // raise error only raise error when found migration is not virtual 324 | else if found_migration.parents() != migration.parents() 325 | || found_migration.operations().len() != migration.operations().len() 326 | || found_migration.replaces() != migration.replaces() 327 | || found_migration.run_before() != migration.run_before() 328 | || found_migration.is_atomic() != migration.is_atomic() 329 | { 330 | return Err(Error::InconsistentMigration { 331 | app: migration.app().to_string(), 332 | name: migration.name().to_string(), 333 | }); 334 | } 335 | } 336 | 337 | // check if migration is already added or not and only do operation if migration 338 | // is not added till now 339 | if !self.migrations().contains(&migration) { 340 | // ignore parents, replaces and run before for virtual migration only add 341 | // migration only. If virtual migration provides those value than it is ignored 342 | if migration.is_virtual() { 343 | self.migrations_mut().push(migration); 344 | } else { 345 | let migration_parents = migration.parents(); 346 | let migration_replaces = migration.replaces(); 347 | let migration_run_before = migration.run_before(); 348 | 349 | self.migrations_mut().push(migration); 350 | 351 | for parent in migration_parents { 352 | self.add_migration(parent)?; 353 | } 354 | for replace in migration_replaces { 355 | self.add_migration(replace)?; 356 | } 357 | for run_before in migration_run_before { 358 | self.add_migration(run_before)?; 359 | } 360 | } 361 | } 362 | Ok(()) 363 | } 364 | } 365 | 366 | /// The [`DatabaseOperation`] trait defines a set of methods for performing 367 | /// operations related to migration management on the database. 368 | /// 369 | /// This trait is typically implemented for a database to support migration 370 | /// operations, such as ensuring the migration table exists, adding or 371 | /// removing migrations from the table, and locking the database during 372 | /// migration processes. 373 | #[async_trait::async_trait] 374 | pub trait DatabaseOperation 375 | where 376 | DB: Database, 377 | { 378 | /// Ensure migration table is created before running migrations. 
If not 379 | /// create one 380 | async fn ensure_migration_table_exists( 381 | &self, 382 | connection: &mut ::Connection, 383 | ) -> Result<(), Error>; 384 | 385 | /// Drop migration table if migration table exists 386 | async fn drop_migration_table_if_exists( 387 | &self, 388 | connection: &mut ::Connection, 389 | ) -> Result<(), Error>; 390 | 391 | /// Adds a migration record to the migration table in the database. 392 | async fn add_migration_to_db_table( 393 | &self, 394 | connection: &mut ::Connection, 395 | migration: &BoxMigration, 396 | ) -> Result<(), Error>; 397 | 398 | /// Removes a migration record from the migration table in the database. 399 | async fn delete_migration_from_db_table( 400 | &self, 401 | connection: &mut ::Connection, 402 | migration: &BoxMigration, 403 | ) -> Result<(), Error>; 404 | 405 | /// Fetches the list of applied migrations from the migration table in the 406 | /// database. 407 | async fn fetch_applied_migration_from_db( 408 | &self, 409 | connection: &mut ::Connection, 410 | ) -> Result, Error>; 411 | 412 | /// Lock database while doing migrations so no two migrations run together 413 | async fn lock(&self, connection: &mut ::Connection) -> Result<(), Error>; 414 | 415 | /// Unlock locked database 416 | async fn unlock(&self, connection: &mut ::Connection) -> Result<(), Error>; 417 | } 418 | 419 | fn populate_replace_recursive<'populate, DB>( 420 | replace_hash_map: &mut HashMap<&'populate BoxMigration, Vec<&'populate BoxMigration>>, 421 | key: &'populate BoxMigration, 422 | value: &'populate BoxMigration, 423 | ) -> Result<(), Error> { 424 | // protect against a case where two migration replaces each other 425 | if key == value { 426 | return Err(Error::PlanError { 427 | message: "two migrations replaces each other".to_string(), 428 | }); 429 | } 430 | let replace_hash_map_vec = replace_hash_map.entry(key).or_default(); 431 | if !replace_hash_map_vec.contains(&value) { 432 | replace_hash_map_vec.push(value); 433 | } 434 | if let Some(grand_values) = replace_hash_map.clone().get(value) { 435 | for grand_value in grand_values { 436 | populate_replace_recursive(replace_hash_map, key, grand_value)?; 437 | } 438 | } 439 | Ok(()) 440 | } 441 | 442 | fn get_parent_recursive( 443 | migration: &BoxMigration, 444 | original_migration: &[BoxMigration], 445 | ) -> Result>, Error> { 446 | let mut parents: Vec> = vec![]; 447 | for parent in migration.parents() { 448 | parents.push(Box::new(( 449 | parent.app().to_string(), 450 | parent.name().to_string(), 451 | ))); 452 | let found_parent = if parent.is_virtual() { 453 | original_migration 454 | .iter() 455 | .find(|&search_parent| search_parent == &parent) 456 | .ok_or(Error::PlanError { 457 | message: "failed to find parent non virtual migration".to_string(), 458 | })? 
459 | } else { 460 | &parent 461 | }; 462 | parents.extend(get_parent_recursive(found_parent, original_migration)?); 463 | } 464 | Ok(parents) 465 | } 466 | 467 | fn get_run_before_recursive( 468 | migration: &BoxMigration, 469 | original_migration: &[BoxMigration], 470 | ) -> Result>, Error> { 471 | let mut run_before_list: Vec> = vec![]; 472 | for run_before in migration.run_before() { 473 | run_before_list.push(Box::new(( 474 | run_before.app().to_string(), 475 | run_before.name().to_string(), 476 | ))); 477 | let found_run_before = if run_before.is_virtual() { 478 | original_migration 479 | .iter() 480 | .find(|&search_run_before| search_run_before == &run_before) 481 | .ok_or(Error::PlanError { 482 | message: "failed to find run before non virtual migration".to_string(), 483 | })? 484 | } else { 485 | &run_before 486 | }; 487 | run_before_list.extend(get_parent_recursive(found_run_before, original_migration)?); 488 | } 489 | Ok(run_before_list) 490 | } 491 | 492 | // filter migration list to only contains migrations which is related to with 493 | // list migration, removes all migrations which is not related to them according 494 | // to provided plan. We should not check replaces migration since it is already 495 | // handled and all replaces migration are removed as required 496 | fn only_related_migration( 497 | migration_list: &mut MigrationVec, 498 | with_list: Vec<&BoxMigration>, 499 | plan_type: &PlanType, 500 | original_migration: &[BoxMigration], 501 | ) -> Result<(), Error> { 502 | let mut related_migrations = vec![]; 503 | for with in with_list { 504 | // check if with migrations is already added or not. Sometimes with list 505 | // contains migrations which are interrelated so we do not need to add 506 | // already added migration again 507 | if !related_migrations.contains(&with) { 508 | related_migrations.push(with); 509 | match plan_type { 510 | PlanType::Apply => { 511 | let with_parents = get_parent_recursive(with, original_migration)?; 512 | for &migration in migration_list.iter() { 513 | if !related_migrations.contains(&migration) 514 | && (with_parents.contains(migration) 515 | || get_run_before_recursive(migration, original_migration)? 516 | .contains(with)) 517 | { 518 | related_migrations.push(migration); 519 | } 520 | } 521 | } 522 | PlanType::Revert => { 523 | let with_run_before = get_run_before_recursive(with, original_migration)?; 524 | for &migration in migration_list.iter() { 525 | if !related_migrations.contains(&migration) 526 | && (with_run_before.contains(migration) 527 | || get_parent_recursive(migration, original_migration)? 
528 | .contains(with)) 529 | { 530 | related_migrations.push(migration); 531 | } 532 | } 533 | } 534 | } 535 | } 536 | } 537 | migration_list.retain(|&migration| related_migrations.contains(&migration)); 538 | Ok(()) 539 | } 540 | 541 | /// Process plan to provided migrations list 542 | fn process_plan( 543 | migration_list: &mut MigrationVec, 544 | applied_migrations: &MigrationVec, 545 | plan: &Plan, 546 | original_migration: &[BoxMigration], 547 | ) -> Result<(), Error> 548 | where 549 | DB: Database, 550 | { 551 | // Modify migration list according to plan type 552 | match plan.plan_type { 553 | PlanType::Apply => { 554 | migration_list.retain(|migration| !applied_migrations.contains(migration)); 555 | } 556 | PlanType::Revert => { 557 | migration_list.retain(|migration| applied_migrations.contains(migration)); 558 | migration_list.reverse(); 559 | } 560 | } 561 | 562 | if let Some((app, migration_name)) = &plan.app_migration { 563 | // Find position of last migration which matches condition of provided app and 564 | // migration name 565 | let position = if let Some(name) = migration_name { 566 | let Some(pos) = migration_list 567 | .iter() 568 | .rposition(|migration| migration.app() == app && migration.name() == name) 569 | else { 570 | if migration_list 571 | .iter() 572 | .any(|migration| migration.app() == app) 573 | { 574 | return Err(Error::PlanError { 575 | message: format!("migration {app}:{name} doesn't exists for app"), 576 | }); 577 | } 578 | return Err(Error::PlanError { 579 | message: format!("app {app} doesn't exists"), 580 | }); 581 | }; 582 | pos 583 | } else { 584 | let Some(pos) = migration_list 585 | .iter() 586 | .rposition(|migration| migration.app() == app) 587 | else { 588 | return Err(Error::PlanError { 589 | message: format!("app {app} doesn't exists"), 590 | }); 591 | }; 592 | pos 593 | }; 594 | migration_list.truncate(position + 1); 595 | let with_list = if migration_name.is_some() { 596 | vec![migration_list[position]] 597 | } else { 598 | migration_list 599 | .iter() 600 | .filter(|pos_migration| pos_migration.app() == app) 601 | .copied() 602 | .collect::>() 603 | }; 604 | only_related_migration( 605 | migration_list, 606 | with_list, 607 | &plan.plan_type, 608 | original_migration, 609 | )?; 610 | } else if let Some(count) = plan.count { 611 | let actual_len = migration_list.len(); 612 | if count > actual_len { 613 | return Err(Error::PlanError { 614 | message: format!( 615 | "passed count value is larger than migration length: {actual_len}" 616 | ), 617 | }); 618 | } 619 | migration_list.truncate(count); 620 | } 621 | Ok(()) 622 | } 623 | 624 | fn get_recursive<'get, DB>( 625 | hash_map: &'get HashMap, &'get BoxMigration>, 626 | val: &'get BoxMigration, 627 | ) -> Vec<&'get BoxMigration> { 628 | let mut recursive_vec = vec![val]; 629 | if let Some(&parent) = hash_map.get(val) { 630 | recursive_vec.extend(get_recursive(hash_map, parent)); 631 | } 632 | recursive_vec 633 | } 634 | 635 | /// The [`Migrate`] trait defines methods to manage and apply database 636 | /// migrations according to a given plan. 637 | /// 638 | /// This trait combines the functionalities of the [`Info`] and 639 | /// [`DatabaseOperation`] traits, providing a full set of migration 640 | /// capabilities. All methods have default implementations, meaning no explicit 641 | /// implementation is required. Additionally, all methods are database-agnostic. 
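///
/// A minimal usage sketch (a `migrator` with migrations already added and an
/// open `conn` connection are assumed to exist elsewhere):
/// ```rust,ignore
/// use sqlx_migrator::migrator::Plan;
///
/// // apply every pending migration
/// migrator.run(&mut conn, &Plan::apply_all()).await?;
/// // revert only the most recently applied migration
/// migrator.run(&mut conn, &Plan::revert_count(1)).await?;
/// ```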
642 | #[async_trait::async_trait] 643 | pub trait Migrate: Info + DatabaseOperation + Send + Sync 644 | where 645 | DB: Database, 646 | { 647 | /// Generate migration plan according to plan. 648 | /// 649 | /// Returns a vector of migration. If plan is none than it will generate 650 | /// plan with all migrations in order of apply 651 | #[expect(clippy::too_many_lines)] 652 | async fn generate_migration_plan( 653 | &self, 654 | connection: &mut ::Connection, 655 | plan: Option<&Plan>, 656 | ) -> MigrationVecResult { 657 | if self.migrations().is_empty() { 658 | return Err(Error::PlanError { 659 | message: "no migration are added to migration list".to_string(), 660 | }); 661 | } 662 | if self 663 | .migrations() 664 | .iter() 665 | .any(|migration| migration.is_virtual()) 666 | { 667 | return Err(Error::PlanError { 668 | message: "virtual migrations which is not replaced is present".to_string(), 669 | }); 670 | } 671 | 672 | tracing::debug!("generating {:?} migration plan", plan); 673 | 674 | // Hashmap which contains key as migration and value is migration which replaces 675 | // this migration. One migration can only have one parent 676 | let mut replaces_child_parent_hash_map = HashMap::new(); 677 | 678 | for parent_migration in self.migrations() { 679 | for child_migration in parent_migration.replaces() { 680 | let child_name = format!("{}:{}", child_migration.app(), child_migration.name()); 681 | if replaces_child_parent_hash_map 682 | .insert(child_migration, parent_migration) 683 | .is_some() 684 | { 685 | return Err(Error::PlanError { 686 | message: format!("migration {child_name} replaced multiple times",), 687 | }); 688 | } 689 | } 690 | } 691 | 692 | // Hashmap which contains all children of migration generated from replace list 693 | let mut replace_children = HashMap::<_, Vec<_>>::new(); 694 | // in first loop add initial parent and child from parent due to replace 695 | for (child, &parent) in &replaces_child_parent_hash_map { 696 | let children_migration = if child.is_virtual() { 697 | self.migrations() 698 | .iter() 699 | .find(|&search_migration| search_migration == child) 700 | .ok_or(Error::PlanError { 701 | message: "Failed finding non virtual migration for virtual migration" 702 | .to_string(), 703 | })? 704 | } else { 705 | child 706 | }; 707 | replace_children 708 | .entry(parent) 709 | .or_default() 710 | .push(children_migration); 711 | } 712 | // in second loop through recursive add all descendants 713 | for (child, &parent) in &replaces_child_parent_hash_map { 714 | let children_migration = if child.is_virtual() { 715 | self.migrations() 716 | .iter() 717 | .find(|&search_migration| search_migration == child) 718 | .ok_or(Error::PlanError { 719 | message: "Failed finding non virtual migration for virtual migration" 720 | .to_string(), 721 | })? 
722 | } else { 723 | child 724 | }; 725 | populate_replace_recursive(&mut replace_children, parent, children_migration)?; 726 | } 727 | // Hashmap which contains key as migration and value as list of migration 728 | // which becomes parent for key due to value having key as run before value 729 | let mut run_before_child_parent_hash_map = HashMap::<_, Vec<_>>::new(); 730 | 731 | for parent_migration in self.migrations() { 732 | for run_before_migration in parent_migration.run_before() { 733 | run_before_child_parent_hash_map 734 | .entry(run_before_migration) 735 | .or_default() 736 | .push(parent_migration); 737 | } 738 | } 739 | 740 | let mut migration_list = Vec::new(); 741 | 742 | // Create migration list until migration list length is equal to original vec 743 | // length 744 | let original_migration_length = self.migrations().len(); 745 | while migration_list.len() != original_migration_length { 746 | let loop_initial_migration_list_length = migration_list.len(); 747 | for migration in self.migrations() { 748 | let all_required_added = !migration_list.contains(&migration) 749 | && migration 750 | .parents() 751 | .iter() 752 | .all(|parent_migration| migration_list.contains(&parent_migration)) 753 | && run_before_child_parent_hash_map 754 | .get(migration) 755 | .unwrap_or(&vec![]) 756 | .iter() 757 | .all(|run_before_migration| migration_list.contains(run_before_migration)) 758 | && replaces_child_parent_hash_map 759 | .get(migration) 760 | .is_none_or(|replace_migration| migration_list.contains(replace_migration)) 761 | && replace_children.get(migration).is_none_or(|children| { 762 | // check if children parents and run before are added or not already before 763 | // adding replace migration. Since replace migration may not depend on 764 | // children parent its need to be added first 765 | children.iter().all(|&child| { 766 | child 767 | .parents() 768 | .iter() 769 | .all(|child_parent| migration_list.contains(&child_parent)) 770 | && run_before_child_parent_hash_map 771 | .get(child) 772 | .unwrap_or(&vec![]) 773 | .iter() 774 | .all(|run_before_migration| { 775 | migration_list.contains(run_before_migration) 776 | || children.contains(run_before_migration) 777 | }) 778 | }) 779 | }); 780 | if all_required_added { 781 | migration_list.push(migration); 782 | } 783 | } 784 | 785 | // If old migration plan length is equal to current length than no new migration 786 | // was added. Next loop also will not add migration so return error. This case 787 | // can arise due to looping in migration plan i.e If there is two migration A 788 | // and B, than when B is ancestor of A as well as descendants of A 789 | if loop_initial_migration_list_length == migration_list.len() { 790 | return Err(Error::PlanError { 791 | message: "reached deadlock stage during plan generation".to_string(), 792 | }); 793 | } 794 | } 795 | 796 | // if there is only plan than further process. In further process replaces 797 | // migrations are also handled for removing conflicting migrations where certain 798 | // migrations replaces certain other migrations. 
While initially creating 799 | // migrations both new and replaced migration are present 800 | if let Some(some_plan) = plan { 801 | self.ensure_migration_table_exists(connection).await?; 802 | 803 | let applied_migration_sql_rows = 804 | self.fetch_applied_migration_from_db(connection).await?; 805 | 806 | // convert applied migration sql rows to vector of migration implemented 807 | // objects 808 | let mut applied_migrations = Vec::new(); 809 | for migration in self.migrations() { 810 | if applied_migration_sql_rows 811 | .iter() 812 | .any(|sqlx_migration| sqlx_migration == migration) 813 | { 814 | applied_migrations.push(migration); 815 | } 816 | } 817 | 818 | // Check if any of parents of certain applied migrations are applied or not. If 819 | // any parents are not applied for applied migration than raises 820 | // error also takes consideration of replace migration 821 | for &migration in &applied_migrations { 822 | let mut parents = vec![]; 823 | if let Some(run_before_list) = run_before_child_parent_hash_map.get(migration) { 824 | for &run_before in run_before_list { 825 | parents.push(run_before); 826 | } 827 | } 828 | let main_parents = migration.parents(); 829 | for parent in &main_parents { 830 | parents.push(parent); 831 | } 832 | for parent in parents { 833 | let recursive_vec = get_recursive(&replaces_child_parent_hash_map, parent); 834 | if !applied_migrations 835 | .iter() 836 | .any(|applied| recursive_vec.contains(applied)) 837 | { 838 | return Err(Error::PlanError { 839 | message: format!( 840 | "children migration {}:{} applied before its parent migration \ 841 | {}:{}", 842 | migration.app(), 843 | migration.name(), 844 | parent.app(), 845 | parent.name() 846 | ), 847 | }); 848 | } 849 | } 850 | } 851 | 852 | // Remove migration from migration list according to replaces vector 853 | for migration in migration_list.clone() { 854 | // Only need to check case when migration have children 855 | if let Some(children) = replace_children.get(&migration) { 856 | // Check if any replaces children are applied or not 857 | let replaces_applied = children 858 | .iter() 859 | .any(|&replace_migration| applied_migrations.contains(&replace_migration)); 860 | 861 | // If any one of replaced migrations is applied than do not add current 862 | // migration to migration plan else add only current migration to migration plan 863 | if replaces_applied { 864 | // Error if current migration as well as replace migration both are applied 865 | if applied_migrations.contains(&migration) { 866 | return Err(Error::PlanError { 867 | message: format!( 868 | "migration {}:{} and its replaces are applied together", 869 | migration.app(), 870 | migration.name(), 871 | ), 872 | }); 873 | } 874 | migration_list.retain(|&plan_migration| migration != plan_migration); 875 | } else { 876 | // we can remove all children migrations here since migrations which 877 | // replaced them will be above them in generation list so migration will 878 | // apply in provided order 879 | for replaced_migration in children { 880 | migration_list 881 | .retain(|plan_migration| replaced_migration != plan_migration); 882 | } 883 | } 884 | } 885 | } 886 | 887 | process_plan( 888 | &mut migration_list, 889 | &applied_migrations, 890 | some_plan, 891 | self.migrations(), 892 | )?; 893 | } 894 | 895 | Ok(migration_list) 896 | } 897 | 898 | /// Run provided plan migrations 899 | /// 900 | /// # Errors 901 | /// If failed to run provided plan migrations 902 | async fn run( 903 | &self, 904 | connection: &mut ::Connection, 905 | 
plan: &Plan, 906 | ) -> Result<(), Error> { 907 | tracing::debug!("running plan {:?}", plan); 908 | self.lock(connection).await?; 909 | // do not return result of migrations early from run function hold it till lock 910 | // is unlocked 911 | let result = async { 912 | for migration in self.generate_migration_plan(connection, Some(plan)).await? { 913 | match plan.plan_type { 914 | PlanType::Apply => { 915 | tracing::debug!("applying {} : {}", migration.app(), migration.name()); 916 | let operations = migration.operations(); 917 | if migration.is_atomic() { 918 | let mut transaction = connection.begin().await?; 919 | if !plan.fake { 920 | for operation in operations { 921 | operation.up(&mut transaction).await?; 922 | } 923 | } 924 | self.add_migration_to_db_table(&mut transaction, migration) 925 | .await?; 926 | transaction.commit().await?; 927 | } else { 928 | if !plan.fake { 929 | for operation in operations { 930 | operation.up(connection).await?; 931 | } 932 | } 933 | self.add_migration_to_db_table(connection, migration) 934 | .await?; 935 | } 936 | } 937 | PlanType::Revert => { 938 | tracing::debug!("reverting {} : {}", migration.app(), migration.name()); 939 | 940 | // Reverse operation since last applied operation need to be reverted first 941 | let mut operations = migration.operations(); 942 | operations.reverse(); 943 | 944 | if migration.is_atomic() { 945 | let mut transaction = connection.begin().await?; 946 | if !plan.fake { 947 | for operation in operations { 948 | operation.down(&mut transaction).await?; 949 | } 950 | } 951 | self.delete_migration_from_db_table(&mut transaction, migration) 952 | .await?; 953 | transaction.commit().await?; 954 | } else { 955 | if !plan.fake { 956 | for operation in operations { 957 | operation.down(connection).await?; 958 | } 959 | } 960 | self.delete_migration_from_db_table(connection, migration) 961 | .await?; 962 | } 963 | } 964 | } 965 | } 966 | Ok(()) 967 | } 968 | .await; 969 | // unlock lock before returning result of applying migration 970 | self.unlock(connection).await?; 971 | result 972 | } 973 | } 974 | 975 | const DEFAULT_TABLE_NAME: &str = "_sqlx_migrator_migrations"; 976 | 977 | /// A struct that stores migration-related metadata, including the list of 978 | /// migrations and configuration such as table and schema name 979 | pub struct Migrator { 980 | migrations: Vec>, 981 | table_prefix: Option, 982 | schema: Option, 983 | } 984 | 985 | impl Migrator { 986 | /// Creates a new migrator 987 | /// 988 | /// # Example 989 | /// ```rust 990 | /// # #[cfg(feature="sqlite")] 991 | /// # fn main() { 992 | /// let migrator = sqlx_migrator::Migrator::::new(); 993 | /// assert_eq!(&migrator.table_name(), "_sqlx_migrator_migrations") 994 | /// # } 995 | /// # #[cfg(not(feature="sqlite"))] 996 | /// # fn main() { 997 | /// # } 998 | /// ``` 999 | #[must_use] 1000 | pub fn new() -> Self { 1001 | Self { 1002 | migrations: Vec::default(), 1003 | table_prefix: None, 1004 | schema: None, 1005 | } 1006 | } 1007 | 1008 | /// Configures a prefix for the migrator table name. 1009 | /// 1010 | /// The table name will be formatted as 1011 | /// `_{prefix}_sqlx_migrator_migrations`. Only ASCII lowercase, numeric 1012 | /// characters and underscores are allowed in the prefix. 
1013 | /// 1014 | /// # Example 1015 | /// ```rust 1016 | /// # #[cfg(feature="sqlite")] 1017 | /// # fn main() { 1018 | /// let migrator = sqlx_migrator::Migrator::::new() 1019 | /// .set_table_prefix("prefix_value") 1020 | /// .unwrap(); 1021 | /// assert_eq!( 1022 | /// &migrator.table_name(), 1023 | /// "_prefix_value_sqlx_migrator_migrations" 1024 | /// ) 1025 | /// # } 1026 | /// # #[cfg(not(feature="sqlite"))] 1027 | /// # fn main() { 1028 | /// # } 1029 | /// ``` 1030 | /// 1031 | /// # Errors 1032 | /// When passed table prefix name contains invalid characters 1033 | pub fn set_table_prefix(mut self, prefix: impl Into) -> Result { 1034 | let prefix_str = prefix.into(); 1035 | if prefix_str.is_empty() 1036 | || !prefix_str 1037 | .chars() 1038 | .all(|c| char::is_ascii_lowercase(&c) || char::is_numeric(c) || c == '_') 1039 | { 1040 | return Err(Error::InvalidTablePrefix); 1041 | } 1042 | self.table_prefix = Some(prefix_str); 1043 | Ok(self) 1044 | } 1045 | 1046 | /// Configures a schema for the migrator table. 1047 | /// 1048 | /// When set, the table name will be formatted as `{schema}.{table_name}`. 1049 | /// Schema name can only contain [a-z0-9_] and begin with [a-z_] 1050 | /// 1051 | /// # Examples 1052 | /// ```rust 1053 | /// # #[cfg(feature="sqlite")] 1054 | /// # fn main() { 1055 | /// let migrator = sqlx_migrator::Migrator::::new() 1056 | /// .set_schema("migrations") 1057 | /// .unwrap(); 1058 | /// assert_eq!( 1059 | /// &migrator.table_name(), 1060 | /// "migrations._sqlx_migrator_migrations" 1061 | /// ); 1062 | /// # } 1063 | /// # #[cfg(not(feature="sqlite"))] 1064 | /// # fn main() {} 1065 | /// ``` 1066 | /// 1067 | /// # Errors 1068 | /// When passed schema name contains invalid characters 1069 | pub fn set_schema(mut self, schema: impl Into) -> Result { 1070 | let schema_str = schema.into(); 1071 | if schema_str.is_empty() 1072 | || schema_str.chars().next().is_none() 1073 | || !schema_str 1074 | .chars() 1075 | .all(|c| char::is_ascii_lowercase(&c) || char::is_numeric(c) || c == '_') 1076 | { 1077 | return Err(Error::InvalidSchema); 1078 | } 1079 | self.schema = Some(schema_str); 1080 | Ok(self) 1081 | } 1082 | 1083 | /// Get name of table which is used for storing migrations related 1084 | /// information in database 1085 | /// 1086 | /// Format depends on configuration: 1087 | /// - With schema: `{schema}._sqlx_migrator_migrations` 1088 | /// - With prefix: `_{prefix}_sqlx_migrator_migrations` 1089 | /// - With both: `{schema}._{prefix}_sqlx_migrator_migrations` 1090 | /// - Default: `_sqlx_migrator_migrations` 1091 | /// 1092 | /// # Examples 1093 | /// ```rust 1094 | /// # #[cfg(feature="sqlite")] 1095 | /// # fn main() { 1096 | /// let migrator = sqlx_migrator::Migrator::::new() 1097 | /// .set_schema("app_schema") 1098 | /// .unwrap() 1099 | /// .set_table_prefix("v1") 1100 | /// .unwrap(); 1101 | /// assert_eq!( 1102 | /// &migrator.table_name(), 1103 | /// "app_schema._v1_sqlx_migrator_migrations" 1104 | /// ); 1105 | /// # } 1106 | /// # #[cfg(not(feature="sqlite"))] 1107 | /// # fn main() {} 1108 | /// ``` 1109 | #[must_use] 1110 | pub fn table_name(&self) -> String { 1111 | let mut table_name = DEFAULT_TABLE_NAME.to_string(); 1112 | if let Some(prefix) = &self.table_prefix { 1113 | table_name = format!("_{prefix}{table_name}"); 1114 | } 1115 | if let Some(schema) = &self.schema { 1116 | table_name = format!("{schema}.{table_name}"); 1117 | } 1118 | table_name 1119 | } 1120 | } 1121 | 1122 | impl Default for Migrator { 1123 | fn default() -> Self { 
1124 | Self::new() 1125 | } 1126 | } 1127 | 1128 | impl Info for Migrator { 1129 | fn migrations(&self) -> &Vec> { 1130 | &self.migrations 1131 | } 1132 | 1133 | fn migrations_mut(&mut self) -> &mut Vec> { 1134 | &mut self.migrations 1135 | } 1136 | } 1137 | 1138 | impl Migrate for Migrator 1139 | where 1140 | DB: Database, 1141 | Self: DatabaseOperation, 1142 | { 1143 | } 1144 | -------------------------------------------------------------------------------- /src/migrator/mysql.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{Database, MySql}; 2 | 3 | use super::{DatabaseOperation, Migrator}; 4 | use crate::error::Error; 5 | use crate::migration::{AppliedMigrationSqlRow, Migration}; 6 | 7 | /// create migrator table query 8 | #[must_use] 9 | pub(crate) fn create_migrator_table_query(table_name: &str) -> String { 10 | format!( 11 | "CREATE TABLE IF NOT EXISTS {table_name} ( 12 | id INT PRIMARY KEY NOT NULL AUTO_INCREMENT, 13 | app VARCHAR(384) NOT NULL, 14 | name VARCHAR(384) NOT NULL, 15 | applied_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, 16 | UNIQUE (app, name) 17 | )" 18 | ) 19 | } 20 | 21 | /// Drop table query 22 | #[must_use] 23 | pub(crate) fn drop_table_query(table_name: &str) -> String { 24 | format!("DROP TABLE IF EXISTS {table_name}") 25 | } 26 | 27 | /// fetch rows 28 | pub(crate) fn fetch_rows_query(table_name: &str) -> String { 29 | format!( 30 | "SELECT id, app, name, DATE_FORMAT(applied_time, '%Y-%m-%d %H:%i:%s') AS applied_time \ 31 | FROM {table_name}" 32 | ) 33 | } 34 | 35 | /// add migration query 36 | #[must_use] 37 | pub(crate) fn add_migration_query(table_name: &str) -> String { 38 | format!("INSERT INTO {table_name}(app, name) VALUES (?, ?)") 39 | } 40 | 41 | /// delete migration query 42 | #[must_use] 43 | pub(crate) fn delete_migration_query(table_name: &str) -> String { 44 | format!("DELETE FROM {table_name} WHERE app = ? 
AND name = ?") 45 | } 46 | 47 | /// get current database query 48 | pub(crate) fn current_database_query() -> &'static str { 49 | "SELECT DATABASE()" 50 | } 51 | 52 | /// get lock database query 53 | /// # Errors 54 | /// Failed to lock database 55 | pub(crate) fn lock_database_query() -> &'static str { 56 | "SELECT GET_LOCK(?, -1)" 57 | } 58 | 59 | /// get lock database query 60 | /// # Errors 61 | /// Failed to lock database 62 | pub(crate) fn unlock_database_query() -> &'static str { 63 | "SELECT RELEASE_LOCK(?)" 64 | } 65 | 66 | /// generate lock id 67 | pub(crate) fn get_lock_id(database_name: &str, table_name: &str) -> String { 68 | let buf = format!("{database_name}/{table_name}"); 69 | crc32fast::hash(buf.as_bytes()).to_string() 70 | } 71 | 72 | #[async_trait::async_trait] 73 | impl DatabaseOperation for Migrator { 74 | async fn ensure_migration_table_exists( 75 | &self, 76 | connection: &mut ::Connection, 77 | ) -> Result<(), Error> { 78 | sqlx::query(&create_migrator_table_query(&self.table_name())) 79 | .execute(connection) 80 | .await?; 81 | Ok(()) 82 | } 83 | 84 | async fn drop_migration_table_if_exists( 85 | &self, 86 | connection: &mut ::Connection, 87 | ) -> Result<(), Error> { 88 | sqlx::query(&drop_table_query(&self.table_name())) 89 | .execute(connection) 90 | .await?; 91 | Ok(()) 92 | } 93 | 94 | async fn add_migration_to_db_table( 95 | &self, 96 | connection: &mut ::Connection, 97 | migration: &Box>, 98 | ) -> Result<(), Error> { 99 | sqlx::query(&add_migration_query(&self.table_name())) 100 | .bind(migration.app()) 101 | .bind(migration.name()) 102 | .execute(connection) 103 | .await?; 104 | Ok(()) 105 | } 106 | 107 | async fn delete_migration_from_db_table( 108 | &self, 109 | connection: &mut ::Connection, 110 | migration: &Box>, 111 | ) -> Result<(), Error> { 112 | sqlx::query(&delete_migration_query(&self.table_name())) 113 | .bind(migration.app()) 114 | .bind(migration.name()) 115 | .execute(connection) 116 | .await?; 117 | Ok(()) 118 | } 119 | 120 | async fn fetch_applied_migration_from_db( 121 | &self, 122 | connection: &mut ::Connection, 123 | ) -> Result, Error> { 124 | Ok( 125 | sqlx::query_as::<_, AppliedMigrationSqlRow>(&fetch_rows_query(&self.table_name())) 126 | .fetch_all(connection) 127 | .await?, 128 | ) 129 | } 130 | 131 | async fn lock(&self, connection: &mut ::Connection) -> Result<(), Error> { 132 | let (database_name,): (String,) = sqlx::query_as(current_database_query()) 133 | .fetch_one(&mut *connection) 134 | .await?; 135 | let lock_id = get_lock_id(&database_name, &self.table_name()); 136 | sqlx::query(lock_database_query()) 137 | .bind(lock_id) 138 | .execute(connection) 139 | .await?; 140 | Ok(()) 141 | } 142 | 143 | async fn unlock(&self, connection: &mut ::Connection) -> Result<(), Error> { 144 | let (database_name,): (String,) = sqlx::query_as(current_database_query()) 145 | .fetch_one(&mut *connection) 146 | .await?; 147 | let lock_id = get_lock_id(&database_name, &self.table_name()); 148 | sqlx::query(unlock_database_query()) 149 | .bind(lock_id) 150 | .execute(connection) 151 | .await?; 152 | Ok(()) 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /src/migrator/postgres.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{Database, Postgres}; 2 | 3 | use super::{DatabaseOperation, Migrator}; 4 | use crate::error::Error; 5 | use crate::migration::{AppliedMigrationSqlRow, Migration}; 6 | 7 | /// Create migrator table query 8 | #[must_use] 
9 | pub(crate) fn create_migrator_table_query(table_name: &str) -> String { 10 | format!( 11 | "CREATE TABLE IF NOT EXISTS {table_name} ( 12 | id INT PRIMARY KEY NOT NULL GENERATED ALWAYS AS IDENTITY, 13 | app TEXT NOT NULL, 14 | name TEXT NOT NULL, 15 | applied_time TIMESTAMPTZ NOT NULL DEFAULT now(), 16 | UNIQUE (app, name) 17 | )" 18 | ) 19 | } 20 | 21 | /// Drop table query 22 | #[must_use] 23 | pub(crate) fn drop_table_query(table_name: &str) -> String { 24 | format!("DROP TABLE IF EXISTS {table_name}") 25 | } 26 | 27 | /// Fetch rows 28 | pub(crate) fn fetch_rows_query(table_name: &str) -> String { 29 | format!("SELECT id, app, name, applied_time::TEXT FROM {table_name}") 30 | } 31 | 32 | /// Add migration query 33 | #[must_use] 34 | pub(crate) fn add_migration_query(table_name: &str) -> String { 35 | format!("INSERT INTO {table_name}(app, name) VALUES ($1, $2)") 36 | } 37 | 38 | /// Delete migration query 39 | #[must_use] 40 | pub(crate) fn delete_migration_query(table_name: &str) -> String { 41 | format!("DELETE FROM {table_name} WHERE app = $1 AND name = $2") 42 | } 43 | 44 | /// get current database query 45 | pub(crate) fn current_database_query() -> &'static str { 46 | "SELECT CURRENT_DATABASE()" 47 | } 48 | 49 | /// get lock database query 50 | pub(crate) fn lock_database_query() -> &'static str { 51 | "SELECT pg_advisory_lock($1)" 52 | } 53 | 54 | /// get lock database query 55 | pub(crate) fn unlock_database_query() -> &'static str { 56 | "SELECT pg_advisory_unlock($1)" 57 | } 58 | 59 | /// generate lock id 60 | pub(crate) fn get_lock_id(database_name: &str, table_name: &str) -> i64 { 61 | let buf = format!("{database_name}/{table_name}"); 62 | i64::from(crc32fast::hash(buf.as_bytes())) 63 | } 64 | 65 | #[async_trait::async_trait] 66 | impl DatabaseOperation for Migrator { 67 | async fn ensure_migration_table_exists( 68 | &self, 69 | connection: &mut ::Connection, 70 | ) -> Result<(), Error> { 71 | sqlx::query(&create_migrator_table_query(&self.table_name())) 72 | .execute(connection) 73 | .await?; 74 | Ok(()) 75 | } 76 | 77 | async fn drop_migration_table_if_exists( 78 | &self, 79 | connection: &mut ::Connection, 80 | ) -> Result<(), Error> { 81 | sqlx::query(&drop_table_query(&self.table_name())) 82 | .execute(connection) 83 | .await?; 84 | Ok(()) 85 | } 86 | 87 | async fn add_migration_to_db_table( 88 | &self, 89 | connection: &mut ::Connection, 90 | migration: &Box>, 91 | ) -> Result<(), Error> { 92 | sqlx::query(&add_migration_query(&self.table_name())) 93 | .bind(migration.app()) 94 | .bind(migration.name()) 95 | .execute(connection) 96 | .await?; 97 | Ok(()) 98 | } 99 | 100 | async fn delete_migration_from_db_table( 101 | &self, 102 | connection: &mut ::Connection, 103 | migration: &Box>, 104 | ) -> Result<(), Error> { 105 | sqlx::query(&delete_migration_query(&self.table_name())) 106 | .bind(migration.app()) 107 | .bind(migration.name()) 108 | .execute(connection) 109 | .await?; 110 | Ok(()) 111 | } 112 | 113 | async fn fetch_applied_migration_from_db( 114 | &self, 115 | connection: &mut ::Connection, 116 | ) -> Result, Error> { 117 | Ok( 118 | sqlx::query_as::<_, AppliedMigrationSqlRow>(&fetch_rows_query(&self.table_name())) 119 | .fetch_all(connection) 120 | .await?, 121 | ) 122 | } 123 | 124 | async fn lock(&self, connection: &mut ::Connection) -> Result<(), Error> { 125 | let (database_name,): (String,) = sqlx::query_as(current_database_query()) 126 | .fetch_one(&mut *connection) 127 | .await?; 128 | let lock_id = get_lock_id(&database_name, &self.table_name()); 
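        // Note: pg_advisory_lock takes a single 64-bit key; get_lock_id above derives
        // it by crc32-hashing "{database_name}/{table_name}" and widening to i64, so
        // concurrent migrators pointed at the same database and migration table block
        // on the same session-level lock until unlock() runs pg_advisory_unlock.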
129 | sqlx::query(lock_database_query()) 130 | .bind(lock_id) 131 | .execute(connection) 132 | .await?; 133 | Ok(()) 134 | } 135 | 136 | async fn unlock( 137 | &self, 138 | connection: &mut ::Connection, 139 | ) -> Result<(), Error> { 140 | let (database_name,): (String,) = sqlx::query_as(current_database_query()) 141 | .fetch_one(&mut *connection) 142 | .await?; 143 | let lock_id = get_lock_id(&database_name, &self.table_name()); 144 | sqlx::query(unlock_database_query()) 145 | .bind(lock_id) 146 | .execute(connection) 147 | .await?; 148 | Ok(()) 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /src/migrator/sqlite.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{Database, Sqlite}; 2 | 3 | use super::{DatabaseOperation, Migrator}; 4 | use crate::error::Error; 5 | use crate::migration::{AppliedMigrationSqlRow, Migration}; 6 | 7 | /// create migrator table 8 | #[must_use] 9 | pub(crate) fn create_migrator_table_query(table_name: &str) -> String { 10 | format!( 11 | "CREATE TABLE IF NOT EXISTS {table_name} ( 12 | id INTEGER PRIMARY KEY AUTOINCREMENT, 13 | app TEXT NOT NULL, 14 | name TEXT NOT NULL, 15 | applied_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, 16 | UNIQUE (app, name) 17 | )" 18 | ) 19 | } 20 | 21 | /// Drop table 22 | #[must_use] 23 | pub(crate) fn drop_table_query(table_name: &str) -> String { 24 | format!("DROP TABLE IF EXISTS {table_name}") 25 | } 26 | 27 | /// fetch rows 28 | pub(crate) fn fetch_rows_query(table_name: &str) -> String { 29 | format!("SELECT id, app, name, applied_time FROM {table_name}") 30 | } 31 | 32 | /// add migration query 33 | #[must_use] 34 | pub(crate) fn add_migration_query(table_name: &str) -> String { 35 | format!("INSERT INTO {table_name}(app, name) VALUES ($1, $2)") 36 | } 37 | 38 | /// delete migration query 39 | #[must_use] 40 | pub(crate) fn delete_migration_query(table_name: &str) -> String { 41 | format!("DELETE FROM {table_name} WHERE app = $1 AND name = $2") 42 | } 43 | 44 | #[async_trait::async_trait] 45 | impl DatabaseOperation for Migrator { 46 | async fn ensure_migration_table_exists( 47 | &self, 48 | connection: &mut ::Connection, 49 | ) -> Result<(), Error> { 50 | sqlx::query(&create_migrator_table_query(&self.table_name())) 51 | .execute(connection) 52 | .await?; 53 | Ok(()) 54 | } 55 | 56 | async fn drop_migration_table_if_exists( 57 | &self, 58 | connection: &mut ::Connection, 59 | ) -> Result<(), Error> { 60 | sqlx::query(&drop_table_query(&self.table_name())) 61 | .execute(connection) 62 | .await?; 63 | Ok(()) 64 | } 65 | 66 | async fn add_migration_to_db_table( 67 | &self, 68 | connection: &mut ::Connection, 69 | migration: &Box>, 70 | ) -> Result<(), Error> { 71 | sqlx::query(&add_migration_query(&self.table_name())) 72 | .bind(migration.app()) 73 | .bind(migration.name()) 74 | .execute(connection) 75 | .await?; 76 | Ok(()) 77 | } 78 | 79 | async fn delete_migration_from_db_table( 80 | &self, 81 | connection: &mut ::Connection, 82 | migration: &Box>, 83 | ) -> Result<(), Error> { 84 | sqlx::query(&delete_migration_query(&self.table_name())) 85 | .bind(migration.app()) 86 | .bind(migration.name()) 87 | .execute(connection) 88 | .await?; 89 | Ok(()) 90 | } 91 | 92 | async fn fetch_applied_migration_from_db( 93 | &self, 94 | connection: &mut ::Connection, 95 | ) -> Result, Error> { 96 | Ok( 97 | sqlx::query_as::<_, AppliedMigrationSqlRow>(&fetch_rows_query(&self.table_name())) 98 | .fetch_all(connection) 99 | .await?, 100 
| ) 101 | } 102 | 103 | async fn lock(&self, _connection: &mut ::Connection) -> Result<(), Error> { 104 | Ok(()) 105 | } 106 | 107 | async fn unlock( 108 | &self, 109 | _connection: &mut ::Connection, 110 | ) -> Result<(), Error> { 111 | Ok(()) 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /src/migrator/tests.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{Database, Sqlite, SqlitePool}; 2 | 3 | use super::{DatabaseOperation, Info, Migrate, Migrator}; 4 | use crate::error::Error; 5 | use crate::migration::{AppliedMigrationSqlRow, Migration}; 6 | use crate::migrator::Plan; 7 | use crate::vec_box; 8 | 9 | #[derive(Default)] 10 | struct CustomMigrator { 11 | internal_migrator: Migrator, 12 | migrations: Vec>>, 13 | applied_migrations: Vec, 14 | } 15 | 16 | impl CustomMigrator { 17 | fn add_applied_migrations( 18 | &mut self, 19 | migrations: Vec>>, 20 | ) -> Result<(), Error> { 21 | for migration in migrations { 22 | self.add_applied_migration(migration)?; 23 | } 24 | Ok(()) 25 | } 26 | 27 | fn add_applied_migration( 28 | &mut self, 29 | migration: Box>, 30 | ) -> Result<(), Error> { 31 | let current_length = self.migrations.len(); 32 | self.applied_migrations.push(AppliedMigrationSqlRow::new( 33 | i32::try_from(current_length).unwrap(), 34 | migration.app(), 35 | migration.name(), 36 | )); 37 | self.internal_migrator.add_migration(migration) 38 | } 39 | } 40 | 41 | impl Info for CustomMigrator { 42 | fn migrations(&self) -> &Vec>> { 43 | &self.migrations 44 | } 45 | 46 | fn migrations_mut(&mut self) -> &mut Vec>> { 47 | &mut self.migrations 48 | } 49 | } 50 | 51 | #[async_trait::async_trait] 52 | impl DatabaseOperation for CustomMigrator { 53 | async fn ensure_migration_table_exists( 54 | &self, 55 | _connection: &mut ::Connection, 56 | ) -> Result<(), Error> { 57 | Ok(()) 58 | } 59 | 60 | async fn drop_migration_table_if_exists( 61 | &self, 62 | _connection: &mut ::Connection, 63 | ) -> Result<(), Error> { 64 | Ok(()) 65 | } 66 | 67 | async fn add_migration_to_db_table( 68 | &self, 69 | _connection: &mut ::Connection, 70 | _migration: &Box>, 71 | ) -> Result<(), Error> { 72 | Ok(()) 73 | } 74 | 75 | async fn delete_migration_from_db_table( 76 | &self, 77 | _connection: &mut ::Connection, 78 | _migration: &Box>, 79 | ) -> Result<(), Error> { 80 | Ok(()) 81 | } 82 | 83 | async fn fetch_applied_migration_from_db( 84 | &self, 85 | _connection: &mut ::Connection, 86 | ) -> Result, Error> { 87 | Ok(self.applied_migrations.clone()) 88 | } 89 | 90 | async fn lock(&self, _connection: &mut ::Connection) -> Result<(), Error> { 91 | Ok(()) 92 | } 93 | 94 | async fn unlock( 95 | &self, 96 | _connection: &mut ::Connection, 97 | ) -> Result<(), Error> { 98 | Ok(()) 99 | } 100 | } 101 | 102 | impl Migrate for CustomMigrator {} 103 | 104 | macro_rules! 
migration { 105 | ($op:ty, $name:literal, $parents:expr, $replaces:expr, $run_before:expr) => { 106 | impl crate::migration::Migration for $op { 107 | fn app(&self) -> &str { 108 | "test" 109 | } 110 | 111 | fn name(&self) -> &str { 112 | $name 113 | } 114 | 115 | fn parents(&self) -> Vec>> { 116 | $parents 117 | } 118 | 119 | fn operations(&self) -> Vec>> { 120 | vec![] 121 | } 122 | 123 | fn replaces(&self) -> Vec>> { 124 | $replaces 125 | } 126 | 127 | fn run_before(&self) -> Vec>> { 128 | $run_before 129 | } 130 | } 131 | }; 132 | } 133 | 134 | async fn generate_apply_all_plan( 135 | migrator: &mut CustomMigrator, 136 | migration_list: Vec>>, 137 | ) -> Result>>, Error> { 138 | migrator.add_migrations(migration_list)?; 139 | let sqlite = SqlitePool::connect("sqlite::memory:").await.unwrap(); 140 | let mut conn = sqlite.acquire().await.unwrap(); 141 | migrator 142 | .generate_migration_plan(&mut conn, Some(&Plan::apply_all())) 143 | .await 144 | } 145 | 146 | #[tokio::test] 147 | async fn simple_test() { 148 | struct A; 149 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 150 | struct B; 151 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 152 | struct C; 153 | migration!(C, "c", vec_box!(B), vec_box!(), vec_box!()); 154 | let mut migrator = CustomMigrator::default(); 155 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B, C)) 156 | .await 157 | .unwrap(); 158 | let mut plan_iter = plan.iter(); 159 | assert!(plan_iter.next() == Some(&&(Box::new(A) as Box>))); 160 | assert!(plan_iter.next() == Some(&&(Box::new(B) as Box>))); 161 | assert!(plan_iter.next() == Some(&&(Box::new(C) as Box>))); 162 | assert!(plan_iter.next().is_none()); 163 | } 164 | 165 | #[tokio::test] 166 | async fn no_migration() { 167 | struct _A; 168 | migration!(_A, "a", vec_box!(), vec_box!(), vec_box!()); 169 | struct _B; 170 | migration!(_B, "b", vec_box!(_A), vec_box!(), vec_box!()); 171 | let mut migrator = CustomMigrator::default(); 172 | let plan = generate_apply_all_plan(&mut migrator, vec_box!()).await; 173 | assert_eq!( 174 | plan.err().map(|e| e.to_string()), 175 | Some("plan error: no migration are added to migration list".to_string()) 176 | ); 177 | } 178 | 179 | #[tokio::test] 180 | async fn same_name_used_multiple_time() { 181 | struct A; 182 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 183 | struct B; 184 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 185 | struct C; 186 | migration!(C, "c", vec_box!(), vec_box!(), vec_box!()); 187 | struct D; 188 | migration!(D, "b", vec_box!(C), vec_box!(), vec_box!()); 189 | let mut migrator = CustomMigrator::default(); 190 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B, C, D)).await; 191 | assert_eq!( 192 | plan.err().map(|e| e.to_string()), 193 | Some("migration for app: test with name: b consists of inconsistent values".to_string()) 194 | ); 195 | } 196 | 197 | #[tokio::test] 198 | async fn interrelated_test() { 199 | struct A; 200 | migration!(A, "a", vec_box!(B), vec_box!(), vec_box!()); 201 | struct B; 202 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 203 | let mut migrator = CustomMigrator::default(); 204 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B)).await; 205 | assert_eq!( 206 | plan.err().map(|e| e.to_string()), 207 | Some("plan error: reached deadlock stage during plan generation".to_string()) 208 | ); 209 | } 210 | 211 | #[tokio::test] 212 | async fn run_before_interrelated_test() { 213 | struct A; 214 | migration!(A, "a", vec_box!(), 
vec_box!(B), vec_box!()); 215 | struct B; 216 | migration!(B, "b", vec_box!(), vec_box!(A), vec_box!()); 217 | let mut migrator = CustomMigrator::default(); 218 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B)).await; 219 | assert_eq!( 220 | plan.err().map(|e| e.to_string()), 221 | Some("plan error: two migrations replaces each other".to_string()) 222 | ); 223 | } 224 | 225 | #[tokio::test] 226 | async fn replace_interrelated_test() { 227 | struct A; 228 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!(B)); 229 | struct B; 230 | migration!(B, "b", vec_box!(), vec_box!(), vec_box!(A)); 231 | let mut migrator = CustomMigrator::default(); 232 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B)).await; 233 | assert_eq!( 234 | plan.err().map(|e| e.to_string()), 235 | Some("plan error: reached deadlock stage during plan generation".to_string()) 236 | ); 237 | } 238 | 239 | #[tokio::test] 240 | async fn depend_on_itself() { 241 | struct A; 242 | migration!(A, "a", vec_box!(A), vec_box!(), vec_box!()); 243 | struct B; 244 | migration!(B, "b", vec_box!(B), vec_box!(), vec_box!()); 245 | let mut migrator = CustomMigrator::default(); 246 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B)).await; 247 | assert_eq!( 248 | plan.err().map(|e| e.to_string()), 249 | Some("plan error: reached deadlock stage during plan generation".to_string()) 250 | ); 251 | } 252 | 253 | #[tokio::test] 254 | async fn run_before_depend_on_itself() { 255 | struct A; 256 | migration!(A, "a", vec_box!(), vec_box!(A), vec_box!()); 257 | struct B; 258 | migration!(B, "b", vec_box!(), vec_box!(B), vec_box!()); 259 | let mut migrator = CustomMigrator::default(); 260 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B)).await; 261 | assert_eq!( 262 | plan.err().map(|e| e.to_string()), 263 | Some("plan error: two migrations replaces each other".to_string()) 264 | ); 265 | } 266 | 267 | #[tokio::test] 268 | async fn replace_depend_on_itself() { 269 | struct A; 270 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!(A)); 271 | struct B; 272 | migration!(B, "b", vec_box!(), vec_box!(), vec_box!(B)); 273 | let mut migrator = CustomMigrator::default(); 274 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B)).await; 275 | assert_eq!( 276 | plan.err().map(|e| e.to_string()), 277 | Some("plan error: reached deadlock stage during plan generation".to_string()) 278 | ); 279 | } 280 | 281 | #[tokio::test] 282 | async fn replace_test() { 283 | struct A; 284 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 285 | struct B; 286 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 287 | struct C; 288 | migration!(C, "c", vec_box!(), vec_box!(B), vec_box!()); 289 | struct D; 290 | migration!(D, "d", vec_box!(), vec_box!(C), vec_box!()); 291 | let mut migrator = CustomMigrator::default(); 292 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B, C, D)) 293 | .await 294 | .unwrap(); 295 | let mut plan_iter = plan.iter(); 296 | assert!(plan_iter.next() == Some(&&(Box::new(A) as Box>))); 297 | assert!(plan_iter.next() == Some(&&(Box::new(D) as Box>))); 298 | assert!(plan_iter.next().is_none()); 299 | } 300 | 301 | #[tokio::test] 302 | async fn run_before_test() { 303 | struct A; 304 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 305 | struct B; 306 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 307 | struct C; 308 | migration!(C, "c", vec_box!(), vec_box!(), vec_box!(B)); 309 | struct D; 310 | migration!(D, "d", 
vec_box!(), vec_box!(), vec_box!(C)); 311 | let mut migrator = CustomMigrator::default(); 312 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B, C, D)) 313 | .await 314 | .unwrap(); 315 | let mut plan_iter = plan.iter(); 316 | assert!(plan_iter.next() == Some(&&(Box::new(A) as Box>))); 317 | assert!(plan_iter.next() == Some(&&(Box::new(D) as Box>))); 318 | assert!(plan_iter.next() == Some(&&(Box::new(C) as Box>))); 319 | assert!(plan_iter.next() == Some(&&(Box::new(B) as Box>))); 320 | assert!(plan_iter.next().is_none()); 321 | } 322 | 323 | #[tokio::test] 324 | async fn replaces_multiple_times() { 325 | struct A; 326 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 327 | struct B; 328 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 329 | struct C; 330 | migration!(C, "c", vec_box!(), vec_box!(B), vec_box!()); 331 | struct D; 332 | migration!(D, "d", vec_box!(), vec_box!(B), vec_box!()); 333 | let mut migrator = CustomMigrator::default(); 334 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B, C, D)).await; 335 | assert_eq!( 336 | plan.err().map(|e| e.to_string()), 337 | Some("plan error: migration test:b replaced multiple times".to_string()) 338 | ); 339 | } 340 | 341 | #[tokio::test] 342 | async fn replace_run_before_cond_1() { 343 | struct A; 344 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 345 | struct B; 346 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 347 | struct C; 348 | migration!(C, "c", vec_box!(), vec_box!(B), vec_box!()); 349 | struct D; 350 | migration!(D, "d", vec_box!(), vec_box!(), vec_box!(B)); 351 | let mut migrator = CustomMigrator::default(); 352 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B, C, D)) 353 | .await 354 | .unwrap(); 355 | let mut plan_iter = plan.iter(); 356 | assert!(plan_iter.next() == Some(&&(Box::new(A) as Box>))); 357 | assert!(plan_iter.next() == Some(&&(Box::new(D) as Box>))); 358 | assert!(plan_iter.next() == Some(&&(Box::new(C) as Box>))); 359 | assert!(plan_iter.next().is_none()); 360 | } 361 | 362 | #[tokio::test] 363 | async fn replaces_run_before_cond_2() { 364 | struct A; 365 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 366 | struct B; 367 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 368 | struct C; 369 | migration!(C, "c", vec_box!(), vec_box!(B), vec_box!()); 370 | struct D; 371 | migration!(D, "d", vec_box!(), vec_box!(C), vec_box!()); 372 | struct E; 373 | migration!(E, "e", vec_box!(), vec_box!(), vec_box!(C)); 374 | let mut migrator = CustomMigrator::default(); 375 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B, C, D, E)) 376 | .await 377 | .unwrap(); 378 | let mut plan_iter = plan.iter(); 379 | assert!(plan_iter.next() == Some(&&(Box::new(A) as Box>))); 380 | assert!(plan_iter.next() == Some(&&(Box::new(E) as Box>))); 381 | assert!(plan_iter.next() == Some(&&(Box::new(D) as Box>))); 382 | assert!(plan_iter.next().is_none()); 383 | } 384 | 385 | #[tokio::test] 386 | async fn replaces_run_before_cond_3() { 387 | struct A; 388 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 389 | struct B; 390 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 391 | struct C; 392 | migration!(C, "c", vec_box!(), vec_box!(B), vec_box!()); 393 | struct D; 394 | migration!(D, "d", vec_box!(), vec_box!(C), vec_box!()); 395 | struct E; 396 | migration!(E, "e", vec_box!(), vec_box!(), vec_box!(D)); 397 | let mut migrator = CustomMigrator::default(); 398 | let plan = generate_apply_all_plan(&mut 
migrator, vec_box!(A, B, C, D, E)) 399 | .await 400 | .unwrap(); 401 | let mut plan_iter = plan.iter(); 402 | assert!(plan_iter.next() == Some(&&(Box::new(A) as Box>))); 403 | assert!(plan_iter.next() == Some(&&(Box::new(E) as Box>))); 404 | assert!(plan_iter.next() == Some(&&(Box::new(D) as Box>))); 405 | assert!(plan_iter.next().is_none()); 406 | } 407 | 408 | #[tokio::test] 409 | async fn replaces_run_before_cond_4() { 410 | struct A; 411 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 412 | struct B; 413 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 414 | struct C; 415 | migration!(C, "c", vec_box!(), vec_box!(B), vec_box!()); 416 | struct D; 417 | migration!(D, "d", vec_box!(), vec_box!(C, E), vec_box!()); 418 | struct E; 419 | migration!(E, "e", vec_box!(), vec_box!(), vec_box!(C)); 420 | let mut migrator = CustomMigrator::default(); 421 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B, C, D, E)) 422 | .await 423 | .unwrap(); 424 | let mut plan_iter = plan.iter(); 425 | assert!(plan_iter.next() == Some(&&(Box::new(A) as Box>))); 426 | assert!(plan_iter.next() == Some(&&(Box::new(D) as Box>))); 427 | assert!(plan_iter.next().is_none()); 428 | } 429 | 430 | #[tokio::test] 431 | async fn replaces_run_before_cond_5() { 432 | struct A; 433 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 434 | struct B; 435 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 436 | struct C; 437 | migration!(C, "c", vec_box!(), vec_box!(B), vec_box!()); 438 | struct D; 439 | migration!(D, "d", vec_box!(), vec_box!(C), vec_box!(C)); 440 | let mut migrator = CustomMigrator::default(); 441 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B, C, D)).await; 442 | assert_eq!( 443 | plan.err().map(|e| e.to_string()), 444 | Some("plan error: reached deadlock stage during plan generation".to_string()) 445 | ); 446 | } 447 | 448 | #[tokio::test] 449 | async fn replaces_run_before_cond_6() { 450 | struct A; 451 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 452 | struct B; 453 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 454 | struct C; 455 | migration!(C, "c", vec_box!(), vec_box!(B), vec_box!()); 456 | struct D; 457 | migration!(D, "d", vec_box!(), vec_box!(C, E), vec_box!()); 458 | struct E; 459 | migration!(E, "e", vec_box!(), vec_box!(D), vec_box!(C)); 460 | let mut migrator = CustomMigrator::default(); 461 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B, C, D, E)).await; 462 | assert_eq!( 463 | plan.err().map(|e| e.to_string()), 464 | Some("plan error: two migrations replaces each other".to_string()) 465 | ); 466 | } 467 | 468 | #[tokio::test] 469 | async fn replaces_run_before_cond_7() { 470 | struct A; 471 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 472 | struct B; 473 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 474 | struct C; 475 | migration!(C, "c", vec_box!(), vec_box!(B), vec_box!(D)); 476 | struct D; 477 | migration!(D, "d", vec_box!(), vec_box!(C), vec_box!()); 478 | let mut migrator = CustomMigrator::default(); 479 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B, C, D)).await; 480 | assert_eq!( 481 | plan.err().map(|e| e.to_string()), 482 | Some("plan error: reached deadlock stage during plan generation".to_string()) 483 | ); 484 | } 485 | 486 | #[tokio::test] 487 | async fn loop_error() { 488 | struct A; 489 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 490 | struct B; 491 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!(A)); 
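    // B lists A both as a parent (B must apply after A) and in run_before
    // (B must apply before A); no ordering satisfies both constraints, so plan
    // generation is expected to report a deadlock below.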
492 | let mut migrator = CustomMigrator::default(); 493 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B)).await; 494 | assert_eq!( 495 | plan.err().map(|e| e.to_string()), 496 | Some("plan error: reached deadlock stage during plan generation".to_string()) 497 | ); 498 | } 499 | 500 | #[tokio::test] 501 | async fn parent_not_applied() { 502 | struct A; 503 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 504 | struct B; 505 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 506 | let mut migrator = CustomMigrator::default(); 507 | migrator.add_applied_migrations(vec_box!(B)).unwrap(); 508 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, B)).await; 509 | assert_eq!( 510 | plan.err().map(|e| e.to_string()), 511 | Some( 512 | "plan error: children migration test:b applied before its parent migration test:a" 513 | .to_string() 514 | ) 515 | ); 516 | } 517 | 518 | #[tokio::test] 519 | async fn replace_grand_child_applied() { 520 | struct A; 521 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 522 | struct B; 523 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 524 | struct C; 525 | migration!(C, "c", vec_box!(), vec_box!(B), vec_box!()); 526 | struct D; 527 | migration!(D, "d", vec_box!(), vec_box!(C), vec_box!()); 528 | let mut migrator = CustomMigrator::default(); 529 | migrator.add_applied_migrations(vec_box!(A, D)).unwrap(); 530 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(D, C, B, A)) 531 | .await 532 | .unwrap(); 533 | assert!(plan.is_empty()); 534 | } 535 | 536 | #[tokio::test] 537 | async fn replace_detailed_virtual() { 538 | struct A; 539 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 540 | struct B; 541 | migration!( 542 | B, 543 | "b", 544 | vec_box!((A.app(), A.name())), 545 | vec_box!(), 546 | vec_box!() 547 | ); 548 | struct C; 549 | migration!( 550 | C, 551 | "c", 552 | vec_box!(), 553 | vec_box!((B.app(), B.name())), 554 | vec_box!() 555 | ); 556 | struct D; 557 | migration!( 558 | D, 559 | "d", 560 | vec_box!(), 561 | vec_box!((C.app(), C.name())), 562 | vec_box!() 563 | ); 564 | let mut migrator = CustomMigrator::default(); 565 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(B, C, D, A)) 566 | .await 567 | .unwrap(); 568 | let mut plan_iter = plan.iter(); 569 | assert!(plan_iter.next() == Some(&&(Box::new(A) as Box>))); 570 | assert!(plan_iter.next() == Some(&&(Box::new(D) as Box>))); 571 | assert!(plan_iter.next().is_none()); 572 | } 573 | 574 | #[tokio::test] 575 | async fn virtual_not_added() { 576 | struct A; 577 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 578 | let mut migrator = CustomMigrator::default(); 579 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, ("test", "b"))).await; 580 | assert_eq!( 581 | plan.err().map(|e| e.to_string()), 582 | Some("plan error: virtual migrations which is not replaced is present".to_string()) 583 | ); 584 | } 585 | 586 | #[tokio::test] 587 | async fn virtual_replaced() { 588 | struct A; 589 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 590 | struct B; 591 | migration!(B, "b", vec_box!(A), vec_box!(), vec_box!()); 592 | let mut migrator = CustomMigrator::default(); 593 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(A, ("test", "b"), B, ("test", "b"))) 594 | .await 595 | .unwrap(); 596 | let mut plan_iter = plan.iter(); 597 | assert!(plan_iter.next() == Some(&&(Box::new(A) as Box>))); 598 | assert!(plan_iter.next() == Some(&&(Box::new(B) as Box>))); 599 | 
assert!(plan_iter.next().is_none()); 600 | } 601 | 602 | #[tokio::test] 603 | async fn virtual_reference() { 604 | struct A; 605 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 606 | struct B; 607 | migration!(B, "b", vec_box!(("test", "a")), vec_box!(), vec_box!()); 608 | let mut migrator = CustomMigrator::default(); 609 | let plan = generate_apply_all_plan(&mut migrator, vec_box!(B, A)) 610 | .await 611 | .unwrap(); 612 | assert_eq!(plan.len(), 2); 613 | } 614 | 615 | #[tokio::test] 616 | async fn apply_virtual_plan_size() { 617 | struct A; 618 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 619 | struct B; 620 | migration!( 621 | B, 622 | "b", 623 | vec_box!((A.app(), A.name())), 624 | vec_box!(), 625 | vec_box!() 626 | ); 627 | struct C; 628 | migration!( 629 | C, 630 | "c", 631 | vec_box!((B.app(), B.name())), 632 | vec_box!(), 633 | vec_box!() 634 | ); 635 | struct D; 636 | migration!( 637 | D, 638 | "d", 639 | vec_box!((B.app(), B.name())), 640 | vec_box!(), 641 | vec_box!() 642 | ); 643 | struct E; 644 | migration!( 645 | E, 646 | "e", 647 | vec_box!((C.app(), C.name())), 648 | vec_box!(), 649 | vec_box!() 650 | ); 651 | struct F; 652 | migration!( 653 | F, 654 | "f", 655 | vec_box!((D.app(), D.name())), 656 | vec_box!(), 657 | vec_box!() 658 | ); 659 | struct G; 660 | migration!( 661 | G, 662 | "g", 663 | vec_box!((E.app(), E.name())), 664 | vec_box!(), 665 | vec_box!() 666 | ); 667 | let mut migrator = CustomMigrator::default(); 668 | migrator 669 | .add_migrations(vec_box!(A, B, C, D, E, F, G)) 670 | .unwrap(); 671 | let sqlite = SqlitePool::connect("sqlite::memory:").await.unwrap(); 672 | let mut conn = sqlite.acquire().await.unwrap(); 673 | let full_plan = migrator 674 | .generate_migration_plan(&mut conn, Some(&Plan::apply_all())) 675 | .await 676 | .unwrap(); 677 | let mut full_plan_iter = full_plan.iter(); 678 | assert!(full_plan_iter.next() == Some(&&(Box::new(A) as Box>))); 679 | assert!(full_plan_iter.next() == Some(&&(Box::new(B) as Box>))); 680 | assert!(full_plan_iter.next() == Some(&&(Box::new(C) as Box>))); 681 | assert!(full_plan_iter.next() == Some(&&(Box::new(D) as Box>))); 682 | assert!(full_plan_iter.next() == Some(&&(Box::new(E) as Box>))); 683 | assert!(full_plan_iter.next() == Some(&&(Box::new(F) as Box>))); 684 | assert!(full_plan_iter.next() == Some(&&(Box::new(G) as Box>))); 685 | assert!(full_plan_iter.next().is_none()); 686 | let plan_till_f = migrator 687 | .generate_migration_plan( 688 | &mut conn, 689 | Some(&Plan::apply_name("test", &Some("f".to_string()))), 690 | ) 691 | .await 692 | .unwrap(); 693 | let mut plan_till_f_iter = plan_till_f.iter(); 694 | assert!(plan_till_f_iter.next() == Some(&&(Box::new(A) as Box>))); 695 | assert!(plan_till_f_iter.next() == Some(&&(Box::new(B) as Box>))); 696 | assert!(plan_till_f_iter.next() == Some(&&(Box::new(D) as Box>))); 697 | assert!(plan_till_f_iter.next() == Some(&&(Box::new(F) as Box>))); 698 | assert!(plan_till_f_iter.next().is_none()); 699 | let plan_till_g = migrator 700 | .generate_migration_plan( 701 | &mut conn, 702 | Some(&Plan::apply_name("test", &Some("g".to_string()))), 703 | ) 704 | .await 705 | .unwrap(); 706 | let mut plan_till_g_iter = plan_till_g.iter(); 707 | assert!(plan_till_g_iter.next() == Some(&&(Box::new(A) as Box>))); 708 | assert!(plan_till_g_iter.next() == Some(&&(Box::new(B) as Box>))); 709 | assert!(plan_till_g_iter.next() == Some(&&(Box::new(C) as Box>))); 710 | assert!(plan_till_g_iter.next() == Some(&&(Box::new(E) as Box>))); 711 | 
assert!(plan_till_g_iter.next() == Some(&&(Box::new(G) as Box>))); 712 | assert!(plan_till_g_iter.next().is_none()); 713 | } 714 | 715 | #[tokio::test] 716 | async fn revert_virtual_plan_size() { 717 | struct A; 718 | migration!(A, "a", vec_box!(), vec_box!(), vec_box!()); 719 | struct B; 720 | migration!( 721 | B, 722 | "b", 723 | vec_box!((A.app(), A.name())), 724 | vec_box!(), 725 | vec_box!() 726 | ); 727 | struct C; 728 | migration!( 729 | C, 730 | "c", 731 | vec_box!((B.app(), B.name())), 732 | vec_box!(), 733 | vec_box!() 734 | ); 735 | struct D; 736 | migration!( 737 | D, 738 | "d", 739 | vec_box!((B.app(), B.name())), 740 | vec_box!(), 741 | vec_box!() 742 | ); 743 | struct E; 744 | migration!( 745 | E, 746 | "e", 747 | vec_box!((C.app(), C.name())), 748 | vec_box!(), 749 | vec_box!() 750 | ); 751 | struct F; 752 | migration!( 753 | F, 754 | "f", 755 | vec_box!((D.app(), D.name())), 756 | vec_box!(), 757 | vec_box!() 758 | ); 759 | struct G; 760 | migration!( 761 | G, 762 | "g", 763 | vec_box!((E.app(), E.name())), 764 | vec_box!(), 765 | vec_box!() 766 | ); 767 | let mut migrator = CustomMigrator::default(); 768 | migrator 769 | .add_migrations(vec_box!(A, B, C, D, E, F, G)) 770 | .unwrap(); 771 | migrator 772 | .add_applied_migrations(vec_box!(A, B, C, D, E, F, G)) 773 | .unwrap(); 774 | let sqlite = SqlitePool::connect("sqlite::memory:").await.unwrap(); 775 | let mut conn = sqlite.acquire().await.unwrap(); 776 | let revert_plan = migrator 777 | .generate_migration_plan(&mut conn, Some(&Plan::revert_all())) 778 | .await 779 | .unwrap(); 780 | let mut revert_plan_iter = revert_plan.iter(); 781 | assert!(revert_plan_iter.next() == Some(&&(Box::new(G) as Box>))); 782 | assert!(revert_plan_iter.next() == Some(&&(Box::new(F) as Box>))); 783 | assert!(revert_plan_iter.next() == Some(&&(Box::new(E) as Box>))); 784 | assert!(revert_plan_iter.next() == Some(&&(Box::new(D) as Box>))); 785 | assert!(revert_plan_iter.next() == Some(&&(Box::new(C) as Box>))); 786 | assert!(revert_plan_iter.next() == Some(&&(Box::new(B) as Box>))); 787 | assert!(revert_plan_iter.next() == Some(&&(Box::new(A) as Box>))); 788 | assert!(revert_plan_iter.next().is_none()); 789 | let revert_till_f = Plan::revert_name("test", &Some("f".to_string())); 790 | let plan_till_f = migrator 791 | .generate_migration_plan(&mut conn, Some(&revert_till_f)) 792 | .await 793 | .unwrap(); 794 | let mut plan_till_f_iter = plan_till_f.iter(); 795 | assert!(plan_till_f_iter.next() == Some(&&(Box::new(F) as Box>))); 796 | assert!(plan_till_f_iter.next().is_none()); 797 | let revert_till_b = Plan::revert_name("test", &Some("b".to_string())); 798 | let plan_till_b = migrator 799 | .generate_migration_plan(&mut conn, Some(&revert_till_b)) 800 | .await 801 | .unwrap(); 802 | let mut plan_till_b_iter = plan_till_b.iter(); 803 | assert!(plan_till_b_iter.next() == Some(&&(Box::new(G) as Box>))); 804 | assert!(plan_till_b_iter.next() == Some(&&(Box::new(F) as Box>))); 805 | assert!(plan_till_b_iter.next() == Some(&&(Box::new(E) as Box>))); 806 | assert!(plan_till_b_iter.next() == Some(&&(Box::new(D) as Box>))); 807 | assert!(plan_till_b_iter.next() == Some(&&(Box::new(C) as Box>))); 808 | assert!(plan_till_b_iter.next() == Some(&&(Box::new(B) as Box>))); 809 | assert!(plan_till_b_iter.next().is_none()); 810 | } 811 | -------------------------------------------------------------------------------- /src/operation.rs: -------------------------------------------------------------------------------- 1 | //! 
Module for defining the [`Operation`] trait
2 | //!
3 | //! This module provides the [`Operation`] trait, allowing users to define
4 | //! database operations that can be executed as part of a migration process.
5 | //! These operations can be applied (`up`) or optionally reverted (`down`).
6 | #![cfg_attr(
7 | feature = "sqlite",
8 | doc = "
9 | To create your own operation, implement the trait for your type
10 | 
11 | ### Example
12 | ```rust,no_run
13 | use sqlx_migrator::error::Error;
14 | use sqlx_migrator::operation::Operation;
15 | use sqlx::Sqlite;
16 | 
17 | struct ExampleOperation;
18 | 
19 | #[async_trait::async_trait]
20 | impl Operation<Sqlite> for ExampleOperation {
21 |     async fn up(
22 |         &self,
23 |         connection: &mut sqlx::SqliteConnection,
24 |     ) -> Result<(), Error> {
25 |         // Do some operations
26 |         Ok(())
27 |     }
28 | 
29 |     // By default an operation is irreversible and cannot be reversed; if you want
30 |     // to support reverting the migration, add a down function as well
31 |     async fn down(
32 |         &self,
33 |         connection: &mut sqlx::SqliteConnection,
34 |     ) -> Result<(), Error> {
35 |         // Do some operations
36 |         Ok(())
37 |     }
38 | }
39 | ```
40 | "
41 | )]
42 | 
43 | use sqlx::Database;
44 | 
45 | use crate::error::Error;
46 | 
47 | /// Trait for defining a database operation.
48 | ///
49 | /// An `Operation` represents an action that can be applied to or reverted from
50 | /// a database during a migration. Each operation can have an `up` method for
51 | /// applying the change and an optional `down` method for rolling it back.
52 | ///
53 | /// Operations can also specify whether they are destructible, meaning that they
54 | /// require user confirmation before being applied, due to potential data loss
55 | /// or irreversible changes.
56 | #[async_trait::async_trait]
57 | pub trait Operation<DB>: Send + Sync
58 | where
59 |     DB: Database,
60 | {
61 |     /// The up method executes the operation when applying the migration.
62 |     ///
63 |     /// This method is called when the migration is being applied to the
64 |     /// database. Implement this method to define the changes you want to
65 |     /// apply.
66 |     async fn up(&self, connection: &mut <DB as Database>::Connection) -> Result<(), Error>;
67 | 
68 |     /// The down method reverses the operation when rolling back the
69 |     /// migration.
70 |     ///
71 |     /// This method is called when the migration is being rolled back. Implement
72 |     /// this method if you want to make the operation reversible. If not
73 |     /// implemented, the operation is considered irreversible.
74 |     async fn down(&self, connection: &mut <DB as Database>::Connection) -> Result<(), Error> {
75 |         let _connection = connection;
76 |         return Err(Error::IrreversibleOperation);
77 |     }
78 | 
79 |     /// Indicates whether the `up` operation is destructible.
80 |     ///
81 |     /// If the operation is destructible, the user will be prompted for
82 |     /// confirmation before running the migration via the CLI, due to the
83 |     /// potential for data loss or irreversible changes. By default, `up`
84 |     /// operations are considered non-destructible. Note that `down` operations
85 |     /// are always considered destructible and cannot be changed.
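    ///
    /// # Example
    /// A minimal sketch of a destructive operation (the `DropUsersTable` type and
    /// its SQL are illustrative only, not part of this crate):
    /// ```rust,ignore
    /// use sqlx_migrator::error::Error;
    /// use sqlx_migrator::operation::Operation;
    ///
    /// struct DropUsersTable;
    ///
    /// #[async_trait::async_trait]
    /// impl Operation<sqlx::Sqlite> for DropUsersTable {
    ///     async fn up(
    ///         &self,
    ///         connection: &mut sqlx::SqliteConnection,
    ///     ) -> Result<(), Error> {
    ///         sqlx::query("DROP TABLE users").execute(connection).await?;
    ///         Ok(())
    ///     }
    ///
    ///     // Ask for confirmation in the CLI before applying this operation
    ///     fn is_destructible(&self) -> bool {
    ///         true
    ///     }
    /// }
    /// ```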
86 |     fn is_destructible(&self) -> bool {
87 |         false
88 |     }
89 | }
90 | 
91 | #[async_trait::async_trait]
92 | impl<DB, U, D> Operation<DB> for (U, D)
93 | where
94 |     DB: Database,
95 |     U: AsRef<str> + Send + Sync,
96 |     D: AsRef<str> + Send + Sync,
97 |     for<'c> &'c mut <DB as Database>::Connection: sqlx::Executor<'c, Database = DB>,
98 |     for<'q> <DB as Database>::Arguments<'q>: sqlx::IntoArguments<'q, DB>,
99 | {
100 |     async fn up(&self, connection: &mut <DB as Database>::Connection) -> Result<(), Error> {
101 |         sqlx::query(self.0.as_ref())
102 |             .execute(connection)
103 |             .await
104 |             .map_err(Error::from)?;
105 |         Ok(())
106 |     }
107 | 
108 |     async fn down(&self, connection: &mut <DB as Database>::Connection) -> Result<(), Error> {
109 |         sqlx::query(self.1.as_ref())
110 |             .execute(connection)
111 |             .await
112 |             .map_err(Error::from)?;
113 |         Ok(())
114 |     }
115 | }
116 | 
--------------------------------------------------------------------------------
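Because of the `(U, D)` implementation above, a pair of SQL strings can be used
directly as an `Operation`. The following is a minimal sketch of wiring such a
pair into a migration and applying it with `Migrator` on SQLite. It assumes the
crate's `sqlite` feature, a `tokio` runtime, and the module paths used in this
repository; the `CreateUsersTable` migration and its SQL are illustrative, not
code from this crate.

use sqlx::Sqlite;
use sqlx_migrator::error::Error;
use sqlx_migrator::migration::Migration;
use sqlx_migrator::migrator::{Info, Migrate, Plan};
use sqlx_migrator::operation::Operation;
use sqlx_migrator::Migrator;

struct CreateUsersTable;

impl Migration<Sqlite> for CreateUsersTable {
    fn app(&self) -> &str {
        "main"
    }

    fn name(&self) -> &str {
        "create_users_table"
    }

    fn parents(&self) -> Vec<Box<dyn Migration<Sqlite>>> {
        vec![]
    }

    fn operations(&self) -> Vec<Box<dyn Operation<Sqlite>>> {
        // The (up SQL, down SQL) pair is itself an Operation thanks to the
        // tuple implementation in src/operation.rs
        vec![Box::new((
            "CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)",
            "DROP TABLE users",
        ))]
    }
}

#[tokio::main]
async fn main() -> Result<(), Error> {
    let pool = sqlx::SqlitePool::connect("sqlite::memory:").await?;
    let mut conn = pool.acquire().await?;

    let mut migrator = Migrator::<Sqlite>::new();
    migrator.add_migration(Box::new(CreateUsersTable))?;

    // Apply every pending migration; Plan::revert_all() would undo them
    migrator.run(&mut conn, &Plan::apply_all()).await?;
    Ok(())
}

The same pattern should carry over to the Postgres and MySQL migrators; only the
database type and connection URL change.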