├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── examples ├── blog.rs ├── delete.rs ├── empty_table.rs ├── insert_and_select.rs ├── new_blog.rs ├── query_optional.rs ├── table_from.rs └── unchecked.rs ├── rust-query-macros ├── Cargo.toml └── src │ ├── dummy.rs │ ├── fields.rs │ ├── lib.rs │ ├── migrations.rs │ ├── multi.rs │ ├── parse.rs │ └── table.rs ├── src ├── alias.rs ├── ast.rs ├── client.rs ├── db.rs ├── dummy_impl.rs ├── hash.rs ├── lib.rs ├── migrate.rs ├── mymap.rs ├── query.rs ├── ref_cast_impl.rs ├── rows.rs ├── schema_pragma.rs ├── transaction.rs ├── value.rs ├── value │ ├── aggregate.rs │ ├── operations.rs │ ├── optional.rs │ └── trivial.rs └── writable.rs └── tests ├── chinook ├── expect │ ├── artist_details.dbg │ ├── artist_details.plan │ ├── avg_album_track_count_for_artist.dbg │ ├── avg_album_track_count_for_artist.plan │ ├── count_reporting.dbg │ ├── count_reporting.plan │ ├── customer_spending.dbg │ ├── customer_spending.plan │ ├── filtered_track.dbg │ ├── filtered_track.plan │ ├── genre_statistics.dbg │ ├── genre_statistics.plan │ ├── high_avg_invoice_total.dbg │ ├── high_avg_invoice_total.plan │ ├── invoice_info.dbg │ ├── invoice_info.plan │ ├── list_all_genres.dbg │ ├── list_all_genres.plan │ ├── playlist_track_count.dbg │ ├── playlist_track_count.plan │ ├── ten_space_tracks.dbg │ ├── ten_space_tracks.plan │ ├── the_artists.dbg │ └── the_artists.plan ├── main.rs ├── migrate.sql └── schema.rs ├── compile ├── aggregate_invariant.rs ├── aggregate_invariant.stderr ├── id_column.rs ├── id_column.stderr ├── no_reference.rs ├── no_reference.stderr ├── optional_invariant.rs ├── optional_invariant.stderr ├── schema_does_not_exist.rs ├── schema_does_not_exist.stderr ├── schema_types.rs ├── schema_types.stderr ├── table_does_not_exist.rs ├── table_does_not_exist.stderr ├── transaction_invariant.rs ├── transaction_invariant.stderr ├── unique_column_does_not_exists.rs ├── 
unique_column_does_not_exists.stderr ├── use_after_free.rs └── use_after_free.stderr └── tpc_c ├── delivery.rs ├── main.rs ├── new_order.rs ├── order_status.rs └── payment.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /docs 3 | Chinook_Sqlite.sql 4 | *.sqlite 5 | *.sqlite-shm 6 | *.sqlite-wal 7 | test.db 8 | /src/experiment 9 | /src/experiment.rs 10 | Cargo.lock 11 | TODO.md 12 | /.vscode 13 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Unreleased 2 | 3 | # 0.4.3 4 | 5 | - Fix panic when inserting into table without columns. 6 | - Add `Select::map` method. 7 | - Deprecate `IntoSelectExt::map_select`. 8 | - Deprecate `Aggregate::join_on`. 9 | 10 | # 0.4.2 11 | 12 | - Update the `Rows::join` method to take a constant argument. 13 | This is now the prefered join syntax and all examples have been updated. 14 | - Allow arbitrary correlated subqueries. 15 | This means that `Aggregate` now has an implied bound that allows leaking `Expr` from the 16 | out scope. Correlated subqueries are decorrelated before translating to SQL. 17 | - Fix loose lifetime on `Optional`. 18 | 19 | # 0.4.1 20 | 21 | - Change conflicts back to using `TableRow` instead of `Expr`. 22 | Changing the conflict type to `Expr` was a mistake, because the `Expr` can be invalidated. 23 | - Fix `#[schema]` macro not showing errors for unique constraints. 24 | 25 | # 0.4.0 26 | 27 | Blog post: https://blog.lucasholten.com/rust-query-0-4/ 28 | 29 | ## Optional Queries 30 | - Added `optional` combinator. 31 | - Changed `Expr` to be co-variant in its lifetime. 32 | 33 | ## Basic Datatypes and Operations 34 | - Added support for `Vec` data type (sqlite `BLOB`). 35 | - Added some more basic operations on expressions. 
36 | 37 | ## Updates, Insert and Query 38 | - Added safe updates of a subset of columns for each table. 39 | - Update statements now use the `Update` type for each column. 40 | - Insert and update conflict is now an `Expr` (`find_or_insert` returns an `Expr` now too). 41 | - `Rows::into_vec` is no longer sorted automatically. 42 | 43 | ## Schema and Mirations 44 | - Changed `#[schema]` syntax to be a module of structs. 45 | - Added `#[from]` attribute to allow renaming tables and splitting tables in the schema. 46 | - The generated migration structs have moved from e.g. `v1::update::UserMigration` to `v0::migrate::User`. 47 | - Migrations now require explicit handling of potential unique constraint violations. 48 | - Migrations now require explicit handling of foreign key violations. 49 | 50 | ## Type Driven Select 51 | - Added a macro for each table to create ad-hoc column selection types like `User!(name, age)`. 52 | - Added the `FromExpr` trait to allow custom column selection and conversion. 53 | 54 | ## Feature Flags and Dependencies 55 | - `TransactionWeak::rusqlite_transaction` is renamed and no longer behind a feature flag. 56 | - `hash_schema` method was moved behind `dev` feature which is enabled by default. 57 | - Updated dependencies. 58 | 59 | ## Renaming 60 | - Renamed `Dummy` to `Select`. 61 | - Renamed `Column` to `Expr`. 62 | - Renamed `try_insert` to `insert` and `insert` to `insert_ok`. 63 | - Renamed `try_delete` to `delete` and `delete` to `delete_ok`. 64 | - Renamed `try_update` to `update` and `update` to `update_ok`. 65 | 66 | # 0.3.1 67 | 68 | - Added error message when defining an `id` column. 69 | - Added support for sqlite `LIKE` and `GLOB` operators (Contributed by @teamplayer3). 70 | - Added support for `DELETE` using `TransactionWeak` and `#[no_reference]`. 71 | - Added `TransactionWeak::unchecked_transaction` behind feature flag. 72 | - Added `impl ToSql for TableRow` behind `unchecked_transaction` feature flag. 
73 | - Removed `impl RefCast for Transaction`, it was not intended to be public. 74 | - Removed `impl FromSql for TableRow`, it was not intended to be public. 75 | 76 | # 0.3.0 77 | 78 | - Added support for updating rows. 79 | - Added `Table::dummy` method, which makes it easier to do partial updates. 80 | - Reused table types in the generated API for both naming `TableRow` and dummies `User {name: "steve"}`. 81 | - Forbid `Option` in unique constraints. 82 | - Renamed `ThreadToken` to `LocalClient`. 83 | - Renamed and moved `read` and `write_lock` to `transaction` and `transaction_mut`. 84 | - Check `schema_version` at the start of every transaction. 85 | - Simplify migration and borrow `LocalClient` only once. 86 | - Renamed `Prepare` to `Config` and simplified its API. 87 | 88 | # 0.2.2 89 | 90 | - Bound the lifetime of `TableRow: IntoColumn` to the lifetime of the transaction. 91 | Without the bound it was possible to sneak `TableRow`s into following transacions.
92 | `query_one` now checks that its input lives for as long as the transaction. 93 | To make sure that `query_one` still checks that the dummy is "global", the transaction now has an invariant lifetime. 94 |
95 | 96 | # 0.2.1 97 | 98 | - Relax `Transaction` creation to not borrow the `Database`. 99 | - Add missing lifetime bound on `try_insert`s return value. 100 | Technically this is a breaking change, but it fixes a bug so it is still a patch release. 101 | - Fix the version of the macro crate exactly (=0.2.0) to allow future internal API changes with only a patch release. 102 | 103 | # 0.2.0 104 | 105 | - Rewrote almost the whole library to specify the schema using enum syntax with a proc macro. 106 | - Added a single Column type to handle a lot of query building. 107 | - Dummy trait to retrieve multiple values at once and allow post processing. 108 | - Added support for transactions and multiple schemas. 109 | 110 | # 0.1.x 111 | 112 | - This version was SQL schema first. It would generate the API based on the schema read from the database. 113 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["rust-query-macros"] 3 | 4 | [package] 5 | name = "rust-query" 6 | version = "0.4.3" 7 | edition = "2024" 8 | description = "A query builder using rust concepts." 
9 | categories = ["database"] 10 | repository = "https://github.com/LHolten/rust-query/" 11 | license = "MIT OR Apache-2.0" 12 | rust-version = "1.85" 13 | 14 | [dependencies] 15 | sea-query = "0.32" 16 | sea-query-rusqlite = "0.7" 17 | rusqlite = { version = "0.32", features = ["modern_sqlite", "unlock_notify"] } 18 | k12 = {version = "0.3", optional = true} 19 | rust-query-macros = { path = "rust-query-macros", version = "=0.4.3" } 20 | ref-cast = "1.0.23" 21 | pretty_assertions = "1.4.0" 22 | r2d2_sqlite = "0.25.0" 23 | r2d2 = "0.8" 24 | static_assertions = "1.1.0" 25 | 26 | [dev-dependencies] 27 | trybuild = "1.0.97" 28 | expect-test = "1" 29 | rand = "0.9.1" 30 | 31 | [features] 32 | default = ["dev"] 33 | bundled = ["rusqlite/bundled"] 34 | dev = ["dep:k12"] 35 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | https://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 
175 | 176 | END OF TERMS AND CONDITIONS -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2024 Lucas Holten 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Type safe SQLite using the Rust type system 2 | The goal of this library is to allow using relational databases (only SQLite right now) using familiar Rust syntax. 3 | The library should guarantee that queries and migrations can not fail when they compile. While rust-query goes quite far to achieve this, there are still some exceptions that can cause queries to fail, such as integer overflow. 
4 | 5 | Writing queries using this library involves: 6 | - Interaction with row/column references as Rust values. 7 | - Lifetimes to check the scopes of row/column references. 8 | - Procedural mutation of row sets with methods like `filter` and `join`. 9 | - "Combinators" like `optional` and `aggregate`. 10 | 11 | Notably writing queries itself does not involve any new syntax or macro, while still being completely type safe. 12 | (There are macros to define the schema and to simplify defining composite types to retrieve from queries) 13 | 14 | ## What it looks like 15 | 16 | Define a schema using the syntax of a module with structs: 17 | ```rust 18 | # fn main() {} 19 | use rust_query::migration::schema; 20 | 21 | #[schema(MySchema)] 22 | pub mod vN { 23 | // Structs are database tables 24 | pub struct User { 25 | // This table has one column with String (sqlite TEXT) type. 26 | pub name: String, 27 | } 28 | pub struct Image { 29 | pub description: String, 30 | // This column has a foreign key constraint to the User table 31 | pub uploaded_by: User, 32 | } 33 | } 34 | ``` 35 | Get proof that we are running on a unique thread: 36 | ```rust 37 | # use rust_query::LocalClient; 38 | let mut client = LocalClient::try_new().unwrap(); 39 | ``` 40 | Initialize a database: 41 | ```rust,ignore 42 | let database = client 43 | .migrator(Config::open("my_database.sqlite")) 44 | .expect("database version is before supported versions") 45 | // migrations go here 46 | .finish() 47 | .expect("database version is after supported versions"); 48 | ``` 49 | Perform a transaction! 50 | ```rust,ignore 51 | let mut txn = client.transaction_mut(&database); 52 | do_stuff_with_database(&mut txn); 53 | // After we are done we commit the changes! 54 | txn.commit(); 55 | ``` 56 | Insert in the database: 57 | ```rust,ignore 58 | // Lets make a new user 'mike', 59 | let mike = User { name: "mike" }; 60 | let mike_id = txn.insert_ok(mike); 61 | // and also insert a dog picture for 'mike'. 
62 | let dog_picture = Image { 63 | description: "dog", 64 | uploaded_by: mike_id, 65 | }; 66 | let _picture_id = txn.insert_ok(dog_picture); 67 | ``` 68 | Query from the database: 69 | ```rust,ignore 70 | // Now we want to get all pictures for 'mike'. 71 | let mike_pictures = txn.query(|rows| { 72 | // Initially there is one empty row. 73 | // Lets join the pictures table. 74 | let picture = rows.join(Image); 75 | // Now lets filter for pictures from mike, 76 | rows.filter(picture.uploaded_by().eq(mike_id)); 77 | // and finally turn the rows into a vec. 78 | rows.into_vec(picture.description()) 79 | }); 80 | 81 | println!("{mike_pictures:?}"); // This should print `["dog"]`. 82 | ``` 83 | The full example code can be found in [insert_and_select.rs](examples/insert_and_select.rs) 84 | 85 | ## Examples 86 | For more examples you can look at [the examples directory](/examples). 87 | 88 | ## Roadmap 89 | 90 | This project is under development and there are some things missing. 91 | Below is a checklist of planned features and implemented features. 92 | (Implemented features have a checkmark, planned features do not). 93 | 94 | Schema: 95 | - [x] Basic types (integer, real, text, blob, null) 96 | - [x] Basic foreign keys 97 | - [x] (Multi column) unique constraints 98 | - [ ] Check constraints 99 | - [ ] Overlapping foreign keys 100 | 101 | Statements: 102 | - [x] Multi row query + single row query (and optional query) 103 | - [x] Single row insert, update and delete 104 | 105 | Expressions: 106 | - [x] Some basic math, boolean and string operations 107 | - [x] Aggregate combinator 108 | - [x] Optional combinator 109 | - [ ] Everything else 110 | 111 | Advanced operations: 112 | - [ ] Window 113 | - [ ] Limit 114 | 115 | Despite these limitations, I am dogfooding this query builder and using it in my own project: [advent-of-wasm](https://github.com/LHolten/advent-of-wasm). 
116 | -------------------------------------------------------------------------------- /examples/blog.rs: -------------------------------------------------------------------------------- 1 | use rust_query::{ 2 | Database, LocalClient, Transaction, TransactionMut, aggregate, 3 | migration::{Config, schema}, 4 | }; 5 | 6 | #[schema(Schema)] 7 | #[version(0..=1)] 8 | pub mod vN { 9 | pub struct User { 10 | pub name: String, 11 | #[version(1..)] 12 | pub email: String, 13 | } 14 | pub struct Story { 15 | pub author: User, 16 | pub title: String, 17 | pub content: String, 18 | } 19 | #[unique(user, story)] 20 | pub struct Rating { 21 | pub user: User, 22 | pub story: Story, 23 | pub stars: i64, 24 | } 25 | } 26 | use v1::*; 27 | 28 | fn insert_data(txn: &mut TransactionMut) { 29 | // Insert users 30 | let alice = txn.insert_ok(User { 31 | name: "alice", 32 | email: "test", 33 | }); 34 | let bob = txn.insert_ok(User { 35 | name: "bob", 36 | email: "test", 37 | }); 38 | 39 | // Insert a story 40 | let dream = txn.insert_ok(Story { 41 | author: alice, 42 | title: "My crazy dream", 43 | content: "A dinosaur and a bird...", 44 | }); 45 | 46 | // Insert a rating - note the try_insert due to the unique constraint 47 | let _rating = txn 48 | .insert(Rating { 49 | user: bob, 50 | story: dream, 51 | stars: 5, 52 | }) 53 | .expect("no rating for this user and story exists yet"); 54 | } 55 | 56 | fn query_data(txn: &Transaction) { 57 | let results = txn.query(|rows| { 58 | let story = rows.join(Story); 59 | let avg_rating = aggregate(|rows| { 60 | let rating = rows.join(Rating); 61 | rows.filter(rating.story().eq(&story)); 62 | rows.avg(rating.stars().as_float()) 63 | }); 64 | rows.into_vec((story.title(), avg_rating)) 65 | }); 66 | 67 | for (title, avg_rating) in results { 68 | println!("story '{title}' has avg rating {avg_rating:?}"); 69 | } 70 | } 71 | 72 | pub fn migrate(client: &mut LocalClient) -> Database { 73 | let m = client 74 | .migrator(Config::open_in_memory()) 75 | 
.expect("database is older than supported versions"); 76 | let m = m.migrate(|txn| v0::migrate::Schema { 77 | user: txn.migrate_ok(|old_user: v0::User!(name)| v0::migrate::User { 78 | email: format!("{}@example.com", old_user.name), 79 | }), 80 | }); 81 | m.finish() 82 | .expect("database is newer than supported versions") 83 | } 84 | 85 | fn main() { 86 | let mut client = LocalClient::try_new().unwrap(); 87 | let db = migrate(&mut client); 88 | let mut txn = client.transaction_mut(&db); 89 | insert_data(&mut txn); 90 | query_data(&txn); 91 | } 92 | 93 | #[test] 94 | fn run() { 95 | main(); 96 | } 97 | 98 | #[test] 99 | #[cfg(feature = "dev")] 100 | fn schema_hash() { 101 | use expect_test::expect; 102 | use rust_query::migration::hash_schema; 103 | expect!["dd7f5d2f553f5b7a"].assert_eq(&hash_schema::()); 104 | expect!["66e6a7d64535bcda"].assert_eq(&hash_schema::()); 105 | } 106 | -------------------------------------------------------------------------------- /examples/delete.rs: -------------------------------------------------------------------------------- 1 | use rust_query::{ 2 | Database, LocalClient, 3 | migration::{Config, schema}, 4 | }; 5 | 6 | #[schema(Schema)] 7 | pub mod vN { 8 | #[no_reference] 9 | pub struct Name { 10 | pub name: String, 11 | } 12 | } 13 | use v0::*; 14 | 15 | fn main() { 16 | // Get a LocalClient to prove that we have our own thread. 17 | // This is necessary to keep transactions separated. 
18 | let mut client = LocalClient::try_new().unwrap(); 19 | let database: Database = client 20 | .migrator(Config::open_in_memory()) 21 | .expect("database version is before supported versions") 22 | // migrations go here 23 | .finish() 24 | .expect("database version is after supported versions"); 25 | 26 | let mut txn = client.transaction_mut(&database); 27 | 28 | let ids: Vec<_> = vec!["alpha", "bravo", "charlie", "delta"] 29 | .into_iter() 30 | .map(|name| txn.insert_ok(Name { name })) 31 | .collect(); 32 | 33 | let mut txn = txn.downgrade(); 34 | for id in ids.clone() { 35 | assert!(txn.delete_ok(id)); 36 | } 37 | for id in ids { 38 | assert!(!txn.delete_ok(id)); 39 | } 40 | } 41 | 42 | #[test] 43 | fn run() { 44 | main(); 45 | } 46 | 47 | #[test] 48 | #[cfg(feature = "dev")] 49 | fn schema_hash() { 50 | use expect_test::expect; 51 | use rust_query::migration::hash_schema; 52 | expect!["822e0ab9b42056f7"].assert_eq(&hash_schema::()); 53 | } 54 | -------------------------------------------------------------------------------- /examples/empty_table.rs: -------------------------------------------------------------------------------- 1 | use rust_query::{ 2 | Database, IntoExpr, LocalClient, 3 | migration::{Config, schema}, 4 | }; 5 | 6 | #[schema(Schema)] 7 | pub mod vN { 8 | pub struct Empty; 9 | } 10 | 11 | pub fn main() { 12 | let mut client = LocalClient::try_new().unwrap(); 13 | let db: Database = client 14 | .migrator(Config::open_in_memory()) 15 | .expect("database is older than supported versions") 16 | .finish() 17 | .expect("database is newer than supported versions"); 18 | 19 | let mut txn = client.transaction_mut(&db); 20 | let id = txn.insert(v0::Empty).unwrap(); 21 | let id = txn.query_one(id.into_expr()); 22 | let mut txn = txn.downgrade(); 23 | assert!(txn.delete(id).unwrap()); 24 | txn.commit(); 25 | } 26 | -------------------------------------------------------------------------------- /examples/insert_and_select.rs: 
-------------------------------------------------------------------------------- 1 | use rust_query::{ 2 | LocalClient, TransactionMut, 3 | migration::{Config, schema}, 4 | }; 5 | 6 | // Start by defining your schema. 7 | #[schema(MySchema)] 8 | pub mod vN { 9 | pub struct User { 10 | pub name: String, 11 | } 12 | pub struct Image { 13 | pub description: String, 14 | pub uploaded_by: User, 15 | } 16 | } 17 | // Bring the latest schema version into scope. 18 | use v0::*; 19 | 20 | // Use your schema to initalize a database. 21 | fn main() { 22 | // Get a LocalClient to prove that we have our own thread. 23 | // This is necessary to keep transactions separated. 24 | let mut client = LocalClient::try_new().unwrap(); 25 | let database = client 26 | .migrator(Config::open_in_memory()) 27 | .expect("database version is before supported versions") 28 | // migrations go here 29 | .finish() 30 | .expect("database version is after supported versions"); 31 | 32 | let mut txn = client.transaction_mut(&database); 33 | do_stuff_with_database(&mut txn); 34 | // After we are done we commit the changes! 35 | txn.commit(); 36 | } 37 | 38 | // Use the database to insert and query. 39 | fn do_stuff_with_database(txn: &mut TransactionMut) { 40 | // Lets make a new user 'mike', 41 | let mike = User { name: "mike" }; 42 | let mike_id = txn.insert_ok(mike); 43 | 44 | // and also insert a dog picture for 'mike'. 45 | let dog_picture = Image { 46 | description: "dog", 47 | uploaded_by: mike_id, 48 | }; 49 | let _picture_id = txn.insert_ok(dog_picture); 50 | 51 | // Now we want to get all pictures for 'mike'. 52 | let mike_pictures = txn.query(|rows| { 53 | // Initially there is one empty row. 54 | // Lets join the pictures table. 55 | let picture = rows.join(Image); 56 | // Now lets filter for pictures from mike, 57 | rows.filter(picture.uploaded_by().eq(mike_id)); 58 | // and finally turn the rows into a vec. 
59 | rows.into_vec(picture.description()) 60 | }); 61 | 62 | println!("{mike_pictures:?}"); // This should print `["dog"]`. 63 | } 64 | 65 | #[test] 66 | fn run() { 67 | main(); 68 | } 69 | 70 | #[test] 71 | #[cfg(feature = "dev")] 72 | fn schema_hash() { 73 | use expect_test::expect; 74 | use rust_query::migration::hash_schema; 75 | expect!["e6dbf93daba3ccfa"].assert_eq(&hash_schema::()); 76 | } 77 | -------------------------------------------------------------------------------- /examples/new_blog.rs: -------------------------------------------------------------------------------- 1 | use rust_query::{ 2 | Database, LocalClient, Select, TableRow, Transaction, aggregate, migration::schema, optional, 3 | }; 4 | 5 | #[schema(Schema)] 6 | #[version(0..=1)] 7 | pub mod vN { 8 | 9 | pub struct Measurement { 10 | #[version(..1)] 11 | pub score: i64, 12 | #[version(1..)] 13 | pub value: f64, 14 | pub duration: i64, 15 | pub confidence: f64, 16 | pub timestamp: i64, 17 | pub location: Location, 18 | } 19 | pub struct Location { 20 | pub name: String, 21 | } 22 | } 23 | 24 | mod using_v0 { 25 | use super::*; 26 | use rust_query::FromExpr; 27 | use v0::*; 28 | 29 | #[expect(unused)] 30 | #[derive(FromExpr, Select)] 31 | #[rust_query(From = Measurement)] 32 | struct Score { 33 | score: i64, 34 | timestamp: i64, 35 | } 36 | 37 | #[expect(unused)] 38 | fn read_scores(txn: &Transaction) -> Vec { 39 | txn.query(|rows| { 40 | let m = rows.join(Measurement); 41 | rows.into_vec(ScoreSelect { 42 | score: m.score(), 43 | timestamp: m.timestamp(), 44 | }) 45 | }) 46 | } 47 | 48 | #[expect(unused)] 49 | fn read_scores2(txn: &Transaction) -> Vec { 50 | txn.query(|rows| { 51 | let m = rows.join(Measurement); 52 | rows.into_vec(FromExpr::from_expr(m)) 53 | }) 54 | } 55 | 56 | #[expect(unused)] 57 | fn read_scores3(txn: &Transaction) -> Vec { 58 | txn.query(|rows| { 59 | let m = rows.join(Measurement); 60 | rows.into_vec(FromExpr::from_expr(m)) 61 | }) 62 | } 63 | } 64 | 65 | fn main() { 
66 | let mut client = LocalClient::try_new().unwrap(); 67 | let db = using_v1::migrate(&mut client); 68 | let txn = client.transaction_mut(&db); 69 | using_v1::do_stuff(txn); 70 | } 71 | 72 | mod using_v1 { 73 | use super::*; 74 | use rust_query::{TransactionMut, TransactionWeak, migration::Config}; 75 | use v1::*; 76 | 77 | pub fn migrate(client: &mut LocalClient) -> Database { 78 | let m = client 79 | .migrator(Config::open("db.sqlite")) 80 | .expect("database should not be older than supported versions"); 81 | let m = m.migrate(|txn| v0::migrate::Schema { 82 | measurement: txn.migrate_ok(|old: v0::Measurement!(score)| v0::migrate::Measurement { 83 | value: old.score as f64, 84 | }), 85 | }); 86 | m.finish() 87 | .expect("database should not be newer than supported versions") 88 | } 89 | 90 | pub fn do_stuff(mut txn: TransactionMut) { 91 | let loc: TableRow = txn.insert_ok(Location { name: "Amsterdam" }); 92 | let _ = location_info(&txn, loc); 93 | 94 | let mut txn: TransactionWeak = txn.downgrade(); 95 | 96 | let is_deleted = txn 97 | .delete(loc) 98 | .expect("there should be no fk references to this row"); 99 | assert!(is_deleted); 100 | 101 | let is_not_deleted_twice = !txn 102 | .delete(loc) 103 | .expect("there should be no fk references to this row"); 104 | assert!(is_not_deleted_twice); 105 | } 106 | 107 | #[expect(unused)] 108 | #[derive(Select)] 109 | struct Info { 110 | average_value: f64, 111 | total_duration: i64, 112 | } 113 | 114 | fn location_info<'t>( 115 | txn: &Transaction<'t, Schema>, 116 | loc: TableRow<'t, Location>, 117 | ) -> Option { 118 | txn.query_one(aggregate(|rows| { 119 | let m = rows.join(Measurement); 120 | rows.filter(m.location().eq(loc)); 121 | 122 | optional(|row| { 123 | let average_value = row.and(rows.avg(m.value())); 124 | row.then(InfoSelect { 125 | average_value, 126 | total_duration: rows.sum(m.duration()), 127 | }) 128 | }) 129 | })) 130 | } 131 | } 132 | 133 | mod delete_example { 134 | use super::*; 135 | 
#[schema(Schema)] 136 | #[version(0..=1)] 137 | pub mod vN { 138 | #[version(..1)] 139 | pub struct User { 140 | pub name: String, 141 | } 142 | #[version(1..)] 143 | #[from(User)] 144 | pub struct Author { 145 | pub name: String, 146 | } 147 | pub struct Book { 148 | pub author: Author, 149 | } 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /examples/query_optional.rs: -------------------------------------------------------------------------------- 1 | use rust_query::{ 2 | Database, FromExpr, LocalClient, 3 | migration::{Config, schema}, 4 | optional, 5 | }; 6 | 7 | // Start by defining your schema. 8 | #[schema(Schema)] 9 | pub mod vN { 10 | pub struct Player { 11 | #[unique] 12 | pub pub_id: i64, 13 | pub name: String, 14 | pub score: i64, 15 | pub home: World, 16 | } 17 | pub struct World { 18 | pub name: String, 19 | } 20 | } 21 | // Bring the latest schema version into scope. 22 | use v0::*; 23 | 24 | fn main() { 25 | let pub_id = 100; 26 | 27 | let mut client = LocalClient::try_new().unwrap(); 28 | let database: Database = client 29 | .migrator(Config::open_in_memory()) 30 | .expect("database version is before supported versions") 31 | // migrations go here 32 | .finish() 33 | .expect("database version is after supported versions"); 34 | 35 | let mut txn = client.transaction_mut(&database); 36 | 37 | #[expect(unused)] 38 | #[derive(FromExpr)] 39 | #[rust_query(From = World, From = Player)] 40 | struct NameInfo { 41 | name: String, 42 | } 43 | 44 | type PlayerInfo = Player!(name, score, home as NameInfo); 45 | type PlayerInfo2<'t> = Player!(score, home<'t>); 46 | 47 | // old pattern, requires two queries 48 | let player = txn.query_one(Player::unique(pub_id)); 49 | let _info = player.map(|player| txn.query_one(PlayerInfo::from_expr(player))); 50 | 51 | // most powerful pattern, can retrieve optional data in one query 52 | let _info = txn.query_one(optional(|row| { 53 | let player = 
row.and(Player::unique(pub_id)); 54 | row.then(PlayerInfo::from_expr(player)) 55 | })); 56 | 57 | // for simple queries, use the trivial mapping 58 | let info = txn.query_one(Option::::from_expr(Player::unique(pub_id))); 59 | 60 | assert!(info.is_none()); 61 | 62 | let home = txn.insert_ok(World { name: "Dune" }); 63 | txn.insert(Player { 64 | pub_id, 65 | name: "Asterix", 66 | score: 3000, 67 | home, 68 | }) 69 | .expect("there is no player with this pub_id yet"); 70 | 71 | let info = txn.query_one(Option::::from_expr(Player::unique(pub_id))); 72 | assert!(info.is_some()) 73 | } 74 | 75 | #[test] 76 | fn run() { 77 | main(); 78 | } 79 | 80 | #[test] 81 | #[cfg(feature = "dev")] 82 | fn schema_hash() { 83 | use expect_test::expect; 84 | use rust_query::migration::hash_schema; 85 | expect!["93ca1485f9eba782"].assert_eq(&hash_schema::()); 86 | } 87 | -------------------------------------------------------------------------------- /examples/table_from.rs: -------------------------------------------------------------------------------- 1 | use rust_query::migration::schema; 2 | 3 | #[schema(Schema)] 4 | #[version(0..=1)] 5 | pub mod vN { 6 | pub struct Foo; 7 | #[from(Foo)] 8 | #[version(1..)] 9 | pub struct FooNext; 10 | pub struct Bar { 11 | // this will be `Foo` for v0 and `FooNext` for v1 12 | #[unique] 13 | pub evolving: FooNext, 14 | // this will be `Foo` in both v0 and v1 15 | pub foo: Foo, 16 | } 17 | } 18 | 19 | pub fn main() {} 20 | -------------------------------------------------------------------------------- /examples/unchecked.rs: -------------------------------------------------------------------------------- 1 | use rust_query::{ 2 | Database, LocalClient, 3 | migration::{Config, schema}, 4 | }; 5 | 6 | #[schema(Schema)] 7 | pub mod vN { 8 | pub struct Name { 9 | pub name: String, 10 | } 11 | } 12 | use v0::*; 13 | 14 | fn main() { 15 | // Get a LocalClient to prove that we have our own thread. 16 | // This is necessary to keep transactions separated. 
17 | let mut client = LocalClient::try_new().unwrap(); 18 | let database: Database = client 19 | .migrator(Config::open_in_memory()) 20 | .expect("database version is before supported versions") 21 | // migrations go here 22 | .finish() 23 | .expect("database version is after supported versions"); 24 | 25 | let mut txn = client.transaction_mut(&database); 26 | 27 | let ids: Vec<_> = vec!["alpha", "bravo", "charlie", "delta"] 28 | .into_iter() 29 | .map(|name| txn.insert_ok(Name { name })) 30 | .collect(); 31 | 32 | let mut txn = txn.downgrade(); 33 | 34 | let raw_txn = txn.rusqlite_transaction(); 35 | for id in ids { 36 | let name: String = raw_txn 37 | .query_row("select name from Name where id = $1", [&id], |row| { 38 | row.get(0) 39 | }) 40 | .unwrap(); 41 | println!("{name}") 42 | } 43 | 44 | txn.commit(); 45 | } 46 | 47 | #[test] 48 | fn run() { 49 | main(); 50 | } 51 | 52 | #[test] 53 | #[cfg(feature = "dev")] 54 | fn schema_hash() { 55 | use expect_test::expect; 56 | use rust_query::migration::hash_schema; 57 | expect!["822e0ab9b42056f7"].assert_eq(&hash_schema::()); 58 | } 59 | -------------------------------------------------------------------------------- /rust-query-macros/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rust-query-macros" 3 | version = "0.4.3" 4 | edition = "2021" 5 | description = "Proc-macro crate for rust-query." 6 | repository = "https://github.com/LHolten/rust-query/" 7 | license = "MIT OR Apache-2.0" 8 | 9 | [lib] 10 | proc-macro = true 11 | 12 | [dependencies] 13 | proc-macro2 = "1.0.82" 14 | quote = "1.0.36" 15 | syn = { version = "2.0.63", features = ["full"] } 16 | heck = "0.5.0" 17 | 18 | [dev-dependencies] 19 | ref-cast = "1.0.23" 20 | rust-query = { path = ".." 
} 21 | -------------------------------------------------------------------------------- /rust-query-macros/src/dummy.rs: -------------------------------------------------------------------------------- 1 | use proc_macro2::{Span, TokenStream}; 2 | use quote::{format_ident, quote, quote_spanned, ToTokens}; 3 | use syn::{spanned::Spanned, GenericParam, ItemStruct, Lifetime}; 4 | 5 | use crate::make_generic; 6 | 7 | struct CommonInfo { 8 | name: syn::Ident, 9 | original_generics: Vec, 10 | fields: Vec<(syn::Ident, syn::Type)>, 11 | } 12 | 13 | impl CommonInfo { 14 | fn from_item(item: ItemStruct) -> syn::Result { 15 | let name = item.ident; 16 | let original_generics = item.generics.params.into_iter().map(|x| { 17 | let GenericParam::Lifetime(lt) = x else { 18 | return Err(syn::Error::new_spanned( 19 | x, 20 | "Only lifetime generics are supported.", 21 | )); 22 | }; 23 | Ok(lt.lifetime) 24 | }); 25 | let fields = item.fields.into_iter().map(|field| { 26 | let Some(name) = field.ident else { 27 | return Err(syn::Error::new_spanned( 28 | field, 29 | "Tuple structs are not supported (yet).", 30 | )); 31 | }; 32 | Ok((name, field.ty)) 33 | }); 34 | Ok(Self { 35 | name, 36 | original_generics: original_generics.collect::>()?, 37 | fields: fields.collect::>()?, 38 | }) 39 | } 40 | } 41 | 42 | pub fn wrap(parts: &[impl ToTokens]) -> TokenStream { 43 | match parts { 44 | [] => quote! {()}, 45 | [typ] => typ.to_token_stream(), 46 | [a, b @ ..] => { 47 | let rest = wrap(b); 48 | quote! 
{(#a, #rest)} 49 | } 50 | } 51 | } 52 | 53 | pub fn dummy_impl(item: ItemStruct) -> syn::Result { 54 | let CommonInfo { 55 | name, 56 | original_generics, 57 | fields, 58 | } = CommonInfo::from_item(item)?; 59 | let dummy_name = format_ident!("{name}Select"); 60 | 61 | let transaction_lt = syn::Lifetime::new("'_a", Span::mixed_site()); 62 | let mut original_plus_transaction = original_generics.clone(); 63 | original_plus_transaction.push(transaction_lt.clone()); 64 | 65 | let mut defs = vec![]; 66 | let mut generics = vec![]; 67 | let mut constraints = vec![]; 68 | let mut dummies = vec![]; 69 | let mut typs = vec![]; 70 | let mut names = vec![]; 71 | for (name, typ) in &fields { 72 | let generic = make_generic(name); 73 | 74 | defs.push(quote! {#name: #generic}); 75 | constraints 76 | .push(quote! {#generic: ::rust_query::IntoSelect<'_t, #transaction_lt, S, Out = #typ>}); 77 | generics.push(quote! {#generic}); 78 | dummies.push(quote! {self.#name}); 79 | names.push(quote! {#name}); 80 | typs.push(quote! {#typ}); 81 | } 82 | 83 | let parts_name = wrap(&names); 84 | let parts_dummies = wrap(&dummies); 85 | 86 | Ok(quote! 
{ 87 | struct #dummy_name<#(#generics),*> { 88 | #(#defs),* 89 | } 90 | 91 | impl<'_t #(,#original_plus_transaction)*, S #(,#constraints)*> ::rust_query::IntoSelect<'_t, #transaction_lt, S> for #dummy_name<#(#generics),*> 92 | where #name<#(#original_generics),*>: #transaction_lt { 93 | type Out = (#name<#(#original_generics),*>); 94 | 95 | fn into_select(self) -> ::rust_query::Select<'_t, #transaction_lt, S, Self::Out> { 96 | ::rust_query::IntoSelect::into_select(#parts_dummies).map(|#parts_name| #name { 97 | #(#names,)* 98 | }) 99 | } 100 | } 101 | 102 | }) 103 | } 104 | 105 | pub fn from_expr(item: ItemStruct) -> syn::Result { 106 | let mut transaction_lt = None; 107 | match *item.generics.lifetimes().collect::>() { 108 | [] => {} 109 | [lt] => transaction_lt = Some(lt.lifetime.clone()), 110 | _ => { 111 | return Err(syn::Error::new_spanned( 112 | item.generics, 113 | "can have at most one lifetime generic", 114 | )) 115 | } 116 | } 117 | 118 | let mut trivial = vec![]; 119 | for attr in &item.attrs { 120 | if attr.path().is_ident("rust_query") { 121 | attr.parse_nested_meta(|meta| { 122 | if meta.path.is_ident("From") { 123 | let path: syn::Path = meta.value()?.parse()?; 124 | trivial.push(path); 125 | return Ok(()); 126 | } 127 | Err(meta.error("unrecognized rust-query attribute")) 128 | })?; 129 | } 130 | } 131 | 132 | let CommonInfo { 133 | name, 134 | original_generics, 135 | fields, 136 | } = CommonInfo::from_item(item)?; 137 | 138 | let mut original_plus_transaction = original_generics.clone(); 139 | let builtin_lt = syn::Lifetime::new("'_a", Span::mixed_site()); 140 | if transaction_lt.is_none() { 141 | original_plus_transaction.push(builtin_lt.clone()); 142 | } 143 | let transaction_lt = transaction_lt.unwrap_or(builtin_lt); 144 | 145 | let mut names = vec![]; 146 | for (name, _) in &fields { 147 | names.push(quote! {#name}); 148 | } 149 | 150 | let trivial = trivial.into_iter().map(|trivial| { 151 | let schema = quote! 
{<#trivial as ::rust_query::Table>::Schema}; 152 | let mut trivial_prepared = vec![]; 153 | for (name, typ) in &fields { 154 | let span = typ.span(); 155 | trivial_prepared 156 | .push(quote_spanned! {span=> <#typ as ::rust_query::FromExpr<_, _>>::from_expr(col.#name())}); 157 | } 158 | let parts_dummies = wrap(&trivial_prepared); 159 | let parts_name = wrap(&names); 160 | 161 | quote! { 162 | impl<#(#original_plus_transaction),*> ::rust_query::FromExpr<#transaction_lt, #schema, #trivial> for #name<#(#original_generics),*> 163 | { 164 | fn from_expr<'_t>(col: impl ::rust_query::IntoExpr<'_t, #schema, Typ = #trivial>) -> ::rust_query::Select<'_t, #transaction_lt, #schema, Self> { 165 | let col = ::rust_query::IntoExpr::into_expr(col); 166 | ::rust_query::IntoSelect::into_select(#parts_dummies).map(|#parts_name| #name { 167 | #(#names,)* 168 | }) 169 | } 170 | } 171 | } 172 | }); 173 | 174 | Ok(quote! { 175 | #(#trivial)* 176 | }) 177 | } 178 | -------------------------------------------------------------------------------- /rust-query-macros/src/fields.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use proc_macro2::{Span, TokenStream}; 4 | use quote::{quote, quote_spanned}; 5 | use syn::{parse::Parse, punctuated::Punctuated, Ident, LitInt, Token}; 6 | 7 | struct Field { 8 | name: Ident, 9 | lt: Option<(Token![<], syn::Lifetime, Token![>])>, 10 | typ: Option<(Token![as], syn::Type)>, 11 | } 12 | 13 | impl Parse for Field { 14 | fn parse(input: syn::parse::ParseStream) -> syn::Result { 15 | Ok(Self { 16 | name: input.parse()?, 17 | lt: input 18 | .peek(Token![<]) 19 | .then(|| Ok::<_, syn::Error>((input.parse()?, input.parse()?, input.parse()?))) 20 | .transpose()?, 21 | typ: input 22 | .peek(Token![as]) 23 | .then(|| Ok::<_, syn::Error>((input.parse()?, input.parse()?))) 24 | .transpose()?, 25 | }) 26 | } 27 | } 28 | 29 | pub struct Spec { 30 | struct_id: LitInt, 31 | _brace_token1: 
syn::token::Brace, 32 | required_span: Span, 33 | required: Punctuated, 34 | _brace_token2: syn::token::Brace, 35 | all: Punctuated, 36 | } 37 | 38 | impl Parse for Spec { 39 | fn parse(input: syn::parse::ParseStream) -> syn::Result { 40 | let content1; 41 | let content2; 42 | Ok(Spec { 43 | struct_id: input.parse()?, 44 | _brace_token1: syn::braced!(content1 in input), 45 | required_span: content1.span(), 46 | required: content1.parse_terminated(Field::parse, Token![,])?, 47 | _brace_token2: syn::braced!(content2 in input), 48 | all: content2.parse_terminated(Ident::parse, Token![,])?, 49 | }) 50 | } 51 | } 52 | 53 | pub fn generate(spec: Spec) -> syn::Result { 54 | let mut m = HashMap::new(); 55 | for r in &spec.required { 56 | if m.insert(r.name.clone(), (r.lt.clone().map(|x| x.1), r.typ.clone())) 57 | .is_some() 58 | { 59 | return Err(syn::Error::new_spanned(&r.name, "duplicate name")); 60 | } 61 | } 62 | 63 | let span = spec.required_span; 64 | let static_lt = syn::Lifetime::new("'static", span); 65 | 66 | let mut out_typs = vec![]; 67 | for x in spec.all { 68 | if let Some((lt, typ)) = m.remove(&x) { 69 | if let Some((_, custom)) = typ { 70 | out_typs.push(quote! {::rust_query::private::Custom<#custom>}); 71 | } else { 72 | let lt = lt.unwrap_or(static_lt.clone()); 73 | out_typs.push(quote! {::rust_query::private::Native<#lt>}); 74 | } 75 | } else { 76 | out_typs.push(quote! {::rust_query::private::Ignore}); 77 | } 78 | } 79 | 80 | if let Some(name) = m.keys().next() { 81 | return Err(syn::Error::new_spanned(name, "unknown field name")); 82 | } 83 | 84 | if spec.required.is_empty() { 85 | return Ok(quote! {()}); 86 | } 87 | let struct_id = spec.struct_id; 88 | let typ = quote! {(#(#out_typs),*)}; 89 | Ok(quote_spanned! 
{span=> 90 | 91 | >::Out}) 92 | } 93 | -------------------------------------------------------------------------------- /rust-query-macros/src/migrations.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::BTreeMap, ops::Not}; 2 | 3 | use crate::{ 4 | multi::{SingleVersionColumn, SingleVersionTable}, 5 | to_lower, 6 | }; 7 | use proc_macro2::TokenStream; 8 | use quote::{format_ident, quote}; 9 | use syn::Ident; 10 | 11 | pub fn migrations( 12 | schema_name: &Ident, 13 | mut prev_tables: BTreeMap, 14 | new_tables: &BTreeMap, 15 | prev_mod: TokenStream, 16 | new_mod: TokenStream, 17 | ) -> Result { 18 | let mut tables = vec![]; 19 | let mut create_table_name = vec![]; 20 | let mut create_table_lower = vec![]; 21 | let mut table_migrations = TokenStream::new(); 22 | // loop over all new table and see what changed 23 | for (i, table) in new_tables { 24 | let table_name = &table.name; 25 | 26 | let table_lower = to_lower(table_name); 27 | 28 | if let Some(prev_table) = prev_tables.remove(i) { 29 | // a table already existed, so we need to define a migration 30 | 31 | let Some(migration) = 32 | define_table_migration(&prev_table.columns, table, false, &new_mod)? 33 | else { 34 | continue; 35 | }; 36 | table_migrations.extend(migration); 37 | 38 | create_table_lower.push(table_lower); 39 | create_table_name.push(table_name); 40 | 41 | tables.push(quote! {b.drop_table::<#prev_mod::#table_name>()}) 42 | } else if table.prev.is_some() { 43 | let migration = 44 | define_table_migration(&BTreeMap::new(), table, true, &new_mod).unwrap(); 45 | 46 | table_migrations.extend(migration); 47 | create_table_lower.push(table_lower); 48 | create_table_name.push(table_name); 49 | } else { 50 | tables.push(quote! 
{b.create_empty::<#new_mod::#table_name>()}) 51 | } 52 | } 53 | for prev_table in prev_tables.into_values() { 54 | // a table was removed, so we drop it 55 | 56 | let table_ident = &prev_table.name; 57 | tables.push(quote! {b.drop_table::<#prev_mod::#table_ident>()}) 58 | } 59 | 60 | let lifetime = create_table_name.is_empty().not().then_some(quote! {'t,}); 61 | Ok(quote! { 62 | #table_migrations 63 | 64 | pub struct #schema_name<#lifetime> { 65 | #(pub #create_table_lower: ::rust_query::migration::Migrated<'t, #prev_mod::#schema_name, #new_mod::#create_table_name>,)* 66 | } 67 | 68 | impl<'t> ::rust_query::private::SchemaMigration<'t> for #schema_name<#lifetime> { 69 | type From = #prev_mod::#schema_name; 70 | type To = #new_mod::#schema_name; 71 | 72 | fn tables(self, b: &mut ::rust_query::private::SchemaBuilder<'t, Self::From>) { 73 | #(#tables;)* 74 | #(self.#create_table_lower.apply(b);)* 75 | } 76 | } 77 | }) 78 | } 79 | 80 | // prev_table is only used for the columns 81 | fn define_table_migration( 82 | prev_columns: &BTreeMap, 83 | table: &SingleVersionTable, 84 | always_migrate: bool, 85 | new_mod: &TokenStream, 86 | ) -> syn::Result> { 87 | let mut col_new = vec![]; 88 | let mut col_ident = vec![]; 89 | let mut alter_ident = vec![]; 90 | let mut alter_typ = vec![]; 91 | let mut alter_tmp = vec![]; 92 | 93 | let mut migration_conflict = quote! {::std::convert::Infallible}; 94 | let mut conflict_from = quote! {::std::unreachable!()}; 95 | 96 | for (i, col) in &table.columns { 97 | let name = &col.name; 98 | if prev_columns.contains_key(i) { 99 | col_new.push(quote! {prev.#name()}); 100 | } else { 101 | let mut unique_columns = table.uniques.iter().flat_map(|u| &u.columns); 102 | if unique_columns.any(|c| c == name) { 103 | migration_conflict = quote! {::rust_query::TableRow<'t, Self::From>}; 104 | conflict_from = quote! {val}; 105 | } 106 | col_new.push(quote! 
{val.#name}); 107 | 108 | alter_ident.push(name); 109 | alter_typ.push(&col.typ); 110 | alter_tmp.push(format_ident!("Tmp{i}")) 111 | } 112 | col_ident.push(name); 113 | } 114 | 115 | // check that nothing was added or removed 116 | // we don't need input if only stuff was removed, but it still needs migrating 117 | if !always_migrate && alter_ident.is_empty() && table.columns.len() == prev_columns.len() { 118 | return Ok(None); 119 | } 120 | 121 | let table_ident = &table.name; 122 | let typs_mod = format_ident!("_{table_ident}"); 123 | let migration_lt = alter_ident.is_empty().not().then_some(quote! {'t}); 124 | 125 | let migration = quote! { 126 | mod #typs_mod { 127 | use super::#new_mod::*; 128 | #( 129 | pub type #alter_tmp<'t> = <<#alter_typ as ::rust_query::private::MyTyp>::Prev as ::rust_query::private::MyTyp>::Out<'t>; 130 | )* 131 | } 132 | 133 | pub struct #table_ident<#migration_lt> {#( 134 | pub #alter_ident: #typs_mod::#alter_tmp<'t>, 135 | )*} 136 | 137 | impl<'t> ::rust_query::private::Migration<'t> for #table_ident<#migration_lt> { 138 | type To = #new_mod::#table_ident; 139 | type FromSchema = ::Schema; 140 | type From = ::MigrateFrom; 141 | type Conflict = #migration_conflict; 142 | 143 | fn prepare( 144 | val: Self, 145 | prev: ::rust_query::TableRow<'t, Self::From>, 146 | ) -> ::Insert<'t> { 147 | #new_mod::#table_ident {#( 148 | #col_ident: ::rust_query::Expr::_migrate::(#col_new), 149 | )*} 150 | } 151 | 152 | fn map_conflict(val: ::rust_query::TableRow<'t, Self::From>) -> Self::Conflict { 153 | #conflict_from 154 | } 155 | } 156 | }; 157 | Ok(Some(migration)) 158 | } 159 | -------------------------------------------------------------------------------- /rust-query-macros/src/multi.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | 3 | use proc_macro2::{Span, TokenStream}; 4 | use quote::format_ident; 5 | use syn::Ident; 6 | 7 | #[derive(Clone)] 8 | pub(crate) struct 
Unique { 9 | pub name: Ident, 10 | pub columns: Vec, 11 | } 12 | 13 | pub(crate) struct VersionedSchema { 14 | pub versions: std::ops::Range, 15 | pub tables: Vec, 16 | } 17 | 18 | // This is a table fully parsed from the schema, it represents multiple versions 19 | pub(crate) struct VersionedTable { 20 | pub name: Ident, 21 | pub versions: std::ops::Range, 22 | // `prev` always has a distinct span from `name` 23 | pub prev: Option, 24 | pub uniques: Vec, 25 | pub columns: Vec, 26 | pub referenceable: bool, 27 | } 28 | 29 | pub(crate) struct VersionedColumn { 30 | pub versions: std::ops::Range, 31 | pub name: Ident, 32 | pub typ: TokenStream, 33 | } 34 | 35 | impl VersionedSchema { 36 | pub fn get(&self, version: u32) -> syn::Result> { 37 | assert!(self.versions.contains(&version)); 38 | let mut tables = BTreeMap::new(); 39 | for (i, t) in self.tables.iter().enumerate() { 40 | if t.versions.contains(&version) { 41 | tables.insert(i, self.get_table(t, version)?); 42 | } 43 | } 44 | Ok(tables) 45 | } 46 | 47 | fn get_table(&self, table: &VersionedTable, version: u32) -> syn::Result { 48 | assert!(table.versions.contains(&version)); 49 | let mut columns = BTreeMap::new(); 50 | for (i, c) in table.columns.iter().enumerate() { 51 | if c.versions.contains(&version) { 52 | columns.insert( 53 | i, 54 | SingleVersionColumn { 55 | name: c.name.clone(), 56 | typ: c.typ.clone(), 57 | is_def: version == c.versions.end - 1, 58 | }, 59 | ); 60 | } 61 | } 62 | // we don't want to leak the span from table.name into `prev` 63 | let mut prev = Some(format_ident!("{}", table.name, span = Span::call_site())); 64 | if version == table.versions.start { 65 | prev = table.prev.clone(); 66 | } 67 | if prev.is_some() && version == self.versions.start { 68 | return Err(syn::Error::new_spanned( 69 | prev, 70 | "the previous schema does not exists", 71 | )); 72 | } 73 | 74 | Ok(SingleVersionTable { 75 | prev, 76 | name: table.name.clone(), 77 | uniques: table.uniques.clone(), 78 | columns, 79 | 
referenceable: table.referenceable, 80 | }) 81 | } 82 | } 83 | 84 | pub(crate) struct SingleVersionTable { 85 | pub prev: Option, 86 | pub name: Ident, 87 | pub uniques: Vec, 88 | pub columns: BTreeMap, 89 | pub referenceable: bool, 90 | } 91 | 92 | pub(crate) struct SingleVersionColumn { 93 | pub name: Ident, 94 | pub typ: TokenStream, 95 | // is this the latest version where the column exists? 96 | pub is_def: bool, 97 | } 98 | -------------------------------------------------------------------------------- /rust-query-macros/src/parse.rs: -------------------------------------------------------------------------------- 1 | use std::ops::{Not, Range}; 2 | 3 | use quote::ToTokens; 4 | use syn::{punctuated::Punctuated, Attribute, Field, Ident, Item, Path, Token, Visibility}; 5 | 6 | use crate::multi::{Unique, VersionedColumn, VersionedSchema, VersionedTable}; 7 | 8 | impl VersionedColumn { 9 | pub fn parse(field: Field, limit: Range, uniques: &mut Vec) -> syn::Result { 10 | let Some(name) = field.ident.clone() else { 11 | return Err(syn::Error::new_spanned(field, "field must be named")); 12 | }; 13 | 14 | let Visibility::Public(_) = field.vis else { 15 | return Err(syn::Error::new_spanned(name, "field must be public")); 16 | }; 17 | 18 | // not sure if case matters here 19 | if name.to_string().to_lowercase() == "id" { 20 | return Err(syn::Error::new_spanned( 21 | name, 22 | "The `id` column is reserved to be used by rust-query internally", 23 | )); 24 | } 25 | 26 | let mut other_field_attr = vec![]; 27 | for attr in field.attrs.clone() { 28 | if let Some(unique) = is_unique(attr.path()) { 29 | attr.meta.require_path_only()?; 30 | uniques.push(Unique { 31 | name: unique, 32 | columns: vec![name.clone()], 33 | }) 34 | } else { 35 | other_field_attr.push(attr); 36 | } 37 | } 38 | let versions = parse_version(&other_field_attr)? 
39 | .unwrap_or_default() 40 | .into_std(limit, true)?; 41 | 42 | Ok(VersionedColumn { 43 | versions, 44 | name, 45 | typ: field.ty.into_token_stream(), 46 | }) 47 | } 48 | } 49 | 50 | impl VersionedTable { 51 | pub fn parse(table: syn::ItemStruct, limit: Range) -> syn::Result { 52 | let Visibility::Public(_) = table.vis else { 53 | return Err(syn::Error::new_spanned(table.ident, "table must be public")); 54 | }; 55 | 56 | let mut other_attrs = vec![]; 57 | let mut uniques = vec![]; 58 | let mut prev = None; 59 | let mut referenceable = true; 60 | 61 | for attr in &table.attrs { 62 | if let Some(unique) = is_unique(attr.path()) { 63 | let idents = 64 | attr.parse_args_with(Punctuated::::parse_separated_nonempty)?; 65 | uniques.push(Unique { 66 | name: unique, 67 | columns: idents.into_iter().collect(), 68 | }) 69 | } else if attr.path().is_ident("no_reference") { 70 | referenceable = false; 71 | } else if attr.path().is_ident("from") { 72 | if prev.is_some() { 73 | return Err(syn::Error::new_spanned(attr, "can not have multiple from")); 74 | } 75 | prev = Some(attr.parse_args()?) 76 | } else { 77 | other_attrs.push(attr.clone()); 78 | } 79 | } 80 | 81 | if !referenceable && prev.is_some() { 82 | return Err(syn::Error::new_spanned( 83 | prev, 84 | "can not use `no_reference` and `from` together", 85 | )); 86 | } 87 | 88 | let versions = parse_version(&other_attrs)? 
89 | .unwrap_or_default() 90 | .into_std(limit, true)?; 91 | 92 | let columns = table 93 | .fields 94 | .into_iter() 95 | .map(|x| VersionedColumn::parse(x, versions.clone(), &mut uniques)) 96 | .collect::>()?; 97 | 98 | Ok(VersionedTable { 99 | versions, 100 | prev, 101 | name: table.ident, 102 | columns, 103 | uniques, 104 | referenceable, 105 | }) 106 | } 107 | } 108 | 109 | impl VersionedSchema { 110 | pub fn parse(item: syn::ItemMod) -> syn::Result { 111 | if item.ident != "vN" { 112 | return Err(syn::Error::new_spanned( 113 | item.ident, 114 | "module name should be `vN`", 115 | )); 116 | } 117 | 118 | let versions = parse_version(&item.attrs)? 119 | .unwrap_or_default() 120 | .into_std(0..1, false)?; 121 | 122 | let Visibility::Public(_) = item.vis else { 123 | return Err(syn::Error::new_spanned(item.ident, "module must be public")); 124 | }; 125 | 126 | if let Some(unsafety) = item.unsafety { 127 | return Err(syn::Error::new_spanned( 128 | unsafety, 129 | "module can not be unsafe", 130 | )); 131 | }; 132 | 133 | let Some(content) = item.content else { 134 | return Err(syn::Error::new_spanned(item.ident, "module must be inline")); 135 | }; 136 | 137 | let tables = content 138 | .1 139 | .into_iter() 140 | .map(|x| { 141 | let Item::Struct(x) = x else { 142 | return Err(syn::Error::new_spanned(x, "only struct items are allowed")); 143 | }; 144 | 145 | VersionedTable::parse(x, versions.clone()) 146 | }) 147 | .collect::>()?; 148 | 149 | Ok(VersionedSchema { versions, tables }) 150 | } 151 | } 152 | 153 | #[derive(Default)] 154 | pub(crate) struct MyRange { 155 | pub start: Option, 156 | pub _dotdot: Token![..], 157 | pub end: Option, 158 | } 159 | 160 | pub(crate) struct RangeEnd { 161 | pub equals: Option, 162 | pub num: syn::LitInt, 163 | } 164 | 165 | impl syn::parse::Parse for MyRange { 166 | fn parse(input: syn::parse::ParseStream) -> syn::Result { 167 | Ok(Self { 168 | start: input.parse()?, 169 | _dotdot: input.parse()?, 170 | end: 
input.is_empty().not().then(|| input.parse()).transpose()?, 171 | }) 172 | } 173 | } 174 | 175 | impl syn::parse::Parse for RangeEnd { 176 | fn parse(input: syn::parse::ParseStream) -> syn::Result { 177 | Ok(Self { 178 | equals: input.parse()?, 179 | num: input.parse()?, 180 | }) 181 | } 182 | } 183 | 184 | impl MyRange { 185 | pub fn into_std(self, limit: Range, check: bool) -> syn::Result> { 186 | let start = self 187 | .start 188 | .as_ref() 189 | .map(|x| x.base10_parse()) 190 | .transpose()? 191 | .unwrap_or(limit.start); 192 | if check && start < limit.start { 193 | return Err(syn::Error::new_spanned( 194 | self.start, 195 | "start of range is before outer range start", 196 | )); 197 | } 198 | 199 | let end = self 200 | .end 201 | .as_ref() 202 | .map(|x| syn::Result::Ok(x.num.base10_parse::()? + (x.equals.is_some() as u32))) 203 | .transpose()? 204 | .unwrap_or(limit.end); 205 | if check && limit.end < end { 206 | return Err(syn::Error::new_spanned( 207 | self.end.unwrap().num, 208 | "end of range is after outer range end", 209 | )); 210 | } 211 | 212 | if end <= start { 213 | return Err(syn::Error::new_spanned(self._dotdot, "range is empty")); 214 | } 215 | 216 | Ok(start..end) 217 | } 218 | } 219 | 220 | fn parse_version(attrs: &[Attribute]) -> syn::Result> { 221 | let mut version = None; 222 | for attr in attrs { 223 | if attr.path().is_ident("version") { 224 | if version.is_some() { 225 | return Err(syn::Error::new_spanned( 226 | attr, 227 | "There should be only one version attribute.", 228 | )); 229 | } 230 | version = Some(attr.parse_args::()?); 231 | } else { 232 | return Err(syn::Error::new_spanned(attr, "unexpected attribute")); 233 | } 234 | } 235 | Ok(version) 236 | } 237 | 238 | fn is_unique(path: &Path) -> Option { 239 | path.get_ident().and_then(|ident| { 240 | ident 241 | .to_string() 242 | .starts_with("unique") 243 | .then(|| ident.clone()) 244 | }) 245 | } 246 | 
-------------------------------------------------------------------------------- /src/alias.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicU64, Ordering}; 2 | 3 | use sea_query::Iden; 4 | 5 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] 6 | pub(super) enum Field { 7 | U64(MyAlias), 8 | Str(&'static str), 9 | } 10 | 11 | #[derive(Default)] 12 | pub struct Scope { 13 | iden_num: AtomicU64, 14 | } 15 | 16 | impl Scope { 17 | pub fn tmp_table(&self) -> TmpTable { 18 | let next = self.iden_num.fetch_add(1, Ordering::Relaxed); 19 | TmpTable { name: next } 20 | } 21 | 22 | pub fn new_alias(&self) -> MyAlias { 23 | let next = self.iden_num.fetch_add(1, Ordering::Relaxed); 24 | MyAlias { name: next } 25 | } 26 | 27 | pub fn create(num_tables: usize, num_filter_on: usize) -> Self { 28 | Self { 29 | iden_num: AtomicU64::new(num_tables.max(num_filter_on) as u64), 30 | } 31 | } 32 | } 33 | 34 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] 35 | pub struct MyAlias { 36 | name: u64, 37 | } 38 | 39 | impl MyAlias { 40 | pub fn new(idx: usize) -> Self { 41 | Self { name: idx as u64 } 42 | } 43 | } 44 | 45 | #[derive(Debug, Clone, Copy, PartialEq)] 46 | pub(super) struct TmpTable { 47 | name: u64, 48 | } 49 | 50 | impl sea_query::Iden for Field { 51 | fn unquoted(&self, s: &mut dyn std::fmt::Write) { 52 | match self { 53 | Field::U64(alias) => alias.unquoted(s), 54 | Field::Str(name) => write!(s, "{}", name).unwrap(), 55 | } 56 | } 57 | } 58 | 59 | impl sea_query::Iden for MyAlias { 60 | fn unquoted(&self, s: &mut dyn std::fmt::Write) { 61 | write!(s, "_{}", self.name).unwrap() 62 | } 63 | } 64 | 65 | impl sea_query::Iden for TmpTable { 66 | fn unquoted(&self, s: &mut dyn std::fmt::Write) { 67 | write!(s, "_tmp{}", self.name).unwrap() 68 | } 69 | } 70 | 71 | pub(crate) struct RawAlias(pub(crate) String); 72 | 73 | impl Iden for RawAlias { 74 | fn unquoted(&self, s: &mut dyn std::fmt::Write) { 75 | 
write!(s, "{}", self.0).unwrap() 76 | } 77 | fn prepare(&self, s: &mut dyn std::fmt::Write, _q: sea_query::Quote) { 78 | self.unquoted(s) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/ast.rs: -------------------------------------------------------------------------------- 1 | use std::rc::Rc; 2 | 3 | use sea_query::{Alias, Asterisk, Condition, Expr, NullAlias, SelectStatement, SimpleExpr}; 4 | 5 | use crate::{ 6 | alias::{Field, MyAlias, RawAlias, Scope}, 7 | value::{DynTyped, DynTypedExpr, Typed, ValueBuilder}, 8 | }; 9 | 10 | #[derive(Default, Clone)] 11 | pub struct MySelect { 12 | // this is used to check which `MySelect` a table is from 13 | pub(crate) scope_rc: Rc<()>, 14 | // tables to join, adding more requires mutating 15 | pub(super) tables: Vec, 16 | // all conditions to check 17 | pub(super) filters: Vec>, 18 | } 19 | 20 | #[derive(PartialEq, Clone)] 21 | pub(super) struct Source { 22 | pub(super) conds: Vec<(Field, SimpleExpr)>, 23 | pub(super) kind: SourceKind, 24 | } 25 | 26 | #[derive(Clone)] 27 | pub(super) enum SourceKind { 28 | Aggregate(Rc), 29 | // table and pk 30 | Implicit(String), 31 | } 32 | 33 | impl PartialEq for SourceKind { 34 | fn eq(&self, other: &Self) -> bool { 35 | match (self, other) { 36 | (Self::Implicit(l0), Self::Implicit(r0)) => l0 == r0, 37 | (Self::Aggregate(l0), Self::Aggregate(l1)) => l0 == l1, 38 | _ => false, 39 | } 40 | } 41 | } 42 | 43 | impl MySelect { 44 | pub fn full(self: Rc) -> ValueBuilder { 45 | ValueBuilder { 46 | scope: Scope::create(self.tables.len(), 0), 47 | extra: Default::default(), 48 | from: self, 49 | forwarded: Default::default(), 50 | } 51 | } 52 | } 53 | 54 | impl ValueBuilder { 55 | pub fn simple_one(&mut self, val: DynTypedExpr) -> (SelectStatement, MyAlias) { 56 | let (a, mut b) = self.simple(vec![val]); 57 | assert!(b.len() == 1); 58 | (a, b.swap_remove(0)) 59 | } 60 | 61 | pub fn simple(&mut self, select: Vec) -> (SelectStatement, Vec) 
{ 62 | let res = self.build_select(false, select); 63 | assert!(self.forwarded.is_empty()); 64 | res 65 | } 66 | 67 | pub fn build_select( 68 | &mut self, 69 | must_group: bool, 70 | select_out: Vec, 71 | ) -> (SelectStatement, Vec) { 72 | let mut select = SelectStatement::new(); 73 | let from = self.from.clone(); 74 | 75 | // this stuff adds more to the self.extra list 76 | let select_out: Vec<_> = select_out.into_iter().map(|val| (val.0)(self)).collect(); 77 | let filters: Vec<_> = from.filters.iter().map(|x| x.build_expr(self)).collect(); 78 | 79 | let mut any_from = false; 80 | for (idx, table) in from.tables.iter().enumerate() { 81 | let tbl_ref = (Alias::new("main"), RawAlias(table.clone())); 82 | select.from_as(tbl_ref, MyAlias::new(idx)); 83 | any_from = true; 84 | } 85 | 86 | for (source, table_alias) in self.extra.iter() { 87 | let mut cond = Condition::all(); 88 | for (field, outer_value) in &source.conds { 89 | let id_field = Expr::expr(outer_value.clone()); 90 | let id_field2 = Expr::col((*table_alias, *field)); 91 | let filter = id_field.eq(id_field2); 92 | cond = cond.add(filter); 93 | } 94 | 95 | match &source.kind { 96 | SourceKind::Aggregate(ast) => { 97 | let join_type = sea_query::JoinType::LeftJoin; 98 | select.join_subquery(join_type, ast.as_ref().clone(), *table_alias, cond); 99 | } 100 | SourceKind::Implicit(table) => { 101 | let join_type = sea_query::JoinType::LeftJoin; 102 | let tbl_ref = (Alias::new("main"), Alias::new(table)); 103 | select.join_as(join_type, tbl_ref, *table_alias, cond); 104 | } 105 | } 106 | } 107 | 108 | for filter in filters { 109 | select.and_where(filter); 110 | } 111 | 112 | let mut any_expr = false; 113 | let mut any_group = false; 114 | 115 | for (idx, group) in self.forwarded.iter().enumerate() { 116 | select.from_as((Alias::new("main"), Alias::new(group.1.0)), group.1.2); 117 | any_from = true; 118 | 119 | select.expr_as( 120 | Expr::column((group.1.2, Alias::new("id"))), 121 | MyAlias::new(idx), 122 | ); 123 
| any_expr = true; 124 | 125 | // this constant refers to the 1 indexed output column. 126 | // should work on postgresql and sqlite. 127 | let constant = SimpleExpr::Constant(sea_query::Value::BigInt(Some((idx + 1) as i64))); 128 | select.add_group_by([constant]); 129 | any_group = true; 130 | } 131 | 132 | let forwarded_len = self.forwarded.len(); 133 | 134 | let mut out_fields = vec![]; 135 | for (idx, aggr) in select_out.into_iter().enumerate() { 136 | let alias = MyAlias::new(forwarded_len + idx); 137 | out_fields.push(alias); 138 | select.expr_as(aggr, alias); 139 | any_expr = true; 140 | } 141 | 142 | if !any_from { 143 | select.from_values([1], NullAlias); 144 | any_from = true; 145 | } 146 | assert!(any_from); 147 | 148 | if !any_expr { 149 | select.expr_as(Expr::val(1), NullAlias); 150 | any_expr = true 151 | } 152 | assert!(any_expr); 153 | 154 | if !any_group && must_group { 155 | select.expr_as(Expr::count(Expr::col(Asterisk)), NullAlias); 156 | any_group = true; 157 | } 158 | assert_eq!(any_group, must_group); 159 | 160 | (select, out_fields) 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /src/client.rs: -------------------------------------------------------------------------------- 1 | use std::cell::Cell; 2 | 3 | use rusqlite::Connection; 4 | 5 | use crate::{Database, Transaction, TransactionMut}; 6 | 7 | /// The primary interface to the database. 8 | /// 9 | /// Only one [LocalClient] can exist in each thread and transactions need to mutably borrow a [LocalClient]. 10 | /// This makes it impossible to have access to two transactions from one thread. 11 | /// 12 | /// The only way to have concurrent read transactions is to have them on different threads. 13 | /// Write transactions never run in parallel with each other, but they do run in parallel with read transactions.
14 | pub struct LocalClient { 15 | _p: std::marker::PhantomData<*const ()>, 16 | pub(crate) conn: Option, 17 | } 18 | 19 | impl LocalClient { 20 | /// Create a [Transaction]. This operation always completes immediately as it does not need to wait on other transactions. 21 | /// 22 | /// This function will panic if the schema was modified compared to when the [Database] value 23 | /// was created. This can happen for example by running another instance of your program with 24 | /// additional migrations. 25 | pub fn transaction(&mut self, db: &Database) -> Transaction { 26 | use r2d2::ManageConnection; 27 | // TODO: could check here if the existing connection is good to use. 28 | let conn = self.conn.insert(db.manager.connect().unwrap()); 29 | let txn = conn.transaction().unwrap(); 30 | Transaction::new_checked(txn, db.schema_version) 31 | } 32 | 33 | /// Create a [TransactionMut]. 34 | /// This operation needs to wait for all other [TransactionMut]s for this database to be finished. 35 | /// 36 | /// The implementation uses the [unlock_notify](https://sqlite.org/unlock_notify.html) feature of sqlite. 37 | /// This makes it work across processes. 38 | /// 39 | /// Note: you can create a deadlock if you are holding on to another lock while trying to 40 | /// get a mutable transaction! 41 | /// 42 | /// This function will panic if the schema was modified compared to when the [Database] value 43 | /// was created. This can happen for example by running another instance of your program with 44 | /// additional migrations. 45 | pub fn transaction_mut(&mut self, db: &Database) -> TransactionMut { 46 | use r2d2::ManageConnection; 47 | // TODO: could check here if the existing connection is good to use. 
48 | // TODO: make sure that when reusing a connection, the foreign keys are checked (migration doesn't) 49 | // .pragma_update(None, "foreign_keys", "ON").unwrap(); 50 | let conn = self.conn.insert(db.manager.connect().unwrap()); 51 | let txn = conn 52 | .transaction_with_behavior(rusqlite::TransactionBehavior::Immediate) 53 | .unwrap(); 54 | TransactionMut { 55 | inner: Transaction::new_checked(txn, db.schema_version), 56 | } 57 | } 58 | } 59 | 60 | thread_local! { 61 | static EXISTS: Cell = const { Cell::new(true) }; 62 | } 63 | 64 | impl LocalClient { 65 | fn new() -> Self { 66 | LocalClient { 67 | _p: std::marker::PhantomData, 68 | conn: None, 69 | } 70 | } 71 | 72 | /// Create a [LocalClient] if it was not created yet on this thread. 73 | /// 74 | /// Async tasks often share their thread and can thus not use this method. 75 | /// Instead you should use your equivalent of `spawn_blocking` or `block_in_place`. 76 | /// These functions guarantee that you have a unique thread and thus allow [LocalClient::try_new]. 77 | /// 78 | /// Note that using `spawn_blocking` for sqlite is actually a good practice. 79 | /// Sqlite queries can be expensive, it might need to read from disk which is slow. 80 | /// Doing so on all async runtime threads would prevent other tasks from executing. 81 | pub fn try_new() -> Option { 82 | EXISTS.replace(false).then(LocalClient::new) 83 | } 84 | } 85 | 86 | impl Drop for LocalClient { 87 | /// Dropping a [LocalClient] allows retrieving it with [LocalClient::try_new] again. 
88 | fn drop(&mut self) { 89 | EXISTS.set(true) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /src/db.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt::Debug, marker::PhantomData, ops::Deref}; 2 | 3 | use ref_cast::RefCast; 4 | use sea_query::{Alias, SimpleExpr}; 5 | 6 | use crate::{ 7 | Expr, IntoExpr, LocalClient, Table, 8 | alias::MyAlias, 9 | value::{MyTableRef, Typed, ValueBuilder}, 10 | }; 11 | 12 | /// Table reference that is the result of a join. 13 | /// It can only be used in the query where it was created. 14 | /// Invariant in `'t`. 15 | pub(crate) struct Join { 16 | pub(crate) table_idx: MyTableRef, 17 | pub(crate) _p: PhantomData, 18 | } 19 | 20 | impl Join { 21 | pub(crate) fn new(table_idx: MyTableRef) -> Self { 22 | Self { 23 | table_idx, 24 | _p: PhantomData, 25 | } 26 | } 27 | } 28 | 29 | impl Typed for Join { 30 | type Typ = T; 31 | fn build_expr(&self, b: &mut ValueBuilder) -> SimpleExpr { 32 | sea_query::Expr::col((self.build_table(b), Alias::new(T::ID))).into() 33 | } 34 | fn build_table(&self, b: &mut ValueBuilder) -> MyAlias { 35 | b.get_table::(self.table_idx.clone()) 36 | } 37 | } 38 | 39 | /// Row reference that can be used in any query in the same transaction. 40 | /// 41 | /// [TableRow] is covariant in `'t` and restricted to a single thread to prevent it from being used in a different transaction. 
42 | pub struct TableRow<'t, T> { 43 | pub(crate) _p: PhantomData<&'t ()>, 44 | pub(crate) _local: PhantomData, 45 | pub(crate) inner: TableRowInner, 46 | } 47 | impl TableRow<'_, T> { 48 | pub(crate) fn new(idx: i64) -> Self { 49 | Self { 50 | _p: PhantomData, 51 | _local: PhantomData, 52 | inner: TableRowInner { 53 | _p: PhantomData, 54 | idx, 55 | }, 56 | } 57 | } 58 | } 59 | 60 | impl Eq for TableRow<'_, T> {} 61 | 62 | impl PartialOrd for TableRow<'_, T> { 63 | fn partial_cmp(&self, other: &Self) -> Option { 64 | Some(self.cmp(other)) 65 | } 66 | } 67 | 68 | impl Ord for TableRow<'_, T> { 69 | fn cmp(&self, other: &Self) -> std::cmp::Ordering { 70 | self.inner.idx.cmp(&other.inner.idx) 71 | } 72 | } 73 | 74 | pub(crate) struct TableRowInner { 75 | pub(crate) _p: PhantomData, 76 | pub(crate) idx: i64, 77 | } 78 | 79 | impl PartialEq for TableRow<'_, T> { 80 | fn eq(&self, other: &Self) -> bool { 81 | self.inner.idx == other.inner.idx 82 | } 83 | } 84 | 85 | impl Debug for TableRow<'_, T> { 86 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 87 | write!(f, "db_{}", self.inner.idx) 88 | } 89 | } 90 | 91 | impl Clone for TableRow<'_, T> { 92 | fn clone(&self) -> Self { 93 | *self 94 | } 95 | } 96 | impl Copy for TableRow<'_, T> {} 97 | 98 | impl Clone for TableRowInner { 99 | fn clone(&self) -> Self { 100 | *self 101 | } 102 | } 103 | impl Copy for TableRowInner {} 104 | 105 | impl Deref for TableRow<'_, T> { 106 | type Target = T::Ext; 107 | 108 | fn deref(&self) -> &Self::Target { 109 | RefCast::ref_cast(self) 110 | } 111 | } 112 | 113 | impl From> for sea_query::Value { 114 | fn from(value: TableRow) -> Self { 115 | value.inner.idx.into() 116 | } 117 | } 118 | 119 | impl Typed for TableRowInner { 120 | type Typ = T; 121 | fn build_expr(&self, _: &mut ValueBuilder) -> SimpleExpr { 122 | sea_query::Expr::val(self.idx).into() 123 | } 124 | } 125 | 126 | impl<'t, S, T: Table> IntoExpr<'t, S> for TableRow<'t, T> { 127 | type Typ = T; 128 | fn 
into_expr(self) -> Expr<'t, S, Self::Typ> { 129 | Expr::new(self.inner) 130 | } 131 | } 132 | 133 | /// This makes it possible to use TableRow as a parameter in 134 | /// rusqlite queries and statements. 135 | impl rusqlite::ToSql for TableRow<'_, T> { 136 | fn to_sql(&self) -> rusqlite::Result> { 137 | self.inner.idx.to_sql() 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /src/hash.rs: -------------------------------------------------------------------------------- 1 | //! This can be used to define the layout of a table 2 | //! The layout is hashable and the hashes are independent 3 | //! of the column ordering and some other stuff. 4 | 5 | use std::{marker::PhantomData, ops::Deref}; 6 | 7 | use sea_query::TableCreateStatement; 8 | 9 | use crate::value::{EqTyp, MyTyp}; 10 | 11 | #[derive(Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] 12 | pub enum ColumnType { 13 | Integer = 0, 14 | Float = 1, 15 | String = 2, 16 | Blob = 3, 17 | } 18 | 19 | impl ColumnType { 20 | pub fn sea_type(&self) -> sea_query::ColumnType { 21 | use sea_query::ColumnType as T; 22 | match self { 23 | ColumnType::Integer => T::Integer, 24 | ColumnType::Float => T::custom("REAL"), 25 | ColumnType::String => T::Text, 26 | ColumnType::Blob => T::Blob, 27 | } 28 | } 29 | } 30 | 31 | #[derive(Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] 32 | pub struct Column { 33 | pub name: String, 34 | pub typ: ColumnType, 35 | pub nullable: bool, 36 | pub fk: Option<(String, String)>, 37 | } 38 | 39 | #[derive(Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Default)] 40 | pub struct Unique { 41 | pub columns: MyVec, 42 | } 43 | 44 | #[derive(Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Default)] 45 | pub struct Table { 46 | pub columns: MyVec, 47 | pub uniques: MyVec, 48 | } 49 | 50 | /// Special [Vec] wrapper with a hash that is independent of the item order 51 | #[derive(Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] 52 | pub struct MyVec { 53 | inner: Vec, 
54 | } 55 | 56 | impl Default for MyVec { 57 | fn default() -> Self { 58 | Self { 59 | inner: Default::default(), 60 | } 61 | } 62 | } 63 | 64 | impl MyVec { 65 | pub fn insert(&mut self, item: T) { 66 | let index = self.inner.partition_point(|x| x < &item); 67 | self.inner.insert(index, item); 68 | } 69 | } 70 | 71 | impl Deref for MyVec { 72 | type Target = [T]; 73 | 74 | fn deref(&self) -> &Self::Target { 75 | &self.inner 76 | } 77 | } 78 | 79 | impl Table { 80 | pub fn create(&self) -> TableCreateStatement { 81 | use sea_query::*; 82 | let mut create = Table::create(); 83 | for col in &*self.columns { 84 | let name = Alias::new(&col.name); 85 | let mut def = ColumnDef::new_with_type(name.clone(), col.typ.sea_type()); 86 | if col.nullable { 87 | def.null(); 88 | } else { 89 | def.not_null(); 90 | } 91 | create.col(&mut def); 92 | if let Some((table, fk)) = &col.fk { 93 | create.foreign_key( 94 | ForeignKey::create() 95 | .to(Alias::new(table), Alias::new(fk)) 96 | .from_col(name), 97 | ); 98 | } 99 | } 100 | for unique in &*self.uniques { 101 | let mut index = sea_query::Index::create().unique().take(); 102 | for col in &*unique.columns { 103 | index.col(Alias::new(col)); 104 | } 105 | create.index(&mut index); 106 | } 107 | create 108 | } 109 | } 110 | 111 | #[derive(Debug, Hash, Default, PartialEq, Eq)] 112 | pub struct Schema { 113 | pub tables: MyVec<(String, Table)>, 114 | } 115 | 116 | #[cfg(feature = "dev")] 117 | pub mod dev { 118 | use std::{ 119 | hash::{Hash, Hasher}, 120 | io::{Read, Write}, 121 | }; 122 | 123 | use k12::{ 124 | KangarooTwelve, KangarooTwelveCore, 125 | digest::{ExtendableOutput, core_api::CoreWrapper}, 126 | }; 127 | 128 | pub struct KangarooHasher { 129 | inner: CoreWrapper>, 130 | } 131 | 132 | impl Default for KangarooHasher { 133 | fn default() -> Self { 134 | let core = KangarooTwelveCore::new(&[]); 135 | let hasher = KangarooTwelve::from_core(core); 136 | Self { inner: hasher } 137 | } 138 | } 139 | 140 | impl Hasher for 
KangarooHasher { 141 | fn finish(&self) -> u64 { 142 | let mut xof = self.inner.clone().finalize_xof(); 143 | let mut buf = [0; 8]; 144 | xof.read_exact(&mut buf).unwrap(); 145 | u64::from_le_bytes(buf) 146 | } 147 | 148 | fn write(&mut self, bytes: &[u8]) { 149 | self.inner.write_all(bytes).unwrap(); 150 | } 151 | } 152 | 153 | /// Calculate the hash of a schema. 154 | /// 155 | /// This is useful in a test to make sure that old schema versions are not accidentally modified. 156 | pub fn hash_schema() -> String { 157 | let mut b = crate::migrate::TableTypBuilder::default(); 158 | S::typs(&mut b); 159 | let mut hasher = KangarooHasher::default(); 160 | b.ast.hash(&mut hasher); 161 | format!("{:x}", hasher.finish()) 162 | } 163 | } 164 | 165 | pub struct TypBuilder { 166 | pub(crate) ast: Table, 167 | _p: PhantomData, 168 | } 169 | 170 | impl Default for TypBuilder { 171 | fn default() -> Self { 172 | Self { 173 | ast: Default::default(), 174 | _p: Default::default(), 175 | } 176 | } 177 | } 178 | 179 | impl TypBuilder { 180 | pub fn col>(&mut self, name: &'static str) { 181 | let mut item = Column { 182 | name: name.to_owned(), 183 | typ: T::TYP, 184 | nullable: T::NULLABLE, 185 | fk: None, 186 | }; 187 | if let Some((table, fk)) = T::FK { 188 | item.fk = Some((table.to_owned(), fk.to_owned())) 189 | } 190 | self.ast.columns.insert(item) 191 | } 192 | 193 | pub fn unique(&mut self, cols: &[&'static str]) { 194 | let mut unique = Unique::default(); 195 | for &col in cols { 196 | unique.columns.insert(col.to_owned()); 197 | } 198 | self.ast.uniques.insert(unique); 199 | } 200 | 201 | pub fn check_unique_compatible(&mut self) {} 202 | } 203 | 204 | struct Null; 205 | struct NotNull; 206 | 207 | // TODO: maybe remove this trait? 208 | // currently this prevents storing booleans and nested `Option`.
209 | #[diagnostic::on_unimplemented( 210 | message = "Can not use `{Self}` as a column type in schema `{S}`", 211 | note = "Table names can be used as schema column types as long as they are not #[no_reference]" 212 | )] 213 | trait SchemaType: MyTyp { 214 | type N; 215 | } 216 | 217 | impl SchemaType for String { 218 | type N = NotNull; 219 | } 220 | impl SchemaType for Vec { 221 | type N = NotNull; 222 | } 223 | impl SchemaType for i64 { 224 | type N = NotNull; 225 | } 226 | impl SchemaType for f64 { 227 | type N = NotNull; 228 | } 229 | impl> SchemaType for Option { 230 | type N = Null; 231 | } 232 | // only tables with `Referer = ()` are valid columns 233 | #[diagnostic::do_not_recommend] 234 | impl> SchemaType for T { 235 | type N = NotNull; 236 | } 237 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(private_bounds, private_interfaces)] 2 | #![doc = include_str!("../README.md")] 3 | 4 | extern crate self as rust_query; 5 | 6 | #[macro_use] 7 | extern crate static_assertions; 8 | 9 | mod alias; 10 | mod ast; 11 | mod client; 12 | mod db; 13 | mod dummy_impl; 14 | mod hash; 15 | mod migrate; 16 | mod mymap; 17 | mod query; 18 | mod ref_cast_impl; 19 | mod rows; 20 | mod schema_pragma; 21 | mod transaction; 22 | mod value; 23 | mod writable; 24 | 25 | pub use client::LocalClient; 26 | pub use db::TableRow; 27 | pub use dummy_impl::{IntoSelect, IntoSelectExt, Select}; 28 | use hash::TypBuilder; 29 | use private::Reader; 30 | use ref_cast::RefCast; 31 | use rows::Rows; 32 | pub use rust_query_macros::{FromExpr, Select}; 33 | pub use transaction::{Database, Transaction, TransactionMut, TransactionWeak}; 34 | use value::MyTyp; 35 | pub use value::aggregate::aggregate; 36 | pub use value::trivial::FromExpr; 37 | pub use value::{Expr, IntoExpr, UnixEpoch, optional::optional}; 38 | pub use writable::Update; 39 | 40 | /// Types 
that are used as closure arguments. 41 | /// 42 | /// You generally don't need to import these types. 43 | pub mod args { 44 | pub use crate::query::Query; 45 | pub use crate::rows::Rows; 46 | pub use crate::value::aggregate::Aggregate; 47 | pub use crate::value::optional::Optional; 48 | } 49 | 50 | /// Types to declare schemas and migrations. 51 | /// 52 | /// A good starting point is to look at [crate::migration::schema]. 53 | pub mod migration { 54 | #[cfg(feature = "dev")] 55 | pub use crate::hash::dev::hash_schema; 56 | pub use crate::migrate::{Config, Migrated, Migrator, TransactionMigrate}; 57 | pub use rust_query_macros::schema; 58 | } 59 | 60 | /// These items are only exposed for use by the proc macros. 61 | /// Direct use is unsupported. 62 | #[doc(hidden)] 63 | pub mod private { 64 | use std::marker::PhantomData; 65 | 66 | pub use crate::hash::TypBuilder; 67 | pub use crate::migrate::{Migration, Schema, SchemaBuilder, SchemaMigration, TableTypBuilder}; 68 | pub use crate::query::{get_plan, show_sql}; 69 | pub use crate::value::{ 70 | MyTyp, Typed, ValueBuilder, adhoc_expr, assume_expr, into_owned, new_column, new_dummy, 71 | }; 72 | pub use crate::writable::{Reader, TableInsert}; 73 | 74 | pub use ref_cast::RefCast; 75 | pub use rust_query_macros::fields; 76 | pub use sea_query::SimpleExpr; 77 | 78 | pub struct Native<'t>(PhantomData<&'t ()>); 79 | pub struct Ignore; 80 | pub struct Custom(PhantomData); 81 | pub struct Update<'t>(PhantomData<&'t ()>); 82 | pub struct AsExpr<'t>(PhantomData<&'t ()>); 83 | 84 | pub trait Apply { 85 | type Out; 86 | } 87 | 88 | impl<'t> Apply for Native<'t> { 89 | type Out = T::Out<'t>; 90 | } 91 | 92 | impl Apply for Ignore { 93 | type Out = (); 94 | } 95 | 96 | impl Apply for Custom { 97 | type Out = X; 98 | } 99 | 100 | impl<'t> Apply for Update<'t> { 101 | type Out = crate::Update<'t, S, T>; 102 | } 103 | 104 | impl<'t> Apply for AsExpr<'t> { 105 | type Out = crate::Expr<'t, S, T>; 106 | } 107 | 108 | pub trait
Instantiate { 109 | type Out; 110 | } 111 | 112 | pub mod doctest { 113 | use crate::{LocalClient, TransactionMut, migrate::Config, migration}; 114 | 115 | #[migration::schema(Empty)] 116 | pub mod vN { 117 | pub struct User { 118 | #[unique] 119 | pub name: String, 120 | } 121 | } 122 | pub use v0::*; 123 | 124 | pub fn get_client() -> LocalClient { 125 | LocalClient::try_new().unwrap() 126 | } 127 | pub fn get_txn(client: &mut LocalClient) -> TransactionMut { 128 | let db = client 129 | .migrator(Config::open_in_memory()) 130 | .unwrap() 131 | .finish() 132 | .unwrap(); 133 | let mut txn = client.transaction_mut(&db); 134 | txn.insert(User { name: "Alice" }).unwrap(); 135 | txn 136 | } 137 | } 138 | } 139 | 140 | /// This trait is implemented for all table types as generated by the [crate::migration::schema] macro. 141 | /// 142 | /// **You can not implement this trait yourself!** 143 | pub trait Table: Sized + 'static { 144 | /// The associated type [Table::Ext] is used as the deref target by several types that implement [IntoExpr]. 145 | /// This adds convenient methods to access related tables that have a foreign key constraint. 146 | #[doc(hidden)] 147 | type Ext: RefCast; 148 | 149 | #[doc(hidden)] 150 | const TOKEN: Self; 151 | 152 | /// The schema that this table is a part of. 153 | type Schema; 154 | 155 | #[doc(hidden)] 156 | /// The table that this table can be migrated from. 157 | type MigrateFrom: MyTyp; 158 | 159 | /// Please refer to [Rows::join]. 160 | #[deprecated = "Please use `Rows::join`"] 161 | fn join<'inner>(rows: &mut Rows<'inner, Self::Schema>) -> Expr<'inner, Self::Schema, Self> { 162 | rows.join(Self::TOKEN) 163 | } 164 | 165 | /// The type of conflict that can result from inserting a row in this table. 166 | /// This is the same type that is used for row updates too. 167 | type Conflict<'t>; 168 | 169 | /// The type of updates used by [TransactionMut::update_ok]. 
170 | type UpdateOk<'t>; 171 | /// The type of updates used by [TransactionMut::update]. 172 | type Update<'t>; 173 | /// The type of error when a delete fails due to a foreign key constraint. 174 | type Referer; 175 | 176 | #[doc(hidden)] 177 | type Insert<'t>; 178 | 179 | #[doc(hidden)] 180 | fn read<'t>(val: &Self::Insert<'t>, f: &mut Reader<'t, Self::Schema>); 181 | 182 | #[doc(hidden)] 183 | fn get_conflict_unchecked<'t>( 184 | txn: &Transaction<'t, Self::Schema>, 185 | val: &Self::Insert<'t>, 186 | ) -> Self::Conflict<'t>; 187 | 188 | #[doc(hidden)] 189 | fn update_into_try_update(val: Self::UpdateOk<'_>) -> Self::Update<'_>; 190 | 191 | #[doc(hidden)] 192 | fn apply_try_update<'t>( 193 | val: Self::Update<'t>, 194 | old: Expr<'t, Self::Schema, Self>, 195 | ) -> Self::Insert<'t>; 196 | 197 | #[doc(hidden)] 198 | fn get_referer_unchecked() -> Self::Referer; 199 | 200 | // used for the first join (useful for pragmas) 201 | #[doc(hidden)] 202 | fn name(&self) -> String { 203 | Self::NAME.to_owned() 204 | } 205 | #[doc(hidden)] 206 | fn typs(f: &mut TypBuilder); 207 | 208 | #[doc(hidden)] 209 | const ID: &'static str; 210 | #[doc(hidden)] 211 | const NAME: &'static str; 212 | } 213 | 214 | #[test] 215 | fn compile_tests() { 216 | let t = trybuild::TestCases::new(); 217 | t.compile_fail("tests/compile/*.rs"); 218 | } 219 | -------------------------------------------------------------------------------- /src/mymap.rs: -------------------------------------------------------------------------------- 1 | use std::ops::{Deref, DerefMut}; 2 | 3 | #[derive(Clone)] 4 | pub struct MyMap { 5 | inner: Vec<(K, V)>, 6 | } 7 | 8 | impl MyMap { 9 | pub fn pos_or_init(&mut self, k: K, f: impl FnOnce() -> V) -> usize { 10 | if let Some(res) = self.inner.iter().position(|x| x.0 == k) { 11 | return res; 12 | } 13 | let len = self.inner.len(); 14 | self.inner.push((k, f())); 15 | len 16 | } 17 | 18 | pub fn get_or_init(&mut self, k: K, f: impl FnOnce() -> V) -> &V { 19 | let idx = 
self.pos_or_init(k, f); 20 | &self.inner[idx].1 21 | } 22 | 23 | pub fn into_iter(self) -> std::vec::IntoIter<(K, V)> { 24 | self.inner.into_iter() 25 | } 26 | } 27 | 28 | impl Default for MyMap { 29 | fn default() -> Self { 30 | Self { 31 | inner: Default::default(), 32 | } 33 | } 34 | } 35 | 36 | impl Deref for MyMap { 37 | type Target = Vec<(K, V)>; 38 | 39 | fn deref(&self) -> &Self::Target { 40 | &self.inner 41 | } 42 | } 43 | 44 | impl DerefMut for MyMap { 45 | fn deref_mut(&mut self) -> &mut Self::Target { 46 | &mut self.inner 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/query.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cell::Cell, 3 | fmt::Debug, 4 | marker::PhantomData, 5 | ops::{Deref, DerefMut}, 6 | }; 7 | 8 | use rusqlite::Connection; 9 | use sea_query::SqliteQueryBuilder; 10 | use sea_query_rusqlite::{RusqliteBinder, RusqliteValues}; 11 | 12 | use crate::{ 13 | dummy_impl::{Cacher, IntoSelect, Prepared, Row, SelectImpl}, 14 | rows::Rows, 15 | }; 16 | 17 | /// This is the type used by the [crate::Transaction::query] method. 18 | pub struct Query<'outer, 'inner, S> { 19 | pub(crate) phantom: PhantomData<&'inner &'outer ()>, 20 | pub(crate) q: Rows<'inner, S>, 21 | pub(crate) conn: &'inner rusqlite::Connection, 22 | } 23 | 24 | impl<'inner, S> Deref for Query<'_, 'inner, S> { 25 | type Target = Rows<'inner, S>; 26 | 27 | fn deref(&self) -> &Self::Target { 28 | &self.q 29 | } 30 | } 31 | 32 | impl DerefMut for Query<'_, '_, S> { 33 | fn deref_mut(&mut self) -> &mut Self::Target { 34 | &mut self.q 35 | } 36 | } 37 | 38 | impl<'outer, 'inner, S> Query<'outer, 'inner, S> { 39 | /// Turn a database query into a [Vec] of results. 40 | /// 41 | /// The order of rows that is returned is unstable. This means that the order may change between any two 42 | /// executions of the exact same query. 
If a specific order (or even a consistent order) is required, 43 | /// then you have to use something like [slice::sort]. 44 | pub fn into_vec(&self, select: impl IntoSelect<'inner, 'outer, S, Out = O>) -> Vec { 45 | self.into_vec_private(select) 46 | } 47 | 48 | pub(crate) fn into_vec_private<'x, D>(&self, dummy: D) -> Vec 49 | where 50 | D: IntoSelect<'x, 'outer, S>, 51 | { 52 | let mut cacher = Cacher::new(); 53 | let mut prepared = dummy.into_select().inner.prepare(&mut cacher); 54 | 55 | let (select, cached) = self.ast.clone().full().simple(cacher.columns); 56 | let (sql, values) = select.build_rusqlite(SqliteQueryBuilder); 57 | if SHOW_SQL.get() { 58 | println!("{sql}"); 59 | println!("{values:?}"); 60 | } 61 | if GET_PLAN.get() { 62 | let node = get_node(&self.conn, &values, &sql); 63 | PLAN.set(Some(node)); 64 | } 65 | 66 | let mut statement = self.conn.prepare_cached(&sql).unwrap(); 67 | let mut rows = statement.query(&*values.as_params()).unwrap(); 68 | 69 | let mut out = vec![]; 70 | while let Some(row) = rows.next().unwrap() { 71 | out.push(prepared.call(Row::new(row, &cached))); 72 | } 73 | out 74 | } 75 | } 76 | 77 | thread_local! 
{ 78 | static SHOW_SQL: Cell = const { Cell::new(false) }; 79 | static GET_PLAN: Cell = const { Cell::new(false) }; 80 | static PLAN: Cell> = const { Cell::new(None) }; 81 | } 82 | 83 | pub fn show_sql(f: impl FnOnce() -> R) -> R { 84 | let old = SHOW_SQL.get(); 85 | SHOW_SQL.set(true); 86 | let res = f(); 87 | SHOW_SQL.set(old); 88 | res 89 | } 90 | 91 | pub fn get_plan(f: impl FnOnce() -> R) -> (R, Node) { 92 | let old = GET_PLAN.get(); 93 | GET_PLAN.set(true); 94 | let res = f(); 95 | GET_PLAN.set(old); 96 | (res, PLAN.take().unwrap()) 97 | } 98 | 99 | fn get_node(conn: &Connection, values: &RusqliteValues, sql: &str) -> Node { 100 | let mut prepared = conn.prepare(&format!("EXPLAIN QUERY PLAN {sql}")).unwrap(); 101 | let rows = prepared 102 | .query_map(&*values.as_params(), |row| { 103 | Ok(( 104 | row.get_unwrap("parent"), 105 | Node { 106 | id: row.get_unwrap("id"), 107 | detail: row.get_unwrap("detail"), 108 | children: vec![], 109 | }, 110 | )) 111 | }) 112 | .unwrap(); 113 | let mut out = Node { 114 | id: 0, 115 | detail: "QUERY PLAN".to_owned(), 116 | children: vec![], 117 | }; 118 | rows.for_each(|res| { 119 | let (id, node) = res.unwrap(); 120 | out.get_mut(id).children.push(node); 121 | }); 122 | 123 | out 124 | } 125 | 126 | pub struct Node { 127 | id: i64, 128 | detail: String, 129 | children: Vec, 130 | } 131 | 132 | impl Node { 133 | fn get_mut(&mut self, id: i64) -> &mut Node { 134 | if self.id == id { 135 | return self; 136 | } 137 | self.children.last_mut().unwrap().get_mut(id) 138 | } 139 | } 140 | 141 | impl Debug for Node { 142 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 143 | f.write_str(&self.detail)?; 144 | if !self.children.is_empty() { 145 | f.write_str(" ")?; 146 | f.debug_list().entries(&self.children).finish()?; 147 | } 148 | Ok(()) 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /src/ref_cast_impl.rs: 
-------------------------------------------------------------------------------- 1 | /// Please don't use this macro, use `#[derive(RefCast)]` instead. 2 | /// This macro is only safe whenever `#[derive(RefCast)]` also works. 3 | /// The reason this macro exists is to not add a dependency on the `ref-cast` crate for all users of this library. 4 | #[macro_export] 5 | #[doc(hidden)] 6 | macro_rules! unsafe_impl_ref_cast { 7 | ($name:ident) => { 8 | impl ::rust_query::private::RefCast for $name { 9 | type From = T; 10 | 11 | #[inline] 12 | fn ref_cast(_from: &Self::From) -> &Self { 13 | unsafe { &*(_from as *const Self::From as *const Self) } 14 | } 15 | 16 | #[inline] 17 | fn ref_cast_mut(_from: &mut Self::From) -> &mut Self { 18 | unsafe { &mut *(_from as *mut Self::From as *mut Self) } 19 | } 20 | } 21 | }; 22 | } 23 | -------------------------------------------------------------------------------- /src/rows.rs: -------------------------------------------------------------------------------- 1 | use std::{marker::PhantomData, rc::Rc}; 2 | 3 | use sea_query::Iden; 4 | 5 | use crate::{ 6 | Expr, Table, 7 | alias::TmpTable, 8 | ast::MySelect, 9 | db::Join, 10 | value::{IntoExpr, MyTableRef, Typed}, 11 | }; 12 | 13 | /// [Rows] keeps track of all rows in the current query. 14 | /// 15 | /// This is the base type for other query types like [crate::args::Aggregate] and [crate::args::Query]. 16 | /// It contains basic query functionality like joining tables and filters. 17 | /// 18 | /// [Rows] mutability is only about which rows are included. 19 | /// Adding new columns does not require mutating [Rows]. 20 | pub struct Rows<'inner, S> { 21 | // we might store 'inner 22 | pub(crate) phantom: PhantomData &'inner ()>, 23 | pub(crate) _p: PhantomData, 24 | pub(crate) ast: Rc, 25 | } 26 | 27 | impl<'inner, S> Rows<'inner, S> { 28 | /// Join a table, this is like a super simple [Iterator::flat_map] but for queries. 
29 | /// 30 | /// After this operation [Rows] has rows for the combinations of each original row with each row of the table. 31 | /// (Also called the "Carthesian product") 32 | pub fn join>(&mut self, _: T) -> Expr<'inner, S, T> { 33 | self.join_string(T::NAME.to_owned()) 34 | } 35 | 36 | pub(crate) fn join_custom>(&mut self, t: T) -> Expr<'inner, S, T> { 37 | self.join_string(t.name()) 38 | } 39 | 40 | pub(crate) fn join_tmp>(&mut self, tmp: TmpTable) -> Expr<'inner, S, T> { 41 | let mut tmp_string = String::new(); 42 | tmp.unquoted(&mut tmp_string); 43 | self.join_string(tmp_string) 44 | } 45 | 46 | fn join_string>(&mut self, name: String) -> Expr<'inner, S, T> { 47 | let table_idx = self.ast.tables.len(); 48 | Rc::make_mut(&mut self.ast).tables.push(name); 49 | Expr::new(Join::new(MyTableRef { 50 | scope_rc: self.ast.scope_rc.clone(), 51 | idx: table_idx, 52 | })) 53 | } 54 | 55 | // Join a vector of values. 56 | // pub fn vec>(&mut self, vec: Vec) -> Join<'inner, V::Typ> { 57 | // todo!() 58 | // } 59 | 60 | /// Filter rows based on a column. 61 | pub fn filter(&mut self, prop: impl IntoExpr<'inner, S, Typ = bool>) { 62 | let prop = prop.into_expr(); 63 | Rc::make_mut(&mut self.ast).filters.push(prop.inner); 64 | } 65 | 66 | /// Filter out rows where this column is [None]. 67 | /// 68 | /// Returns a new column with the unwrapped type. 
69 | pub fn filter_some( 70 | &mut self, 71 | val: impl IntoExpr<'inner, S, Typ = Option>, 72 | ) -> Expr<'inner, S, Typ> { 73 | let val = val.into_expr(); 74 | Rc::make_mut(&mut self.ast) 75 | .filters 76 | .push(val.is_some().inner); 77 | 78 | Expr::adhoc(move |b| val.inner.build_expr(b)) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/schema_pragma.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, convert::Infallible}; 2 | 3 | use ref_cast::RefCast; 4 | 5 | use crate::{ 6 | Expr, FromExpr, Table, Transaction, hash, 7 | private::{Reader, new_column}, 8 | }; 9 | 10 | macro_rules! field { 11 | ($name:ident: $typ:ty) => { 12 | pub fn $name(&self) -> Expr<'x, Pragma, $typ> { 13 | new_column(&self.0, stringify!($name)) 14 | } 15 | }; 16 | ($name:ident($name_str:literal): $typ:ty) => { 17 | pub fn $name(&self) -> Expr<'x, Pragma, $typ> { 18 | new_column(&self.0, $name_str) 19 | } 20 | }; 21 | } 22 | 23 | macro_rules! 
table { 24 | ($typ:ident, $dummy:ident, $var:pat => $name:expr, $c:expr) => { 25 | impl Table for $typ { 26 | type MigrateFrom = Self; 27 | type Ext = $dummy; 28 | 29 | const TOKEN: Self = $c; 30 | 31 | type Schema = Pragma; 32 | type Referer = (); 33 | fn get_referer_unchecked() -> Self::Referer {} 34 | 35 | fn name(&self) -> String { 36 | let $var = self; 37 | $name 38 | } 39 | 40 | fn typs(_f: &mut hash::TypBuilder) {} 41 | 42 | type Conflict<'t> = Infallible; 43 | type UpdateOk<'t> = (); 44 | type Update<'t> = (); 45 | type Insert<'t> = (); 46 | 47 | fn read<'t>(_val: &Self::Insert<'t>, _f: &mut Reader<'t, Self::Schema>) { 48 | unreachable!() 49 | } 50 | 51 | fn get_conflict_unchecked<'t>( 52 | _txn: &crate::Transaction<'t, Self::Schema>, 53 | _val: &Self::Insert<'t>, 54 | ) -> Self::Conflict<'t> { 55 | unreachable!() 56 | } 57 | 58 | fn update_into_try_update(_val: Self::UpdateOk<'_>) -> Self::Update<'_> { 59 | unreachable!() 60 | } 61 | 62 | fn apply_try_update<'t>( 63 | _val: Self::Update<'t>, 64 | _old: Expr<'t, Self::Schema, Self>, 65 | ) -> Self::Insert<'t> { 66 | unreachable!() 67 | } 68 | 69 | const ID: &'static str = ""; 70 | const NAME: &'static str = ""; 71 | } 72 | }; 73 | } 74 | 75 | pub struct Pragma; 76 | 77 | struct TableList; 78 | 79 | #[repr(transparent)] 80 | #[derive(RefCast)] 81 | struct TableListSelect(T); 82 | 83 | #[allow(unused)] 84 | impl<'x> TableListSelect> { 85 | field! {schema: String} 86 | field! {name: String} 87 | field! {r#type("type"): String} 88 | field! {ncol: i64} 89 | field! {wr: i64} 90 | field! {strict: i64} 91 | } 92 | 93 | table! {TableList, TableListSelect, _ => "pragma_table_list".to_owned(), TableList} 94 | 95 | struct TableInfo(pub String); 96 | 97 | #[repr(transparent)] 98 | #[derive(RefCast)] 99 | struct TableInfoSelect(T); 100 | 101 | impl<'x> TableInfoSelect> { 102 | field! {name: String} 103 | field! {r#type("type"): String} 104 | field! {notnull: i64} 105 | field! {pk: i64} 106 | } 107 | 108 | table! 
{TableInfo, TableInfoSelect, val => format!("pragma_table_info('{}', 'main')", val.0), TableInfo(String::new())} 109 | 110 | struct ForeignKeyList(pub String); 111 | 112 | #[repr(transparent)] 113 | #[derive(RefCast)] 114 | struct ForeignKeyListSelect(T); 115 | 116 | #[allow(unused)] 117 | impl<'x> ForeignKeyListSelect> { 118 | field! {table: String} 119 | field! {from: String} 120 | field! {to: String} 121 | } 122 | 123 | table! {ForeignKeyList, ForeignKeyListSelect, val => format!("pragma_foreign_key_list('{}', 'main')", val.0), ForeignKeyList(String::new())} 124 | 125 | struct IndexList(String); 126 | 127 | #[repr(transparent)] 128 | #[derive(RefCast)] 129 | struct IndexListSelect(T); 130 | 131 | impl<'x> IndexListSelect> { 132 | field! {name: String} 133 | field! {unique: bool} 134 | field! {origin: String} 135 | field! {partial: bool} 136 | } 137 | 138 | table! {IndexList, IndexListSelect, val => format!("pragma_index_list('{}', 'main')", val.0), IndexList(String::new())} 139 | 140 | struct IndexInfo(String); 141 | 142 | #[repr(transparent)] 143 | #[derive(RefCast)] 144 | struct IndexInfoSelect(T); 145 | 146 | impl<'x> IndexInfoSelect> { 147 | field! {name: Option} 148 | } 149 | 150 | table! 
{IndexInfo, IndexInfoSelect, val => format!("pragma_index_info('{}', 'main')", val.0), IndexInfo(String::new())} 151 | 152 | pub fn read_schema(conn: &Transaction) -> hash::Schema { 153 | #[derive(Clone, FromExpr)] 154 | #[rust_query(From = TableInfo)] 155 | struct Column { 156 | name: String, 157 | r#type: String, 158 | pk: i64, 159 | notnull: i64, 160 | } 161 | 162 | let tables = conn.query(|q| { 163 | let table = q.join_custom(TableList); 164 | q.filter(table.schema().eq("main")); 165 | q.filter(table.r#type().eq("table")); 166 | q.filter(table.name().eq("sqlite_schema").not()); 167 | q.filter(table.name().eq("sqlite_stat1").not()); 168 | q.into_vec(table.name()) 169 | }); 170 | 171 | let mut output = hash::Schema::default(); 172 | 173 | for table_name in tables { 174 | let mut columns: Vec = conn.query(|q| { 175 | let table = q.join_custom(TableInfo(table_name.clone())); 176 | q.into_vec(Column::from_expr(table)) 177 | }); 178 | 179 | let fks: HashMap<_, _> = conn 180 | .query(|q| { 181 | let fk = q.join_custom(ForeignKeyList(table_name.to_owned())); 182 | q.into_vec((fk.from(), fk.table())) 183 | }) 184 | .into_iter() 185 | .collect(); 186 | 187 | let make_type = |col: &Column| match col.r#type.as_str() { 188 | "INTEGER" => hash::ColumnType::Integer, 189 | "TEXT" => hash::ColumnType::String, 190 | "REAL" => hash::ColumnType::Float, 191 | t => panic!("unknown type {t}"), 192 | }; 193 | 194 | // we only care about columns that are not a unique id and for which we know the type 195 | columns.retain(|col| { 196 | if col.pk != 0 { 197 | assert_eq!(col.name, "id"); 198 | return false; 199 | } 200 | true 201 | }); 202 | 203 | let mut table_def = hash::Table::default(); 204 | for col in columns { 205 | let def = hash::Column { 206 | fk: fks.get(&col.name).map(|x| (x.clone(), "id".to_owned())), 207 | typ: make_type(&col), 208 | name: col.name, 209 | nullable: col.notnull == 0, 210 | }; 211 | table_def.columns.insert(def) 212 | } 213 | 214 | let uniques = conn.query(|q| 
{ 215 | let index = q.join_custom(IndexList(table_name.clone())); 216 | q.filter(index.unique()); 217 | q.filter(index.origin().eq("u")); 218 | q.filter(index.partial().not()); 219 | q.into_vec(index.name()) 220 | }); 221 | 222 | for unique_name in uniques { 223 | let columns = conn.query(|q| { 224 | let col = q.join_custom(IndexInfo(unique_name)); 225 | let name = q.filter_some(col.name()); 226 | q.into_vec(name) 227 | }); 228 | 229 | let mut unique_def = hash::Unique::default(); 230 | for column in columns { 231 | unique_def.columns.insert(column); 232 | } 233 | table_def.uniques.insert(unique_def); 234 | } 235 | 236 | output.tables.insert((table_name, table_def)) 237 | } 238 | output 239 | } 240 | -------------------------------------------------------------------------------- /src/value/aggregate.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | marker::PhantomData, 3 | ops::{Deref, DerefMut}, 4 | rc::Rc, 5 | }; 6 | 7 | use ref_cast::RefCast; 8 | use sea_query::{Func, SelectStatement, SimpleExpr}; 9 | 10 | use crate::{ 11 | Expr, Table, 12 | alias::MyAlias, 13 | rows::Rows, 14 | value::{EqTyp, IntoExpr, MyTyp, NumTyp, Typed, ValueBuilder}, 15 | }; 16 | 17 | use super::DynTypedExpr; 18 | 19 | /// This is the argument type used for [aggregate]. 
20 | pub struct Aggregate<'outer, 'inner, S> { 21 | pub(crate) query: Rows<'inner, S>, 22 | _p: PhantomData<&'inner &'outer ()>, 23 | } 24 | 25 | impl<'inner, S> Deref for Aggregate<'_, 'inner, S> { 26 | type Target = Rows<'inner, S>; 27 | 28 | fn deref(&self) -> &Self::Target { 29 | &self.query 30 | } 31 | } 32 | 33 | impl DerefMut for Aggregate<'_, '_, S> { 34 | fn deref_mut(&mut self) -> &mut Self::Target { 35 | &mut self.query 36 | } 37 | } 38 | 39 | impl<'outer, 'inner, S: 'static> Aggregate<'outer, 'inner, S> { 40 | fn select( 41 | &self, 42 | expr: impl 'static + Fn(&mut ValueBuilder) -> SimpleExpr, 43 | ) -> Aggr> { 44 | let expr = DynTypedExpr(Rc::new(expr)); 45 | let mut builder = self.query.ast.clone().full(); 46 | let (select, mut fields) = builder.build_select(true, vec![expr]); 47 | 48 | let conds = builder.forwarded.into_iter().map(|x| x.1.1).collect(); 49 | 50 | Aggr { 51 | _p2: PhantomData, 52 | select: Rc::new(select), 53 | field: { 54 | debug_assert_eq!(fields.len(), 1); 55 | fields.swap_remove(0) 56 | }, 57 | conds, 58 | } 59 | } 60 | 61 | /// Filter the rows of this sub-query based on a value from the outer query. 62 | #[deprecated = "Please use `Rows::filter` instead"] 63 | pub fn filter_on( 64 | &mut self, 65 | val: impl IntoExpr<'inner, S, Typ = T>, 66 | on: impl IntoExpr<'outer, S, Typ = T>, 67 | ) { 68 | let on = on.into_expr(); 69 | self.filter(val.into_expr().eq(on)) 70 | } 71 | 72 | /// Return the average value in a column, this is [None] if there are zero rows. 73 | pub fn avg(&self, val: impl IntoExpr<'inner, S, Typ = f64>) -> Expr<'outer, S, Option> { 74 | let val = val.into_expr().inner; 75 | Expr::new(self.select(move |b| Func::avg(val.build_expr(b)).into())) 76 | } 77 | 78 | /// Return the maximum value in a column, this is [None] if there are zero rows. 
79 | pub fn max(&self, val: impl IntoExpr<'inner, S, Typ = T>) -> Expr<'outer, S, Option> 80 | where 81 | T: NumTyp, 82 | { 83 | let val = val.into_expr().inner; 84 | Expr::new(self.select(move |b| Func::max(val.build_expr(b)).into())) 85 | } 86 | 87 | /// Return the minimum value in a column, this is [None] if there are zero rows. 88 | pub fn min(&self, val: impl IntoExpr<'inner, S, Typ = T>) -> Expr<'outer, S, Option> 89 | where 90 | T: NumTyp, 91 | { 92 | let val = val.into_expr().inner; 93 | Expr::new(self.select(move |b| Func::min(val.build_expr(b)).into())) 94 | } 95 | 96 | /// Return the sum of a column. 97 | pub fn sum(&self, val: impl IntoExpr<'inner, S, Typ = T>) -> Expr<'outer, S, T> 98 | where 99 | T: NumTyp, 100 | { 101 | let val = val.into_expr().inner; 102 | let val = self.select::(move |b| Func::sum(val.build_expr(b)).into()); 103 | 104 | Expr::adhoc(move |b| { 105 | sea_query::Expr::expr(val.build_expr(b)) 106 | .if_null(SimpleExpr::Constant(T::ZERO.into_sea_value())) 107 | }) 108 | } 109 | 110 | /// Return the number of distinct values in a column. 111 | pub fn count_distinct( 112 | &self, 113 | val: impl IntoExpr<'inner, S, Typ = T>, 114 | ) -> Expr<'outer, S, i64> { 115 | let val = val.into_expr().inner; 116 | let val = self.select::(move |b| Func::count_distinct(val.build_expr(b)).into()); 117 | Expr::adhoc(move |b| { 118 | sea_query::Expr::expr(val.build_expr(b)) 119 | .if_null(SimpleExpr::Constant(0i64.into_sea_value())) 120 | }) 121 | } 122 | 123 | /// Return whether there are any rows. 
124 | pub fn exists(&self) -> Expr<'outer, S, bool> { 125 | let val = self.select::(|_| SimpleExpr::Constant(1.into_sea_value())); 126 | Expr::adhoc(move |b| sea_query::Expr::expr(val.build_expr(b)).is_not_null()) 127 | } 128 | } 129 | 130 | pub struct Aggr { 131 | pub(crate) _p2: PhantomData<(S, T)>, 132 | pub(crate) select: Rc, 133 | pub(crate) conds: Vec, 134 | pub(crate) field: MyAlias, 135 | } 136 | 137 | impl Clone for Aggr { 138 | fn clone(&self) -> Self { 139 | Self { 140 | _p2: PhantomData, 141 | select: self.select.clone(), 142 | conds: self.conds.clone(), 143 | field: self.field, 144 | } 145 | } 146 | } 147 | 148 | impl Typed for Aggr { 149 | type Typ = T; 150 | fn build_expr(&self, b: &mut ValueBuilder) -> SimpleExpr { 151 | sea_query::Expr::col((self.build_table(b), self.field)).into() 152 | } 153 | } 154 | 155 | impl Aggr { 156 | fn build_table(&self, b: &mut ValueBuilder) -> MyAlias { 157 | let conds = self.conds.iter().map(|expr| (expr.0)(b)).collect(); 158 | b.get_aggr(self.select.clone(), conds) 159 | } 160 | } 161 | 162 | impl Deref for Aggr { 163 | type Target = T::Ext; 164 | 165 | fn deref(&self) -> &Self::Target { 166 | RefCast::ref_cast(self) 167 | } 168 | } 169 | 170 | /// Perform an aggregate that returns a single result for each of the current rows. 171 | /// 172 | /// You can filter the rows in the aggregate based on values from the outer query. 173 | /// That is the only way to get a different aggregate for each outer row. 
174 | /// 175 | /// ``` 176 | /// # use rust_query::aggregate; 177 | /// # use rust_query::private::doctest::*; 178 | /// # let mut client = get_client(); 179 | /// # let txn = get_txn(&mut client); 180 | /// let res = txn.query_one(aggregate(|rows| { 181 | /// let user = rows.join(User); 182 | /// rows.count_distinct(user) 183 | /// })); 184 | /// assert_eq!(res, 1, "there is one user in the database"); 185 | /// ``` 186 | pub fn aggregate<'outer, S, F, R>(f: F) -> R 187 | where 188 | F: for<'inner> FnOnce(&mut Aggregate<'outer, 'inner, S>) -> R, 189 | { 190 | let inner = Rows { 191 | phantom: PhantomData, 192 | ast: Default::default(), 193 | _p: PhantomData, 194 | }; 195 | let mut group = Aggregate { 196 | query: inner, 197 | _p: PhantomData, 198 | }; 199 | f(&mut group) 200 | } 201 | -------------------------------------------------------------------------------- /src/value/optional.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use sea_query::Nullable; 4 | 5 | use crate::{ 6 | IntoSelect, 7 | dummy_impl::{Cached, Cacher, ColumnImpl, Prepared, Row, Select, SelectImpl}, 8 | }; 9 | 10 | use super::{DynTyped, Expr, IntoExpr, MyTyp, Typed}; 11 | 12 | /// This is a combinator function that allows constructing single row optional queries. 
13 | /// 14 | /// ``` 15 | /// # use rust_query::IntoExpr; 16 | /// # let mut client = rust_query::private::doctest::get_client(); 17 | /// # let txn = rust_query::private::doctest::get_txn(&mut client); 18 | /// # use rust_query::optional; 19 | /// let res = txn.query_one(optional(|row| { 20 | /// let x = row.and(Some("test")); 21 | /// let y = row.and(Some(42)); 22 | /// row.then((x, y)) 23 | /// })); 24 | /// assert_eq!(res, Some(("test".to_owned(), 42))); 25 | /// ``` 26 | /// 27 | /// ``` 28 | /// # use rust_query::IntoExpr; 29 | /// # let mut client = rust_query::private::doctest::get_client(); 30 | /// # let txn = rust_query::private::doctest::get_txn(&mut client); 31 | /// # use rust_query::optional; 32 | /// let res = txn.query_one(optional(|row| { 33 | /// let x = row.and(Some("test")); 34 | /// let y = row.and(None::); 35 | /// row.then((x, y)) 36 | /// })); 37 | /// assert_eq!(res, None); 38 | /// ``` 39 | pub fn optional<'outer, S, R>( 40 | f: impl for<'inner> FnOnce(&mut Optional<'outer, 'inner, S>) -> R, 41 | ) -> R { 42 | let mut optional = Optional { 43 | nulls: Vec::new(), 44 | _p: PhantomData, 45 | _p2: PhantomData, 46 | }; 47 | f(&mut optional) 48 | } 49 | 50 | /// This is the argument type used by the [optional] combinator. 51 | /// 52 | /// Joining more optional columns can be done with the [Optional::and] method. 53 | /// Finally it is possible to return selections or expressions using [Optional::then] and [Optional::then_expr]. 54 | pub struct Optional<'outer, 'inner, S> { 55 | nulls: Vec>, 56 | _p: PhantomData &'inner &'outer ()>, 57 | _p2: PhantomData, 58 | } 59 | 60 | impl<'outer, 'inner, S> Optional<'outer, 'inner, S> { 61 | /// Join an optional column to the current row. 62 | /// 63 | /// If the joined column is [None], then the whole [optional] combinator will return [None]. 
64 | #[doc(alias = "join")] 65 | pub fn and( 66 | &mut self, 67 | col: impl IntoExpr<'inner, S, Typ = Option>, 68 | ) -> Expr<'inner, S, T> { 69 | let column = col.into_expr(); 70 | self.nulls.push(column.is_none().into_expr().inner); 71 | Expr::adhoc(move |b| column.inner.build_expr(b)) 72 | } 73 | 74 | pub fn is_none(&self) -> Expr<'outer, S, bool> { 75 | let nulls = self.nulls.clone(); 76 | Expr::adhoc(move |b| { 77 | nulls 78 | .iter() 79 | .map(|x| x.build_expr(b)) 80 | .reduce(|a, b| a.or(b)) 81 | .unwrap_or(false.into()) 82 | }) 83 | } 84 | 85 | /// Return a [bool] column indicating whether the current row exists. 86 | pub fn is_some(&self) -> Expr<'outer, S, bool> { 87 | self.is_none().not() 88 | } 89 | 90 | /// Return [Some] column if the current row exists and [None] column otherwise. 91 | pub fn then_expr + 'outer>( 92 | &self, 93 | col: impl IntoExpr<'inner, S, Typ = T>, 94 | ) -> Expr<'outer, S, Option> { 95 | const NULL: sea_query::SimpleExpr = 96 | sea_query::SimpleExpr::Keyword(sea_query::Keyword::Null); 97 | 98 | let col = col.into_expr().inner; 99 | let is_none = self.is_none().inner; 100 | Expr::adhoc(move |b| { 101 | sea_query::Expr::case(is_none.build_expr(b), NULL) 102 | .finally(col.build_expr(b)) 103 | .into() 104 | }) 105 | } 106 | 107 | /// Returns a [Select] with optional result. 
108 | pub fn then<'transaction, Out: 'transaction>( 109 | &self, 110 | d: impl IntoSelect<'inner, 'transaction, S, Out = Out>, 111 | ) -> Select<'outer, 'transaction, S, Option> { 112 | Select::new(OptionalImpl { 113 | inner: d.into_select().inner, 114 | is_some: ColumnImpl { 115 | expr: self.is_some().into_expr().inner, 116 | }, 117 | }) 118 | } 119 | } 120 | 121 | pub struct OptionalImpl { 122 | inner: X, 123 | is_some: ColumnImpl, 124 | } 125 | 126 | impl<'transaction, X: SelectImpl<'transaction>> SelectImpl<'transaction> for OptionalImpl { 127 | type Out = Option; 128 | type Prepared = OptionalPrepared; 129 | 130 | fn prepare(self, cacher: &mut Cacher) -> Self::Prepared { 131 | OptionalPrepared { 132 | is_some: self.is_some.prepare(cacher), 133 | inner: self.inner.prepare(cacher), 134 | } 135 | } 136 | } 137 | 138 | pub struct OptionalPrepared { 139 | inner: X, 140 | is_some: Cached, 141 | } 142 | 143 | impl Prepared for OptionalPrepared { 144 | type Out = Option; 145 | 146 | fn call(&mut self, row: Row<'_>) -> Self::Out { 147 | if row.get(self.is_some) { 148 | Some(self.inner.call(row)) 149 | } else { 150 | None 151 | } 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /src/value/trivial.rs: -------------------------------------------------------------------------------- 1 | use crate::{IntoExpr, IntoSelect, Table, TableRow, dummy_impl::Select, optional}; 2 | 3 | use super::MyTyp; 4 | 5 | /// Trait for values that can be retrieved from the database using one expression. 6 | /// 7 | /// This is most likely the trait that you want to implement for your custom datatype. 8 | /// Together with the [crate::IntoExpr] trait. 9 | /// 10 | /// Note that this trait can also be implemented using the [derive@rust_query::FromExpr] derive macro. 11 | pub trait FromExpr<'transaction, S, From>: 'transaction + Sized { 12 | /// How to turn a column reference into a [Select]. 
13 | fn from_expr<'columns>( 14 | col: impl IntoExpr<'columns, S, Typ = From>, 15 | ) -> Select<'columns, 'transaction, S, Self>; 16 | } 17 | 18 | macro_rules! from_expr { 19 | ($typ:ty) => { 20 | impl<'transaction, S> FromExpr<'transaction, S, $typ> for $typ { 21 | fn from_expr<'columns>( 22 | col: impl IntoExpr<'columns, S, Typ = $typ>, 23 | ) -> Select<'columns, 'transaction, S, Self> { 24 | col.into_expr().into_select() 25 | } 26 | } 27 | }; 28 | } 29 | 30 | from_expr! {String} 31 | from_expr! {Vec} 32 | from_expr! {i64} 33 | from_expr! {f64} 34 | from_expr! {bool} 35 | 36 | impl<'transaction, T: Table> FromExpr<'transaction, T::Schema, T> for TableRow<'transaction, T> { 37 | fn from_expr<'columns>( 38 | col: impl IntoExpr<'columns, T::Schema, Typ = T>, 39 | ) -> Select<'columns, 'transaction, T::Schema, Self> { 40 | col.into_expr().into_select() 41 | } 42 | } 43 | 44 | impl<'transaction, S, T, From: MyTyp> FromExpr<'transaction, S, Option> for Option 45 | where 46 | T: FromExpr<'transaction, S, From>, 47 | { 48 | fn from_expr<'columns>( 49 | col: impl IntoExpr<'columns, S, Typ = Option>, 50 | ) -> Select<'columns, 'transaction, S, Self> { 51 | let col = col.into_expr(); 52 | optional(|row| { 53 | let col = row.and(col); 54 | row.then(T::from_expr(col)) 55 | }) 56 | } 57 | } 58 | 59 | impl<'transaction, S, From> FromExpr<'transaction, S, From> for () { 60 | fn from_expr<'columns>( 61 | _col: impl IntoExpr<'columns, S, Typ = From>, 62 | ) -> Select<'columns, 'transaction, S, Self> { 63 | ().into_select() 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/writable.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use crate::{ 4 | Expr, IntoExpr, Table, 5 | value::{DynTypedExpr, NumTyp}, 6 | }; 7 | 8 | /// Defines a column update. 
9 | pub struct Update<'t, S, Typ> { 10 | inner: Box) -> Expr<'t, S, Typ>>, 11 | } 12 | 13 | impl Default for Update<'_, S, Typ> { 14 | fn default() -> Self { 15 | Self { 16 | inner: Box::new(|x| x), 17 | } 18 | } 19 | } 20 | 21 | impl<'t, S: 't, Typ: 't> Update<'t, S, Typ> { 22 | /// Set the new value of the column. 23 | pub fn set(val: impl IntoExpr<'t, S, Typ = Typ>) -> Self { 24 | let val = val.into_expr(); 25 | Self { 26 | inner: Box::new(move |_| val.clone()), 27 | } 28 | } 29 | 30 | #[doc(hidden)] 31 | pub fn apply(&self, val: impl IntoExpr<'t, S, Typ = Typ>) -> Expr<'t, S, Typ> { 32 | (self.inner)(val.into_expr()) 33 | } 34 | } 35 | 36 | impl<'t, S: 't, Typ: NumTyp> Update<'t, S, Typ> { 37 | /// Update the column value to the old value plus some new value. 38 | pub fn add(val: impl IntoExpr<'t, S, Typ = Typ>) -> Self { 39 | let val = val.into_expr(); 40 | Self { 41 | inner: Box::new(move |old| old.add(&val)), 42 | } 43 | } 44 | } 45 | 46 | /// this trait has to be implemented by the `schema` macro. 
47 | pub trait TableInsert<'t> { 48 | type T: Table; 49 | fn into_insert(self) -> ::Insert<'t>; 50 | } 51 | 52 | pub struct Reader<'t, S> { 53 | pub(crate) builder: Vec<(&'static str, DynTypedExpr)>, 54 | pub(crate) _p: PhantomData, 55 | pub(crate) _p2: PhantomData &'t ()>, 56 | } 57 | 58 | impl<'t, S> Default for Reader<'t, S> { 59 | fn default() -> Self { 60 | Self { 61 | builder: Default::default(), 62 | _p: Default::default(), 63 | _p2: Default::default(), 64 | } 65 | } 66 | } 67 | 68 | impl<'t, S> Reader<'t, S> { 69 | pub fn col(&mut self, name: &'static str, val: impl IntoExpr<'t, S>) { 70 | self.col_erased(name, val.into_expr().inner.erase()); 71 | } 72 | 73 | pub(crate) fn col_erased(&mut self, name: &'static str, val: DynTypedExpr) { 74 | self.builder.push((name, val)); 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /tests/chinook/expect/artist_details.dbg: -------------------------------------------------------------------------------- 1 | [ 2 | ArtistDetails { 3 | name: "U2", 4 | album_count: 10, 5 | track_stats: TrackStats { 6 | avg_len_milis: Some( 7 | 262385.05925925926, 8 | ), 9 | max_len_milis: Some( 10 | 591986, 11 | ), 12 | genre_count: 2, 13 | }, 14 | }, 15 | ] 16 | -------------------------------------------------------------------------------- /tests/chinook/expect/artist_details.plan: -------------------------------------------------------------------------------- 1 | QUERY PLAN [ 2 | CO-ROUTINE [ 3 | SCAN CONSTANT ROW, 4 | ], 5 | MATERIALIZE _1 [ 6 | USE TEMP B-TREE FOR count(DISTINCT), 7 | SCAN _0, 8 | ], 9 | MATERIALIZE _2 [ 10 | SCAN _0, 11 | SEARCH _1 USING INTEGER PRIMARY KEY (rowid=?), 12 | ], 13 | MATERIALIZE _3 [ 14 | SCAN _0, 15 | SEARCH _1 USING INTEGER PRIMARY KEY (rowid=?), 16 | ], 17 | MATERIALIZE _4 [ 18 | USE TEMP B-TREE FOR count(DISTINCT), 19 | SCAN _0, 20 | SEARCH _1 USING INTEGER PRIMARY KEY (rowid=?), 21 | ], 22 | SCAN , 23 | SEARCH _0 USING INTEGER PRIMARY KEY (rowid=?) 
LEFT-JOIN, 24 | SCAN _1 LEFT-JOIN, 25 | SCAN _2 LEFT-JOIN, 26 | SCAN _3 LEFT-JOIN, 27 | SCAN _4 LEFT-JOIN, 28 | ] 29 | -------------------------------------------------------------------------------- /tests/chinook/expect/avg_album_track_count_for_artist.dbg: -------------------------------------------------------------------------------- 1 | [ 2 | ( 3 | "A Cor Do Som", 4 | None, 5 | ), 6 | ( 7 | "AC/DC", 8 | Some( 9 | 9.0, 10 | ), 11 | ), 12 | ( 13 | "Aaron Copland & London Symphony Orchestra", 14 | Some( 15 | 1.0, 16 | ), 17 | ), 18 | ( 19 | "Aaron Goldberg", 20 | Some( 21 | 1.0, 22 | ), 23 | ), 24 | ( 25 | "Academy of St. Martin in the Fields & Sir Neville Marriner", 26 | Some( 27 | 2.0, 28 | ), 29 | ), 30 | ( 31 | "Academy of St. Martin in the Fields Chamber Ensemble & Sir Neville Marriner", 32 | Some( 33 | 1.0, 34 | ), 35 | ), 36 | ( 37 | "Academy of St. Martin in the Fields, John Birch, Sir Neville Marriner & Sylvia McNair", 38 | Some( 39 | 1.0, 40 | ), 41 | ), 42 | ( 43 | "Academy of St. Martin in the Fields, Sir Neville Marriner & Thurston Dart", 44 | Some( 45 | 1.0, 46 | ), 47 | ), 48 | ( 49 | "Academy of St. 
Martin in the Fields, Sir Neville Marriner & William Bennett", 50 | None, 51 | ), 52 | ( 53 | "Accept", 54 | Some( 55 | 2.0, 56 | ), 57 | ), 58 | ( 59 | "Adrian Leaper & Doreen de Feis", 60 | Some( 61 | 1.0, 62 | ), 63 | ), 64 | ( 65 | "Aerosmith", 66 | Some( 67 | 15.0, 68 | ), 69 | ), 70 | ( 71 | "Aerosmith & Sierra Leone's Refugee Allstars", 72 | None, 73 | ), 74 | ( 75 | "Aisha Duo", 76 | Some( 77 | 2.0, 78 | ), 79 | ), 80 | ( 81 | "Alanis Morissette", 82 | Some( 83 | 13.0, 84 | ), 85 | ), 86 | ( 87 | "Alberto Turco & Nova Schola Gregoriana", 88 | Some( 89 | 1.0, 90 | ), 91 | ), 92 | ( 93 | "Alice In Chains", 94 | Some( 95 | 12.0, 96 | ), 97 | ), 98 | ( 99 | "Amy Winehouse", 100 | Some( 101 | 11.5, 102 | ), 103 | ), 104 | ( 105 | "Anne-Sophie Mutter, Herbert Von Karajan & Wiener Philharmoniker", 106 | Some( 107 | 1.0, 108 | ), 109 | ), 110 | ( 111 | "Antal Doráti & London Symphony Orchestra", 112 | Some( 113 | 1.0, 114 | ), 115 | ), 116 | ] 117 | -------------------------------------------------------------------------------- /tests/chinook/expect/avg_album_track_count_for_artist.plan: -------------------------------------------------------------------------------- 1 | QUERY PLAN [ 2 | MATERIALIZE _1 [ 3 | MATERIALIZE _1 [ 4 | SCAN _0, 5 | SEARCH _1 USING INTEGER PRIMARY KEY (rowid=?), 6 | USE TEMP B-TREE FOR GROUP BY, 7 | ], 8 | SCAN _2, 9 | BLOOM FILTER ON _0 (artist=?), 10 | SEARCH _0 USING AUTOMATIC COVERING INDEX (artist=?), 11 | BLOOM FILTER ON _1 (_0=?), 12 | SEARCH _1 USING AUTOMATIC COVERING INDEX (_0=?) LEFT-JOIN, 13 | ], 14 | SCAN _0, 15 | BLOOM FILTER ON _1 (_0=?), 16 | SEARCH _1 USING AUTOMATIC COVERING INDEX (_0=?) 
LEFT-JOIN, 17 | ] 18 | -------------------------------------------------------------------------------- /tests/chinook/expect/count_reporting.dbg: -------------------------------------------------------------------------------- 1 | [ 2 | ( 3 | "Andrew", 4 | 2, 5 | ), 6 | ( 7 | "Jane", 8 | 0, 9 | ), 10 | ( 11 | "Laura", 12 | 0, 13 | ), 14 | ( 15 | "Margaret", 16 | 0, 17 | ), 18 | ( 19 | "Michael", 20 | 2, 21 | ), 22 | ( 23 | "Nancy", 24 | 3, 25 | ), 26 | ( 27 | "Robert", 28 | 0, 29 | ), 30 | ( 31 | "Steve", 32 | 0, 33 | ), 34 | ] 35 | -------------------------------------------------------------------------------- /tests/chinook/expect/count_reporting.plan: -------------------------------------------------------------------------------- 1 | QUERY PLAN [ 2 | MATERIALIZE _1 [ 3 | SCAN _0, 4 | SEARCH _1 USING INTEGER PRIMARY KEY (rowid=?), 5 | USE TEMP B-TREE FOR GROUP BY, 6 | ], 7 | SCAN _0, 8 | BLOOM FILTER ON _1 (_0=?), 9 | SEARCH _1 USING AUTOMATIC COVERING INDEX (_0=?) LEFT-JOIN, 10 | ] 11 | -------------------------------------------------------------------------------- /tests/chinook/expect/customer_spending.dbg: -------------------------------------------------------------------------------- 1 | [ 2 | CustomerSpending { 3 | customer_name: "Almeida", 4 | total_spending: 37.62, 5 | }, 6 | CustomerSpending { 7 | customer_name: "Barnett", 8 | total_spending: 43.62, 9 | }, 10 | CustomerSpending { 11 | customer_name: "Bernard", 12 | total_spending: 38.62, 13 | }, 14 | CustomerSpending { 15 | customer_name: "Brooks", 16 | total_spending: 37.62, 17 | }, 18 | CustomerSpending { 19 | customer_name: "Brown", 20 | total_spending: 37.62, 21 | }, 22 | CustomerSpending { 23 | customer_name: "Chase", 24 | total_spending: 37.62, 25 | }, 26 | CustomerSpending { 27 | customer_name: "Cunningham", 28 | total_spending: 47.62, 29 | }, 30 | CustomerSpending { 31 | customer_name: "Dubois", 32 | total_spending: 37.62, 33 | }, 34 | CustomerSpending { 35 | customer_name: "Fernandes", 36 | 
total_spending: 39.62, 37 | }, 38 | CustomerSpending { 39 | customer_name: "Francis", 40 | total_spending: 37.62, 41 | }, 42 | CustomerSpending { 43 | customer_name: "Girard", 44 | total_spending: 39.62, 45 | }, 46 | CustomerSpending { 47 | customer_name: "Gonçalves", 48 | total_spending: 39.62, 49 | }, 50 | CustomerSpending { 51 | customer_name: "Gordon", 52 | total_spending: 37.62, 53 | }, 54 | CustomerSpending { 55 | customer_name: "Goyer", 56 | total_spending: 38.62, 57 | }, 58 | CustomerSpending { 59 | customer_name: "Gray", 60 | total_spending: 37.62, 61 | }, 62 | CustomerSpending { 63 | customer_name: "Gruber", 64 | total_spending: 42.62, 65 | }, 66 | CustomerSpending { 67 | customer_name: "Gutiérrez", 68 | total_spending: 37.62, 69 | }, 70 | CustomerSpending { 71 | customer_name: "Hansen", 72 | total_spending: 39.62, 73 | }, 74 | CustomerSpending { 75 | customer_name: "Harris", 76 | total_spending: 37.62, 77 | }, 78 | CustomerSpending { 79 | customer_name: "Holý", 80 | total_spending: 49.62, 81 | }, 82 | ] 83 | -------------------------------------------------------------------------------- /tests/chinook/expect/customer_spending.plan: -------------------------------------------------------------------------------- 1 | QUERY PLAN [ 2 | MATERIALIZE _1 [ 3 | SCAN _0, 4 | SEARCH _1 USING INTEGER PRIMARY KEY (rowid=?), 5 | USE TEMP B-TREE FOR GROUP BY, 6 | ], 7 | SCAN _0, 8 | BLOOM FILTER ON _1 (_0=?), 9 | SEARCH _1 USING AUTOMATIC COVERING INDEX (_0=?) 
LEFT-JOIN, 10 | ] 11 | -------------------------------------------------------------------------------- /tests/chinook/expect/filtered_track.dbg: -------------------------------------------------------------------------------- 1 | [ 2 | FilteredTrack { 3 | track_name: "The Hellion", 4 | album_name: "Living After Midnight", 5 | stats: Stats { 6 | milis: 41900, 7 | }, 8 | }, 9 | ] 10 | -------------------------------------------------------------------------------- /tests/chinook/expect/filtered_track.plan: -------------------------------------------------------------------------------- 1 | QUERY PLAN [ 2 | SCAN _0, 3 | SEARCH _1 USING INTEGER PRIMARY KEY (rowid=?) LEFT-JOIN, 4 | SEARCH _2 USING INTEGER PRIMARY KEY (rowid=?), 5 | ] 6 | -------------------------------------------------------------------------------- /tests/chinook/expect/genre_statistics.dbg: -------------------------------------------------------------------------------- 1 | [ 2 | GenreStats { 3 | genre_name: "Alternative", 4 | byte_average: 5883473.6, 5 | milis_average: 264058.525, 6 | }, 7 | GenreStats { 8 | genre_name: "Alternative & Punk", 9 | byte_average: 7691002.942771085, 10 | milis_average: 234353.84939759035, 11 | }, 12 | GenreStats { 13 | genre_name: "Blues", 14 | byte_average: 8625575.629629629, 15 | milis_average: 270359.77777777775, 16 | }, 17 | GenreStats { 18 | genre_name: "Bossa Nova", 19 | byte_average: 7239057.0, 20 | milis_average: 219590.0, 21 | }, 22 | GenreStats { 23 | genre_name: "Classical", 24 | byte_average: 5220906.513513514, 25 | milis_average: 293867.5675675676, 26 | }, 27 | GenreStats { 28 | genre_name: "Comedy", 29 | byte_average: 316904465.7647059, 30 | milis_average: 1585263.705882353, 31 | }, 32 | GenreStats { 33 | genre_name: "Drama", 34 | byte_average: 506946966.765625, 35 | milis_average: 2575283.78125, 36 | }, 37 | GenreStats { 38 | genre_name: "Easy Listening", 39 | byte_average: 6160517.833333333, 40 | milis_average: 189164.20833333334, 41 | }, 42 | GenreStats 
{ 43 | genre_name: "Electronica/Dance", 44 | byte_average: 10691926.466666667, 45 | milis_average: 302985.8, 46 | }, 47 | GenreStats { 48 | genre_name: "Heavy Metal", 49 | byte_average: 9474752.142857144, 50 | milis_average: 297452.9285714286, 51 | }, 52 | GenreStats { 53 | genre_name: "Hip Hop/Rap", 54 | byte_average: 6534717.485714286, 55 | milis_average: 178176.2857142857, 56 | }, 57 | GenreStats { 58 | genre_name: "Jazz", 59 | byte_average: 9488136.546153845, 60 | milis_average: 291755.3769230769, 61 | }, 62 | GenreStats { 63 | genre_name: "Latin", 64 | byte_average: 7710588.575129534, 65 | milis_average: 232859.26252158894, 66 | }, 67 | GenreStats { 68 | genre_name: "Metal", 69 | byte_average: 9234573.256684491, 70 | milis_average: 309749.4438502674, 71 | }, 72 | GenreStats { 73 | genre_name: "Opera", 74 | byte_average: 2861468.0, 75 | milis_average: 174813.0, 76 | }, 77 | GenreStats { 78 | genre_name: "Pop", 79 | byte_average: 4745668.020833333, 80 | milis_average: 229034.10416666666, 81 | }, 82 | GenreStats { 83 | genre_name: "R&B/Soul", 84 | byte_average: 6575925.868852459, 85 | milis_average: 220066.8524590164, 86 | }, 87 | GenreStats { 88 | genre_name: "Reggae", 89 | byte_average: 8237493.24137931, 90 | milis_average: 247177.75862068965, 91 | }, 92 | GenreStats { 93 | genre_name: "Rock", 94 | byte_average: 9007374.267540477, 95 | milis_average: 283910.0431765613, 96 | }, 97 | GenreStats { 98 | genre_name: "Rock And Roll", 99 | byte_average: 2123262.25, 100 | milis_average: 134643.5, 101 | }, 102 | ] 103 | -------------------------------------------------------------------------------- /tests/chinook/expect/genre_statistics.plan: -------------------------------------------------------------------------------- 1 | QUERY PLAN [ 2 | MATERIALIZE _1 [ 3 | SCAN _0, 4 | SEARCH _1 USING INTEGER PRIMARY KEY (rowid=?), 5 | USE TEMP B-TREE FOR GROUP BY, 6 | ], 7 | MATERIALIZE _2 [ 8 | SCAN _0, 9 | SEARCH _1 USING INTEGER PRIMARY KEY (rowid=?), 10 | USE TEMP B-TREE 
FOR GROUP BY, 11 | ], 12 | SCAN _0, 13 | BLOOM FILTER ON _1 (_0=?), 14 | SEARCH _1 USING AUTOMATIC COVERING INDEX (_0=?) LEFT-JOIN, 15 | BLOOM FILTER ON _2 (_0=?), 16 | SEARCH _2 USING AUTOMATIC COVERING INDEX (_0=?) LEFT-JOIN, 17 | ] 18 | -------------------------------------------------------------------------------- /tests/chinook/expect/high_avg_invoice_total.dbg: -------------------------------------------------------------------------------- 1 | [ 2 | HighInvoiceInfo { 3 | customer_name: "Almeida", 4 | avg_spend: 5.374285714285714, 5 | high_avg_spend: 9.57, 6 | }, 7 | HighInvoiceInfo { 8 | customer_name: "Barnett", 9 | avg_spend: 6.231428571428571, 10 | high_avg_spend: 11.57, 11 | }, 12 | HighInvoiceInfo { 13 | customer_name: "Bernard", 14 | avg_spend: 5.517142857142857, 15 | high_avg_spend: 9.57, 16 | }, 17 | HighInvoiceInfo { 18 | customer_name: "Brooks", 19 | avg_spend: 5.374285714285714, 20 | high_avg_spend: 9.57, 21 | }, 22 | HighInvoiceInfo { 23 | customer_name: "Brown", 24 | avg_spend: 5.374285714285714, 25 | high_avg_spend: 9.57, 26 | }, 27 | HighInvoiceInfo { 28 | customer_name: "Chase", 29 | avg_spend: 5.374285714285714, 30 | high_avg_spend: 9.57, 31 | }, 32 | HighInvoiceInfo { 33 | customer_name: "Cunningham", 34 | avg_spend: 6.8028571428571425, 35 | high_avg_spend: 16.384999999999998, 36 | }, 37 | HighInvoiceInfo { 38 | customer_name: "Dubois", 39 | avg_spend: 5.374285714285714, 40 | high_avg_spend: 9.57, 41 | }, 42 | HighInvoiceInfo { 43 | customer_name: "Fernandes", 44 | avg_spend: 5.659999999999999, 45 | high_avg_spend: 10.236666666666666, 46 | }, 47 | HighInvoiceInfo { 48 | customer_name: "Francis", 49 | avg_spend: 5.374285714285714, 50 | high_avg_spend: 9.57, 51 | }, 52 | HighInvoiceInfo { 53 | customer_name: "Girard", 54 | avg_spend: 5.659999999999999, 55 | high_avg_spend: 9.57, 56 | }, 57 | HighInvoiceInfo { 58 | customer_name: "Gonçalves", 59 | avg_spend: 5.659999999999999, 60 | high_avg_spend: 9.57, 61 | }, 62 | HighInvoiceInfo { 63 | 
customer_name: "Gordon", 64 | avg_spend: 5.374285714285714, 65 | high_avg_spend: 9.57, 66 | }, 67 | HighInvoiceInfo { 68 | customer_name: "Goyer", 69 | avg_spend: 5.517142857142857, 70 | high_avg_spend: 9.57, 71 | }, 72 | HighInvoiceInfo { 73 | customer_name: "Gray", 74 | avg_spend: 5.374285714285714, 75 | high_avg_spend: 9.57, 76 | }, 77 | HighInvoiceInfo { 78 | customer_name: "Gruber", 79 | avg_spend: 6.088571428571428, 80 | high_avg_spend: 13.885, 81 | }, 82 | HighInvoiceInfo { 83 | customer_name: "Gutiérrez", 84 | avg_spend: 5.374285714285714, 85 | high_avg_spend: 9.57, 86 | }, 87 | HighInvoiceInfo { 88 | customer_name: "Hansen", 89 | avg_spend: 5.659999999999999, 90 | high_avg_spend: 10.236666666666666, 91 | }, 92 | HighInvoiceInfo { 93 | customer_name: "Harris", 94 | avg_spend: 5.374285714285714, 95 | high_avg_spend: 9.57, 96 | }, 97 | HighInvoiceInfo { 98 | customer_name: "Holý", 99 | avg_spend: 7.088571428571428, 100 | high_avg_spend: 17.384999999999998, 101 | }, 102 | ] 103 | -------------------------------------------------------------------------------- /tests/chinook/expect/high_avg_invoice_total.plan: -------------------------------------------------------------------------------- 1 | QUERY PLAN [ 2 | CO-ROUTINE _1 [ 3 | SCAN _0, 4 | SEARCH _1 USING INTEGER PRIMARY KEY (rowid=?), 5 | USE TEMP B-TREE FOR GROUP BY, 6 | ], 7 | MATERIALIZE _2 [ 8 | CO-ROUTINE _2 [ 9 | SCAN _0, 10 | SEARCH _1 USING INTEGER PRIMARY KEY (rowid=?), 11 | USE TEMP B-TREE FOR GROUP BY, 12 | ], 13 | SCAN _1, 14 | BLOOM FILTER ON _0 (customer=?), 15 | SEARCH _0 USING AUTOMATIC COVERING INDEX (customer=?), 16 | BLOOM FILTER ON _2 (_0=?), 17 | SEARCH _2 USING AUTOMATIC COVERING INDEX (_0=?), 18 | ], 19 | SCAN _1, 20 | SEARCH _0 USING INTEGER PRIMARY KEY (rowid=?), 21 | BLOOM FILTER ON _2 (_0=?), 22 | SEARCH _2 USING AUTOMATIC COVERING INDEX (_0=?) 
LEFT-JOIN, 23 | ] 24 | -------------------------------------------------------------------------------- /tests/chinook/expect/invoice_info.dbg: -------------------------------------------------------------------------------- 1 | [ 2 | InvoiceInfo { 3 | track: "\"?\"", 4 | artist: "Lost", 5 | ivl_id: db_1627, 6 | }, 7 | InvoiceInfo { 8 | track: "#9 Dream", 9 | artist: "U2", 10 | ivl_id: db_535, 11 | }, 12 | InvoiceInfo { 13 | track: "'Round Midnight", 14 | artist: "Miles Davis", 15 | ivl_id: db_1820, 16 | }, 17 | InvoiceInfo { 18 | track: "(Anesthesia) Pulling Teeth", 19 | artist: "Metallica", 20 | ivl_id: db_874, 21 | }, 22 | InvoiceInfo { 23 | track: "(White Man) In Hammersmith Palais", 24 | artist: "The Clash", 25 | ivl_id: db_1005, 26 | }, 27 | InvoiceInfo { 28 | track: "(Wish I Could) Hideaway", 29 | artist: "Creedence Clearwater Revival", 30 | ivl_id: db_696, 31 | }, 32 | InvoiceInfo { 33 | track: "...And Found", 34 | artist: "Lost", 35 | ivl_id: db_1049, 36 | }, 37 | InvoiceInfo { 38 | track: "...And Justice For All", 39 | artist: "Metallica", 40 | ivl_id: db_1464, 41 | }, 42 | InvoiceInfo { 43 | track: "01 - Prowler", 44 | artist: "Iron Maiden", 45 | ivl_id: db_213, 46 | }, 47 | InvoiceInfo { 48 | track: "04 - Running Free", 49 | artist: "Iron Maiden", 50 | ivl_id: db_1931, 51 | }, 52 | InvoiceInfo { 53 | track: "05 - Phantom of the Opera", 54 | artist: "Iron Maiden", 55 | ivl_id: db_786, 56 | }, 57 | InvoiceInfo { 58 | track: "09 - Iron Maiden", 59 | artist: "Iron Maiden", 60 | ivl_id: db_1359, 61 | }, 62 | InvoiceInfo { 63 | track: "1/2 Full", 64 | artist: "Pearl Jam", 65 | ivl_id: db_2082, 66 | }, 67 | InvoiceInfo { 68 | track: "14 Years", 69 | artist: "Guns N' Roses", 70 | ivl_id: db_1920, 71 | }, 72 | InvoiceInfo { 73 | track: "2 A.M.", 74 | artist: "Iron Maiden", 75 | ivl_id: db_234, 76 | }, 77 | InvoiceInfo { 78 | track: "2 Minutes To Midnight", 79 | artist: "Iron Maiden", 80 | ivl_id: db_780, 81 | }, 82 | InvoiceInfo { 83 | track: "2 Minutes To 
Midnight", 84 | artist: "Iron Maiden", 85 | ivl_id: db_1367, 86 | }, 87 | InvoiceInfo { 88 | track: "2 Minutes To Midnight", 89 | artist: "Iron Maiden", 90 | ivl_id: db_1933, 91 | }, 92 | InvoiceInfo { 93 | track: "2 X 4", 94 | artist: "Metallica", 95 | ivl_id: db_301, 96 | }, 97 | InvoiceInfo { 98 | track: "32 Dentes", 99 | artist: "Titãs", 100 | ivl_id: db_461, 101 | }, 102 | ] 103 | -------------------------------------------------------------------------------- /tests/chinook/expect/invoice_info.plan: -------------------------------------------------------------------------------- 1 | QUERY PLAN [ 2 | SCAN _0, 3 | SEARCH _1 USING INTEGER PRIMARY KEY (rowid=?) LEFT-JOIN, 4 | SEARCH _2 USING INTEGER PRIMARY KEY (rowid=?) LEFT-JOIN, 5 | SEARCH _3 USING INTEGER PRIMARY KEY (rowid=?) LEFT-JOIN, 6 | ] 7 | -------------------------------------------------------------------------------- /tests/chinook/expect/list_all_genres.dbg: -------------------------------------------------------------------------------- 1 | [ 2 | "Alternative", 3 | "Alternative & Punk", 4 | "Blues", 5 | "Bossa Nova", 6 | "Classical", 7 | "Comedy", 8 | "Drama", 9 | "Easy Listening", 10 | "Electronica/Dance", 11 | "Heavy Metal", 12 | "Hip Hop/Rap", 13 | "Jazz", 14 | "Latin", 15 | "Metal", 16 | "Opera", 17 | "Pop", 18 | "R&B/Soul", 19 | "Reggae", 20 | "Rock", 21 | "Rock And Roll", 22 | ] 23 | -------------------------------------------------------------------------------- /tests/chinook/expect/list_all_genres.plan: -------------------------------------------------------------------------------- 1 | QUERY PLAN [ 2 | SCAN _0, 3 | ] 4 | -------------------------------------------------------------------------------- /tests/chinook/expect/playlist_track_count.dbg: -------------------------------------------------------------------------------- 1 | [ 2 | PlaylistTrackCount { 3 | playlist: "90’s Music", 4 | track_count: 1477, 5 | }, 6 | PlaylistTrackCount { 7 | playlist: "Audiobooks", 8 | track_count: 0, 9 
| }, 10 | PlaylistTrackCount { 11 | playlist: "Audiobooks", 12 | track_count: 0, 13 | }, 14 | PlaylistTrackCount { 15 | playlist: "Brazilian Music", 16 | track_count: 39, 17 | }, 18 | PlaylistTrackCount { 19 | playlist: "Classical", 20 | track_count: 75, 21 | }, 22 | PlaylistTrackCount { 23 | playlist: "Classical 101 - Deep Cuts", 24 | track_count: 25, 25 | }, 26 | PlaylistTrackCount { 27 | playlist: "Classical 101 - Next Steps", 28 | track_count: 25, 29 | }, 30 | PlaylistTrackCount { 31 | playlist: "Classical 101 - The Basics", 32 | track_count: 25, 33 | }, 34 | PlaylistTrackCount { 35 | playlist: "Grunge", 36 | track_count: 15, 37 | }, 38 | PlaylistTrackCount { 39 | playlist: "Heavy Metal Classic", 40 | track_count: 26, 41 | }, 42 | PlaylistTrackCount { 43 | playlist: "Movies", 44 | track_count: 0, 45 | }, 46 | PlaylistTrackCount { 47 | playlist: "Movies", 48 | track_count: 0, 49 | }, 50 | PlaylistTrackCount { 51 | playlist: "Music", 52 | track_count: 3290, 53 | }, 54 | PlaylistTrackCount { 55 | playlist: "Music", 56 | track_count: 3290, 57 | }, 58 | PlaylistTrackCount { 59 | playlist: "Music Videos", 60 | track_count: 1, 61 | }, 62 | PlaylistTrackCount { 63 | playlist: "On-The-Go 1", 64 | track_count: 1, 65 | }, 66 | PlaylistTrackCount { 67 | playlist: "TV Shows", 68 | track_count: 213, 69 | }, 70 | PlaylistTrackCount { 71 | playlist: "TV Shows", 72 | track_count: 213, 73 | }, 74 | ] 75 | -------------------------------------------------------------------------------- /tests/chinook/expect/playlist_track_count.plan: -------------------------------------------------------------------------------- 1 | QUERY PLAN [ 2 | MATERIALIZE _1 [ 3 | SCAN _0, 4 | SEARCH _1 USING INTEGER PRIMARY KEY (rowid=?), 5 | USE TEMP B-TREE FOR GROUP BY, 6 | ], 7 | SCAN _0, 8 | BLOOM FILTER ON _1 (_0=?), 9 | SEARCH _1 USING AUTOMATIC COVERING INDEX (_0=?) 
LEFT-JOIN, 10 | ] 11 | -------------------------------------------------------------------------------- /tests/chinook/expect/ten_space_tracks.dbg: -------------------------------------------------------------------------------- 1 | [ 2 | "24 Caprices, Op. 1, No. 24, for Solo Violin, in A Minor", 3 | "3 Gymnopédies: No.1 - Lent Et Grave, No.3 - Lent Et Douloureux", 4 | "Blind Curve: Vocal Under A Bloodlight / Passing Strangers / Mylo / Perimeter Walk / Threshold", 5 | "Concerto No. 1 in E Major, RV 269 \"Spring\": I. Allegro", 6 | "Concerto for 2 Violins in D Minor, BWV 1043: I. Vivace", 7 | "Concerto for Cello and Orchestra in E minor, Op. 85: I. Adagio - Moderato", 8 | "Concerto for Piano No. 2 in F Minor, Op. 21: II. Larghetto", 9 | "Concerto for Violin, Strings and Continuo in G Major, Op. 3, No. 9: I. Allegro", 10 | "Homecoming / The Death Of St. Jimmy / East 12th St. / Nobody Likes You / Rock And Roll Girlfriend / We're Coming Home Again", 11 | "I Don't Wanna Be Kissed (By Anyone But You) (Alternate Take)", 12 | "It's The End Of The World As We Know It (And I Feel Fine)", 13 | "Jesus Of Suburbia / City Of The Damned / I Don't Care / Dearly Beloved / Tales Of Another Broken Home", 14 | "Music for the Funeral of Queen Mary: VI. \"Thou Knowest, Lord, the Secrets of Our Hearts\"", 15 | "Nimrod (Adagio) from Variations On an Original Theme, Op. 36 \"Enigma\"", 16 | "Piano Sonata No. 14 in C Sharp Minor, Op. 27, No. 2, \"Moonlight\": I. Adagio sostenuto", 17 | "Pini Di Roma (Pinien Von Rom) \\ I Pini Della Via Appia", 18 | "Posso Perder Minha Mulher, Minha Mãe, Desde Que Eu Tenha O Rock And Roll", 19 | "Quintet for Horn, Violin, 2 Violas, and Cello in E Flat Major, K. 407/386c: III. Allegro", 20 | "String Quartet No. 12 in C Minor, D. 703 \"Quartettsatz\": II. Andante - Allegro assai", 21 | "Suite No. 3 in D, BWV 1068: III. 
Gavotte I & II", 22 | ] 23 | -------------------------------------------------------------------------------- /tests/chinook/expect/ten_space_tracks.plan: -------------------------------------------------------------------------------- 1 | QUERY PLAN [ 2 | SCAN _0, 3 | ] 4 | -------------------------------------------------------------------------------- /tests/chinook/expect/the_artists.dbg: -------------------------------------------------------------------------------- 1 | [ 2 | "The 12 Cellists of The Berlin Philharmonic", 3 | "The Black Crowes", 4 | "The Clash", 5 | "The Cult", 6 | "The Doors", 7 | "The Flaming Lips", 8 | "The King's Singers", 9 | "The Office", 10 | "The Police", 11 | "The Posies", 12 | "The Postal Service", 13 | "The Rolling Stones", 14 | "The Tea Party", 15 | "The Who", 16 | ] 17 | -------------------------------------------------------------------------------- /tests/chinook/expect/the_artists.plan: -------------------------------------------------------------------------------- 1 | QUERY PLAN [ 2 | SEARCH _0 USING COVERING INDEX sqlite_autoindex_artist_1 (name>? 
AND name(file_name: &str, f: impl FnOnce() -> Vec) { 13 | let (mut val, plan) = rust_query::private::get_plan(f); 14 | let mut val = &mut val[..]; 15 | val.sort_by(|a, b| a.partial_cmp(b).unwrap()); 16 | if val.len() > 20 { 17 | val = &mut val[..20]; 18 | } 19 | let path = format!("expect/{file_name}.dbg"); 20 | expect_file![path].assert_debug_eq(&val); 21 | let path = format!("expect/{file_name}.plan"); 22 | expect_file![path].assert_debug_eq(&plan); 23 | } 24 | 25 | #[test] 26 | fn test_queries() { 27 | let mut client = LocalClient::try_new().unwrap(); 28 | let db = migrate(&mut client); 29 | let mut db = client.transaction_mut(&db); 30 | 31 | assert_dbg("invoice_info", || invoice_info(&db)); 32 | assert_dbg("playlist_track_count", || playlist_track_count(&db)); 33 | assert_dbg("avg_album_track_count_for_artist", || { 34 | avg_album_track_count_for_artist(&db) 35 | }); 36 | assert_dbg("count_reporting", || count_reporting(&db)); 37 | assert_dbg("list_all_genres", || list_all_genres(&db)); 38 | assert_dbg("filtered_track", || filtered_track(&db, "Metal", 1000 * 60)); 39 | assert_dbg("genre_statistics", || genre_statistics(&db)); 40 | assert_dbg("customer_spending", || all_customer_spending(&db)); 41 | assert_dbg("the_artists", || get_the_artists(&db)); 42 | assert_dbg("ten_space_tracks", || ten_space_tracks(&db)); 43 | assert_dbg("high_avg_invoice_total", || high_avg_invoice_total(&db)); 44 | let artist = db.query_one(Artist::unique("U2")).unwrap(); 45 | assert_dbg("artist_details", || vec![artist_details(&db, artist)]); 46 | assert_eq!( 47 | customer_spending_by_email(&db, "vstevens@yahoo.com"), 48 | Some(42.62) 49 | ); 50 | assert_eq!(customer_spending_by_email(&db, "asdf"), None); 51 | 52 | free_reference(&db); 53 | 54 | db.insert(Artist { name: "first" }).unwrap(); 55 | let id = db.insert(Artist { name: "second" }).unwrap(); 56 | 57 | let Err(_) = db.update( 58 | id, 59 | Artist { 60 | name: Update::set("first"), 61 | }, 62 | ) else { 63 | panic!() 64 | }; 65 
| db.update( 66 | id, 67 | Artist { 68 | name: Update::set("other"), 69 | }, 70 | ) 71 | .unwrap(); 72 | assert_eq!(db.query_one(id.name()), "other"); 73 | 74 | let mut db = db.downgrade(); 75 | assert!(db.delete(id).unwrap()); 76 | } 77 | 78 | #[derive(Debug, Select, PartialEq, PartialOrd)] 79 | struct InvoiceInfo<'a> { 80 | track: String, 81 | artist: String, 82 | ivl_id: TableRow<'a, InvoiceLine>, 83 | } 84 | 85 | fn invoice_info<'a>(db: &'a Transaction) -> Vec> { 86 | db.query(|rows| { 87 | let ivl = rows.join(InvoiceLine); 88 | rows.into_vec(InvoiceInfoSelect { 89 | track: ivl.track().name(), 90 | artist: ivl.track().album().artist().name(), 91 | ivl_id: ivl, 92 | }) 93 | }) 94 | } 95 | 96 | #[derive(Debug, Select, PartialEq, PartialOrd)] 97 | struct PlaylistTrackCount { 98 | playlist: String, 99 | track_count: i64, 100 | } 101 | 102 | fn playlist_track_count(db: &Transaction) -> Vec { 103 | db.query(|rows| { 104 | let pl = rows.join(Playlist); 105 | let track_count = aggregate(|rows| { 106 | let plt = rows.join(PlaylistTrack); 107 | rows.filter(plt.playlist().eq(&pl)); 108 | rows.count_distinct(plt) 109 | }); 110 | 111 | rows.into_vec(PlaylistTrackCountSelect { 112 | playlist: pl.name(), 113 | track_count, 114 | }) 115 | }) 116 | } 117 | 118 | fn avg_album_track_count_for_artist(db: &Transaction) -> Vec<(String, Option)> { 119 | db.query(|rows| { 120 | let artist = rows.join(Artist); 121 | let avg_track_count = aggregate(|rows| { 122 | let album = rows.join(Album); 123 | rows.filter(album.artist().eq(&artist)); 124 | 125 | let track_count = aggregate(|rows| { 126 | let track = rows.join(Track); 127 | rows.filter(track.album().eq(album)); 128 | 129 | rows.count_distinct(track) 130 | }); 131 | rows.avg(track_count.as_float()) 132 | }); 133 | rows.into_vec((artist.name(), avg_track_count)) 134 | }) 135 | } 136 | 137 | fn count_reporting(db: &Transaction) -> Vec<(String, i64)> { 138 | db.query(|rows| { 139 | let receiver = rows.join(Employee); 140 | let 
report_count = aggregate(|rows| { 141 | let reporter = rows.join(Employee); 142 | // only count employees that report to someone 143 | let reports_to = rows.filter_some(reporter.reports_to()); 144 | rows.filter(reports_to.eq(&receiver)); 145 | rows.count_distinct(reporter) 146 | }); 147 | 148 | rows.into_vec((receiver.last_name(), report_count)) 149 | }) 150 | } 151 | 152 | fn list_all_genres(db: &Transaction) -> Vec { 153 | db.query(|rows| { 154 | let genre = rows.join(Genre); 155 | rows.into_vec(genre.name()) 156 | }) 157 | } 158 | 159 | #[derive(Debug, Select, PartialEq, PartialOrd)] 160 | struct FilteredTrack { 161 | track_name: String, 162 | album_name: String, 163 | stats: Stats, 164 | } 165 | 166 | #[derive(Debug, Select, PartialEq, PartialOrd)] 167 | struct Stats { 168 | milis: i64, 169 | } 170 | 171 | fn filtered_track(db: &Transaction, genre: &str, max_milis: i64) -> Vec { 172 | db.query(|rows| { 173 | let track = rows.join(Track); 174 | rows.filter(track.genre().name().eq(genre)); 175 | rows.filter(track.milliseconds().lt(max_milis)); 176 | rows.into_vec(FilteredTrackSelect { 177 | track_name: track.name(), 178 | album_name: track.album().title(), 179 | stats: StatsSelect { 180 | milis: track.milliseconds(), 181 | }, 182 | }) 183 | }) 184 | } 185 | 186 | #[derive(Debug, Select, PartialEq, PartialOrd)] 187 | struct GenreStats { 188 | genre_name: String, 189 | byte_average: f64, 190 | milis_average: f64, 191 | } 192 | 193 | fn genre_statistics(db: &Transaction) -> Vec { 194 | db.query(|rows| { 195 | let genre = rows.join(Genre); 196 | let (bytes, milis) = aggregate(|rows| { 197 | let track = rows.join(Track); 198 | rows.filter(track.genre().eq(&genre)); 199 | ( 200 | rows.avg(track.bytes().as_float()), 201 | rows.avg(track.milliseconds().as_float()), 202 | ) 203 | }); 204 | rows.into_vec(GenreStatsSelect { 205 | genre_name: genre.name(), 206 | byte_average: bytes.into_select().map(|x| x.unwrap()), 207 | milis_average: milis.into_select().map(|x| 
x.unwrap()), 208 | }) 209 | }) 210 | } 211 | 212 | #[derive(Debug, Select, PartialEq, PartialOrd)] 213 | struct HighInvoiceInfo { 214 | customer_name: String, 215 | avg_spend: f64, 216 | high_avg_spend: f64, 217 | } 218 | 219 | fn high_avg_invoice_total(db: &Transaction) -> Vec { 220 | db.query(|q_rows| { 221 | let customer = q_rows.join(Customer); 222 | aggregate(|rows| { 223 | let invoice = rows.join(Invoice); 224 | rows.filter(invoice.customer().eq(&customer)); 225 | let avg = q_rows.filter_some(rows.avg(invoice.total())); 226 | rows.filter(invoice.total().gt(&avg)); 227 | let high_avg = q_rows.filter_some(rows.avg(invoice.total())); 228 | q_rows.into_vec(HighInvoiceInfoSelect { 229 | customer_name: customer.last_name(), 230 | avg_spend: avg, 231 | high_avg_spend: high_avg, 232 | }) 233 | }) 234 | }) 235 | } 236 | 237 | #[derive(Debug, Select, PartialEq, PartialOrd)] 238 | struct CustomerSpending { 239 | customer_name: String, 240 | total_spending: f64, 241 | } 242 | 243 | fn all_customer_spending(db: &Transaction) -> Vec { 244 | db.query(|rows| { 245 | let customer = rows.join(Customer); 246 | let total = customer_spending(&customer); 247 | 248 | rows.into_vec(CustomerSpendingSelect { 249 | customer_name: customer.last_name(), 250 | total_spending: total, 251 | }) 252 | }) 253 | } 254 | 255 | fn customer_spending<'t>( 256 | customer: impl IntoExpr<'t, Schema, Typ = Customer>, 257 | ) -> Expr<'t, Schema, f64> { 258 | let customer = customer.into_expr(); 259 | aggregate(|rows| { 260 | let invoice = rows.join(Invoice); 261 | rows.filter(invoice.customer().eq(customer)); 262 | rows.sum(invoice.total()) 263 | }) 264 | } 265 | 266 | fn customer_spending_by_email(db: &Transaction, email: &str) -> Option { 267 | db.query_one(optional(|row| { 268 | let customer = row.and(Customer::unique_by_email(email)); 269 | row.then(customer_spending(customer)) 270 | })) 271 | } 272 | 273 | fn free_reference(db: &Transaction) { 274 | let tracks = db.query(|rows| { 275 | let track = 
rows.join(Track); 276 | rows.into_vec(track) 277 | }); 278 | 279 | for track in tracks { 280 | let _name = db.query_one(track.album().artist().name()); 281 | } 282 | } 283 | 284 | #[derive(Select, PartialEq, PartialOrd, Debug)] 285 | struct TrackStats { 286 | avg_len_milis: Option, 287 | max_len_milis: Option, 288 | genre_count: i64, 289 | } 290 | 291 | #[derive(Select, PartialEq, PartialOrd, Debug)] 292 | struct ArtistDetails { 293 | name: String, 294 | album_count: i64, 295 | track_stats: TrackStats, 296 | } 297 | 298 | fn artist_details<'a>(db: &Transaction<'a, Schema>, artist: TableRow<'a, Artist>) -> ArtistDetails { 299 | db.query_one(ArtistDetailsSelect { 300 | name: artist.name(), 301 | album_count: aggregate(|rows| { 302 | let album = rows.join(Album); 303 | rows.filter(album.artist().eq(&artist)); 304 | rows.count_distinct(album) 305 | }), 306 | track_stats: aggregate(|rows| { 307 | let track = rows.join(Track); 308 | rows.filter(track.album().artist().eq(artist)); 309 | TrackStatsSelect { 310 | avg_len_milis: rows.avg(track.milliseconds().as_float()), 311 | max_len_milis: rows.max(track.milliseconds()), 312 | genre_count: rows.count_distinct(track.genre()), 313 | } 314 | }), 315 | }) 316 | } 317 | 318 | fn get_the_artists(db: &Transaction) -> Vec { 319 | db.query(|rows| { 320 | let artist = rows.join(Artist); 321 | rows.filter(artist.name().starts_with("The ")); 322 | rows.into_vec(artist.name()) 323 | }) 324 | } 325 | 326 | fn ten_space_tracks(db: &Transaction) -> Vec { 327 | db.query(|rows| { 328 | let track = rows.join(Track); 329 | rows.filter(track.name().like("% % % % % % % % % % %")); 330 | rows.into_vec(track.name()) 331 | }) 332 | } 333 | -------------------------------------------------------------------------------- /tests/chinook/migrate.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO 2 | album (id, title, artist) 3 | SELECT 4 | AlbumId, Title, ArtistId 5 | FROM 6 | old.Album; 7 | 8 | 
INSERT INTO 9 | artist (id, name) 10 | SELECT 11 | ArtistId, Name 12 | FROM 13 | old.Artist; 14 | 15 | INSERT INTO 16 | customer (id, first_name, last_name, company, address, city, state, country, postal_code, phone, fax, email, support_rep) 17 | SELECT 18 | CustomerId, FirstName, LastName, Company, Address, City, State, Country, PostalCode, Phone, Fax, Email, SupportRepId 19 | FROM 20 | old.Customer; 21 | 22 | INSERT INTO 23 | employee (id, first_name, last_name, title, reports_to, birth_date, hire_date, address, city, state, country, postal_code, phone, fax, email) 24 | SELECT 25 | EmployeeId, LastName, FirstName, Title, ReportsTo, BirthDate, HireDate, Address, City, State, Country, PostalCode, Phone, Fax, Email 26 | FROM 27 | old.Employee; 28 | 29 | INSERT INTO 30 | genre (id, name) 31 | SELECT 32 | GenreId, Name 33 | FROM 34 | old.Genre; 35 | 36 | INSERT INTO 37 | invoice (id, customer, invoice_date, billing_address, billing_city, billing_state, billing_country, billing_postal_code, total) 38 | SELECT 39 | InvoiceId, CustomerId, InvoiceDate, BillingAddress, BillingCity, BillingState, BillingCountry, BillingPostalCode, Total 40 | FROM 41 | old.Invoice; 42 | 43 | INSERT INTO 44 | invoice_line (id, invoice, track, unit_price, quantity) 45 | SELECT 46 | InvoiceLineId, InvoiceId, TrackId, UnitPrice, Quantity 47 | FROM 48 | old.InvoiceLine; 49 | 50 | INSERT INTO 51 | media_type (id, name) 52 | SELECT 53 | MediaTypeId, Name 54 | FROM 55 | old.MediaType; 56 | 57 | INSERT INTO 58 | playlist (id, name) 59 | SELECT 60 | PlaylistId, Name 61 | FROM 62 | old.Playlist; 63 | 64 | INSERT INTO 65 | playlist_track (id, playlist, track) 66 | SELECT 67 | ROWID, PlaylistId, TrackId 68 | FROM 69 | old.PlaylistTrack; 70 | 71 | INSERT INTO 72 | track (id, name, album, media_type, genre, composer, milliseconds, bytes, unit_price) 73 | SELECT 74 | TrackId, Name, AlbumId, MediaTypeId, GenreId, Composer, Milliseconds, Bytes, UnitPrice 75 | FROM 76 | old.Track; 77 | 
-------------------------------------------------------------------------------- /tests/chinook/schema.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, fs}; 2 | 3 | use rust_query::{ 4 | Database, LocalClient, 5 | migration::{Config, Migrated, schema}, 6 | }; 7 | 8 | pub use v2::*; 9 | 10 | #[schema(Schema)] 11 | #[version(0..=2)] 12 | pub mod vN { 13 | pub struct Album { 14 | pub title: String, 15 | pub artist: Artist, 16 | } 17 | pub struct Artist { 18 | #[unique] 19 | pub name: String, 20 | } 21 | pub struct Customer { 22 | #[version(..2)] 23 | pub phone: Option, 24 | #[version(2..)] 25 | pub phone: Option, 26 | pub first_name: String, 27 | pub last_name: String, 28 | pub company: Option, 29 | pub address: String, 30 | pub city: String, 31 | pub state: Option, 32 | pub country: String, 33 | pub postal_code: Option, 34 | pub fax: Option, 35 | #[unique_by_email] 36 | pub email: String, 37 | pub support_rep: Employee, 38 | } 39 | #[version(1..)] 40 | #[unique(employee, artist)] 41 | pub struct ListensTo { 42 | pub employee: Employee, 43 | pub artist: Artist, 44 | } 45 | pub struct Employee { 46 | pub last_name: String, 47 | pub first_name: String, 48 | pub title: Option, 49 | pub reports_to: Option, 50 | pub birth_date: Option, 51 | pub hire_date: Option, 52 | pub address: Option, 53 | pub city: Option, 54 | pub state: Option, 55 | pub country: Option, 56 | pub postal_code: Option, 57 | pub phone: Option, 58 | pub fax: Option, 59 | #[version(..2)] 60 | pub email: String, 61 | } 62 | pub struct Genre { 63 | pub name: String, 64 | } 65 | #[version(1..)] 66 | #[from(Genre)] 67 | pub struct GenreNew { 68 | pub name: String, 69 | #[version(2..)] 70 | pub extra: i64, 71 | } 72 | #[version(1..)] 73 | #[from(Genre)] 74 | pub struct ShortGenre { 75 | pub name: String, 76 | } 77 | pub struct Invoice { 78 | pub customer: Customer, 79 | pub invoice_date: String, 80 | pub billing_address: Option, 81 | pub 
billing_city: Option, 82 | pub billing_state: Option, 83 | pub billing_country: Option, 84 | pub billing_postal_code: Option, 85 | pub total: f64, 86 | } 87 | pub struct InvoiceLine { 88 | #[version(..2)] 89 | pub invoice: Invoice, 90 | #[version(2..)] 91 | pub invoice_new: Invoice, 92 | pub track: Track, 93 | pub unit_price: f64, 94 | pub quantity: i64, 95 | } 96 | #[version(..2)] 97 | pub struct MediaType { 98 | pub name: String, 99 | } 100 | pub struct Playlist { 101 | pub name: String, 102 | } 103 | #[unique(playlist, track)] 104 | pub struct PlaylistTrack { 105 | pub playlist: Playlist, 106 | pub track: Track, 107 | } 108 | pub struct Track { 109 | pub name: String, 110 | pub album: Album, 111 | #[version(..2)] 112 | pub media_type: MediaType, 113 | #[version(2..)] 114 | pub media_type: String, 115 | pub genre: Genre, 116 | pub composer: Option, 117 | #[version(2..)] 118 | pub composer_table: Option, 119 | pub milliseconds: i64, 120 | pub bytes: i64, 121 | pub unit_price: f64, 122 | #[version(2..)] 123 | pub byte_price: f64, 124 | } 125 | #[version(2..)] 126 | pub struct Composer { 127 | pub name: String, 128 | } 129 | } 130 | 131 | pub fn migrate(client: &mut LocalClient) -> Database { 132 | if !fs::exists("Chinook_Sqlite.sqlite").unwrap() { 133 | panic!( 134 | "test data file 'Chinook_Sqlite.sqlite' does not exist. 
135 | Please download it from https://github.com/lerocha/chinook-database/releases/tag/v1.4.5" 136 | ); 137 | } 138 | let config = Config::open_in_memory() 139 | .init_stmt("ATTACH 'Chinook_Sqlite.sqlite' AS old;") 140 | .init_stmt(include_str!("migrate.sql")); 141 | 142 | let genre_extra = HashMap::from([("rock", 10)]); 143 | let m = client.migrator(config).unwrap(); 144 | let m = m.migrate(|txn| v0::migrate::Schema { 145 | genre_new: txn.migrate_ok(|old: v0::Genre!(name)| v0::migrate::GenreNew { name: old.name }), 146 | short_genre: { 147 | let Ok(()) = txn.migrate_optional(|old: v0::Genre!(name)| { 148 | (old.name.len() <= 10).then_some(v0::migrate::GenreNew { name: old.name }) 149 | }); 150 | Migrated::map_fk_err(|| panic!()) 151 | }, 152 | }); 153 | 154 | let m = m.migrate(|txn| v1::migrate::Schema { 155 | customer: txn.migrate_ok(|old: v1::Customer!(phone)| { 156 | v1::migrate::Customer { 157 | // lets do some cursed phone number parsing :D 158 | phone: old.phone.and_then(|x| x.parse().ok()), 159 | } 160 | }), 161 | track: txn.migrate_ok( 162 | |old: v1::Track!(media_type as v1::MediaType!(name), unit_price, bytes)| { 163 | v1::migrate::Track { 164 | media_type: old.media_type.name, 165 | composer_table: None, 166 | byte_price: old.unit_price / old.bytes as f64, 167 | } 168 | }, 169 | ), 170 | genre_new: txn.migrate_ok(|old: v1::GenreNew!(name)| v1::migrate::GenreNew { 171 | extra: genre_extra.get(&*old.name).copied().unwrap_or(0), 172 | }), 173 | employee: txn.migrate_ok(|()| v1::migrate::Employee {}), 174 | invoice_line: txn.migrate_ok(|old: v1::InvoiceLine!(invoice<'_>)| { 175 | v1::migrate::InvoiceLine { 176 | invoice_new: old.invoice, 177 | } 178 | }), 179 | }); 180 | 181 | m.finish().unwrap() 182 | } 183 | 184 | #[cfg(test)] 185 | mod tests { 186 | use expect_test::expect; 187 | 188 | use super::*; 189 | 190 | #[test] 191 | #[cfg(feature = "dev")] 192 | fn backwards_compat() { 193 | use rust_query::migration::hash_schema; 194 | 195 | 
expect!["a57e97b8c243859a"].assert_eq(&hash_schema::()); 196 | expect!["9b14036757e3cc6b"].assert_eq(&hash_schema::()); 197 | } 198 | } 199 | -------------------------------------------------------------------------------- /tests/compile/aggregate_invariant.rs: -------------------------------------------------------------------------------- 1 | use rust_query::{Database, LocalClient, aggregate, migration::schema}; 2 | 3 | #[schema(Schema)] 4 | pub mod vN { 5 | pub struct MyTable { 6 | pub score: i64, 7 | } 8 | } 9 | use v0::*; 10 | 11 | fn test(db: Database) { 12 | let mut client = LocalClient::try_new().unwrap(); 13 | 14 | let txn = client.transaction(&db); 15 | let total = txn.query(|rows| { 16 | let item = rows.join(MyTable); 17 | 18 | txn.query_one(aggregate(|rows| rows.sum(item.score()))) 19 | }); 20 | 21 | println!("{total}"); 22 | } 23 | 24 | fn main() {} 25 | -------------------------------------------------------------------------------- /tests/compile/aggregate_invariant.stderr: -------------------------------------------------------------------------------- 1 | error: lifetime may not live long enough 2 | --> tests/compile/aggregate_invariant.rs:18:40 3 | | 4 | 14 | let txn = client.transaction(&db); 5 | | --- lifetime `'2` appears in the type of `txn` 6 | 15 | let total = txn.query(|rows| { 7 | | ---- has type `&mut Query<'_, '1, v0::Schema>` 8 | ... 
9 | 18 | txn.query_one(aggregate(|rows| rows.sum(item.score()))) 10 | | ^^^^^^^^^^^^^^^^^^^^^^ returning this value requires that `'1` must outlive `'2` 11 | -------------------------------------------------------------------------------- /tests/compile/id_column.rs: -------------------------------------------------------------------------------- 1 | use rust_query::migration::schema; 2 | 3 | #[schema(Schema)] 4 | pub mod vN { 5 | pub struct MyTable { 6 | pub id: i64, 7 | } 8 | } 9 | 10 | fn main() {} 11 | -------------------------------------------------------------------------------- /tests/compile/id_column.stderr: -------------------------------------------------------------------------------- 1 | error: The `id` column is reserved to be used by rust-query internally 2 | --> tests/compile/id_column.rs:6:13 3 | | 4 | 6 | pub id: i64, 5 | | ^^ 6 | -------------------------------------------------------------------------------- /tests/compile/no_reference.rs: -------------------------------------------------------------------------------- 1 | use rust_query::migration::schema; 2 | 3 | #[schema(Schema)] 4 | pub mod vN { 5 | #[no_reference] 6 | pub struct SomeTable { 7 | pub data: String, 8 | } 9 | pub struct NotAllowed { 10 | pub marker: SomeTable, 11 | } 12 | } 13 | 14 | fn main() {} 15 | -------------------------------------------------------------------------------- /tests/compile/no_reference.stderr: -------------------------------------------------------------------------------- 1 | error[E0277]: Can not use `SomeTable` as a column type in schema `v0::Schema` 2 | --> tests/compile/no_reference.rs:10:21 3 | | 4 | 10 | pub marker: SomeTable, 5 | | ^^^^^^^^^ the trait `rust_query::hash::SchemaType` is not implemented for `SomeTable` 6 | | 7 | = note: Table names can be used as schema column types as long as they are not #[no_reference] 8 | = help: the following other types implement trait `rust_query::hash::SchemaType`: 9 | Option 10 | String 11 | Vec 12 | f64 13 
| i64 14 | note: required by a bound in `TypBuilder::::col` 15 | --> src/hash.rs 16 | | 17 | | pub fn col>(&mut self, name: &'static str) { 18 | | ^^^^^^^^^^^^^ required by this bound in `TypBuilder::::col` 19 | -------------------------------------------------------------------------------- /tests/compile/optional_invariant.rs: -------------------------------------------------------------------------------- 1 | use rust_query::{Database, LocalClient, migration::schema, optional}; 2 | 3 | #[schema(Schema)] 4 | pub mod vN { 5 | pub struct MyTable { 6 | pub score: i64, 7 | } 8 | } 9 | use v0::*; 10 | 11 | fn test(db: Database) { 12 | let mut client = LocalClient::try_new().unwrap(); 13 | 14 | let txn = client.transaction(&db); 15 | let score = txn.query(|rows| { 16 | let item = rows.join(MyTable); 17 | 18 | txn.query_one(optional(|row| row.then(item.score()))) 19 | }); 20 | 21 | println!("{score:?}"); 22 | } 23 | 24 | fn main() {} 25 | -------------------------------------------------------------------------------- /tests/compile/optional_invariant.stderr: -------------------------------------------------------------------------------- 1 | error: lifetime may not live long enough 2 | --> tests/compile/optional_invariant.rs:18:38 3 | | 4 | 14 | let txn = client.transaction(&db); 5 | | --- lifetime `'2` appears in the type of `txn` 6 | 15 | let score = txn.query(|rows| { 7 | | ---- has type `&mut Query<'_, '1, v0::Schema>` 8 | ... 
9 | 18 | txn.query_one(optional(|row| row.then(item.score()))) 10 | | ^^^^^^^^^^^^^^^^^^^^^^ returning this value requires that `'1` must outlive `'2` 11 | -------------------------------------------------------------------------------- /tests/compile/schema_does_not_exist.rs: -------------------------------------------------------------------------------- 1 | use rust_query::migration::schema; 2 | 3 | #[schema(Schema)] 4 | #[version(1..=1)] 5 | pub mod vN { 6 | pub struct Foo; 7 | #[version(1..)] 8 | #[from(Foo)] 9 | pub struct FooNext; 10 | } 11 | 12 | pub fn main() {} 13 | -------------------------------------------------------------------------------- /tests/compile/schema_does_not_exist.stderr: -------------------------------------------------------------------------------- 1 | error: the previous schema does not exists 2 | --> tests/compile/schema_does_not_exist.rs:8:12 3 | | 4 | 8 | #[from(Foo)] 5 | | ^^^ 6 | -------------------------------------------------------------------------------- /tests/compile/schema_types.rs: -------------------------------------------------------------------------------- 1 | use rust_query::migration::schema; 2 | 3 | #[schema(Schema)] 4 | pub mod vN { 5 | #[unique(optional)] 6 | pub struct Table { 7 | pub my_bool: bool, 8 | pub nested: Option>, 9 | pub optional: Option, 10 | } 11 | } 12 | 13 | fn main() {} 14 | -------------------------------------------------------------------------------- /tests/compile/schema_types.stderr: -------------------------------------------------------------------------------- 1 | error[E0277]: Can not use `bool` as a column type in schema `v0::Schema` 2 | --> tests/compile/schema_types.rs:7:22 3 | | 4 | 7 | pub my_bool: bool, 5 | | ^^^^ the trait `rust_query::hash::SchemaType` is not implemented for `bool` 6 | | 7 | = note: Table names can be used as schema column types as long as they are not #[no_reference] 8 | = help: the following other types implement trait `rust_query::hash::SchemaType`: 9 | 
Option 10 | String 11 | Vec 12 | f64 13 | i64 14 | note: required by a bound in `TypBuilder::::col` 15 | --> src/hash.rs 16 | | 17 | | pub fn col>(&mut self, name: &'static str) { 18 | | ^^^^^^^^^^^^^ required by this bound in `TypBuilder::::col` 19 | 20 | error[E0277]: Can not use `Option>` as a column type in schema `v0::Schema` 21 | --> tests/compile/schema_types.rs:8:21 22 | | 23 | 3 | #[schema(Schema)] 24 | | ----------------- required by a bound introduced by this call 25 | ... 26 | 8 | pub nested: Option>, 27 | | ^^^^^^ the trait `rust_query::hash::SchemaType` is not implemented for `Option>` 28 | | 29 | = note: Table names can be used as schema column types as long as they are not #[no_reference] 30 | = help: the trait `rust_query::hash::SchemaType` is implemented for `Option` 31 | note: required by a bound in `TypBuilder::::col` 32 | --> src/hash.rs 33 | | 34 | | pub fn col>(&mut self, name: &'static str) { 35 | | ^^^^^^^^^^^^^ required by this bound in `TypBuilder::::col` 36 | 37 | error[E0277]: Columns with type `Option` can not be checked for equality 38 | --> tests/compile/schema_types.rs:9:23 39 | | 40 | 9 | pub optional: Option, 41 | | ^^^^^^ the trait `rust_query::value::EqTyp` is not implemented for `Option` 42 | | 43 | = note: `EqTyp` is also implemented for all table types 44 | = help: the following other types implement trait `rust_query::value::EqTyp`: 45 | String 46 | Vec 47 | bool 48 | f64 49 | i64 50 | note: required by a bound in `TypBuilder::::check_unique_compatible` 51 | --> src/hash.rs 52 | | 53 | | pub fn check_unique_compatible(&mut self) {} 54 | | ^^^^^ required by this bound in `TypBuilder::::check_unique_compatible` 55 | -------------------------------------------------------------------------------- /tests/compile/table_does_not_exist.rs: -------------------------------------------------------------------------------- 1 | use rust_query::migration::schema; 2 | 3 | #[schema(Schema)] 4 | #[version(0..=1)] 5 | pub mod vN { 6 | 
#[version(1..)] 7 | pub struct FooNext; 8 | pub struct Bar { 9 | pub evolving: FooNext, 10 | } 11 | } 12 | 13 | pub fn main() {} 14 | -------------------------------------------------------------------------------- /tests/compile/table_does_not_exist.stderr: -------------------------------------------------------------------------------- 1 | error[E0271]: type mismatch resolving `::Schema == Schema` 2 | --> tests/compile/table_does_not_exist.rs:9:23 3 | | 4 | 3 | #[schema(Schema)] 5 | | ----------------- required by a bound introduced by this call 6 | ... 7 | 9 | pub evolving: FooNext, 8 | | ^^^^^^^ expected `v0::Schema`, found `v1::Schema` 9 | | 10 | = note: `v1::Schema` and `v0::Schema` have similar names, but are actually distinct types 11 | note: `v1::Schema` is defined in module `crate::v1` of the current crate 12 | --> tests/compile/table_does_not_exist.rs:3:1 13 | | 14 | 3 | #[schema(Schema)] 15 | | ^^^^^^^^^^^^^^^ 16 | note: `v0::Schema` is defined in module `crate::v0` of the current crate 17 | --> tests/compile/table_does_not_exist.rs:3:1 18 | | 19 | 3 | #[schema(Schema)] 20 | | ^^^^^^^^^^^^^^^ 21 | note: required by a bound in `TypBuilder::::col` 22 | --> src/hash.rs 23 | | 24 | | pub fn col>(&mut self, name: &'static str) { 25 | | ^^^^^^^^^^^^^ required by this bound in `TypBuilder::::col` 26 | = note: this error originates in the attribute macro `schema` (in Nightly builds, run with -Z macro-backtrace for more info) 27 | -------------------------------------------------------------------------------- /tests/compile/transaction_invariant.rs: -------------------------------------------------------------------------------- 1 | use rust_query::{Database, LocalClient, migration::schema}; 2 | 3 | #[schema(Schema)] 4 | pub mod vN { 5 | pub struct MyTable { 6 | pub name: String, 7 | } 8 | } 9 | use v0::*; 10 | 11 | fn test(db: Database) { 12 | let mut client = LocalClient::try_new().unwrap(); 13 | 14 | let txn = client.transaction(&db); 15 | let name = 
txn.query(|rows| { 16 | let item = rows.join(MyTable); 17 | 18 | txn.query_one(item.name()) 19 | }); 20 | 21 | println!("{name}"); 22 | } 23 | 24 | fn main() {} 25 | -------------------------------------------------------------------------------- /tests/compile/transaction_invariant.stderr: -------------------------------------------------------------------------------- 1 | error[E0521]: borrowed data escapes outside of closure 2 | --> tests/compile/transaction_invariant.rs:18:9 3 | | 4 | 14 | let txn = client.transaction(&db); 5 | | --- `txn` declared here, outside of the closure body 6 | 15 | let name = txn.query(|rows| { 7 | | ---- `rows` is a reference that is only valid in the closure body 8 | ... 9 | 18 | txn.query_one(item.name()) 10 | | ^^^^^^^^^^^^^^^^^^^^^^^^^^ `rows` escapes the closure body here 11 | | 12 | = note: requirement occurs because of the type `Transaction<'_, v0::Schema>`, which makes the generic argument `'_` invariant 13 | = note: the struct `Transaction<'t, S>` is invariant over the parameter `'t` 14 | = help: see for more information about variance 15 | -------------------------------------------------------------------------------- /tests/compile/unique_column_does_not_exists.rs: -------------------------------------------------------------------------------- 1 | use rust_query::migration::schema; 2 | 3 | #[schema(Schema)] 4 | pub mod vN { 5 | #[unique(bar)] 6 | pub struct Foo; 7 | pub struct Bar { 8 | pub foo: Foo, 9 | } 10 | } 11 | 12 | pub fn main() {} 13 | -------------------------------------------------------------------------------- /tests/compile/unique_column_does_not_exists.stderr: -------------------------------------------------------------------------------- 1 | error: Expected a column to exists for every name in the unique constraint. 
2 | --> tests/compile/unique_column_does_not_exists.rs:5:14 3 | | 4 | 5 | #[unique(bar)] 5 | | ^^^ 6 | -------------------------------------------------------------------------------- /tests/compile/use_after_free.rs: -------------------------------------------------------------------------------- 1 | use rust_query::{Database, IntoExpr, LocalClient, migration::schema}; 2 | 3 | #[schema(Schema)] 4 | pub mod vN { 5 | pub struct MyTable { 6 | pub name: String, 7 | } 8 | } 9 | use v0::*; 10 | 11 | fn test(db: Database) { 12 | let mut client = LocalClient::try_new().unwrap(); 13 | 14 | let txn = client.transaction(&db); 15 | let items = txn.query(|rows| { 16 | let item = rows.join(MyTable); 17 | rows.into_vec(item) 18 | }); 19 | let items: Vec<_> = items.into_iter().map(|x| x.into_expr()).collect(); 20 | drop(txn); 21 | 22 | let txn = client.transaction(&db); 23 | for item in items { 24 | let name = txn.query_one(item.name()); 25 | println!("{name}") 26 | } 27 | } 28 | 29 | fn main() {} 30 | -------------------------------------------------------------------------------- /tests/compile/use_after_free.stderr: -------------------------------------------------------------------------------- 1 | error[E0499]: cannot borrow `client` as mutable more than once at a time 2 | --> tests/compile/use_after_free.rs:22:15 3 | | 4 | 14 | let txn = client.transaction(&db); 5 | | ------ first mutable borrow occurs here 6 | ... 
7 | 22 | let txn = client.transaction(&db); 8 | | ^^^^^^ second mutable borrow occurs here 9 | 23 | for item in items { 10 | | ----- first borrow later used here 11 | -------------------------------------------------------------------------------- /tests/tpc_c/delivery.rs: -------------------------------------------------------------------------------- 1 | use std::time::UNIX_EPOCH; 2 | 3 | use rust_query::{TransactionMut, Update, aggregate, optional}; 4 | 5 | use super::*; 6 | 7 | pub fn random_delivery<'a>(txn: TransactionMut<'a, Schema>, warehouse: TableRow<'a, Warehouse>) { 8 | delivery(txn, generate_input(warehouse)); 9 | } 10 | 11 | fn generate_input<'a>(warehouse: TableRow<'a, Warehouse>) -> DeliveryInput<'a> { 12 | let mut rng = rand::rng(); 13 | 14 | DeliveryInput { 15 | warehouse, 16 | carrier_id: rng.random_range(1..=10), 17 | delivery_d: UNIX_EPOCH.elapsed().unwrap().as_millis() as i64, 18 | } 19 | } 20 | 21 | struct DeliveryInput<'a> { 22 | warehouse: TableRow<'a, Warehouse>, 23 | carrier_id: i64, 24 | delivery_d: i64, 25 | } 26 | 27 | fn delivery<'a>(mut txn: TransactionMut<'a, Schema>, input: DeliveryInput<'a>) { 28 | let mut new_orders = vec![]; 29 | for district_num in 0..10 { 30 | let district = txn 31 | .query_one(District::unique(input.warehouse, district_num)) 32 | .unwrap(); 33 | 34 | let new_order = txn.query_one(optional(|row| { 35 | aggregate(|rows| { 36 | let new_order = rows.join(NewOrder); 37 | let order = new_order.order(); 38 | let customer = order.customer(); 39 | rows.filter(customer.district().eq(district)); 40 | 41 | let order_num = row.and(rows.min(order.number())); 42 | rows.filter(order.number().eq(&order_num)); 43 | 44 | let customer_num = row.and(rows.min(customer.number())); 45 | let customer = row.and(Customer::unique(district, customer_num)); 46 | let order = row.and(Order::unique(customer, order_num)); 47 | let new_order = row.and(NewOrder::unique(order)); 48 | row.then(new_order) 49 | }) 50 | })); 51 | let Some(new_order) 
= new_order else { 52 | continue; 53 | }; 54 | 55 | new_orders.push(new_order); 56 | 57 | txn.update_ok( 58 | new_order.order(), 59 | Order { 60 | carrier_id: Update::set(Some(input.carrier_id)), 61 | ..Default::default() 62 | }, 63 | ); 64 | 65 | let mut total_amount = 0; 66 | for (line, amount) in txn.query(|rows| { 67 | let ol = rows.join(OrderLine); 68 | rows.filter(ol.order().eq(new_order.order())); 69 | rows.into_vec((&ol, ol.amount())) 70 | }) { 71 | total_amount += amount; 72 | txn.update_ok( 73 | line, 74 | OrderLine { 75 | delivery_d: Update::set(Some(input.delivery_d)), 76 | ..Default::default() 77 | }, 78 | ); 79 | } 80 | 81 | txn.update_ok( 82 | new_order.order().customer(), 83 | Customer { 84 | balance: Update::add(total_amount), 85 | delivery_cnt: Update::add(1), 86 | ..Default::default() 87 | }, 88 | ); 89 | } 90 | let mut txn = txn.downgrade(); 91 | for new_order in new_orders { 92 | assert!(txn.delete_ok(new_order)); 93 | } 94 | 95 | txn.commit(); 96 | } 97 | -------------------------------------------------------------------------------- /tests/tpc_c/main.rs: -------------------------------------------------------------------------------- 1 | use std::time::SystemTime; 2 | 3 | use rand::{Rng, rngs::ThreadRng}; 4 | use rust_query::{ 5 | Database, IntoExpr, LocalClient, Select, TableRow, Transaction, 6 | migration::{Config, schema}, 7 | }; 8 | 9 | mod delivery; 10 | mod new_order; 11 | mod order_status; 12 | mod payment; 13 | 14 | #[schema(Schema)] 15 | pub mod vN { 16 | #[unique(number)] 17 | pub struct Warehouse { 18 | pub number: i64, 19 | pub name: String, 20 | pub street_1: String, 21 | pub street_2: String, 22 | pub city: String, 23 | pub state: String, 24 | pub zip: String, 25 | pub tax: f64, 26 | pub ytd: i64, 27 | } 28 | #[unique(warehouse, number)] 29 | pub struct District { 30 | pub warehouse: Warehouse, 31 | pub number: i64, 32 | pub name: String, 33 | pub street_1: String, 34 | pub street_2: String, 35 | pub city: String, 36 | pub 
state: String, 37 | pub zip: String, 38 | pub tax: f64, 39 | pub ytd: i64, 40 | pub next_order: i64, // next available order id 41 | } 42 | #[unique(district, number)] 43 | pub struct Customer { 44 | pub district: District, 45 | pub number: i64, 46 | pub first: String, 47 | pub middle: String, 48 | pub last: String, 49 | pub street_1: String, 50 | pub street_2: String, 51 | pub city: String, 52 | pub state: String, 53 | pub zip: String, 54 | pub phone: String, 55 | pub since: i64, 56 | pub credit: String, 57 | pub credit_lim: i64, 58 | pub discount: f64, 59 | pub balance: i64, 60 | pub ytd_payment: i64, 61 | pub payment_cnt: i64, 62 | pub delivery_cnt: i64, 63 | pub data: String, 64 | } 65 | pub struct History { 66 | pub customer: Customer, 67 | pub district: District, 68 | pub date: i64, 69 | pub amount: i64, 70 | pub data: String, 71 | } 72 | #[no_reference] 73 | pub struct NewOrder { 74 | #[unique] 75 | pub order: Order, 76 | } 77 | #[unique(customer, number)] 78 | pub struct Order { 79 | pub customer: Customer, 80 | pub number: i64, 81 | pub entry_d: i64, 82 | pub carrier_id: Option, 83 | pub order_line_cnt: i64, 84 | pub all_local: i64, 85 | } 86 | #[unique(order, number)] 87 | pub struct OrderLine { 88 | pub order: Order, 89 | pub number: i64, 90 | pub stock: Stock, 91 | pub delivery_d: Option, 92 | pub quantity: i64, 93 | pub amount: i64, // total cost of this line 94 | pub dist_info: String, 95 | } 96 | #[unique(number)] 97 | pub struct Item { 98 | pub number: i64, 99 | pub image_id: i64, 100 | pub name: String, 101 | pub price: i64, 102 | pub data: String, 103 | } 104 | #[unique(warehouse, item)] 105 | pub struct Stock { 106 | pub warehouse: Warehouse, 107 | pub item: Item, 108 | pub quantity: i64, 109 | pub dist_00: String, 110 | pub dist_01: String, 111 | pub dist_02: String, 112 | pub dist_03: String, 113 | pub dist_04: String, 114 | pub dist_05: String, 115 | pub dist_06: String, 116 | pub dist_07: String, 117 | pub dist_08: String, 118 | pub dist_09: 
String, 119 | pub dist_10: String, 120 | pub ytd: i64, 121 | pub order_cnt: i64, 122 | pub remote_cnt: i64, 123 | pub data: String, 124 | } 125 | } 126 | use v0::*; 127 | 128 | fn main() { 129 | let mut client = LocalClient::try_new().unwrap(); 130 | let db: Database = client 131 | .migrator(Config::open_in_memory()) 132 | .expect("database should not be too old") 133 | .finish() 134 | .expect("database should not be too new"); 135 | 136 | let mut txn = client.transaction_mut(&db); 137 | txn.insert(Warehouse { 138 | number: 0, 139 | name: "test", 140 | street_1: "", 141 | street_2: "", 142 | city: "", 143 | state: "", 144 | zip: "", 145 | tax: 0.5, 146 | ytd: 100, 147 | }) 148 | .unwrap(); 149 | txn.commit(); 150 | 151 | let txn = client.transaction_mut(&db); 152 | let warehouse = get_primary_warehouse(&txn); 153 | new_order::random_new_order(txn, warehouse); 154 | 155 | let txn = client.transaction_mut(&db); 156 | let warehouse = get_primary_warehouse(&txn); 157 | delivery::random_delivery(txn, warehouse); 158 | 159 | let txn = client.transaction_mut(&db); 160 | let warehouse = get_primary_warehouse(&txn); 161 | payment::random_payment(txn, warehouse); 162 | 163 | let txn = client.transaction(&db); 164 | let warehouse = get_primary_warehouse(&txn); 165 | order_status::random_order_status(&txn, warehouse); 166 | } 167 | 168 | fn get_primary_warehouse<'a>(txn: &Transaction<'a, Schema>) -> TableRow<'a, Warehouse> { 169 | txn.query_one(Warehouse::unique(0)) 170 | .expect("warehouse should exist") 171 | } 172 | 173 | trait NuRand { 174 | fn nurand(&mut self, a: i64, x: i64, y: i64) -> i64; 175 | } 176 | impl NuRand for ThreadRng { 177 | fn nurand(&mut self, a: i64, x: i64, y: i64) -> i64 { 178 | // TODO: select C at runtime? 
179 | const C: i64 = 5; 180 | (((self.random_range(0..=a) | self.random_range(x..=y)) + C) % (y - x + 1)) + x 181 | } 182 | } 183 | 184 | /// `num` must be in range `0..=999` 185 | pub fn random_to_last_name(num: i64) -> String { 186 | let mut out = String::new(); 187 | for position in [100, 10, 1] { 188 | let digit = (num / position) % 10; 189 | out.push_str( 190 | [ 191 | "BAR", "OUGHT", "ABLE", "PRI", "PRES", "ESE", "ANTI", "CALLY", "ATION", "EING", 192 | ][digit as usize], 193 | ); 194 | } 195 | out 196 | } 197 | 198 | enum CustomerIdent<'a> { 199 | Number(TableRow<'a, Customer>), 200 | Name(TableRow<'a, District>, String), 201 | } 202 | 203 | impl<'a> CustomerIdent<'a> { 204 | pub fn lookup_customer(self, txn: &Transaction<'a, Schema>) -> TableRow<'a, Customer> { 205 | match self { 206 | CustomerIdent::Number(customer) => customer, 207 | CustomerIdent::Name(district, last_name) => { 208 | let mut customers = txn.query(|rows| { 209 | let customer = rows.join(Customer); 210 | rows.filter(customer.district().eq(district)); 211 | rows.filter(customer.last().eq(last_name)); 212 | rows.into_vec((customer.first(), customer)) 213 | }); 214 | customers.sort_by(|a, b| a.0.cmp(&b.0)); 215 | 216 | let count = customers.len(); 217 | let id = count.div_ceil(2) - 1; 218 | customers.swap_remove(id).1 219 | } 220 | } 221 | } 222 | } 223 | 224 | fn customer_ident<'a>( 225 | txn: &Transaction<'a, Schema>, 226 | rng: &mut ThreadRng, 227 | customer_district: TableRow<'a, District>, 228 | ) -> CustomerIdent<'a> { 229 | if rng.random_ratio(60, 100) { 230 | CustomerIdent::Name( 231 | customer_district, 232 | random_to_last_name(rng.nurand(255, 0, 999)), 233 | ) 234 | } else { 235 | let customer = txn 236 | .query_one(Customer::unique( 237 | customer_district, 238 | rng.nurand(1023, 1, 3000), 239 | )) 240 | .unwrap(); 241 | CustomerIdent::Number(customer) 242 | } 243 | } 244 | 245 | impl<'column> IntoExpr<'column, Schema> for SystemTime { 246 | type Typ = i64; 247 | 248 | fn 
into_expr(self) -> rust_query::Expr<'column, Schema, Self::Typ> { 249 | let millis = self 250 | .duration_since(SystemTime::UNIX_EPOCH) 251 | .unwrap() 252 | .as_millis(); 253 | (millis as i64).into_expr() 254 | } 255 | } 256 | -------------------------------------------------------------------------------- /tests/tpc_c/new_order.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use rust_query::{FromExpr, TableRow, Transaction, TransactionMut, Update, optional}; 3 | use std::time::SystemTime; 4 | 5 | pub fn random_new_order<'a>( 6 | txn: TransactionMut<'a, Schema>, 7 | warehouse: TableRow<'a, Warehouse>, 8 | ) -> OutputData<'a> { 9 | let input = generate_input(&txn, warehouse); 10 | new_order(txn, input) 11 | } 12 | 13 | fn generate_input<'a>( 14 | txn: &Transaction<'a, Schema>, 15 | warehouse: TableRow<'a, Warehouse>, 16 | ) -> NewOrderInput<'a> { 17 | let mut rng = rand::rng(); 18 | let district = txn 19 | .query_one(District::unique(warehouse, rng.random_range(1..=10))) 20 | .unwrap(); 21 | let customer = txn 22 | .query_one(Customer::unique(district, rng.nurand(1023, 1, 3000))) 23 | .unwrap(); 24 | let item_count = rng.random_range(5..=15); 25 | let rbk = rng.random_ratio(1, 100); 26 | 27 | let mut items = vec![]; 28 | for i in 1..=item_count { 29 | let mut item_number = rng.nurand(8191, 1, 100000); 30 | if rbk && i == item_count { 31 | // emulate input error 32 | item_number = -1 33 | }; 34 | 35 | items.push(ItemInput { 36 | item_number, 37 | // TODO: support remote warehouses in case there are multiple 38 | supplying_warehouse: warehouse, 39 | quantity: rng.random_range(1..=10), 40 | }); 41 | } 42 | 43 | NewOrderInput { 44 | customer, 45 | items, 46 | entry_date: SystemTime::now(), 47 | } 48 | } 49 | 50 | struct NewOrderInput<'a> { 51 | customer: TableRow<'a, Customer>, 52 | items: Vec>, 53 | entry_date: SystemTime, 54 | } 55 | 56 | struct ItemInput<'a> { 57 | item_number: i64, 58 | supplying_warehouse: 
TableRow<'a, Warehouse>, 59 | quantity: i64, 60 | } 61 | 62 | fn new_order<'a>(mut txn: TransactionMut<'a, Schema>, input: NewOrderInput<'a>) -> OutputData<'a> { 63 | let district = txn.query_one(input.customer.district()); 64 | 65 | let district_info: District!(warehouse<'_>, number, tax, next_order) = 66 | txn.query_one(FromExpr::from_expr(district)); 67 | 68 | let warehouse_tax = txn.query_one(district.warehouse().tax()); 69 | 70 | txn.update_ok( 71 | district, 72 | District { 73 | next_order: Update::add(1), 74 | ..Default::default() 75 | }, 76 | ); 77 | 78 | let customer_info: Customer!(discount, last, credit) = 79 | txn.query_one(FromExpr::from_expr(input.customer)); 80 | 81 | let local = input 82 | .items 83 | .iter() 84 | .all(|item| item.supplying_warehouse == district_info.warehouse); 85 | 86 | let order = txn 87 | .insert(Order { 88 | number: district_info.next_order, 89 | customer: input.customer, 90 | entry_d: input.entry_date, 91 | carrier_id: None::, 92 | all_local: local as i64, 93 | order_line_cnt: input.items.len() as i64, 94 | }) 95 | .unwrap(); 96 | txn.insert(NewOrder { order }).unwrap(); 97 | 98 | let mut output_order_lines = vec![]; 99 | 100 | let mut input_valid = true; 101 | 102 | for ( 103 | number, 104 | ItemInput { 105 | item_number, 106 | supplying_warehouse, 107 | quantity, 108 | }, 109 | ) in input.items.into_iter().enumerate() 110 | { 111 | let Some((item, item_info)): Option<(_, Item!(price, name, data))> = 112 | txn.query_one(optional(|row| { 113 | let item = row.and(Item::unique(item_number)); 114 | row.then((&item, FromExpr::from_expr(&item))) 115 | })) 116 | else { 117 | input_valid = false; 118 | continue; 119 | }; 120 | 121 | let stock = Stock::unique(supplying_warehouse, item); 122 | let stock = txn.query_one(stock).unwrap(); 123 | 124 | #[derive(Select)] 125 | struct StockInfo { 126 | quantity: i64, 127 | dist_xx: String, 128 | data: String, 129 | } 130 | let stock_info = txn.query_one(StockInfoSelect { 131 | quantity: 
stock.quantity(), 132 | dist_xx: &[ 133 | stock.dist_00(), 134 | stock.dist_01(), 135 | stock.dist_02(), 136 | stock.dist_03(), 137 | stock.dist_04(), 138 | stock.dist_05(), 139 | stock.dist_06(), 140 | stock.dist_07(), 141 | stock.dist_08(), 142 | stock.dist_09(), 143 | stock.dist_10(), 144 | ][district_info.number as usize], 145 | data: stock.data(), 146 | }); 147 | 148 | let new_quantity = if stock_info.quantity >= quantity + 10 { 149 | stock_info.quantity - quantity 150 | } else { 151 | stock_info.quantity - quantity + 91 152 | }; 153 | 154 | let is_remote = supplying_warehouse != district_info.warehouse; 155 | txn.update_ok( 156 | stock, 157 | Stock { 158 | ytd: Update::add(quantity), 159 | quantity: Update::set(new_quantity), 160 | order_cnt: Update::add(1), 161 | remote_cnt: Update::add(is_remote as i64), 162 | ..Default::default() 163 | }, 164 | ); 165 | 166 | let amount = quantity * item_info.price; 167 | let brand_generic = 168 | if item_info.data.contains("ORIGINAL") && stock_info.data.contains("ORIGINAL") { 169 | "B" 170 | } else { 171 | "G" 172 | }; 173 | 174 | txn.insert(OrderLine { 175 | order, 176 | number: number as i64, 177 | stock, 178 | delivery_d: None::, 179 | quantity, 180 | amount, 181 | dist_info: stock_info.dist_xx, 182 | }) 183 | .unwrap(); 184 | 185 | output_order_lines.push(OutputLine { 186 | supplying_warehouse, 187 | item, 188 | item_name: item_info.name, 189 | quantity, 190 | stock_quantity: stock_info.quantity, 191 | brand_generic, 192 | item_price: item_info.price, 193 | amount, 194 | }); 195 | } 196 | 197 | let total_amount = output_order_lines.iter().map(|x| x.amount).sum::() as f64 198 | * (1. - customer_info.discount) 199 | * (1. 
+ warehouse_tax + district_info.tax); 200 | 201 | if input_valid { 202 | txn.commit(); 203 | } else { 204 | // rollback if there are input errors 205 | drop(txn); 206 | } 207 | 208 | OutputData { 209 | warehouse: district_info.warehouse, 210 | district, 211 | customer: input.customer, 212 | order, 213 | customer_last_name: customer_info.last, 214 | customer_credit: customer_info.credit, 215 | customer_discount: customer_info.discount, 216 | warehouse_tax, 217 | district_tax: district_info.tax, 218 | order_entry_date: input.entry_date, 219 | total_amount: total_amount as i64, 220 | order_lines: output_order_lines, 221 | input_valid, 222 | } 223 | } 224 | 225 | #[expect(unused)] 226 | pub struct OutputData<'t> { 227 | warehouse: TableRow<'t, Warehouse>, 228 | district: TableRow<'t, District>, 229 | customer: TableRow<'t, Customer>, 230 | order: TableRow<'t, Order>, 231 | customer_last_name: String, 232 | customer_credit: String, 233 | customer_discount: f64, 234 | warehouse_tax: f64, 235 | district_tax: f64, 236 | order_entry_date: SystemTime, 237 | total_amount: i64, 238 | // order_line_count: i64, 239 | order_lines: Vec>, 240 | input_valid: bool, 241 | } 242 | 243 | #[expect(unused)] 244 | pub struct OutputLine<'t> { 245 | supplying_warehouse: TableRow<'t, Warehouse>, 246 | item: TableRow<'t, Item>, 247 | item_name: String, 248 | quantity: i64, 249 | stock_quantity: i64, 250 | brand_generic: &'static str, 251 | item_price: i64, 252 | amount: i64, 253 | } 254 | -------------------------------------------------------------------------------- /tests/tpc_c/order_status.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use rust_query::{FromExpr, TableRow, Transaction, aggregate, optional}; 3 | 4 | pub fn random_order_status<'a>( 5 | txn: &Transaction<'a, Schema>, 6 | warehouse: TableRow<'a, Warehouse>, 7 | ) -> OrderStatus<'a> { 8 | let input = generate_input(txn, warehouse); 9 | order_status(txn, input) 10 | } 11 
| 12 | fn generate_input<'a>( 13 | txn: &Transaction<'a, Schema>, 14 | warehouse: TableRow<'a, Warehouse>, 15 | ) -> CustomerIdent<'a> { 16 | let mut rng = rand::rng(); 17 | let district = txn 18 | .query_one(District::unique(warehouse, rng.random_range(1..=10))) 19 | .unwrap(); 20 | 21 | customer_ident(txn, &mut rng, district) 22 | } 23 | 24 | fn order_status<'a>(txn: &Transaction<'a, Schema>, input: CustomerIdent<'a>) -> OrderStatus<'a> { 25 | let customer = input.lookup_customer(txn); 26 | let last_order = txn 27 | .query_one(optional(|row| { 28 | aggregate(|rows| { 29 | let order = rows.join(Order); 30 | rows.filter(order.customer().eq(customer)); 31 | let max_number = row.and(rows.max(order.number())); 32 | rows.filter(order.number().eq(&max_number)); 33 | let order = row.and(Order::unique(customer, max_number)); 34 | row.then(order) 35 | }) 36 | })) 37 | .unwrap(); 38 | 39 | let order_lines_info = txn.query(|rows| { 40 | let order_line = rows.join(OrderLine); 41 | rows.filter(order_line.order().eq(last_order)); 42 | rows.into_vec(FromExpr::from_expr(order_line)) 43 | }); 44 | 45 | OrderStatus { 46 | customer_info: txn.query_one(FromExpr::from_expr(customer)), 47 | order_info: txn.query_one(FromExpr::from_expr(last_order)), 48 | order_lines_info, 49 | } 50 | } 51 | 52 | #[expect(unused)] 53 | pub struct OrderStatus<'a> { 54 | customer_info: Customer!(balance, first, middle, last), 55 | order_info: Order!(number, entry_d, carrier_id), 56 | order_lines_info: Vec>, 57 | } 58 | 59 | type OrderLineInfo<'a> = OrderLine!( 60 | stock as Stock!(item<'a>, warehouse<'a>), 61 | quantity, 62 | amount, 63 | delivery_d 64 | ); 65 | -------------------------------------------------------------------------------- /tests/tpc_c/payment.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use rust_query::{FromExpr, TableRow, Transaction, TransactionMut, Update}; 3 | 4 | pub fn random_payment<'a>( 5 | txn: TransactionMut<'a, 
Schema>, 6 | warehouse: TableRow<'a, Warehouse>, 7 | ) -> PaymentOutput<'a> { 8 | let input = generate_input(&txn, warehouse); 9 | payment(txn, input) 10 | } 11 | 12 | fn generate_input<'a>( 13 | txn: &Transaction<'a, Schema>, 14 | warehouse: TableRow<'a, Warehouse>, 15 | ) -> PaymentInput<'a> { 16 | let mut rng = rand::rng(); 17 | let district = txn 18 | .query_one(District::unique(warehouse, rng.random_range(1..=10))) 19 | .unwrap(); 20 | 21 | /* 85% of payments target a customer of the home district; the remaining 15% should use a different warehouse but that case is not implemented yet (see TODO below) */ let customer_district = if rng.random_ratio(85, 100) { 22 | district 23 | } else { 24 | // TODO: select a different warehouse here 25 | txn.query_one(District::unique(warehouse, rng.random_range(1..=10))) 26 | .unwrap() 27 | }; 28 | 29 | let customer = customer_ident(txn, &mut rng, customer_district); 30 | 31 | PaymentInput { 32 | district, 33 | customer, 34 | amount: rng.random_range(100..=500000), /* payment amount; presumably in cents (1.00..=5000.00) — TODO confirm units against the schema */ 35 | date: SystemTime::now(), 36 | } 37 | } 38 | 39 | /* Randomly generated input for a single payment transaction. */ struct PaymentInput<'a> { 40 | district: TableRow<'a, District>, 41 | customer: CustomerIdent<'a>, 42 | amount: i64, 43 | date: SystemTime, 44 | } 45 | 46 | /* Apply one payment: add the amount to the warehouse, district and customer year-to-date totals (function continues below) */ fn payment<'a>(mut txn: TransactionMut<'a, Schema>, input: PaymentInput<'a>) -> PaymentOutput<'a> { 47 | let district = input.district; 48 | let warehouse = district.warehouse(); 49 | let warehouse_info = txn.query_one(LocationYtd::from_expr(&warehouse)); 50 | 51 | txn.update_ok( 52 | &warehouse, 53 | Warehouse { 54 | ytd: Update::add(input.amount), 55 | ..Default::default() 56 | }, 57 | ); 58 | 59 | let district_info = txn.query_one(LocationYtd::from_expr(district)); 60 | 61 | txn.update_ok( 62 | district, 63 | District { 64 | ytd: Update::add(input.amount), 65 | ..Default::default() 66 | }, 67 | ); 68 | 69 | let customer = input.customer.lookup_customer(&txn); 70 | let customer_info: CustomerInfo = txn.query_one(FromExpr::from_expr(customer)); 71 | 72 | txn.update_ok( 73 | customer, 74 | Customer { 75 | ytd_payment: Update::add(input.amount), 76 | payment_cnt: Update::add(1), 77 | ..Default::default() 78 | }, 79 | ); 80
| 81 | let mut credit_data = None; 82 | if customer_info.credit == "BC" { /* "BC" (bad credit) customers get the payment appended to their data field */ 83 | let data = txn.query_one(customer.data()); 84 | let mut data = format!("{customer:?},{};{data}", input.amount); 85 | txn.update_ok( 86 | customer, 87 | Customer { 88 | data: Update::set(&data[..data.len().min(500)]), /* keep at most the first 500 bytes; the previous `&data[..500]` panicked whenever the formatted data was shorter than 500 bytes. NOTE(review): assumes ASCII content — a multi-byte char straddling the cut would still panic */ 89 | ..Default::default() 90 | }, 91 | ); 92 | data.truncate(200); 93 | credit_data = Some(data); 94 | } 95 | 96 | let data = format!("{} {}", warehouse_info.name, district_info.name); 97 | txn.insert_ok(History { 98 | customer, 99 | district, 100 | date: input.date, 101 | amount: input.amount, 102 | data, 103 | }); 104 | 105 | txn.commit(); 106 | 107 | PaymentOutput { 108 | district, 109 | customer, 110 | warehouse_info, 111 | district_info, 112 | customer_info, 113 | data: credit_data, 114 | amount: input.amount, 115 | date: input.date, 116 | } 117 | } 118 | 119 | #[expect(unused)] 120 | #[derive(FromExpr)] 121 | #[rust_query(From = Warehouse, From = District)] 122 | struct LocationYtd { 123 | name: String, 124 | street_1: String, 125 | street_2: String, 126 | city: String, 127 | state: String, 128 | zip: String, 129 | ytd: i64, 130 | } 131 | 132 | #[expect(unused)] 133 | #[derive(FromExpr)] 134 | #[rust_query(From = Customer)] 135 | struct CustomerInfo { 136 | first: String, 137 | middle: String, 138 | last: String, 139 | street_1: String, 140 | street_2: String, 141 | city: String, 142 | state: String, 143 | zip: String, 144 | phone: String, 145 | since: i64, 146 | credit: String, 147 | credit_lim: i64, 148 | discount: f64, 149 | balance: i64, 150 | } 151 | 152 | #[expect(unused)] 153 | pub struct PaymentOutput<'a> { 154 | district: TableRow<'a, District>, 155 | customer: TableRow<'a, Customer>, 156 | warehouse_info: LocationYtd, 157 | district_info: LocationYtd, 158 | customer_info: CustomerInfo, 159 | data: Option<String>, /* was `Option,`: the generic argument was lost in extraction; holds the truncated credit data set above */ 160 | amount: i64, 161 | date: SystemTime, 162 | } 163 | --------------------------------------------------------------------------------