├── .gitignore ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── sqlb-macros ├── Cargo.toml └── src │ ├── lib.rs │ └── utils.rs ├── src ├── core.rs ├── delete.rs ├── insert.rs ├── lib.rs ├── select.rs ├── sqlx_exec.rs ├── update.rs ├── utils.rs └── val.rs └── tests ├── test_macro.rs ├── test_rules.rs ├── test_sb_delete.rs ├── test_sb_enum.rs ├── test_sb_insert.rs ├── test_sb_macro.rs ├── test_sb_others.rs ├── test_sb_select.rs ├── test_sb_update.rs ├── test_sqlx.rs └── utils └── mod.rs /.gitignore: -------------------------------------------------------------------------------- 1 | .* 2 | !.gitignore 3 | 4 | target/ 5 | Cargo.lock 6 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sqlb" 3 | version = "0.4.0" 4 | authors = ["Jeremy Chone "] 5 | edition = "2018" 6 | homepage = "https://github.com/jeremychone/rust-sqlb" 7 | repository = "https://github.com/jeremychone/rust-sqlb" 8 | description = "Simple, expressive, and progressive SQL Builder for Rust." 9 | keywords = [ 10 | "sql", 11 | "sqlbuilder", 12 | "database", 13 | "orm", 14 | "postgres" 15 | ] 16 | categories = ["database"] 17 | license = "MIT OR Apache-2.0" 18 | 19 | [workspace] 20 | members = [".", "sqlb-macros"] 21 | 22 | [dependencies] 23 | sqlx = { version = "0.7", features = [ "runtime-tokio-rustls", "postgres", "time", "uuid" ] } 24 | sqlb-macros = { version="0.4.0", path = "sqlb-macros" } 25 | async-trait = "0.1" 26 | time = "0.3.20" 27 | uuid = "1.3.1" 28 | chrono = { version = "0.4", optional = true } 29 | serde_json = { version = "1.0", optional = true } 30 | rust_decimal = { version = "1.34", optional = true } 31 | 32 | [features] 33 | default = [] 34 | chrono-support = ["chrono"] 35 | json = ["serde_json"] 36 | decimal = ["rust_decimal", "sqlx:rust_decimal"] 37 | 38 | [dev-dependencies] 39 | anyhow = "1" 40 | tokio = { version = "1", features = ["full"] } 41 | serial_test = "2" 42 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2023 Jeremy Chone 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
-------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2023 Jeremy Chone 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | **sqlb** is a simple and expressive SQLBuilder for Rust for [sqlx](https://crates.io/crates/sqlx), focusing on PostgreSQL (for now). 2 | 3 | **UPDATE 2023-11-21:** `sqlb 0.4.x` now uses `sqlx 0.7.x` 4 | 5 | **NOTE 2023-11-31:** I am currently exploring integration synergy opportunities with [sea-query](https://crates.io/crates/sea-query), as they share similar "SQL builder" principles. Some initial integration might appear in `sqlb 0.5.x`. Feel free to share your perspective on our discord: https://discord.gg/W2besKCzjx 6 | 7 | 8 | **Key Concepts** 9 | - **Simple** - Focused on providing an expressive, composable, and reasonably typed scheme to build and execute (via sqlx for now) parameterized SQL statements. The goal is NOT to abstract SQL but to make it expressive and composable using Rust programmatic constructs. 10 | - **NOT** a database **executor/driver** (Uses [sqlx](https://crates.io/crates/sqlx) as an SQL executor) 11 | - **NOT** an **ORM**, just an SQL builder. 12 | - **NOT** a full replacement for [sqlx](https://crates.io/crates/sqlx). Dropping into sqlx when sqlb is too limiting is a valid pattern. 13 | - **Expressive** - From arbitrary typed data in and out (list of names/values) to struct and mapping rules. 14 | - **Focused** 15 | - **[sqlx](https://crates.io/crates/sqlx)** - The first "database executor" provided will be [sqlx](https://github.com/launchbadge/sqlx). 16 | - **PostgreSQL** - First database support will be Postgres (via sqlx). Additional database support may be added based on interest and pull requests. 17 | - `sqlb` goal is to have a highly ergonomic API at a minimum performance cost. However, using sqlx directly for high batch commands or more advanced use-cases is an encouraged approach. 18 | - **Prepared Statement ONLY!** 19 | 20 | **Additional Notes** 21 | 22 | > NOTE 1: SQL Builders are typically not used directly by application business logic, but rather to be wrapped in some Application Model Access Layer (e.g., DAOs or MCs - Model Controller - patterns). 
Even when using ORMs, it is often good code design to wrap that access in some model access layer. 23 | 24 | > NOTE 2: sqlb enables the sqlx feature `runtime-tokio-rustls`. Do not enable a conflicting runtime feature when adding sqlx to your project. 25 | 26 | > NOTE 3: During the `0.y.z` period, API changes will result in `.y` increments. 27 | 28 | Goals for first **0.y.z** releases: 29 | 30 | - **sqlx** - Only planned to be on top of [sqlx](https://crates.io/crates/sqlx). 31 | - **PostgreSQL** - Focus only on PostgreSQL. 32 | - **Macros** - Adding macros to keep things DRY (they are optional; all can be implemented via trait objects). 33 | - **Limitations** - Currently, to make types work with `sqlb` they must implement the `sqlb::SqlxBindable` trait. The aim is to implement `SqlxBindable` for all `sqlx` types and to allow app code to implement `SqlxBindable` for its specific types. If there are any external types that should be supported but are not currently, please feel free to log a ticket. A good pattern is for `sqlb` to add type support behind features (e.g., see the `chrono-support` sqlb feature). 34 | 35 | 36 | ## Early API Example (just conceptual for now) 37 | 38 | ```rust 39 | // `sqlx::FromRow` allows the sqlx_exec::fetch_as... calls. 40 | // `sqlb::Fields` allows to have: 41 | // - `todo.all_fields()` / `todo.not_none_fields()` (name, value) pairs (all values, or only the not-None ones) 42 | // - `Todo::field_names()` here would return `["id", "title", "description"]` 43 | #[derive(sqlx::FromRow, sqlb::Fields)] 44 | pub struct Todo { 45 | id: i64, 46 | 47 | title: String, 48 | #[field(name="description")] 49 | desc: Option<String>, 50 | 51 | #[field(skip)] 52 | something_else: String, 53 | } 54 | 55 | #[derive(sqlb::Fields)] 56 | pub struct TodoForCreate { 57 | title: String, 58 | desc: Option<String>, 59 | 60 | #[field(skip)] 61 | something_else: String, 62 | } 63 | 64 | #[derive(sqlb::Fields)] 65 | pub struct TodoForUpdate { 66 | title: Option<String>, 67 | desc: Option<String>, 68 | } 69 | 70 | // -- Get the field names 71 | let field_names = Todo::field_names(); 72 | // ["id", "title", "description"] 73 | 74 | // -- Create new row 75 | let todo_c = TodoForCreate { title: "title 01".to_string(), desc: Some("desc 01".to_string()) }; 76 | // will insert all fields specified in TodoForCreate 77 | let sb = sqlb::insert().table("todo").data(todo_c.all_fields()); 78 | let sb = sb.returning(&["id", "title"]); 79 | let (_id, title) = sb.fetch_one::<_, (i64, String)>(&db_pool).await?; 80 | 81 | // -- Select 82 | let sb = sqlb::select().table("todo").columns(Todo::field_names()).order_by("!id"); 83 | let todos: Vec<Todo> = sb.fetch_all(&db_pool).await?; 84 | 85 | // -- Update 86 | let todo_u = TodoForUpdate { title: None, desc: Some("Updated desc 01".to_string()) }; 87 | let sb = sqlb::update().table("todo").data(todo_u.not_none_fields()).and_where_eq("id", 123); 88 | let row_affected = sb.exec(&db_pool).await?; 89 | // will not update .title because `.not_none_fields()` skips the None values. 90 | ``` 91 | 92 | ## Thanks 93 | 94 | - Thanks to [KaiserBh](https://github.com/KaiserBh) for the `bindable!` generic type and `chrono` support. 95 | - Thanks to [eboody](https://github.com/eboody) for the potential sqlx conflict fix (see [PR 3](https://github.com/jeremychone/rust-sqlb/pull/3)). 96 | 97 | Open source is awesome! Feel free to open a ticket, ask questions, or submit a PR (concise and focused). 98 | 99 | Happy coding! 100 | 101 | ## Changelog 102 | 103 | `!` breaking change, `^` enhancement, `+` addition, `-` fix.
104 | 105 | - `0.4.0` - 2023-11-21 106 |   - `^` Updated to `sqlx 0.7` 107 | - `0.3.8` - 2023-08-03 108 |   - `+` Generic types for the `bindable!` macro. [PR from KaiserBh](https://github.com/jeremychone/rust-sqlb/pull/10) 109 |   - `+` `chrono` binding under the feature `chrono_support`. [PR from KaiserBh](https://github.com/jeremychone/rust-sqlb/pull/10) 110 | - `0.3.2 .. 0.3.7` 111 |   - `+` Add support for partial and fully qualified table and column names. #8 112 |   - `+` Add `SqlxBindable` blanket implementation for `Option<T>`. #7 113 |   - `+` Add `.limit(..)` and `.offset(..)` for `Select`. 114 |   - `+` Add `.count()` for `Select`. 115 |   - `+` Add `#[field(skip)]` and `#[field(name="other_name")]` to skip or rename properties. 116 | - `0.3.1` 117 |   - `!` BREAKING CHANGE - `HasFields.fields` has been renamed to `HasFields.not_none_fields()`. 118 |   - `!` BREAKING CHANGE - `HasFields.not_none_fields()` and `HasFields.all_fields()` consume `self` (to avoid unnecessary clones). 119 |   - `+` `HasFields.all_fields()` - returns all fields (even the ones where the value is None). 120 |   - `+` `HasFields::field_names(): &'static [&'static str]` - list of field names (i.e., column names). 121 |   - `+` Added `SqlxBindable` for the `Option` types (not a blanket impl at this point). 122 | - `0.3.0` has been deprecated since it did not have the `...fields(self)` behavior. 123 | - `0.2.0` 124 |   - Changed the generic order to match `sqlx`. From `.fetch_one::<(i64, String), _>` to `.fetch_one::<_, (i64, String)>`. 125 | - `0.0.7` 126 |   - `sqlb::insert().table("todo")` (in 0.0.7) rather than `sqlb::insert("todo")` (<= 0.0.6) (for all SqlBuilders). 127 | 128 | 129 | ## For sqlb Dev 130 | 131 | Start a PostgreSQL instance: 132 | 133 | ```sh 134 | # In terminal 1 - start postgres 135 | docker run --rm --name pg -p 5432:5432 -e POSTGRES_PASSWORD=welcome postgres:15 136 | 137 | # In terminal 2 - (optional) launch psql on the Postgres instance above 138 | docker exec -it -u postgres pg psql 139 | 140 | # In terminal 3 - run the tests 141 | cargo test 142 | 143 | # or watch a particular test target 144 | cargo watch -q -c -x 'test --test test_sb_insert' 145 | ``` 146 | 147 |
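Below is a minimal sketch of what a test against this local Postgres could look like. The `todo` table, its schema, the connection string, and the use of the `anyhow`/`tokio` dev-dependencies are illustrative assumptions, not part of the existing test suite; the tests under `tests/` (e.g., `test_sb_insert.rs`) remain the authoritative reference.

```rust
use sqlb::{Fields, HasFields};

#[derive(sqlx::FromRow, Fields)]
pub struct Todo {
	id: i64,
	title: String,
}

#[tokio::test]
async fn test_insert_and_select() -> anyhow::Result<()> {
	// Connection string matching the docker command above (assumed database/user).
	let db_pool = sqlx::postgres::PgPoolOptions::new()
		.max_connections(5)
		.connect("postgres://postgres:welcome@localhost:5432/postgres")
		.await?;

	// Assumed table, created here so the sketch is self-contained.
	sqlx::query("CREATE TABLE IF NOT EXISTS todo (id bigserial PRIMARY KEY, title text NOT NULL)")
		.execute(&db_pool)
		.await?;

	// Insert one row and get its id back.
	let (id,): (i64,) = sqlb::insert()
		.table("todo")
		.data(vec![("title", "test title 01".to_string()).into()])
		.returning(&["id"])
		.fetch_one(&db_pool)
		.await?;

	// Read it back with the select builder.
	let todos: Vec<Todo> = sqlb::select()
		.table("todo")
		.columns(Todo::field_names())
		.and_where_eq("id", id)
		.fetch_all(&db_pool)
		.await?;

	assert_eq!(todos.len(), 1);
	Ok(())
}
```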
148 | [sqlb github repo](https://github.com/jeremychone/rust-sqlb) 149 | -------------------------------------------------------------------------------- /sqlb-macros/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sqlb-macros" 3 | version = "0.4.0" 4 | authors = ["jeremy.chone@gmail.com"] 5 | edition = "2021" 6 | homepage = "https://github.com/jeremychone/rust-sqlb" 7 | repository = "https://github.com/jeremychone/rust-sqlb" 8 | description = "Macros for sqlb. Not intended to be used directly." 9 | license = "MIT OR Apache-2.0" 10 | 11 | [dependencies] 12 | quote = "1" 13 | syn = {version = "2", features = ["full", "parsing"]} 14 | proc-macro2 = "1" 15 | 16 | [lib] 17 | proc-macro = true -------------------------------------------------------------------------------- /sqlb-macros/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod utils; 2 | 3 | extern crate proc_macro; 4 | 5 | use proc_macro::TokenStream; 6 | use quote::quote; 7 | use syn::{parse_macro_input, DeriveInput, Ident}; 8 | 9 | #[proc_macro_derive(Fields, attributes(field))] 10 | pub fn derives_fields(input: TokenStream) -> TokenStream { 11 | let ast = parse_macro_input!(input as DeriveInput); 12 | let struct_name = ast.ident; 13 | 14 | // -- get the fields 15 | let fields = if let syn::Data::Struct(syn::DataStruct { 16 | fields: syn::Fields::Named(ref fields), 17 | .. 18 | }) = ast.data 19 | { 20 | fields 21 | } else { 22 | panic!("Only support Struct") 23 | }; 24 | 25 | // -- Collect Elements 26 | let props = utils::get_props(fields); 27 | 28 | let props_all_idents: Vec<&Option> = props.iter().map(|p| p.ident).collect(); 29 | let props_all_names: Vec<&String> = props.iter().map(|p| &p.name).collect(); 30 | 31 | let props_option_idents: Vec<&Option> = props.iter().filter(|p| p.is_option).map(|p| p.ident).collect(); 32 | let props_option_names: Vec<&String> = props.iter().filter(|p| p.is_option).map(|p| &p.name).collect(); 33 | 34 | let props_not_option_idents: Vec<&Option> = props.iter().filter(|p| !p.is_option).map(|p| p.ident).collect(); 35 | let props_not_option_names: Vec<&String> = props.iter().filter(|p| !p.is_option).map(|p| &p.name).collect(); 36 | 37 | // -- Vec push code for the (name, value) 38 | let ff_all_pushes = quote! { 39 | #( 40 | ff.push((#props_all_names, self.#props_all_idents).into()); 41 | )* 42 | }; 43 | 44 | let ff_not_option_pushes = quote! { 45 | #( 46 | ff.push((#props_not_option_names, self.#props_not_option_idents).into()); 47 | )* 48 | }; 49 | 50 | let ff_option_not_none_pushes = quote! { 51 | #( 52 | if let Some(val) = self.#props_option_idents { 53 | ff.push((#props_option_names, val).into()); 54 | } 55 | )* 56 | }; 57 | 58 | // -- Compose the final code 59 | let output = quote! 
{ 60 | impl sqlb::HasFields for #struct_name { 61 | 62 | fn not_none_fields<'a>( self) -> Vec> { 63 | let mut ff: Vec = Vec::new(); 64 | #ff_not_option_pushes 65 | #ff_option_not_none_pushes 66 | ff 67 | } 68 | 69 | fn all_fields<'a>( self) -> Vec> { 70 | let mut ff: Vec = Vec::new(); 71 | #ff_all_pushes 72 | ff 73 | } 74 | 75 | fn field_names() -> &'static [&'static str] { 76 | &[#( 77 | #props_all_names, 78 | )*] 79 | } 80 | } 81 | }; 82 | 83 | output.into() 84 | } 85 | -------------------------------------------------------------------------------- /sqlb-macros/src/utils.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] // For early development. 2 | use proc_macro2::Ident; 3 | use quote::{quote, ToTokens}; 4 | use syn::parse::Parse; 5 | use syn::punctuated::Punctuated; 6 | use syn::{parse_quote, Attribute, Expr, Field, FieldsNamed, Lit, LitInt, LitStr, Meta, MetaNameValue, Token}; 7 | 8 | // region: --- Prop (i.e., sqlb Field) 9 | pub struct Prop<'a> { 10 | pub name: String, 11 | pub is_option: bool, 12 | pub ident: &'a Option, 13 | } 14 | 15 | pub fn get_props(fields: &FieldsNamed) -> Vec { 16 | let mut props = Vec::new(); 17 | 18 | for field in fields.named.iter() { 19 | // -- Get the FieldAttr 20 | let field_attr = get_prop_attr(field); 21 | 22 | // TODO: Need to check better handling. 23 | let field_attr = field_attr.unwrap(); 24 | if field_attr.skip { 25 | continue; 26 | } 27 | 28 | // -- ident 29 | let ident = &field.ident; 30 | 31 | // -- is_option 32 | // NOTE: By macro limitation, we can do only type name match and it would not support type alias 33 | // For now, assume Option is used as is or type name contains it. 34 | // We can add other variants of Option if proven needed. 35 | let type_name = format!("{}", &field.ty.to_token_stream()); 36 | let is_option = type_name.contains("Option "); 37 | 38 | // -- name 39 | let name = if let Some(name) = field_attr.name { 40 | name 41 | } else { 42 | ident.as_ref().map(|i| i.to_string()).unwrap() 43 | // quote! {stringify!(#ident)} 44 | }; 45 | 46 | // -- Add to array. 47 | props.push(Prop { name, is_option, ident }) 48 | } 49 | 50 | props 51 | } 52 | // endregion: --- Prop (i.e., sqlb Field) 53 | 54 | // region: --- Attribute 55 | pub struct PropAttr { 56 | pub skip: bool, 57 | pub name: Option, 58 | } 59 | 60 | // #[field(skip, name = "new_name")] 61 | // #[field(name = "new_name")] 62 | pub fn get_prop_attr(field: &Field) -> Result { 63 | let attribute = get_attribute(field, "field"); 64 | 65 | let mut skip = false; 66 | let mut name: Option = None; 67 | 68 | if let Some(attribute) = attribute { 69 | let nested = attribute.parse_args_with(Punctuated::::parse_terminated)?; 70 | 71 | for meta in nested { 72 | match meta { 73 | // #[field(skip)] 74 | Meta::Path(path) if path.is_ident("skip") => { 75 | skip = true; 76 | } 77 | 78 | // #[field(name=value)] 79 | Meta::NameValue(nv) if nv.path.is_ident("name") => { 80 | if let Expr::Lit(exp_lit) = nv.value { 81 | if let Lit::Str(lit_str) = exp_lit.lit { 82 | name = Some(lit_str.value()) 83 | } 84 | } 85 | } 86 | 87 | /* ... 
*/ 88 | _ => { 89 | return Err(syn::Error::new_spanned(meta, "unrecognized field")); 90 | } 91 | } 92 | } 93 | } 94 | 95 | Ok(PropAttr { skip, name }) 96 | } 97 | 98 | fn get_attribute<'a>(field: &'a Field, name: &str) -> Option<&'a Attribute> { 99 | field.attrs.iter().find(|a| a.path().is_ident(name)) 100 | } 101 | // endregion: --- Attribute 102 | -------------------------------------------------------------------------------- /src/core.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | 3 | pub use crate::delete::delete; 4 | pub use crate::delete::delete_all; 5 | pub use crate::delete::DeleteSqlBuilder; 6 | pub use crate::insert::insert; 7 | pub use crate::insert::InsertSqlBuilder; 8 | pub use crate::select::select; 9 | pub use crate::select::SelectSqlBuilder; 10 | pub use crate::update::update; 11 | pub use crate::update::update_all; 12 | pub use crate::update::UpdateSqlBuilder; 13 | use crate::utils::x_column_name; 14 | pub use crate::val::SqlxBindable; 15 | pub use sqlb_macros::Fields; 16 | use sqlx::Executor; 17 | use sqlx::FromRow; 18 | use sqlx::Postgres; 19 | 20 | #[derive(Debug)] 21 | pub struct Field<'a> { 22 | pub name: String, 23 | pub value: Box<dyn SqlxBindable + 'a + Send + Sync>, 24 | } 25 | 26 | impl<'a, T: 'a + SqlxBindable + Send + Sync> From<(&str, T)> for Field<'a> { 27 | fn from((name, value): (&str, T)) -> Self { 28 | Field { 29 | name: name.to_owned(), 30 | value: Box::new(value), 31 | } 32 | } 33 | } 34 | 35 | impl<'a, T: 'a + SqlxBindable + Send + Sync> From<(String, T)> for Field<'a> { 36 | fn from((name, value): (String, T)) -> Self { 37 | Field { 38 | name, 39 | value: Box::new(value), 40 | } 41 | } 42 | } 43 | 44 | /// Implemented by structs whose "fields" can be expressed as a 45 | /// `(name, value)` vector. 46 | /// Typically implemented with `#[derive(Fields)]`. 47 | pub trait HasFields { 48 | /// Consumes `self` and returns the `Field(name, value)` pairs where the value is a not-None `SqlxBindable`. 49 | fn not_none_fields<'a>(self) -> Vec<Field<'a>>; 50 | 51 | /// Consumes `self` and returns all the `Field(name, value)` pairs. 52 | fn all_fields<'a>(self) -> Vec<Field<'a>>; 53 | 54 | /// Returns the array of all field names this struct has.
55 | fn field_names() -> &'static [&'static str]; 56 | } 57 | 58 | // region: Common Types 59 | pub(crate) struct WhereItem<'a> { 60 | pub name: String, 61 | pub op: &'static str, 62 | pub val: Box, 63 | } 64 | 65 | impl<'a, T: 'a + SqlxBindable + Send + Sync> From<(&str, &'static str, T)> for WhereItem<'a> { 66 | fn from((name, op, value): (&str, &'static str, T)) -> Self { 67 | WhereItem { 68 | name: name.to_owned(), 69 | op, 70 | val: Box::new(value), 71 | } 72 | } 73 | } 74 | 75 | #[derive(Clone)] 76 | pub(crate) struct OrderItem { 77 | pub dir: OrderDir, 78 | pub name: String, 79 | } 80 | 81 | #[derive(Clone)] 82 | pub(crate) enum OrderDir { 83 | Asc, 84 | Desc, 85 | } 86 | 87 | impl From<&str> for OrderItem { 88 | fn from(v: &str) -> Self { 89 | if let Some(s) = v.strip_prefix('!') { 90 | OrderItem { 91 | dir: OrderDir::Desc, 92 | name: x_column_name(s), 93 | } 94 | } else { 95 | OrderItem { 96 | dir: OrderDir::Asc, 97 | name: x_column_name(v), 98 | } 99 | } 100 | } 101 | } 102 | 103 | impl From<&OrderItem> for String { 104 | fn from(odr: &OrderItem) -> Self { 105 | match odr.dir { 106 | OrderDir::Asc => odr.name.to_string(), 107 | OrderDir::Desc => format!("{} {}", odr.name, "DESC"), 108 | } 109 | } 110 | } 111 | 112 | #[async_trait] 113 | pub trait SqlBuilder<'a> { 114 | fn sql(&self) -> String; 115 | fn vals(&'a self) -> Box> + 'a + Send>; 116 | 117 | async fn fetch_one<'e, DB, D>(&'a self, db_pool: DB) -> Result 118 | where 119 | DB: Executor<'e, Database = Postgres>, 120 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send; 121 | 122 | async fn fetch_optional<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 123 | where 124 | DB: Executor<'e, Database = Postgres>, 125 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send; 126 | 127 | async fn fetch_all<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 128 | where 129 | DB: Executor<'e, Database = Postgres>, 130 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send; 131 | 132 | async fn exec<'q, DB>(&'a self, db_pool: DB) -> Result 133 | where 134 | DB: Executor<'q, Database = Postgres>; 135 | } 136 | 137 | pub trait Whereable<'a> { 138 | fn and_where_eq(self, name: &str, val: T) -> Self; 139 | fn and_where(self, name: &str, op: &'static str, val: T) -> Self; 140 | } 141 | 142 | // endregion: Common Types 143 | 144 | // region: property into helpers 145 | pub(crate) fn add_to_where<'a, T: 'a + SqlxBindable + Send + Sync>( 146 | and_wheres: &mut Vec>, 147 | name: &str, 148 | op: &'static str, 149 | val: T, 150 | ) { 151 | // Note: to_vec so that when it into_iter we do not get the reference of the tuple items. 152 | let wher = WhereItem { 153 | name: name.to_owned(), 154 | op, 155 | val: Box::new(val), 156 | }; 157 | 158 | and_wheres.push(wher); 159 | } 160 | 161 | // Note: for now does not care about the base. 162 | pub(crate) fn into_returnings(_base: Option>, names: &[&str]) -> Option> { 163 | Some(names.iter().map(|s| s.to_string()).collect()) 164 | } 165 | // endregion: property into helpers 166 | 167 | // region: Builder Utils 168 | 169 | // SQL: "name1", "name2", ... 170 | pub(crate) fn sql_comma_names(fields: &[Field]) -> String { 171 | fields 172 | .iter() 173 | .map(|Field { name, .. }| x_column_name(name)) 174 | .collect::>() 175 | .join(", ") 176 | } 177 | 178 | // SQL: $1, $2, $3, ... 179 | pub(crate) fn sql_comma_params(fields: &[Field]) -> (i32, String) { 180 | let mut vals = String::new(); 181 | let mut binding_idx = 1; 182 | 183 | for (idx, Field { value, .. 
}) in fields.iter().enumerate() { 184 | if idx > 0 { 185 | vals.push_str(", "); 186 | }; 187 | match value.raw() { 188 | None => { 189 | vals.push_str(&format!("${}", binding_idx)); 190 | binding_idx += 1; 191 | } 192 | Some(raw) => vals.push_str(raw), 193 | }; 194 | } 195 | (binding_idx, vals) 196 | } 197 | 198 | // If first array, idx_offset should be 1 199 | // SQL: "name1" = &1, ... 200 | pub(crate) fn sql_where_items(where_items: &[WhereItem], idx_start: usize) -> String { 201 | where_items 202 | .iter() 203 | .enumerate() 204 | .map(|(idx, WhereItem { name, op, .. })| format!("{} {} ${}", x_column_name(name), op, idx + idx_start)) 205 | .collect::>() 206 | .join(" AND ") 207 | } 208 | 209 | // SQL: "Id", "userName", ... 210 | pub(crate) fn sql_returnings(returnings: &[String]) -> String { 211 | returnings.iter().map(|r| x_column_name(r)).collect::>().join(", ") 212 | } 213 | // endregion: Builder Utils 214 | -------------------------------------------------------------------------------- /src/delete.rs: -------------------------------------------------------------------------------- 1 | use crate::core::{add_to_where, into_returnings, sql_returnings, sql_where_items}; 2 | use crate::core::{WhereItem, Whereable}; 3 | use crate::utils::x_table_name; 4 | use crate::{sqlx_exec, SqlBuilder, SqlxBindable}; 5 | use async_trait::async_trait; 6 | use sqlx::{Executor, FromRow, Postgres}; 7 | 8 | pub fn delete<'a>() -> DeleteSqlBuilder<'a> { 9 | DeleteSqlBuilder { 10 | guard_all: true, 11 | table: None, 12 | returnings: None, 13 | and_wheres: Vec::new(), 14 | } 15 | } 16 | 17 | pub fn delete_all<'a>() -> DeleteSqlBuilder<'a> { 18 | DeleteSqlBuilder { 19 | guard_all: false, 20 | table: None, 21 | returnings: None, 22 | and_wheres: Vec::new(), 23 | } 24 | } 25 | 26 | pub struct DeleteSqlBuilder<'a> { 27 | guard_all: bool, 28 | table: Option, 29 | returnings: Option>, 30 | and_wheres: Vec>, 31 | } 32 | 33 | impl<'a> DeleteSqlBuilder<'a> { 34 | pub fn table(mut self, table: &str) -> Self { 35 | self.table = Some(table.to_string()); 36 | self 37 | } 38 | pub fn and_where(mut self, name: &str, op: &'static str, val: T) -> Self { 39 | add_to_where(&mut self.and_wheres, name, op, val); 40 | self 41 | } 42 | 43 | pub fn and_where_eq(mut self, name: &str, val: T) -> Self { 44 | add_to_where(&mut self.and_wheres, name, "=", val); 45 | self 46 | } 47 | 48 | pub fn returning(mut self, names: &[&str]) -> Self { 49 | self.returnings = into_returnings(self.returnings, names); 50 | self 51 | } 52 | 53 | pub async fn exec<'q, DB>(&'a self, db_pool: DB) -> Result 54 | where 55 | DB: Executor<'q, Database = Postgres>, 56 | { 57 | sqlx_exec::exec(db_pool, self).await 58 | } 59 | 60 | pub async fn fetch_one<'e, DB, D>(&'a self, db_pool: DB) -> Result 61 | where 62 | DB: Executor<'e, Database = Postgres>, 63 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 64 | { 65 | sqlx_exec::fetch_as_one::(db_pool, self).await 66 | } 67 | 68 | pub async fn fetch_optional<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 69 | where 70 | DB: Executor<'e, Database = Postgres>, 71 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 72 | { 73 | sqlx_exec::fetch_as_optional::(db_pool, self).await 74 | } 75 | 76 | pub async fn fetch_all<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 77 | where 78 | DB: Executor<'e, Database = Postgres>, 79 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 80 | { 81 | sqlx_exec::fetch_as_all::(db_pool, self).await 82 | } 83 | } 84 | 85 | 
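// Usage sketch (hypothetical `todo` table and `db_pool`, not taken from the test suite):
// a guarded delete by id. `delete()` panics if no `and_where*` clause was set;
// use `delete_all()` when deleting every row is intended.
//
//   let row_affected = sqlb::delete()
//       .table("todo")
//       .and_where_eq("id", 123)
//       .exec(&db_pool)
//       .await?;
//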
impl<'a> Whereable<'a> for DeleteSqlBuilder<'a> { 86 | fn and_where_eq(self, name: &str, val: T) -> Self { 87 | DeleteSqlBuilder::and_where_eq(self, name, val) 88 | } 89 | 90 | fn and_where(self, name: &str, op: &'static str, val: T) -> Self { 91 | DeleteSqlBuilder::and_where(self, name, op, val) 92 | } 93 | } 94 | 95 | #[async_trait] 96 | impl<'a> SqlBuilder<'a> for DeleteSqlBuilder<'a> { 97 | fn sql(&self) -> String { 98 | // SQL: DELETE FROM table_name WHERE w1 = $1, ... RETURNING r1, r2, ..; 99 | 100 | // SQL: DELETE FROM table_name 101 | let mut sql = String::from("DELETE FROM "); 102 | 103 | if let Some(table) = &self.table { 104 | sql.push_str(&x_table_name(table)); 105 | } 106 | 107 | // SQL: WHERE w1 < $1, ... 108 | if !self.and_wheres.is_empty() { 109 | let sql_where = sql_where_items(&self.and_wheres, 1); 110 | sql.push_str(&format!("WHERE {} ", &sql_where)); 111 | } else if self.guard_all { 112 | // For now panic, will return error later 113 | panic!("FATAL - Trying to call a delete without any where clause. If needed, use sqlb::delete_all(table_name). ") 114 | } 115 | 116 | // SQL: RETURNING "r1", "r2", ... 117 | if let Some(returnings) = &self.returnings { 118 | sql.push_str(&format!("RETURNING {} ", sql_returnings(returnings))); 119 | } 120 | 121 | sql 122 | } 123 | 124 | fn vals(&'a self) -> Box> + 'a + Send> { 125 | let iter = self.and_wheres.iter().map(|wi| &wi.val); 126 | Box::new(iter) 127 | } 128 | 129 | async fn exec<'q, DB>(&'a self, db_pool: DB) -> Result 130 | where 131 | DB: Executor<'q, Database = Postgres>, 132 | { 133 | Self::exec(self, db_pool).await 134 | } 135 | 136 | async fn fetch_one<'e, DB, D>(&'a self, db_pool: DB) -> Result 137 | where 138 | DB: Executor<'e, Database = Postgres>, 139 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 140 | { 141 | Self::fetch_one::(self, db_pool).await 142 | } 143 | 144 | async fn fetch_optional<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 145 | where 146 | DB: Executor<'e, Database = Postgres>, 147 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 148 | { 149 | Self::fetch_optional::(self, db_pool).await 150 | } 151 | 152 | async fn fetch_all<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 153 | where 154 | DB: Executor<'e, Database = Postgres>, 155 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 156 | { 157 | Self::fetch_all::(self, db_pool).await 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /src/insert.rs: -------------------------------------------------------------------------------- 1 | use crate::core::{into_returnings, sql_comma_names, sql_comma_params, sql_returnings}; 2 | use crate::utils::x_table_name; 3 | use crate::{sqlx_exec, Field, SqlBuilder, SqlxBindable}; 4 | use async_trait::async_trait; 5 | use sqlx::{Executor, FromRow, Postgres}; 6 | 7 | pub fn insert<'a>() -> InsertSqlBuilder<'a> { 8 | InsertSqlBuilder { 9 | table: None, 10 | data: Vec::new(), 11 | returnings: None, 12 | } 13 | } 14 | 15 | // #[derive(Clone)] 16 | pub struct InsertSqlBuilder<'a> { 17 | table: Option, 18 | data: Vec>, 19 | returnings: Option>, 20 | } 21 | 22 | impl<'a> InsertSqlBuilder<'a> { 23 | pub fn table(mut self, table: &str) -> Self { 24 | self.table = Some(table.to_string()); 25 | self 26 | } 27 | 28 | pub fn data(mut self, fields: Vec>) -> Self { 29 | self.data = fields; 30 | self 31 | } 32 | 33 | pub fn returning(mut self, names: &[&str]) -> Self { 34 | self.returnings = 
into_returnings(self.returnings, names); 35 | self 36 | } 37 | 38 | pub async fn exec<'q, DB>(&'a self, db_pool: DB) -> Result 39 | where 40 | DB: Executor<'q, Database = Postgres>, 41 | { 42 | sqlx_exec::exec(db_pool, self).await 43 | } 44 | 45 | pub async fn fetch_one<'e, DB, D>(&'a self, db_pool: DB) -> Result 46 | where 47 | DB: Executor<'e, Database = Postgres>, 48 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 49 | { 50 | sqlx_exec::fetch_as_one::(db_pool, self).await 51 | } 52 | 53 | pub async fn fetch_optional<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 54 | where 55 | DB: Executor<'e, Database = Postgres>, 56 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 57 | { 58 | sqlx_exec::fetch_as_optional::(db_pool, self).await 59 | } 60 | 61 | pub async fn fetch_all<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 62 | where 63 | DB: Executor<'e, Database = Postgres>, 64 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 65 | { 66 | sqlx_exec::fetch_as_all::(db_pool, self).await 67 | } 68 | } 69 | 70 | #[async_trait] 71 | impl<'a> SqlBuilder<'a> for InsertSqlBuilder<'a> { 72 | fn sql(&self) -> String { 73 | // SQL: INSERT INTO table_name (name1, ...) VALUES ($1, ...) RETURNING r1, ...; 74 | 75 | // SQL: INSERT INTO table_name 76 | let mut sql = String::from("INSERT INTO "); 77 | 78 | if let Some(table) = &self.table { 79 | sql.push_str(&x_table_name(table)); 80 | } 81 | 82 | // NotDB: empty data is a valid usecase, if the row has a all required field with default or auto gen. 83 | let fields = &self.data; 84 | // SQL: (name1, name2, ...) 85 | sql.push_str(&format!("({}) ", sql_comma_names(fields))); 86 | 87 | // SQL: VALUES ($1, $2, ...) 88 | sql.push_str(&format!("VALUES ({}) ", sql_comma_params(fields).1)); 89 | 90 | // SQL: RETURNING "r1", "r2", ... 
91 | if let Some(returnings) = &self.returnings { 92 | sql.push_str(&format!("RETURNING {} ", sql_returnings(returnings))); 93 | } 94 | 95 | sql 96 | } 97 | 98 | fn vals(&'a self) -> Box> + 'a + Send> { 99 | let iter = self.data.iter().map(|field| &field.value); 100 | Box::new(iter) 101 | } 102 | 103 | async fn exec<'q, DB>(&'a self, db_pool: DB) -> Result 104 | where 105 | DB: Executor<'q, Database = Postgres>, 106 | { 107 | Self::exec(self, db_pool).await 108 | } 109 | 110 | async fn fetch_one<'e, DB, D>(&'a self, db_pool: DB) -> Result 111 | where 112 | DB: Executor<'e, Database = Postgres>, 113 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 114 | { 115 | Self::fetch_one::(self, db_pool).await 116 | } 117 | 118 | async fn fetch_optional<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 119 | where 120 | DB: Executor<'e, Database = Postgres>, 121 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 122 | { 123 | Self::fetch_optional::(self, db_pool).await 124 | } 125 | 126 | async fn fetch_all<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 127 | where 128 | DB: Executor<'e, Database = Postgres>, 129 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 130 | { 131 | Self::fetch_all::(self, db_pool).await 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // #![allow(unused)] // silence unused warnings while exploring (to comment out) 2 | 3 | mod core; 4 | mod delete; 5 | mod insert; 6 | mod select; 7 | pub mod sqlx_exec; 8 | mod update; 9 | mod utils; 10 | mod val; 11 | 12 | pub use crate::core::Field; 13 | pub use crate::core::HasFields; 14 | pub use crate::core::SqlBuilder; 15 | pub use crate::core::Whereable; 16 | pub use crate::delete::delete; 17 | pub use crate::delete::delete_all; 18 | pub use crate::delete::DeleteSqlBuilder; 19 | pub use crate::insert::insert; 20 | pub use crate::insert::InsertSqlBuilder; 21 | pub use crate::select::select; 22 | pub use crate::select::SelectSqlBuilder; 23 | pub use crate::update::update; 24 | pub use crate::update::update_all; 25 | pub use crate::update::UpdateSqlBuilder; 26 | pub use crate::val::Raw; 27 | pub use crate::val::SqlxBindable; 28 | pub use sqlb_macros::Fields; 29 | -------------------------------------------------------------------------------- /src/select.rs: -------------------------------------------------------------------------------- 1 | use crate::core::{add_to_where, sql_where_items, Whereable}; 2 | use crate::core::{OrderItem, WhereItem}; 3 | use crate::sqlx_exec; 4 | use crate::utils::{x_column_name, x_table_name}; 5 | use crate::{SqlBuilder, SqlxBindable}; 6 | use async_trait::async_trait; 7 | use sqlx::{Executor, FromRow, Postgres}; 8 | 9 | pub fn select<'a>() -> SelectSqlBuilder<'a> { 10 | SelectSqlBuilder { 11 | table: None, 12 | columns: None, 13 | and_wheres: Vec::new(), 14 | order_bys: None, 15 | limit: None, 16 | offset: None, 17 | } 18 | } 19 | 20 | pub struct SelectSqlBuilder<'a> { 21 | table: Option, 22 | columns: Option>, 23 | and_wheres: Vec>, 24 | order_bys: Option>, 25 | limit: Option, 26 | offset: Option, 27 | } 28 | 29 | impl<'a> SelectSqlBuilder<'a> { 30 | pub fn and_where_eq(mut self, name: &str, val: T) -> Self { 31 | add_to_where(&mut self.and_wheres, name, "=", val); 32 | self 33 | } 34 | 35 | pub fn and_where(mut self, name: &str, op: &'static str, val: T) -> Self { 36 | add_to_where(&mut 
self.and_wheres, name, op, val); 37 | self 38 | } 39 | 40 | pub fn table(mut self, table: &str) -> Self { 41 | self.table = Some(table.to_string()); 42 | self 43 | } 44 | 45 | pub fn columns(mut self, names: &[&str]) -> Self { 46 | self.columns = Some(names.iter().map(|s| s.to_string()).collect()); 47 | self 48 | } 49 | 50 | pub fn order_bys(mut self, odrs: &[&str]) -> Self { 51 | self.order_bys = Some(odrs.iter().copied().map(|o| o.into()).collect()); 52 | self 53 | } 54 | 55 | pub fn order_by(mut self, odr: &str) -> Self { 56 | self.order_bys = Some(vec![odr.into()]); 57 | self 58 | } 59 | 60 | pub fn limit(mut self, limit: i64) -> Self { 61 | self.limit = Some(limit); 62 | self 63 | } 64 | 65 | pub fn offset(mut self, offset: i64) -> Self { 66 | self.offset = Some(offset); 67 | self 68 | } 69 | 70 | pub async fn exec<'q, DB>(&'a self, db_pool: DB) -> Result 71 | where 72 | DB: Executor<'q, Database = Postgres>, 73 | { 74 | sqlx_exec::exec(db_pool, self).await 75 | } 76 | 77 | pub async fn fetch_one<'e, DB, D>(&'a self, db_pool: DB) -> Result 78 | where 79 | DB: Executor<'e, Database = Postgres>, 80 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 81 | { 82 | sqlx_exec::fetch_as_one::(db_pool, self).await 83 | } 84 | 85 | pub async fn fetch_optional<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 86 | where 87 | DB: Executor<'e, Database = Postgres>, 88 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 89 | { 90 | sqlx_exec::fetch_as_optional::(db_pool, self).await 91 | } 92 | 93 | pub async fn fetch_all<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 94 | where 95 | DB: Executor<'e, Database = Postgres>, 96 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 97 | { 98 | sqlx_exec::fetch_as_all::(db_pool, self).await 99 | } 100 | } 101 | 102 | impl<'a> Whereable<'a> for SelectSqlBuilder<'a> { 103 | fn and_where_eq(self, name: &str, val: V) -> Self { 104 | SelectSqlBuilder::and_where_eq(self, name, val) 105 | } 106 | 107 | fn and_where(self, name: &str, op: &'static str, val: V) -> Self { 108 | SelectSqlBuilder::and_where(self, name, op, val) 109 | } 110 | } 111 | 112 | #[async_trait] 113 | impl<'a> SqlBuilder<'a> for SelectSqlBuilder<'a> { 114 | fn sql(&self) -> String { 115 | // SELECT name1, name2 FROM table_name WHERE w1 < r1, w2 = r2 116 | 117 | // SQL: SELECT 118 | let mut sql = String::from("SELECT "); 119 | 120 | // SQL: name1, name2, 121 | // For now, if no column, will do a "*" 122 | match &self.columns { 123 | Some(columns) => { 124 | let names = columns.iter().map(|c| x_column_name(c)).collect::>().join(", "); 125 | sql.push_str(&format!("{} ", names)); 126 | } 127 | None => sql.push_str(&format!("{} ", "*")), 128 | }; 129 | 130 | // SQL: FROM table_name 131 | if let Some(table) = &self.table { 132 | sql.push_str("FROM "); 133 | sql.push_str(&x_table_name(table)); 134 | } 135 | 136 | // SQL: WHERE w1 < $1, ... 
137 | if !self.and_wheres.is_empty() { 138 | let sql_where = sql_where_items(&self.and_wheres, 1); 139 | sql.push_str(&format!("WHERE {} ", &sql_where)); 140 | } 141 | 142 | // SQL: ORDER BY 143 | if let Some(order_bys) = &self.order_bys { 144 | let sql_order_bys = order_bys 145 | .iter() 146 | .map::(|o| o.into()) 147 | .collect::>() 148 | .join(", "); 149 | sql.push_str(&format!("ORDER BY {} ", sql_order_bys)) 150 | } 151 | 152 | // SQL: LIMIT 153 | if let Some(limit) = &self.limit { 154 | sql.push_str(&format!("LIMIT {limit} ")) 155 | } 156 | 157 | // SQL: OFFSET 158 | if let Some(offset) = &self.offset { 159 | sql.push_str(&format!("OFFSET {offset} ")) 160 | } 161 | 162 | sql 163 | } 164 | 165 | fn vals(&'a self) -> Box> + 'a + Send> { 166 | let iter = self.and_wheres.iter().map(|wi| &wi.val); 167 | Box::new(iter) 168 | } 169 | 170 | async fn exec<'q, DB>(&'a self, db_pool: DB) -> Result 171 | where 172 | DB: Executor<'q, Database = Postgres>, 173 | { 174 | Self::exec(self, db_pool).await 175 | } 176 | 177 | async fn fetch_one<'e, DB, D>(&'a self, db_pool: DB) -> Result 178 | where 179 | DB: Executor<'e, Database = Postgres>, 180 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 181 | { 182 | Self::fetch_one::(self, db_pool).await 183 | } 184 | 185 | async fn fetch_optional<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 186 | where 187 | DB: Executor<'e, Database = Postgres>, 188 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 189 | { 190 | Self::fetch_optional::(self, db_pool).await 191 | } 192 | 193 | async fn fetch_all<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 194 | where 195 | DB: Executor<'e, Database = Postgres>, 196 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 197 | { 198 | Self::fetch_all::(self, db_pool).await 199 | } 200 | } 201 | -------------------------------------------------------------------------------- /src/sqlx_exec.rs: -------------------------------------------------------------------------------- 1 | //////////////////////////////////// 2 | // sqlx-exec - module for the sqlx query executor 3 | //// 4 | 5 | use crate::SqlBuilder; 6 | use sqlx::{postgres::PgArguments, Execute, Executor, FromRow, Postgres}; 7 | 8 | /// Build a sqlx::query_as for the D (Data) generic type, binds the values, and does a .fetch_one and returns E 9 | pub async fn fetch_as_one<'e, 'q, DB, D, Q>(db_pool: DB, sb: &'q Q) -> Result 10 | where 11 | DB: Executor<'e, Database = Postgres>, 12 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 13 | Q: SqlBuilder<'q>, 14 | { 15 | let sql = sb.sql(); 16 | let vals = sb.vals(); 17 | 18 | // build temp query for binding 19 | let mut query = sqlx::query::(&sql); 20 | for val in vals.into_iter() { 21 | query = val.bind_query(query); 22 | } 23 | 24 | // create the QueryAs 25 | let query = sqlx::query_as_with::(&sql, query.take_arguments().unwrap()); 26 | 27 | // exec and return 28 | let r = query.fetch_one(db_pool).await?; 29 | Ok(r) 30 | } 31 | 32 | /// Build a sqlx::query_as for the D (Data) generic type, binds the values, and does a .fetch_one and returns E 33 | pub async fn fetch_as_optional<'e, 'q, DB, D, Q>(db_pool: DB, sb: &'q Q) -> Result, sqlx::Error> 34 | where 35 | DB: Executor<'e, Database = Postgres>, 36 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 37 | Q: SqlBuilder<'q>, 38 | { 39 | let sql = sb.sql(); 40 | let vals = sb.vals(); 41 | 42 | // build temp query for binding 43 | let mut query = sqlx::query::(&sql); 44 | for val in 
vals.into_iter() { 45 | query = val.bind_query(query); 46 | } 47 | 48 | // create the QueryAs 49 | let query = sqlx::query_as_with::(&sql, query.take_arguments().unwrap()); 50 | 51 | // exec and return 52 | let r = query.fetch_optional(db_pool).await?; 53 | Ok(r) 54 | } 55 | 56 | /// Build a sqlx::query_as for the D (Data) generic type, binds the values, and does a .fetch_all and returns Vec 57 | pub async fn fetch_as_all<'e, 'q, DB, D, Q>(db_pool: DB, sb: &'q Q) -> Result, sqlx::Error> 58 | where 59 | DB: Executor<'e, Database = Postgres>, 60 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 61 | Q: SqlBuilder<'q>, 62 | { 63 | let sql = sb.sql(); 64 | let vals = sb.vals(); 65 | 66 | // build temp query for binding 67 | let mut query = sqlx::query::(&sql); 68 | for val in vals.into_iter() { 69 | query = val.bind_query(query); 70 | } 71 | 72 | // create the QueryAs 73 | let query = sqlx::query_as_with::(&sql, query.take_arguments().unwrap()); 74 | 75 | // exec and return 76 | let r = query.fetch_all(db_pool).await?; 77 | Ok(r) 78 | } 79 | 80 | pub async fn exec<'e, 'q, DB, Q>(db_pool: DB, sb: &'q Q) -> Result 81 | where 82 | DB: Executor<'e, Database = Postgres>, 83 | Q: SqlBuilder<'q>, 84 | { 85 | let sql = sb.sql(); 86 | let vals = sb.vals(); 87 | let mut query = sqlx::query::(&sql); 88 | for val in vals.into_iter() { 89 | query = val.bind_query(query); 90 | } 91 | 92 | let r = query.execute(db_pool).await?.rows_affected(); 93 | 94 | Ok(r) 95 | } 96 | -------------------------------------------------------------------------------- /src/update.rs: -------------------------------------------------------------------------------- 1 | use crate::core::{add_to_where, into_returnings, sql_returnings, sql_where_items}; 2 | use crate::core::{WhereItem, Whereable}; 3 | use crate::utils::{x_column_name, x_table_name}; 4 | use crate::{sqlx_exec, Field, SqlBuilder, SqlxBindable}; 5 | use async_trait::async_trait; 6 | use sqlx::{Executor, FromRow, Postgres}; 7 | 8 | pub fn update<'a>() -> UpdateSqlBuilder<'a> { 9 | UpdateSqlBuilder { 10 | guard_all: true, 11 | table: None, 12 | data: Vec::new(), 13 | returnings: None, 14 | and_wheres: Vec::new(), 15 | } 16 | } 17 | 18 | pub fn update_all<'a>() -> UpdateSqlBuilder<'a> { 19 | UpdateSqlBuilder { 20 | guard_all: false, 21 | table: None, 22 | data: Vec::new(), 23 | returnings: None, 24 | and_wheres: Vec::new(), 25 | } 26 | } 27 | 28 | pub struct UpdateSqlBuilder<'a> { 29 | guard_all: bool, 30 | table: Option, 31 | data: Vec>, 32 | returnings: Option>, 33 | and_wheres: Vec>, 34 | } 35 | 36 | impl<'a> UpdateSqlBuilder<'a> { 37 | pub fn table(mut self, table: &str) -> Self { 38 | self.table = Some(table.to_string()); 39 | self 40 | } 41 | 42 | pub fn data(mut self, fields: Vec>) -> Self { 43 | self.data = fields; 44 | self 45 | } 46 | 47 | pub fn and_where(mut self, name: &str, op: &'static str, val: T) -> Self { 48 | add_to_where(&mut self.and_wheres, name, op, val); 49 | self 50 | } 51 | 52 | pub fn and_where_eq(mut self, name: &str, val: T) -> Self { 53 | add_to_where(&mut self.and_wheres, name, "=", val); 54 | self 55 | } 56 | 57 | pub fn returning(mut self, names: &[&str]) -> Self { 58 | self.returnings = into_returnings(self.returnings, names); 59 | self 60 | } 61 | 62 | pub async fn exec<'q, E>(&'a self, db_pool: E) -> Result 63 | where 64 | E: Executor<'q, Database = Postgres>, 65 | { 66 | sqlx_exec::exec(db_pool, self).await 67 | } 68 | 69 | pub async fn fetch_one<'e, DB, D>(&'a self, db_pool: DB) -> Result 70 | where 71 | DB: 
Executor<'e, Database = Postgres>, 72 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 73 | { 74 | sqlx_exec::fetch_as_one::(db_pool, self).await 75 | } 76 | 77 | pub async fn fetch_optional<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 78 | where 79 | DB: Executor<'e, Database = Postgres>, 80 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 81 | { 82 | sqlx_exec::fetch_as_optional::(db_pool, self).await 83 | } 84 | 85 | pub async fn fetch_all<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 86 | where 87 | DB: Executor<'e, Database = Postgres>, 88 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 89 | { 90 | sqlx_exec::fetch_as_all::(db_pool, self).await 91 | } 92 | } 93 | 94 | impl<'a> Whereable<'a> for UpdateSqlBuilder<'a> { 95 | fn and_where_eq(self, name: &str, val: T) -> Self { 96 | UpdateSqlBuilder::and_where_eq(self, name, val) 97 | } 98 | 99 | fn and_where(self, name: &str, op: &'static str, val: T) -> Self { 100 | UpdateSqlBuilder::and_where(self, name, op, val) 101 | } 102 | } 103 | 104 | #[async_trait] 105 | impl<'a> SqlBuilder<'a> for UpdateSqlBuilder<'a> { 106 | fn sql(&self) -> String { 107 | // SQL: UPDATE table_name SET column1 = $1, ... WHERE w1 = $2, w2 = $3 returning r1, r2; 108 | 109 | // SQL: UPDATE table_name SET 110 | let mut sql = String::from("UPDATE "); 111 | 112 | if let Some(table) = &self.table { 113 | sql.push_str(&x_table_name(table)); 114 | } 115 | 116 | sql.push_str("SET "); 117 | 118 | // Index for the $_idx_ in the prepared statement 119 | let mut binding_idx = 1; 120 | 121 | // TODO: Handle the case of empty data. Should we change this signature to return a Result ? 122 | // For now, just ignore this case, will fail at sql exec time 123 | // SQL: column1 = $1, ... 124 | let fields = &self.data; 125 | let sql_set = fields 126 | .iter() 127 | .enumerate() 128 | .map(|(_, f)| { 129 | let mut part = format!("{} = ", x_column_name(&f.name)); 130 | match f.value.raw() { 131 | None => { 132 | part.push_str(&format!("${}", binding_idx)); 133 | binding_idx += 1; 134 | } 135 | Some(raw) => part.push_str(raw), 136 | } 137 | part 138 | }) 139 | .collect::>() 140 | .join(", "); 141 | sql.push_str(&format!("{} ", sql_set)); 142 | 143 | // SQL: WHERE w1 < $1, ... 144 | if !self.and_wheres.is_empty() { 145 | let sql_where = sql_where_items(&self.and_wheres, binding_idx); 146 | sql.push_str(&format!("WHERE {} ", &sql_where)); 147 | } else if self.guard_all { 148 | // For now panic, will return error later 149 | panic!("FATAL - Trying to call a update without any where clause. If needed, use sqlb::update_all(table_name). ") 150 | } 151 | 152 | // SQL: RETURNING "r1", "r2", ... 
153 | if let Some(returnings) = &self.returnings { 154 | sql.push_str(&format!("RETURNING {} ", sql_returnings(returnings))); 155 | } 156 | 157 | sql 158 | } 159 | 160 | fn vals(&'a self) -> Box> + 'a + Send> { 161 | let iter = self.data.iter().map(|field| &field.value); 162 | // FIXME needs to uncomment 163 | let iter = iter.chain(self.and_wheres.iter().map(|wi| &wi.val)); 164 | Box::new(iter) 165 | } 166 | 167 | async fn exec<'q, E>(&'a self, db_pool: E) -> Result 168 | where 169 | E: Executor<'q, Database = Postgres>, 170 | { 171 | Self::exec(self, db_pool).await 172 | } 173 | 174 | async fn fetch_one<'e, DB, D>(&'a self, db_pool: DB) -> Result 175 | where 176 | DB: Executor<'e, Database = Postgres>, 177 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 178 | { 179 | Self::fetch_one::(self, db_pool).await 180 | } 181 | 182 | async fn fetch_optional<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 183 | where 184 | DB: Executor<'e, Database = Postgres>, 185 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 186 | { 187 | Self::fetch_optional::(self, db_pool).await 188 | } 189 | 190 | async fn fetch_all<'e, DB, D>(&'a self, db_pool: DB) -> Result, sqlx::Error> 191 | where 192 | DB: Executor<'e, Database = Postgres>, 193 | D: for<'r> FromRow<'r, sqlx::postgres::PgRow> + Unpin + Send, 194 | { 195 | Self::fetch_all::(self, db_pool).await 196 | } 197 | } 198 | -------------------------------------------------------------------------------- /src/utils.rs: -------------------------------------------------------------------------------- 1 | /// Escape table name. 2 | /// - Surround with `"` if simple table name. 3 | /// - If the text contains a . symbol, ensure to surround each part. 4 | /// 5 | /// TODO: needs to handle the . notation (i.e., quote each side of the dot) 6 | pub(crate) fn x_table_name(name: &str) -> String { 7 | if name.contains('.') { 8 | name.split('.') 9 | .map(|part| format!("\"{}\"", part)) 10 | .collect::>() 11 | .join(".") 12 | } else { 13 | format!("\"{}\"", name) 14 | } 15 | } 16 | 17 | /// Escape column name. 18 | /// - Surround with `"` if simple column name. 19 | /// - Leave column name as is if special character `(` (might need to add more) 20 | /// (this allows function call like `count(*)`) 21 | /// - If the text contains a . symbol, ensure to surround each part. 22 | /// 23 | pub(crate) fn x_column_name(name: &str) -> String { 24 | if name.contains('(') { 25 | name.to_string() 26 | } else if name.contains('.') { 27 | name.split('.') 28 | .map(|part| format!("\"{}\"", part)) 29 | .collect::>() 30 | .join(".") 31 | } else { 32 | format!("\"{}\"", name) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/val.rs: -------------------------------------------------------------------------------- 1 | //! Currently, `SqlxBindable` represents a value that can be bound. 2 | //! This requires cloning the value. The performance impact should be minimal, and for bulk updates, direct usage of `sqlx` can be preferred. 3 | //! Eventually, this might change to follow the `'args` pattern of `sqlx` `Builder`, 4 | //! but at this point, priority is given to API ergonomics. 5 | //! 
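//! Editorial sketch (added annotation, not part of the original docs): with the
//! implementations below, binding a value boils down to `query.bind(self.clone())`
//! inside `SqlxBindable::bind_query`, which is why bound types are cloned as noted above.
//! For example (hypothetical values), a field built with `("title", "hello".to_string()).into()`
//! has its `String` cloned at the point where the query is executed.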
6 | 7 | use time::OffsetDateTime; 8 | use uuid::Uuid; 9 | 10 | pub trait SqlxBindable: std::fmt::Debug { 11 | fn bind_query<'q>( 12 | &'q self, 13 | query: sqlx::query::Query<'q, sqlx::Postgres, sqlx::postgres::PgArguments>, 14 | ) -> sqlx::query::Query<'q, sqlx::Postgres, sqlx::postgres::PgArguments>; 15 | 16 | fn raw(&self) -> Option<&str> { 17 | None 18 | } 19 | } 20 | 21 | #[macro_export] 22 | macro_rules! bindable { 23 | ($($t:ty),*) => { 24 | $(impl $crate::SqlxBindable for $t { 25 | fn bind_query<'q>(&self, query: sqlx::query::Query<'q, sqlx::Postgres, sqlx::postgres::PgArguments>) -> sqlx::query::Query<'q, sqlx::Postgres, sqlx::postgres::PgArguments> { 26 | let query = query.bind(self.clone()); 27 | query 28 | } 29 | } 30 | 31 | impl $crate::SqlxBindable for &$t { 32 | fn bind_query<'q>(&self, query: sqlx::query::Query<'q, sqlx::Postgres, sqlx::postgres::PgArguments>) -> sqlx::query::Query<'q, sqlx::Postgres, sqlx::postgres::PgArguments> { 33 | let query = query.bind(<$t>::clone(self)); 34 | query 35 | } 36 | } 37 | 38 | )* 39 | }; 40 | } 41 | 42 | #[macro_export] 43 | macro_rules! bindable_to_string { 44 | ($($t:ident),*) => { 45 | $( 46 | impl $crate::SqlxBindable for $t { 47 | fn bind_query<'q>(&self, query: sqlx::query::Query<'q, sqlx::Postgres, sqlx::postgres::PgArguments>) -> sqlx::query::Query<'q, sqlx::Postgres, sqlx::postgres::PgArguments> { 48 | let query = query.bind(self.to_string()); 49 | query 50 | } 51 | } 52 | 53 | impl $crate::SqlxBindable for &$t { 54 | fn bind_query<'q>(&self, query: sqlx::query::Query<'q, sqlx::Postgres, sqlx::postgres::PgArguments>) -> sqlx::query::Query<'q, sqlx::Postgres, sqlx::postgres::PgArguments> { 55 | let query = query.bind(self.to_string()); 56 | query 57 | } 58 | } 59 | )* 60 | }; 61 | } 62 | 63 | // Bind the string types 64 | bindable_to_string!(String, str); 65 | 66 | impl SqlxBindable for Option 67 | where 68 | T: SqlxBindable + Clone + Send, 69 | T: for<'r> sqlx::Encode<'r, sqlx::Postgres>, 70 | T: sqlx::Type, 71 | { 72 | fn bind_query<'q>( 73 | &'q self, 74 | query: sqlx::query::Query<'q, sqlx::Postgres, sqlx::postgres::PgArguments>, 75 | ) -> sqlx::query::Query<'q, sqlx::Postgres, sqlx::postgres::PgArguments> { 76 | let query = query.bind(self.clone()); 77 | query 78 | } 79 | } 80 | 81 | // Bind the boolean 82 | bindable!(bool); 83 | // Bind the numbers 84 | // NOTE: Skipping u8, u16, u64 since not mapped by sqlx to postgres. 
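// Editorial sketch (annotation): if an unsigned value needs to be bound, one option is
// to convert it to a supported signed type at the call site, e.g. a hypothetical
// field `("count", my_u32 as i64).into()`, since i64 is covered by the `bindable!` call below.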
85 | bindable!(i8, i16, i32, i64, f32, f64); 86 | 87 | bindable!(Uuid, OffsetDateTime); 88 | 89 | // region: --- Raw Value 90 | 91 | // region: --- chrono support 92 | #[cfg(feature = "chrono-support")] 93 | mod chrono_support { 94 | use chrono::{NaiveDateTime, NaiveDate, NaiveTime, DateTime, Utc}; 95 | 96 | bindable!(NaiveDateTime, NaiveDate, NaiveTime, DateTime); 97 | } 98 | // endregion: --- chrono support 99 | 100 | // region: --- json support 101 | #[cfg(feature = "json")] 102 | mod json { 103 | use serde_json::Value; 104 | 105 | bindable!(Value); 106 | } 107 | // endregion: --- json support 108 | 109 | // region: --- decimal support 110 | #[cfg(feature = "decimal")] 111 | mod decimal { 112 | use rust_decimal::Decimal; 113 | 114 | bindable!(Decimal); 115 | } 116 | // endregion: --- decimal support 117 | 118 | #[derive(Debug)] 119 | pub struct Raw(pub &'static str); 120 | 121 | impl SqlxBindable for Raw { 122 | // just return the query given, since no binding should be taken place 123 | fn bind_query<'q>( 124 | &self, 125 | query: sqlx::query::Query<'q, sqlx::Postgres, sqlx::postgres::PgArguments>, 126 | ) -> sqlx::query::Query<'q, sqlx::Postgres, sqlx::postgres::PgArguments> { 127 | query 128 | } 129 | 130 | fn raw(&self) -> Option<&str> { 131 | Some(self.0) 132 | } 133 | } 134 | // endregion: --- Raw Value 135 | 136 | #[cfg(test)] 137 | mod tests { 138 | use crate::Field; 139 | 140 | #[test] 141 | fn field_from_str() { 142 | let field = Field::from(("name1", "v2")); 143 | assert_eq!("name1", field.name); 144 | 145 | let field: Field = ("name1", "v2").into(); 146 | assert_eq!("name1", field.name); 147 | } 148 | 149 | #[test] 150 | fn field_from_string() { 151 | let field = Field::from(("name1", "v1")); 152 | assert_eq!("name1", field.name); 153 | 154 | let v2 = &"v2".to_string(); 155 | let field: Field = ("name2", v2).into(); 156 | assert_eq!("name2", field.name); 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /tests/test_macro.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use sqlb::HasFields; 3 | use sqlb_macros::Fields; 4 | 5 | #[test] 6 | pub fn test_macro_field_names() -> Result<()> { 7 | // -- Setup & Fixtures 8 | #[allow(unused)] 9 | #[derive(Debug, Fields)] 10 | struct Todo { 11 | id: i64, 12 | 13 | #[field(name = "description")] 14 | desc: Option, 15 | name: String, 16 | 17 | #[field(skip)] 18 | something_else: String, 19 | } 20 | let fx_desc = "desc 01"; 21 | 22 | // -- Exec 23 | let field_names = Todo::field_names(); 24 | 25 | // -- Check 26 | assert_eq!(field_names, &["id", "description", "name"]); 27 | 28 | // -- Exec & Check 29 | let todo = Todo { 30 | id: 123, 31 | desc: Some(fx_desc.to_string()), 32 | name: "name 01".to_string(), 33 | something_else: "something 01".to_string(), 34 | }; 35 | let fields = todo.all_fields(); 36 | assert_eq!("description", &fields[1].name); 37 | let val = format!("{:?}", &fields[1].value); 38 | assert_eq!(r#"Some("desc 01")"#, val); 39 | 40 | Ok(()) 41 | } 42 | 43 | #[test] 44 | pub fn test_macro_all_fields() -> Result<()> { 45 | // -- Setup & Fixtures 46 | #[allow(unused)] 47 | #[derive(Debug, Fields)] 48 | struct Todo { 49 | id: i64, 50 | 51 | #[field(name = "description")] 52 | desc: Option, 53 | name: String, 54 | 55 | #[field(skip)] 56 | something_else: String, 57 | } 58 | let fx_desc = "desc 01"; 59 | 60 | // -- Exec 61 | let todo = Todo { 62 | id: 123, 63 | desc: Some(fx_desc.to_string()), 64 | name: "name 
01".to_string(), 65 | something_else: "something 01".to_string(), 66 | }; 67 | let fields = todo.all_fields(); 68 | 69 | // -- Check 70 | assert_eq!("description", &fields[1].name); 71 | let val = format!("{:?}", &fields[1].value); 72 | assert_eq!(r#"Some("desc 01")"#, val); 73 | 74 | Ok(()) 75 | } 76 | 77 | // Note: Just a compile check. 78 | #[allow(unused)] 79 | #[test] 80 | pub fn test_custom_type_and_enum() -> Result<()> { 81 | use sqlb::bindable; 82 | 83 | #[derive(sqlb::Fields, Default, Clone)] 84 | pub struct SubscriptionPatch { 85 | pub client_id: Option, 86 | pub my_val: Option, 87 | } 88 | 89 | #[derive(sqlx::Type, Debug, Clone)] 90 | #[sqlx(type_name = "my_enum")] // must be defined in pg as a enum type. 91 | pub enum MyEnum { 92 | One, 93 | Two, 94 | TooBig, 95 | } 96 | sqlb::bindable!(MyEnum); 97 | 98 | #[derive(Debug, Clone, Copy, sqlx::Type)] 99 | #[sqlx(transparent)] 100 | pub struct OffsetDateTime(pub time::OffsetDateTime); 101 | pub struct ClientName(String); 102 | 103 | Ok(()) 104 | } 105 | 106 | // Note: Just a compile check. 107 | #[test] 108 | pub fn test_bindable_generic() -> Result<()> { 109 | #[derive(Debug, Clone, sqlx::Type)] 110 | #[sqlx(transparent)] 111 | pub struct ClientRef(T); 112 | 113 | sqlb::bindable!(ClientRef); 114 | 115 | Ok(()) 116 | } 117 | -------------------------------------------------------------------------------- /tests/test_rules.rs: -------------------------------------------------------------------------------- 1 | mod utils; 2 | 3 | use serial_test::serial; 4 | use sqlb::{bindable, Field}; 5 | use std::error::Error; 6 | use utils::init_db; 7 | 8 | // region: Custom Type (enum) 9 | #[derive(Eq, PartialEq, Hash, sqlx::Type, Debug, Clone)] 10 | #[sqlx(type_name = "todo_status_enum")] 11 | #[sqlx(rename_all = "lowercase")] 12 | pub enum TodoStatus { 13 | New, 14 | Open, 15 | Done, 16 | } 17 | 18 | // OR 19 | bindable!(TodoStatus); 20 | 21 | // NOTE: This test is just about passing the compile time. 
22 | #[serial] 23 | #[tokio::test] 24 | async fn test_rules_custom_enum() -> Result<(), Box> { 25 | let db_pool = init_db().await?; 26 | 27 | // CHECK that the SqlxBindable is implemented for TodoStatus 28 | let title_1 = "test - test_rules_custom_enum title".to_string(); 29 | let _data: Vec = vec![("title", title_1).into(), ("status", TodoStatus::Open).into()]; 30 | 31 | // CHECK that the TodoStatus has the appropriate types to pass sqlx binding (no sqlb::SqlxBindable at this stage) 32 | let query = sqlx::query::("INSERT INTO todo (title, status) VALUES ($1, $2)"); 33 | let query = query.bind("test sb_enum_insert 01"); 34 | let query = query.bind(TodoStatus::Done); 35 | let _r = query.execute(&db_pool).await?.rows_affected(); 36 | 37 | Ok(()) 38 | } 39 | -------------------------------------------------------------------------------- /tests/test_sb_delete.rs: -------------------------------------------------------------------------------- 1 | mod utils; 2 | 3 | use sqlb::{HasFields, SqlBuilder}; 4 | use std::error::Error; 5 | use utils::{init_db, util_fetch_all_todos, util_insert_todo}; 6 | 7 | use crate::utils::Todo; 8 | use serial_test::serial; 9 | 10 | #[serial] 11 | #[test] 12 | #[should_panic] 13 | fn sb_delete_err_all() { 14 | let sb = sqlb::delete().table("todo"); 15 | sb.sql(); 16 | // should panic 17 | } 18 | 19 | #[serial] 20 | #[test] 21 | fn sb_delete_ok_all() { 22 | let sb = sqlb::delete_all().table("todo"); 23 | sb.sql(); 24 | // should pass 25 | } 26 | 27 | #[serial] 28 | #[tokio::test] 29 | async fn sb_delete_ok_exec() -> Result<(), Box> { 30 | let db_pool = init_db().await?; 31 | 32 | // -- Fixtures 33 | let test_title_1 = "test - title 01"; 34 | let test_title_2 = "test - title 02"; 35 | let todo_id_1 = util_insert_todo(&db_pool, test_title_1).await?; 36 | let _ = util_insert_todo(&db_pool, test_title_2).await?; 37 | 38 | // -- Exec 39 | // Note: test schema and fully qualified column name. 
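// Editorial sketch (annotation, not asserted by this test): with the quoting rules in
// src/utils.rs, the builder below should produce SQL roughly like
//   DELETE FROM "public"."todo" WHERE "todo"."id" = $1
// (exact whitespace and clause layout depend on src/delete.rs).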
40 | let sb = sqlb::delete().table("public.todo").and_where("todo.id", "=", todo_id_1); 41 | let row_affected = sb.exec(&db_pool).await?; 42 | assert_eq!(1, row_affected, "row_affected"); 43 | 44 | // -- Check - if only one todo_1 was deleted 45 | let todos = util_fetch_all_todos(&db_pool).await?; 46 | assert_eq!(1, todos.len()); 47 | assert_eq!(test_title_2, todos[0].title); 48 | 49 | Ok(()) 50 | } 51 | 52 | #[serial] 53 | #[tokio::test] 54 | async fn sb_delete_ok_return_one() -> Result<(), Box> { 55 | let db_pool = init_db().await?; 56 | 57 | // -- Fixtures 58 | let test_title_1 = "test - title 01"; 59 | let test_title_2 = "test - title 02"; 60 | let todo_id_1 = util_insert_todo(&db_pool, test_title_1).await?; 61 | let _ = util_insert_todo(&db_pool, test_title_2).await?; 62 | 63 | // -- Exec 64 | let sb = sqlb::delete().table("todo").and_where("id", "=", todo_id_1); 65 | let sb = sb.returning(&["id", "title"]); 66 | let (deleted_todo_1_id, deleted_todo_1_title) = sb.fetch_one::<_, (i64, String)>(&db_pool).await?; 67 | 68 | // -- Check - deleted returns 69 | assert_eq!(test_title_1, deleted_todo_1_title); 70 | assert_eq!(todo_id_1, deleted_todo_1_id); 71 | 72 | // -- Check - check with fetch all 73 | let todos = util_fetch_all_todos(&db_pool).await?; 74 | assert_eq!(1, todos.len()); 75 | assert_eq!(test_title_2, todos[0].title); 76 | 77 | Ok(()) 78 | } 79 | 80 | #[serial] 81 | #[tokio::test] 82 | async fn sb_delete_ok_return_many() -> Result<(), Box> { 83 | let db_pool = init_db().await?; 84 | 85 | // -- Fixtures 86 | let test_title_1 = "test - title 01"; 87 | let test_title_2 = "test - title 02"; 88 | let todo_id_1 = util_insert_todo(&db_pool, test_title_1).await?; 89 | let todo_id_2 = util_insert_todo(&db_pool, test_title_2).await?; 90 | 91 | // -- Exec 92 | let sb = sqlb::delete().table("todo").and_where("id", ">", 0); 93 | let sb = sb.returning(Todo::field_names()); 94 | 95 | let deleted: Vec = sb.fetch_all(&db_pool).await?; 96 | 97 | // -- Check - deleted returns 98 | assert_eq!(2, deleted.len()); 99 | assert_eq!(todo_id_1, deleted[0].id); 100 | assert_eq!(test_title_1, deleted[0].title); 101 | assert_eq!(todo_id_2, deleted[1].id); 102 | assert_eq!(test_title_2, deleted[1].title); 103 | 104 | // -- Check - empty table 105 | let todos = util_fetch_all_todos(&db_pool).await?; 106 | assert_eq!(0, todos.len()); 107 | 108 | Ok(()) 109 | } 110 | -------------------------------------------------------------------------------- /tests/test_sb_enum.rs: -------------------------------------------------------------------------------- 1 | mod utils; 2 | 3 | use serial_test::serial; 4 | use sqlb::{Field, SqlxBindable}; 5 | use sqlx::{postgres::PgArguments, query::Query, Postgres}; 6 | use std::error::Error; 7 | use utils::init_db; 8 | 9 | // region: Custom Type (enum) 10 | #[derive(Eq, PartialEq, Hash, sqlx::Type, Debug, Clone)] 11 | #[sqlx(type_name = "todo_status_enum")] 12 | #[sqlx(rename_all = "lowercase")] 13 | pub enum TodoStatus { 14 | New, 15 | Open, 16 | Done, 17 | } 18 | 19 | // NOTE: manual implementation, see test_rules for the macros alternative. 
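// Editorial note (annotation): the macro alternative referenced above is a one-liner,
// `sqlb::bindable!(TodoStatus);`, which generates an equivalent impl (see src/val.rs
// and tests/test_rules.rs).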
20 | impl SqlxBindable for TodoStatus { 21 | fn bind_query<'q>(&self, query: Query<'q, Postgres, PgArguments>) -> Query<'q, sqlx::Postgres, PgArguments> { 22 | let query = query.bind(self.clone()); 23 | query 24 | } 25 | } 26 | 27 | // endregion: Custom Type (enum) 28 | 29 | // This is to test that the type above was undestood by Sqlx 30 | #[serial] 31 | #[tokio::test] 32 | async fn sb_enum_direct_sqlx_ok() -> Result<(), Box> { 33 | let db_pool = init_db().await?; 34 | 35 | let query = sqlx::query::("INSERT INTO todo (title, status) VALUES ($1, $2)"); 36 | let query = query.bind("test sb_enum_direct_sqlx_ok 01"); 37 | let query = query.bind(TodoStatus::Done); 38 | let _r = query.execute(&db_pool).await?.rows_affected(); 39 | 40 | Ok(()) 41 | } 42 | 43 | #[serial] 44 | #[tokio::test] 45 | async fn sb_enum_direct_sqlb_field_ok() -> Result<(), Box> { 46 | let db_pool = init_db().await?; 47 | 48 | // fixtures 49 | let title_1 = "test - sb_enum_direct_sqlb_field_ok"; 50 | let status_1 = TodoStatus::Open; 51 | 52 | // DO the insert 53 | let data: Vec = vec![("title", title_1).into(), ("status", TodoStatus::Open).into()]; 54 | let sb = sqlb::insert().table("todo").data(data).returning(&["id", "title", "status"]); 55 | let (id_1, title, status) = sb.fetch_one::<_, (i64, String, TodoStatus)>(&db_pool).await?; 56 | 57 | // CHECK the insert 58 | assert_eq!(title_1, title); 59 | assert_eq!(status_1, status); 60 | 61 | // DO the select 62 | let sb = sqlb::select() 63 | .table("todo") 64 | .columns(&["id", "title", "status"]) 65 | .and_where_eq("id", id_1); 66 | let (id, title, status) = sb.fetch_one::<_, (i64, String, TodoStatus)>(&db_pool).await?; 67 | 68 | // CHECK the insert 69 | assert_eq!(id_1, id); 70 | assert_eq!(title_1, title); 71 | assert_eq!(status_1, status); 72 | 73 | Ok(()) 74 | } 75 | 76 | #[serial] 77 | #[tokio::test] 78 | async fn sb_enum_option_sqlx_ok() -> Result<(), Box> { 79 | let db_pool = init_db().await?; 80 | 81 | let status = Some(TodoStatus::Done); 82 | let query = sqlx::query::("INSERT INTO todo (title, status) VALUES ($1, $2)"); 83 | let query = query.bind("test sb_enum_option_sqlx_ok 01"); 84 | let query = query.bind(status); 85 | let _r = query.execute(&db_pool).await?.rows_affected(); 86 | 87 | Ok(()) 88 | } 89 | 90 | #[serial] 91 | #[tokio::test] 92 | async fn sb_enum_option_sqlb_field_ok() -> Result<(), Box> { 93 | let db_pool = init_db().await?; 94 | 95 | // fixtures 96 | let title_1 = "test - sb_enum_option_sqlb_field_ok"; 97 | let status_1 = TodoStatus::Open; 98 | 99 | // DO the insert 100 | let status = Some(TodoStatus::Open); 101 | let data: Vec = vec![("title", title_1).into(), ("status", status).into()]; 102 | let sb = sqlb::insert().table("todo").data(data).returning(&["id", "title", "status"]); 103 | let (id_1, title, status) = sb.fetch_one::<_, (i64, String, TodoStatus)>(&db_pool).await?; 104 | 105 | // CHECK the insert 106 | assert_eq!(title_1, title); 107 | assert_eq!(status_1, status); 108 | 109 | // DO the select 110 | let sb = sqlb::select() 111 | .table("todo") 112 | .columns(&["id", "title", "status"]) 113 | .and_where_eq("id", id_1); 114 | let (id, title, status) = sb.fetch_one::<_, (i64, String, TodoStatus)>(&db_pool).await?; 115 | 116 | // CHECK the insert 117 | assert_eq!(id_1, id); 118 | assert_eq!(title_1, title); 119 | assert_eq!(status_1, status); 120 | 121 | Ok(()) 122 | } 123 | -------------------------------------------------------------------------------- /tests/test_sb_insert.rs: 
-------------------------------------------------------------------------------- 1 | mod utils; 2 | 3 | use crate::utils::Todo; 4 | use serial_test::serial; 5 | use sqlb::{Field, HasFields, Raw}; 6 | use std::error::Error; 7 | use utils::{init_db, util_fetch_all_todos, TodoPatch}; 8 | 9 | #[serial] 10 | #[tokio::test] 11 | async fn sb_insert_ok_simple() -> Result<(), Box> { 12 | let db_pool = init_db().await?; 13 | 14 | // -- Fixtures 15 | let test_title = "test - title 01"; 16 | let patch_data = TodoPatch { 17 | title: Some(test_title.to_string()), 18 | desc: None, 19 | }; 20 | 21 | // -- Exec 22 | // Note: test schema and fully qualified column name. 23 | let sb = sqlb::insert().table("public.todo").data(patch_data.not_none_fields()); 24 | let sb = sb.returning(&["todo.id", "public.todo.title"]); 25 | let (_id, title) = sb.fetch_one::<_, (i64, String)>(&db_pool).await?; 26 | assert_eq!(test_title, title); 27 | 28 | // -- Check 29 | let todos = util_fetch_all_todos(&db_pool).await?; 30 | assert_eq!(1, todos.len()); 31 | assert_eq!(test_title, todos[0].title); 32 | 33 | Ok(()) 34 | } 35 | 36 | #[serial] 37 | #[tokio::test] 38 | async fn sb_insert_ok_renamed_field() -> Result<(), Box> { 39 | // -- Setup & Fixtures 40 | let db_pool = init_db().await?; 41 | let fx_title = "test - title 01"; 42 | let fx_desc = "test - desc 01"; 43 | 44 | let patch_data = TodoPatch { 45 | title: Some(fx_title.to_string()), 46 | desc: Some(fx_desc.to_string()), 47 | }; 48 | 49 | // -- Exec 50 | let sb = sqlb::insert().table("todo").data(patch_data.all_fields()); 51 | let sb = sb.returning(&["id", "title", "description"]); 52 | let (_id, title, desc) = sb.fetch_one::<_, (i64, String, String)>(&db_pool).await?; 53 | 54 | // -- Check 55 | assert_eq!(fx_title, title); 56 | assert_eq!(fx_desc, desc); 57 | 58 | Ok(()) 59 | } 60 | 61 | #[serial] 62 | #[tokio::test] 63 | async fn sb_insert_ok_renamed_field_and_get() -> Result<(), Box> { 64 | // -- Setup & Fixtures 65 | let db_pool = init_db().await?; 66 | let fx_title = "test - title 01"; 67 | let fx_desc = "test - desc 01"; 68 | 69 | let patch_data = TodoPatch { 70 | title: Some(fx_title.to_string()), 71 | desc: Some(fx_desc.to_string()), 72 | }; 73 | 74 | // -- Exec 75 | let sb = sqlb::insert().table("todo").data(patch_data.all_fields()); 76 | let sb = sb.returning(&["id"]); 77 | let (id,): (i64,) = sb.fetch_one(&db_pool).await?; 78 | let todo: Todo = sqlb::select() 79 | .table("todo") 80 | .and_where("id", "=", id) 81 | .fetch_one(&db_pool) 82 | .await?; 83 | assert_eq!(todo.desc, Some(fx_desc.to_string())); 84 | 85 | // -- Check 86 | // assert_eq!(fx_title, title); 87 | 88 | Ok(()) 89 | } 90 | 91 | #[serial] 92 | #[tokio::test] 93 | async fn sb_insert_ok_raw() -> Result<(), Box> { 94 | let db_pool = init_db().await?; 95 | 96 | // FIXTURE 97 | let test_title = "test - title 02"; 98 | 99 | // ACTION 100 | let fields: Vec = vec![("ctime", Raw("now()")).into(), ("title", test_title.to_string()).into()]; 101 | 102 | let sb = sqlb::insert().table("todo").data(fields); 103 | let sb = sb.returning(&["id", "title", "ctime"]); 104 | let (_id, title) = sb.fetch_one::<_, (i64, String)>(&db_pool).await?; 105 | assert_eq!(test_title, title); 106 | 107 | // CHECK 108 | let todos = util_fetch_all_todos(&db_pool).await?; 109 | assert_eq!(1, todos.len()); 110 | assert_eq!(test_title, todos[0].title); 111 | 112 | Ok(()) 113 | } 114 | -------------------------------------------------------------------------------- /tests/test_sb_macro.rs: 
-------------------------------------------------------------------------------- 1 | mod utils; 2 | 3 | use serial_test::serial; 4 | use sqlb::sqlx_exec::fetch_as_one; 5 | use sqlb::{Fields, HasFields}; 6 | use utils::init_db; 7 | 8 | #[serial] 9 | #[tokio::test] 10 | async fn sb_macro_ok_insert_full() -> Result<(), Box> { 11 | let db_pool = init_db().await?; 12 | 13 | // FIXTURES 14 | let fix_title = "sb_macro_insert_full title".to_string(); 15 | let fix_desc = "sb_macro_insert_full desc".to_string(); 16 | let todo = TodoCreate { 17 | title: fix_title.clone(), 18 | desc: Some(fix_desc.clone()), 19 | }; 20 | 21 | // DO insert 22 | let sb = sqlb::insert().table("todo").data(todo.not_none_fields()); 23 | let sb = sb.returning(&["id", "title", "desc"]); 24 | let (_id, title, desc) = fetch_as_one::<_, (i64, String, String), _>(&db_pool, &sb).await?; 25 | 26 | // CHECK title and desc 27 | assert_eq!(&fix_title, &title); 28 | assert_eq!(&fix_desc, &desc); 29 | 30 | Ok(()) 31 | } 32 | 33 | #[serial] 34 | #[tokio::test] 35 | async fn sb_macro_ok_insert_partial() -> Result<(), Box> { 36 | let db_pool = init_db().await?; 37 | 38 | // FIXTURES 39 | let fix_title = "sb_macro_insert_partial title".to_string(); 40 | let fix_desc: Option = None; 41 | let todo = TodoCreate { 42 | title: fix_title.clone(), 43 | desc: fix_desc.clone(), 44 | }; 45 | 46 | // DO insert 47 | let sb = sqlb::insert().table("todo").data(todo.not_none_fields()); 48 | let sb = sb.returning(&["id", "title", "desc"]); 49 | let (_id, title, _) = fetch_as_one::<_, (i64, String, Option), _>(&db_pool, &sb).await?; 50 | 51 | // CHECK title and desc 52 | assert_eq!(&fix_title, &title); 53 | assert_eq!(&fix_desc, &None); 54 | 55 | Ok(()) 56 | } 57 | 58 | #[derive(Fields)] 59 | struct TodoCreate { 60 | title: String, 61 | desc: Option, // TODO: Need to handle Option 62 | } 63 | -------------------------------------------------------------------------------- /tests/test_sb_others.rs: -------------------------------------------------------------------------------- 1 | mod utils; 2 | 3 | use serial_test::serial; 4 | use sqlb::sqlx_exec; 5 | use std::error::Error; 6 | use utils::init_db; 7 | 8 | #[serial] 9 | #[tokio::test] 10 | async fn sb_transaction_ok_simple() -> Result<(), Box> { 11 | let db_pool = init_db().await?; 12 | 13 | // -- Fixtures 14 | let test_title_1 = "test sb_transaction_ok_simple title 01"; 15 | 16 | // -- Exec 17 | let fields = vec![("title", test_title_1).into()]; 18 | let sb = sqlb::insert().table("todo").data(fields); 19 | let mut db_tx = db_pool.begin().await?; 20 | let row_affected = sqlx_exec::exec(&mut *db_tx, &sb).await?; 21 | 22 | // -- Check 23 | assert_eq!(1, row_affected, "row_affected"); 24 | 25 | // NOTE: Assume if this works, the were tx could fail would be more on the sqlx side. 26 | 27 | Ok(()) 28 | } 29 | 30 | #[serial] 31 | #[tokio::test] 32 | async fn sb_schema_ok_simple() -> Result<(), Box> { 33 | let db_pool = init_db().await?; 34 | 35 | // -- Fixtures 36 | let test_title_1 = "test sb_schema_ok_simple title 01"; 37 | 38 | // -- Exec 39 | let fields = vec![("title", test_title_1).into()]; 40 | let sb = sqlb::insert().table("public.todo").data(fields); 41 | let row_affected = sqlx_exec::exec(&db_pool, &sb).await?; 42 | 43 | // -- Check 44 | assert_eq!(1, row_affected, "row_affected"); 45 | 46 | // NOTE: Assume if this works, the were tx could fail would be more on the sqlx side. 
47 | 48 | Ok(()) 49 | } 50 | -------------------------------------------------------------------------------- /tests/test_sb_select.rs: -------------------------------------------------------------------------------- 1 | mod utils; 2 | 3 | use crate::utils::{util_insert_many_todos, Todo}; 4 | use serial_test::serial; 5 | use std::error::Error; 6 | use utils::init_db; 7 | 8 | #[serial] 9 | #[tokio::test] 10 | async fn sb_select_ok_simple() -> Result<(), Box> { 11 | // -- Setup & Fixtures 12 | let db_pool = init_db().await?; 13 | let fx_title_prefix = "test_sb_select_ok_simple"; 14 | let fx_ids = util_insert_many_todos(&db_pool, fx_title_prefix, 5).await?; 15 | 16 | // -- Exec 17 | // Note: Check schema as well. 18 | let todos: Vec = sqlb::select().table("public.todo").fetch_all(&db_pool).await?; 19 | let todos: Vec = todos.into_iter().filter(|t| t.title.starts_with(fx_title_prefix)).collect(); 20 | 21 | // -- Check 22 | // Todo: Needs to do a more complete check 23 | assert_eq!(todos.len(), 5, "number of todos"); 24 | 25 | // -- Clean 26 | for id in fx_ids { 27 | sqlb::delete().table("todo").and_where_eq("id", id).exec(&db_pool).await?; 28 | } 29 | 30 | Ok(()) 31 | } 32 | 33 | #[serial] 34 | #[tokio::test] 35 | async fn sb_select_ok_limit_offset() -> Result<(), Box> { 36 | // -- Setup & Fixtures 37 | let db_pool = init_db().await?; 38 | let fx_title_prefix = "sb_select_ok_limit_offset"; 39 | let fx_ids = util_insert_many_todos(&db_pool, fx_title_prefix, 5).await?; 40 | 41 | // -- Exec 42 | let todos: Vec = sqlb::select().table("todo").limit(3).offset(2).fetch_all(&db_pool).await?; 43 | let todos: Vec = todos.into_iter().filter(|t| t.title.starts_with(fx_title_prefix)).collect(); 44 | 45 | // -- Check 46 | assert_eq!(todos.len(), 3, "number of todos"); 47 | let todo_02 = todos.get(0).unwrap(); 48 | assert_eq!(todo_02.title, "sb_select_ok_limit_offset-02"); 49 | 50 | // -- Clean 51 | for id in fx_ids { 52 | sqlb::delete().table("todo").and_where_eq("id", id).exec(&db_pool).await?; 53 | } 54 | 55 | Ok(()) 56 | } 57 | 58 | #[serial] 59 | #[tokio::test] 60 | async fn sb_select_ok_limit_only() -> Result<(), Box> { 61 | // -- Setup & Fixtures 62 | let db_pool = init_db().await?; 63 | let fx_title_prefix = "sb_select_ok_limit_only"; 64 | let fx_ids = util_insert_many_todos(&db_pool, fx_title_prefix, 5).await?; 65 | 66 | // -- Exec 67 | // Note: Check schema as well and fully qualified column name. 
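// Editorial sketch (annotation, not asserted by this test): with the quoting rules in
// src/utils.rs, the select below should produce SQL roughly like
//   SELECT "id", "todo"."title", "public"."todo"."description" FROM "public"."todo" LIMIT 3
// (exact whitespace depends on src/select.rs).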
68 | let todos: Vec = sqlb::select() 69 | .table("public.todo") 70 | .columns(&["id", "todo.title", "public.todo.description"]) 71 | .limit(3) 72 | .fetch_all(&db_pool) 73 | .await?; 74 | let todos: Vec = todos.into_iter().filter(|t| t.title.starts_with(fx_title_prefix)).collect(); 75 | 76 | // -- Check 77 | assert_eq!(todos.len(), 3, "number of todos"); 78 | 79 | // -- Clean 80 | for id in fx_ids { 81 | sqlb::delete().table("todo").and_where_eq("id", id).exec(&db_pool).await?; 82 | } 83 | 84 | Ok(()) 85 | } 86 | 87 | #[serial] 88 | #[tokio::test] 89 | async fn sb_select_ok_count() -> Result<(), Box> { 90 | // -- Setup & Fixtures 91 | let db_pool = init_db().await?; 92 | let fx_title_prefix = "sb_select_ok_count"; 93 | let fx_ids = util_insert_many_todos(&db_pool, fx_title_prefix, 5).await?; 94 | 95 | // -- Exec 96 | let (count,): (i64,) = sqlb::select().table("todo").columns(&["count(*)"]).fetch_one(&db_pool).await?; 97 | 98 | // -- Check 99 | assert_eq!(count, 5, "number of todos"); 100 | 101 | // -- Clean 102 | for id in fx_ids { 103 | sqlb::delete().table("todo").and_where_eq("id", id).exec(&db_pool).await?; 104 | } 105 | 106 | Ok(()) 107 | } 108 | -------------------------------------------------------------------------------- /tests/test_sb_update.rs: -------------------------------------------------------------------------------- 1 | mod utils; 2 | 3 | use sqlb::{Field, Raw, SqlBuilder}; 4 | use sqlx::types::time::OffsetDateTime; 5 | use std::error::Error; 6 | use utils::{init_db, util_fetch_all_todos, util_insert_todo}; 7 | 8 | use crate::utils::util_fetch_todo; 9 | use serial_test::serial; 10 | 11 | #[test] 12 | #[should_panic] 13 | fn sb_update_err_all_sql_panic() { 14 | let sb = sqlb::update().table("todo"); 15 | sb.sql(); 16 | // should panic 17 | } 18 | 19 | #[test] 20 | fn sb_update_ok_all_sql() { 21 | let sb = sqlb::update_all().table("todo"); 22 | sb.sql(); 23 | // should pass 24 | } 25 | 26 | #[serial] 27 | #[tokio::test] 28 | async fn sb_update_ok_exec_all() -> Result<(), Box> { 29 | let db_pool = init_db().await?; 30 | 31 | // -- Fixtures 32 | let test_title_1 = "test - title 01"; 33 | let test_title_2 = "test - title 02"; 34 | let _todo_id_1 = util_insert_todo(&db_pool, test_title_1).await?; 35 | let _todo_id_2 = util_insert_todo(&db_pool, test_title_2).await?; 36 | 37 | // -- Exec 38 | // Note: Check schema as well. 
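// Editorial sketch (annotation, not asserted by this test): per src/update.rs, the
// update_all below should produce SQL roughly like
//   UPDATE "public"."todo" SET "title" = $1
// with no WHERE clause, since `update_all` disables the guard that otherwise panics
// when no where-clause is set.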
39 | let test_title_for_all = "test - new title for all"; 40 | let fields = vec![("title", test_title_for_all).into()]; 41 | let sb = sqlb::update_all().table("public.todo").data(fields); 42 | // let row_affected = sqlx_exec::exec(&db_pool, &sb).await?; 43 | let row_affected = sb.exec(&db_pool).await?; 44 | assert_eq!(2, row_affected, "row_affected"); 45 | 46 | // -- Check 47 | let todos = util_fetch_all_todos(&db_pool).await?; 48 | assert_eq!(2, todos.len()); 49 | assert_eq!(test_title_for_all, todos[0].title, "todo.tile"); 50 | assert_eq!(test_title_for_all, todos[1].title, "todo.tile"); 51 | 52 | Ok(()) 53 | } 54 | 55 | #[serial] 56 | #[tokio::test] 57 | async fn sb_update_exec_ok_with_where_single() -> Result<(), Box> { 58 | let db_pool = init_db().await?; 59 | 60 | // -- Fixtures 61 | let test_title_1 = "test - title 01"; 62 | let test_title_2 = "test - title 02"; 63 | let todo_id_1 = util_insert_todo(&db_pool, test_title_1).await?; 64 | let todo_id_2 = util_insert_todo(&db_pool, test_title_2).await?; 65 | 66 | // -- Exec 67 | let test_title_for_all = "test - new title"; 68 | let fields = vec![("title", test_title_for_all).into()]; 69 | let sb = sqlb::update().table("todo").data(fields).and_where_eq("id", todo_id_1); 70 | 71 | let row_affected = sb.exec(&db_pool).await?; 72 | assert_eq!(1, row_affected, "row_affected"); 73 | 74 | // -- Check - todo_1 75 | let todo = util_fetch_todo(&db_pool, todo_id_1).await?; 76 | assert_eq!(test_title_for_all, todo.title, "todo_1.tile"); 77 | 78 | // -- Check - todo_2 79 | let todo = util_fetch_todo(&db_pool, todo_id_2).await?; 80 | assert_eq!(test_title_2, todo.title, "todo_1.tile"); 81 | 82 | Ok(()) 83 | } 84 | 85 | #[serial] 86 | #[tokio::test] 87 | async fn sb_update_exec_ok_with_where_many() -> Result<(), Box> { 88 | let db_pool = init_db().await?; 89 | 90 | // -- Fixtures 91 | let test_title_1 = "test - title 01"; 92 | let test_title_2 = "test - title 02"; 93 | let todo_id_1 = util_insert_todo(&db_pool, test_title_1).await?; 94 | let todo_id_2 = util_insert_todo(&db_pool, test_title_2).await?; 95 | 96 | // -- Exec 97 | let test_title_for_all = "test - new title"; 98 | let fields = vec![("title", test_title_for_all).into()]; 99 | let sb = sqlb::update() 100 | .table("todo") 101 | .data(fields) 102 | .and_where("id", "=", todo_id_1) 103 | .and_where("title", "=", test_title_1); 104 | let row_affected = sb.exec(&db_pool).await?; 105 | assert_eq!(1, row_affected, "row_affected"); 106 | 107 | // -- Check - todo_1 108 | let todo = util_fetch_todo(&db_pool, todo_id_1).await?; 109 | assert_eq!(test_title_for_all, todo.title, "todo_1.tile"); 110 | 111 | // -- Check - todo_2 112 | let todo = util_fetch_todo(&db_pool, todo_id_2).await?; 113 | assert_eq!(test_title_2, todo.title, "todo_1.tile"); 114 | 115 | Ok(()) 116 | } 117 | 118 | #[serial] 119 | #[tokio::test] 120 | async fn sb_update_ok_returning() -> Result<(), Box> { 121 | let db_pool = init_db().await?; 122 | 123 | // -- Fixtures 124 | let test_title_1 = "test - title 01"; 125 | let test_title_2 = "test - title 02"; 126 | let todo_id_1 = util_insert_todo(&db_pool, test_title_1).await?; 127 | let todo_id_2 = util_insert_todo(&db_pool, test_title_2).await?; 128 | 129 | // -- Exec 130 | let test_title_new = "test - new title"; 131 | let fields = vec![("title", test_title_new).into()]; 132 | let sb = sqlb::update().table("todo").data(fields).and_where("id", "=", todo_id_1); 133 | let sb = sb.returning(&["id", "title"]); 134 | let (returned_todo_1_id, returned_todo_1_title) = sb.fetch_one::<_, (i64, 
String)>(&db_pool).await?; 135 | 136 | // -- Check - return values 137 | assert_eq!(todo_id_1, returned_todo_1_id); 138 | assert_eq!(test_title_new, returned_todo_1_title); 139 | 140 | // -- Check - todo_2 141 | let todo = util_fetch_todo(&db_pool, todo_id_2).await?; 142 | assert_eq!(test_title_2, todo.title, "todo_1.tile"); 143 | 144 | Ok(()) 145 | } 146 | 147 | #[serial] 148 | #[tokio::test] 149 | async fn sb_update_ok_raw() -> Result<(), Box> { 150 | let db_pool = init_db().await?; 151 | 152 | // -- Fixtures 153 | let todo_id_1 = util_insert_todo(&db_pool, "test_title_1").await?; 154 | let test_title_new = "test - new title"; 155 | 156 | // -- Exec 157 | let fields: Vec = vec![("title", test_title_new).into(), ("ctime", Raw("now()")).into()]; 158 | let sb = sqlb::update().table("todo").data(fields).and_where_eq("id", todo_id_1); 159 | let sb = sb.returning(&["id", "title", "ctime"]); 160 | let (id, title, _ctime) = sb.fetch_one::<_, (i64, String, OffsetDateTime)>(&db_pool).await?; 161 | 162 | // -- Check 163 | assert_eq!(test_title_new, title); 164 | assert_eq!(todo_id_1, id); 165 | 166 | Ok(()) 167 | } 168 | -------------------------------------------------------------------------------- /tests/test_sqlx.rs: -------------------------------------------------------------------------------- 1 | //! Mostly for dev sqlx API validation. 2 | 3 | #![allow(unused)] // For early development. 4 | 5 | mod utils; 6 | 7 | use crate::utils::init_db; 8 | use serial_test::serial; 9 | use std::error::Error; 10 | 11 | // #[serial] 12 | // #[tokio::test] 13 | async fn test_sqlx_insert_todo() -> Result<(), Box> { 14 | let db = init_db().await?; 15 | let fx_title = "test_sqlx_insert_todo - title".to_string(); 16 | let fx_desc: Option = None; 17 | 18 | let (id,) = sqlx::query_as::<_, (i64,)>(r#"INSERT INTO todo (title, "desc") values ($1, $2) returning id"#) 19 | .bind(fx_title) 20 | .bind(fx_desc) 21 | .fetch_one(&db) 22 | .await?; 23 | 24 | Ok(()) 25 | } 26 | -------------------------------------------------------------------------------- /tests/utils/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] 2 | 3 | use anyhow::Result; 4 | use sqlb::{Field, HasFields}; 5 | use sqlb_macros::Fields; 6 | use sqlx::{postgres::PgPoolOptions, Pool, Postgres}; 7 | 8 | // region: Test Types 9 | #[derive(Debug, sqlx::FromRow, Fields)] 10 | pub struct Todo { 11 | pub id: i64, 12 | pub title: String, 13 | #[sqlx(rename = "description")] 14 | #[field(name = "description")] 15 | pub desc: Option, 16 | } 17 | 18 | #[derive(Debug, sqlb::Fields)] 19 | pub struct TodoPatch { 20 | pub title: Option, 21 | #[field(name = "description")] 22 | pub desc: Option, 23 | } 24 | 25 | // -- Manual implementation 26 | // impl HasFields for TodoPatch { 27 | // fn not_none_fields<'a>(self) -> Vec> { 28 | // let mut fields = Vec::new(); 29 | // if let Some(title) = self.title { 30 | // fields.push(("title", title).into()); 31 | // } 32 | // fields 33 | // } 34 | 35 | // #[allow(clippy::vec_init_then_push)] 36 | // fn all_fields<'a>(self) -> Vec> { 37 | // let mut fields: Vec = Vec::new(); 38 | // fields.push(("title", self.title).into()); 39 | // fields 40 | // } 41 | 42 | // fn field_names() -> &'static [&'static str] { 43 | // &["title"] 44 | // } 45 | // } 46 | 47 | // endregion: Test Types 48 | 49 | // region: Test Seed Utils 50 | 51 | pub async fn util_insert_many_todos(db_pool: &Pool, title_prefix: &str, count: i32) -> Result> { 52 | let mut ids = Vec::new(); 53 | 54 | for idx in 
0..count { 55 | let title = format!("{title_prefix}-{:0>2}", idx); 56 | let id = util_insert_todo(db_pool, &title).await?; 57 | ids.push(id); 58 | } 59 | 60 | Ok(ids) 61 | } 62 | 63 | pub async fn util_insert_todos(db_pool: &Pool, titles: &[&str]) -> Result> { 64 | let mut ids = Vec::new(); 65 | 66 | for title in titles { 67 | let id = util_insert_todo(db_pool, title).await?; 68 | ids.push(id); 69 | } 70 | 71 | Ok(ids) 72 | } 73 | 74 | pub async fn util_insert_todo(db_pool: &Pool, title: &str) -> Result { 75 | let patch_data = TodoPatch { 76 | title: Some(title.to_string()), 77 | desc: None, 78 | }; 79 | 80 | let sb = sqlb::insert().table("todo").data(patch_data.not_none_fields()); 81 | let sb = sb.returning(&["id"]); 82 | let (id,) = sb.fetch_one::<_, (i64,)>(db_pool).await?; 83 | 84 | Ok(id) 85 | } 86 | 87 | pub async fn util_fetch_all_todos(db_pool: &Pool) -> Result> { 88 | let sb = sqlb::select() 89 | .table("todo") 90 | .columns(&["id", "title", "description"]) 91 | .order_by("!id"); 92 | let todos = sb.fetch_all::<_, Todo>(db_pool).await?; 93 | Ok(todos) 94 | } 95 | 96 | pub async fn util_fetch_todo(db_pool: &Pool, id: i64) -> Result { 97 | let sb = sqlb::select() 98 | .table("todo") 99 | .columns(&["id", "title", "description"]) 100 | .and_where("id", "=", id); 101 | let todos = sb.fetch_one::<_, Todo>(db_pool).await?; 102 | Ok(todos) 103 | } 104 | // endregion: Test Seed Utils 105 | 106 | // region: Test Utils 107 | pub async fn init_db() -> Result, sqlx::Error> { 108 | let pool = PgPoolOptions::new() 109 | .max_connections(5) 110 | .connect("postgres://postgres:welcome@localhost/postgres") 111 | .await?; 112 | 113 | sqlx::query("DROP TABLE IF EXISTS todo").execute(&pool).await?; 114 | 115 | // create todo status 116 | if let Err(ex) = sqlx::query("DROP TYPE todo_status_enum").execute(&pool).await { 117 | println!("Warning - {}", ex); 118 | } 119 | if let Err(ex) = sqlx::query( 120 | r#" 121 | CREATE TYPE todo_status_enum AS ENUM ( 122 | 'new', 123 | 'open', 124 | 'done' 125 | ); 126 | "#, 127 | ) 128 | .execute(&pool) 129 | .await 130 | { 131 | println!("ERROR CREATE TYPE todo_status_enum - {}", ex); 132 | } 133 | 134 | // Create todo table 135 | 136 | sqlx::query( 137 | r#" 138 | CREATE TABLE IF NOT EXISTS todo ( 139 | id bigserial, 140 | title text, 141 | description text, 142 | ctime timestamp with time zone, 143 | "desc" text, 144 | status todo_status_enum 145 | );"#, 146 | ) 147 | .execute(&pool) 148 | .await?; 149 | 150 | // Create project table 151 | sqlx::query("DROP TABLE IF EXISTS projects").execute(&pool).await?; 152 | sqlx::query( 153 | r#" 154 | CREATE TABLE IF NOT EXISTS project ( 155 | id bigserial, 156 | name text 157 | );"#, 158 | ) 159 | .execute(&pool) 160 | .await?; 161 | 162 | Ok(pool) 163 | } 164 | 165 | // endregion: Test Utils 166 | --------------------------------------------------------------------------------