├── .github └── workflows │ └── rust.yml ├── .gitignore ├── .travis.yml ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── postgres_query ├── Cargo.toml ├── examples │ └── basic.rs ├── src │ ├── client.rs │ ├── client │ │ └── cache.rs │ ├── error.rs │ ├── execute.rs │ ├── extract.rs │ ├── lib.rs │ └── parse.rs └── tests │ ├── deadpool.rs │ ├── execute.rs │ └── query_macro.rs └── postgres_query_macro ├── Cargo.toml └── src ├── from_sql_row.rs ├── from_sql_row ├── attrs.rs ├── partition.rs └── validate.rs ├── lib.rs ├── macros.rs └── query.rs /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | services: 18 | postgres: 19 | image: postgres:12.4-alpine 20 | env: 21 | POSTGRES_DB: test-postgres-query 22 | POSTGRES_PASSWORD: 2UtcMAAvNuBHPqK9 23 | ports: 24 | - 5432:5432 25 | options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 26 | 27 | steps: 28 | - uses: actions/checkout@v2 29 | - name: Build 30 | run: cargo build --verbose 31 | - name: Run tests 32 | run: cargo test --verbose --workspace 33 | env: 34 | POSTGRES_DB_CONFIG: postgresql://postgres:2UtcMAAvNuBHPqK9@localhost:5432/test-postgres-query 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | 4 | 5 | #Added by cargo 6 | # 7 | #already existing elements are commented out 8 | 9 | #/target 10 | #**/*.rs.bk 11 | Cargo.lock 12 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | services: 2 | - postgresql 3 | 
before_script: 4 | - psql -c 'create user postgres_query_test;' -U postgres 5 | - psql -c 'create database postgres_query_test;' -U postgres 6 | language: rust 7 | rust: 8 | - stable 9 | - beta 10 | - nightly 11 | jobs: 12 | allow_failures: 13 | - rust: nightly 14 | 15 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "postgres_query", 4 | "postgres_query_macro", 5 | ] 6 | 7 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # postgres-query 3 | 4 | [![Crates.io](https://img.shields.io/crates/v/postgres_query)](https://crates.io/crates/postgres-query) 5 | [![Build Status](https://travis-ci.org/nolanderc/rust-postgres-query.svg?branch=master)](https://travis-ci.org/nolanderc/rust-postgres-query) 6 | [![License](https://img.shields.io/crates/l/postgres_query)](#license) 7 | [![Minimum Rust Version](https://img.shields.io/badge/rustc-1.40%2B-orange)](https://www.rust-lang.org/) 8 | [![Documentation](https://docs.rs/postgres_query/badge.svg)](https://docs.rs/postgres_query) 9 | 10 | This crate provides convenience macros and traits which help with writing SQL 11 | queries and gathering their results into statically typed structures. 12 | 13 | [Documentation](https://docs.rs/postgres_query) 14 | 15 | 16 | # Example 17 | 18 | ```rust 19 | // Connect to the database 20 | let client: Client = connect(/* ... 
*/); 21 | 22 | // Construct the query 23 | let query = query!( 24 | "SELECT name, age FROM people WHERE age >= $min_age", 25 | min_age = 18 26 | ); 27 | 28 | // Define the structure of the data returned from the query 29 | #[derive(FromSqlRow)] 30 | struct Person { 31 | age: i32, 32 | name: String, 33 | } 34 | 35 | // Execute the query 36 | let people: Vec = query.fetch(&client).await?; 37 | 38 | // Use the results 39 | for person in people { 40 | println!("{} is {} years young", person.name, person.age); 41 | } 42 | ``` 43 | 44 | # Features 45 | 46 | 47 | ## Extractors 48 | 49 | This crate allows you to extract the result of queries simply by tagging a 50 | struct with the `#[derive(FromSqlRow)]` attribute: 51 | 52 | ```rust 53 | #[derive(FromSqlRow)] 54 | struct Book { 55 | id: i32, 56 | title: String, 57 | genre: String, 58 | } 59 | 60 | let books: Vec = query!("SELECT * FROM books") 61 | .fetch(&client) 62 | .await?; 63 | ``` 64 | 65 | 66 | ## Multi-mapping 67 | 68 | You may also extract multiple structures from a single row. This can be useful 69 | when you are joining two tables. As a motivating example, we can store an 70 | `Author` instance inside a `Book` instance, which can be easier to work with: 71 | 72 | ```rust 73 | #[derive(FromSqlRow)] 74 | #[row(split)] 75 | struct Book { 76 | #[row(split = "id")] 77 | id: i32, 78 | title: String, 79 | genre: String, 80 | 81 | #[row(flatten, split = "id")] 82 | author: Author, 83 | } 84 | 85 | #[derive(FromSqlRow)] 86 | struct Author { 87 | id: i32, 88 | name: String, 89 | birthyear: i32, 90 | } 91 | 92 | let books: Vec = query!( 93 | "SELECT books.*, authors.* 94 | FROM books 95 | INNER JOIN authors ON authors.id = books.id" 96 | ) 97 | .fetch(&client) 98 | .await?; 99 | ``` 100 | 101 | Notice the `#[row(split = "id")]` attributes on the fields in `Book`. In order 102 | to extract values correctly we have to first split the row into smaller 103 | segments. 
We do this by specifying that the first occurrence of `id` is part 104 | of the book and the second `id` part of the author. The rest is done for you. 105 | 106 | The splits/segments would look like this: 107 | 108 | ``` 109 | Splits: id id 110 | Columns: id, title, genre, id, name, birthyear 111 | Segments: +-----Book-----+ +-----Author------+ 112 | ``` 113 | 114 | If we wanted to reuse an already existing `Book` we could just as easily do 115 | the following: 116 | 117 | ```rust 118 | #[derive(FromSqlRow)] 119 | #[row(split)] 120 | struct Listings { 121 | #[row(flatten, split = "id")] 122 | book: Book, 123 | #[row(flatten, split = "id")] 124 | author: Author, 125 | } 126 | ``` 127 | 128 | 129 | ### One-to-many Relationships 130 | 131 | In the previous examples we had a `Book` that contained an `Author`. This is 132 | what is called a many-to-one relationship, since one book only has one author, 133 | but many books may share the same author (or so we assume anyway). What if you 134 | instead had an `Author` that contained many `Book`s? We know that one 135 | author may write many books, so that is a one-to-many relationship. 
We can write 136 | an extractor for that case as well: 137 | 138 | ```rust 139 | #[derive(FromSqlRow)] 140 | #[row(split, group)] 141 | struct Author { 142 | #[row(split = "id", key)] 143 | id: i32, 144 | name: String, 145 | birthyear: i32, 146 | 147 | #[row(split = "id", merge)] 148 | books: Vec<Book>, 149 | } 150 | 151 | #[derive(FromSqlRow)] 152 | struct Book { 153 | id: i32, 154 | title: String, 155 | genre: String, 156 | } 157 | 158 | let authors: Vec<Author> = query!( 159 | "SELECT authors.*, books.* FROM authors 160 | INNER JOIN books ON books.author = authors.id 161 | GROUP BY authors.id" 162 | ) 163 | .fetch(&client) 164 | .await?; 165 | ``` 166 | 167 | 168 | ## Dynamic queries 169 | 170 | Queries may be constructed from either a string literal, in which case parameter 171 | bindings are computed at compile time, or any other `String` dynamically at 172 | runtime. The same is true for parameter bindings, which in the latter case can 173 | be added dynamically. 174 | 175 | Let's say that we wanted to dynamically add filters to our query: 176 | 177 | ```rust 178 | // We have the query we want to execute 179 | let mut sql = "SELECT * FROM people".to_string(); 180 | 181 | // and some filters we got from the user. 182 | let age_filter: Option<i32> = Some(32); 183 | let name_filter: Option<&str> = None; 184 | 185 | // Then we dynamically build a list of filters and bindings to use: 186 | let mut filters = Vec::new(); 187 | let mut bindings = Vec::new(); 188 | 189 | // We add the filters as needed. 190 | if let Some(age) = age_filter.as_ref() { 191 | filters.push("age > $min_age"); 192 | bindings.push(("min_age", age as Parameter)); 193 | } 194 | 195 | if let Some(name) = name_filter.as_ref() { 196 | filters.push("name LIKE $name"); 197 | bindings.push(("name", name as Parameter)); 198 | } 199 | 200 | // And append them to the query. 201 | if filters.len() > 0 { 202 | sql += &format!(" WHERE {}", filters.join(" AND ")); 203 | } 204 | 205 | // Then we can use it as normal. 
206 | let query: Query = query_dyn!(&sql, ..bindings)?; 207 | ``` 208 | 209 | 210 | ## License 211 | 212 | Licensed under either of Apache License, Version 213 | 2.0 or MIT license at your option. 214 | 215 | Unless you explicitly state otherwise, any contribution intentionally submitted 216 | for inclusion in rust-postgres-query by you, as defined in the Apache-2.0 217 | license, shall be dual licensed as above, without any additional terms or 218 | conditions. 219 | 220 | -------------------------------------------------------------------------------- /postgres_query/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "postgres_query" 3 | version = "0.3.3" 4 | authors = ["Christofer Nolander "] 5 | edition = "2018" 6 | description = "Write and execute SQL queries with ease" 7 | repository = "https://github.com/nolanderc/rust-postgres-query" 8 | license = "MIT OR Apache-2.0" 9 | readme = "../README.md" 10 | 11 | [lib] 12 | name = "postgres_query" 13 | 14 | [[example]] 15 | name = "basic" 16 | path = "examples/basic.rs" 17 | 18 | [features] 19 | deadpool = ["deadpool-postgres"] 20 | 21 | [dependencies] 22 | postgres_query_macro = { version = "0.3.3", path = "../postgres_query_macro" } 23 | proc-macro-hack = "0.5.19" 24 | postgres-types = "0.2.0" 25 | serde = "1.0.118" 26 | tokio-postgres = "0.7.0" 27 | futures = "0.3.8" 28 | async-trait = "0.1.42" 29 | thiserror = "1.0.23" 30 | deadpool-postgres = { version = "0.7.0", optional = true, default-features = false } 31 | 32 | [dev-dependencies] 33 | bytes = "1.0.0" 34 | tokio = { version = "1.0.1", features = ["macros", "rt-multi-thread"] } 35 | structopt = "0.3.21" 36 | anyhow = "1.0.37" 37 | -------------------------------------------------------------------------------- /postgres_query/examples/basic.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Error; 2 | use postgres_query::{query, FromSqlRow}; 3 
| use structopt::StructOpt; 4 | use tokio_postgres::{config::Config, NoTls}; 5 | 6 | #[derive(StructOpt)] 7 | struct Options { 8 | /// The database configuration, given as a string of space separated key-value pairs (eg. 9 | /// 'host=localhost user=postgres'). 10 | config: Vec, 11 | } 12 | 13 | #[derive(FromSqlRow)] 14 | struct Person { 15 | name: String, 16 | age: i32, 17 | } 18 | 19 | #[tokio::main] 20 | async fn main() -> Result<(), Error> { 21 | let options = Options::from_args(); 22 | let config: Config = options.config.join(" ").parse()?; 23 | 24 | let (mut client, connection) = config.connect(NoTls).await?; 25 | 26 | tokio::spawn(async move { 27 | if let Err(e) = connection.await { 28 | eprintln!("connection error: {}", e); 29 | } 30 | }); 31 | 32 | // open a new transaction to avoid making any changes to the database 33 | let tx = client.transaction().await?; 34 | 35 | query!("CREATE TABLE people (name TEXT, age INT)") 36 | .execute(&tx) 37 | .await?; 38 | 39 | query!( 40 | "INSERT INTO people VALUES ($name, $age)", 41 | name = "John Wick", 42 | age = 42, 43 | ) 44 | .execute(&tx) 45 | .await?; 46 | 47 | let query = query!("SELECT name, age FROM people"); 48 | let people: Vec = query.fetch(&tx).await?; 49 | 50 | for person in people { 51 | println!("{} is {} years young", person.name, person.age); 52 | } 53 | 54 | // undo any changes 55 | tx.rollback().await?; 56 | Ok(()) 57 | } 58 | -------------------------------------------------------------------------------- /postgres_query/src/client.rs: -------------------------------------------------------------------------------- 1 | //! Abstractions over client-like types. 
2 | 3 | mod cache; 4 | 5 | pub use cache::Caching; 6 | 7 | use async_trait::async_trait; 8 | use postgres_types::ToSql; 9 | use tokio_postgres::{error::Error as SqlError, Client, RowStream, Statement, Transaction}; 10 | 11 | #[cfg(feature = "deadpool")] 12 | use deadpool_postgres::{Client as DpClient, ClientWrapper as DpClientWrapper}; 13 | 14 | 15 | /// A generic client with basic functionality. 16 | #[async_trait] 17 | pub trait GenericClient { 18 | /// Prepare a SQL query for execution. See [`Client::prepare`] for more info. 19 | /// 20 | /// [`Client::prepare`]: 21 | /// https://docs.rs/tokio-postgres/0.5.1/tokio_postgres/struct.Client.html#method.prepare 22 | async fn prepare(&self, sql: &str) -> Result; 23 | 24 | /// Implementors may choose to override this method if they, for whatever reason (performance 25 | /// being one), want to cache a specific query. 26 | /// 27 | /// Because of the `'static` lifetime associated with the query string, we can assert that its 28 | /// value is never going to change. For instance, if a `HashMap` is used to build a cache of 29 | /// queries, it is enough to hash the pointer to the query instead of the whole string, since we 30 | /// know it will be unique for the duration of the program. 31 | async fn prepare_static(&self, sql: &'static str) -> Result { 32 | self.prepare(sql).await 33 | } 34 | 35 | /// Execute the given statement with the parameters specified and return the number of affected 36 | /// rows. See [`Client::execute_raw`] for more info. 37 | /// 38 | /// [`Client::execute_raw`]: 39 | /// https://docs.rs/tokio-postgres/0.5.1/tokio_postgres/struct.Client.html#method.execute_raw 40 | async fn execute_raw<'a>( 41 | &'a self, 42 | statement: &Statement, 43 | parameters: &[&'a (dyn ToSql + Sync)], 44 | ) -> Result; 45 | 46 | /// Execute the given statement with the parameters specified and return the resulting rows as 47 | /// an asynchronous stream. See [`Client::query_raw`] for more info. 
48 | /// 49 | /// [`Client::query_raw`]: 50 | /// https://docs.rs/tokio-postgres/0.5.1/tokio_postgres/struct.Client.html#method.query_raw 51 | async fn query_raw<'a>( 52 | &'a self, 53 | statement: &Statement, 54 | parameters: &[&'a (dyn ToSql + Sync)], 55 | ) -> Result; 56 | } 57 | 58 | fn slice_iter<'a>( 59 | s: &'a [&'a (dyn ToSql + Sync)], 60 | ) -> impl ExactSizeIterator + 'a { 61 | s.iter().map(|s| *s as _) 62 | } 63 | 64 | #[async_trait] 65 | impl GenericClient for Client { 66 | #[deny(unconditional_recursion)] 67 | async fn prepare(&self, sql: &str) -> Result { 68 | Client::prepare(self, sql).await 69 | } 70 | 71 | #[deny(unconditional_recursion)] 72 | async fn execute_raw<'a>( 73 | &'a self, 74 | statement: &Statement, 75 | parameters: &[&'a (dyn ToSql + Sync)], 76 | ) -> Result { 77 | Client::execute_raw(self, statement, slice_iter(parameters)).await 78 | } 79 | 80 | #[deny(unconditional_recursion)] 81 | async fn query_raw<'a>( 82 | &'a self, 83 | statement: &Statement, 84 | parameters: &[&'a (dyn ToSql + Sync)], 85 | ) -> Result { 86 | Client::query_raw(self, statement, slice_iter(parameters)).await 87 | } 88 | } 89 | 90 | #[cfg(feature = "deadpool")] 91 | #[async_trait] 92 | impl GenericClient for DpClient { 93 | #[deny(unconditional_recursion)] 94 | async fn prepare(&self, sql: &str) -> Result { 95 | DpClientWrapper::prepare(self, sql).await 96 | } 97 | 98 | #[deny(unconditional_recursion)] 99 | async fn execute_raw<'a>( 100 | &'a self, 101 | statement: &Statement, 102 | parameters: &[&'a (dyn ToSql + Sync)], 103 | ) -> Result { 104 | Client::execute_raw(&*self, statement, slice_iter(parameters)).await 105 | } 106 | 107 | #[deny(unconditional_recursion)] 108 | async fn query_raw<'a>( 109 | &'a self, 110 | statement: &Statement, 111 | parameters: &[&'a (dyn ToSql + Sync)], 112 | ) -> Result { 113 | Client::query_raw(&*self, statement, slice_iter(parameters)).await 114 | } 115 | } 116 | 117 | #[async_trait] 118 | impl GenericClient for Transaction<'_> { 
119 | async fn prepare(&self, sql: &str) -> Result { 120 | Transaction::prepare(self, sql).await 121 | } 122 | 123 | async fn execute_raw<'a>( 124 | &'a self, 125 | statement: &Statement, 126 | parameters: &[&'a (dyn ToSql + Sync)], 127 | ) -> Result { 128 | Transaction::execute_raw::<_, _, Statement>(self, statement, slice_iter(parameters)).await 129 | } 130 | 131 | async fn query_raw<'a>( 132 | &'a self, 133 | statement: &Statement, 134 | parameters: &[&'a (dyn ToSql + Sync)], 135 | ) -> Result { 136 | Transaction::query_raw(self, statement, slice_iter(parameters)).await 137 | } 138 | } 139 | 140 | macro_rules! client_deref_impl { 141 | ($($target:tt)+) => { 142 | #[async_trait] 143 | impl GenericClient for $($target)+ where T: GenericClient + Sync { 144 | async fn prepare(&self, sql: &str) -> Result { 145 | T::prepare(self, sql).await 146 | } 147 | 148 | async fn execute_raw<'a>( 149 | &'a self, 150 | statement: &Statement, 151 | parameters: &[&'a (dyn ToSql + Sync)], 152 | ) -> Result { 153 | T::execute_raw(self, statement, parameters).await 154 | } 155 | 156 | async fn query_raw<'a>( 157 | &'a self, 158 | statement: &Statement, 159 | parameters: &[&'a (dyn ToSql + Sync)], 160 | ) -> Result { 161 | T::query_raw(self, statement, parameters).await 162 | } 163 | } 164 | } 165 | } 166 | 167 | client_deref_impl!(&T); 168 | -------------------------------------------------------------------------------- /postgres_query/src/client/cache.rs: -------------------------------------------------------------------------------- 1 | //! A client which caches repeated requests. 
2 | 3 | use super::GenericClient; 4 | use crate::error::Error; 5 | use async_trait::async_trait; 6 | use futures::lock::Mutex; 7 | use postgres_types::ToSql; 8 | use std::collections::HashMap; 9 | use std::hash::Hash; 10 | use std::mem; 11 | use std::ops::{Deref, DerefMut}; 12 | use std::sync::Arc; 13 | use tokio_postgres::{error::Error as SqlError, RowStream, Statement}; 14 | 15 | /// A client wrapper which caches prepared queries. 16 | /// 17 | /// Only queries prepared through the `GenericClient::prepare_static` trait method are actually 18 | /// cached. 19 | #[derive(Clone)] 20 | pub struct Caching 21 | where 22 | C: GenericClient, 23 | { 24 | client: C, 25 | cache: Cache, 26 | } 27 | 28 | type Cache = Arc>>; 29 | 30 | // We uniquely identify a `&'static str` using a pointer and a length. 31 | // Since shared references with static lifetimes are guaranteed not to change we can assert that two 32 | // `&'static str`s that point to the same value in fact are the same value during the whole duration 33 | // of the program. 34 | #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] 35 | struct StrKey { 36 | ptr: usize, 37 | len: usize, 38 | } 39 | 40 | /// A cache optimized for a small number of items. 41 | #[derive(Debug, Clone, PartialEq, Eq)] 42 | enum DynamicCache 43 | where 44 | K: DynamicKey, 45 | { 46 | Linear(Vec<(K, V)>), 47 | Hash(HashMap), 48 | } 49 | 50 | /// A key with a dynamic cutoff. 51 | trait DynamicKey: Hash + Eq { 52 | /// Maximum number of items in a linear search. 53 | const LINEAR_CUTOFF: usize; 54 | } 55 | 56 | impl Caching 57 | where 58 | C: GenericClient, 59 | { 60 | /// Wrap a client in a new cache. 61 | pub fn new(client: C) -> Caching { 62 | Caching { 63 | client, 64 | cache: Cache::default(), 65 | } 66 | } 67 | 68 | /// Return the inner client. 
69 | pub fn into_inner(self) -> C { 70 | self.client 71 | } 72 | } 73 | 74 | impl From for Caching 75 | where 76 | C: GenericClient, 77 | { 78 | fn from(client: C) -> Self { 79 | Caching::new(client) 80 | } 81 | } 82 | 83 | impl Deref for Caching 84 | where 85 | C: GenericClient, 86 | { 87 | type Target = C; 88 | 89 | fn deref(&self) -> &Self::Target { 90 | &self.client 91 | } 92 | } 93 | 94 | impl DerefMut for Caching 95 | where 96 | C: GenericClient, 97 | { 98 | fn deref_mut(&mut self) -> &mut Self::Target { 99 | &mut self.client 100 | } 101 | } 102 | 103 | #[async_trait] 104 | impl GenericClient for Caching 105 | where 106 | C: GenericClient + Sync + Send, 107 | { 108 | async fn prepare(&self, sql: &str) -> Result { 109 | self.client.prepare(sql).await 110 | } 111 | 112 | async fn prepare_static(&self, sql: &'static str) -> Result { 113 | if let Some(statement) = self.get_cached(sql).await { 114 | Ok(statement) 115 | } else { 116 | let statement = self.client.prepare_static(sql).await?; 117 | self.cache(sql, statement.clone()).await; 118 | Ok(statement) 119 | } 120 | } 121 | 122 | async fn execute_raw<'a>( 123 | &'a self, 124 | statement: &Statement, 125 | parameters: &[&'a (dyn ToSql + Sync)], 126 | ) -> Result { 127 | self.client.execute_raw(statement, parameters).await 128 | } 129 | 130 | async fn query_raw<'a>( 131 | &'a self, 132 | statement: &Statement, 133 | parameters: &[&'a (dyn ToSql + Sync)], 134 | ) -> Result { 135 | self.client.query_raw(statement, parameters).await 136 | } 137 | } 138 | 139 | impl Caching 140 | where 141 | C: GenericClient, 142 | { 143 | async fn get_cached(&self, sql: &'static str) -> Option { 144 | let cache = self.cache.lock().await; 145 | cache.get(&StrKey::new(sql)).map(Statement::clone) 146 | } 147 | 148 | async fn cache(&self, sql: &'static str, statement: Statement) { 149 | let mut cache = self.cache.lock().await; 150 | cache.insert(StrKey::new(sql), statement); 151 | } 152 | } 153 | 154 | impl StrKey { 155 | pub fn 
new(text: &'static str) -> StrKey { 156 | StrKey { 157 | ptr: text.as_ptr() as usize, 158 | len: text.len(), 159 | } 160 | } 161 | } 162 | 163 | impl DynamicKey for StrKey { 164 | // TODO: run benchmarks to find a good cutoff. 165 | const LINEAR_CUTOFF: usize = 64; 166 | } 167 | 168 | impl DynamicCache 169 | where 170 | K: DynamicKey, 171 | { 172 | pub fn get(&self, index: &K) -> Option<&V> { 173 | match self { 174 | DynamicCache::Linear(pairs) => pairs 175 | .iter() 176 | .find(|(key, _)| K::eq(key, &index)) 177 | .map(|(_, value)| value), 178 | DynamicCache::Hash(map) => map.get(index), 179 | } 180 | } 181 | 182 | /// Insert a new key-value pair into the cache, and grow the cache if necessary. 183 | pub fn insert(&mut self, key: K, value: V) { 184 | match self { 185 | DynamicCache::Linear(pairs) if pairs.len() >= K::LINEAR_CUTOFF => { 186 | let map = mem::take(pairs).into_iter().collect(); 187 | *self = DynamicCache::Hash(map); 188 | self.insert(key, value); 189 | } 190 | DynamicCache::Linear(pairs) => { 191 | pairs.push((key, value)); 192 | } 193 | DynamicCache::Hash(map) => { 194 | map.insert(key, value); 195 | } 196 | } 197 | } 198 | } 199 | 200 | impl Default for DynamicCache 201 | where 202 | K: DynamicKey, 203 | { 204 | fn default() -> Self { 205 | DynamicCache::Linear(Vec::new()) 206 | } 207 | } 208 | 209 | // TODO: Unfortunately we require GATs to do this in a more general fashion without resorting to 210 | // dynamic dispatch. When GATs become stable we can move this into the `GenericClient` trait. 211 | macro_rules! impl_cached_transaction { 212 | ($client:ty, $transaction:ty) => { 213 | impl Caching<$client> { 214 | /// Start a new transaction that shares the same cache as the current client. 
215 | pub async fn transaction(&mut self) -> Result, Error> { 216 | <$client>::transaction(self) 217 | .await 218 | .map(Caching::new) 219 | .map_err(Error::BeginTransaction) 220 | } 221 | } 222 | }; 223 | } 224 | 225 | impl_cached_transaction!(tokio_postgres::Client, tokio_postgres::Transaction<'_>); 226 | impl_cached_transaction!( 227 | tokio_postgres::Transaction<'_>, 228 | tokio_postgres::Transaction<'_> 229 | ); 230 | -------------------------------------------------------------------------------- /postgres_query/src/error.rs: -------------------------------------------------------------------------------- 1 | use crate::execute; 2 | use thiserror::Error; 3 | 4 | /// Any error that this crate may produce. 5 | #[derive(Debug, Error)] 6 | pub enum Error { 7 | #[error("failed to execute the query")] 8 | Execute(#[from] execute::Error), 9 | 10 | #[error("failed to start new transaction")] 11 | BeginTransaction(#[source] tokio_postgres::Error), 12 | 13 | #[error("failed to parse query: {0}")] 14 | Parse(#[from] ParseError), 15 | } 16 | 17 | pub type Result = std::result::Result; 18 | 19 | #[derive(Debug, Error)] 20 | pub enum ParseError { 21 | #[error("failed to find binding matching `${binding}`")] 22 | UndefinedBinding { binding: String }, 23 | 24 | #[error( 25 | "expected an identifier, found '{next}'. Dollar signs may be escaped: `$$`.", 26 | next = found.map(|ch| ch.to_string()).unwrap_or_else(|| "EOF".to_owned()) 27 | )] 28 | EmptyIdentifier { found: Option }, 29 | } 30 | -------------------------------------------------------------------------------- /postgres_query/src/execute.rs: -------------------------------------------------------------------------------- 1 | //! Executing queries through a client. 2 | //! 3 | //! See [`Query`]. 4 | //! 5 | //! 
[`Query`]: ../struct.Query.html 6 | 7 | use super::{Query, Sql}; 8 | use crate::client::GenericClient; 9 | use crate::error::Result; 10 | use crate::extract::{self, FromSqlRow}; 11 | use futures::{pin_mut, Stream, StreamExt, TryStreamExt}; 12 | use thiserror::Error; 13 | use tokio_postgres::{error::Error as SqlError, Row, Statement}; 14 | 15 | /// An error that may arise when executing a query. 16 | #[derive(Debug, Error)] 17 | pub enum Error { 18 | #[error("failed to execute query")] 19 | Sql(#[from] SqlError), 20 | 21 | #[error("expected 1 row, found 0")] 22 | NoRows, 23 | #[error("expected 1 row, found more than 1")] 24 | TooManyRows, 25 | 26 | #[error("failed to extract value from row")] 27 | Extract(#[from] extract::Error), 28 | } 29 | 30 | impl<'a> Query<'a> { 31 | /// Execute this query and return the number of affected rows. 32 | pub async fn execute(&self, client: &C) -> Result 33 | where 34 | C: GenericClient + Sync, 35 | { 36 | let statement = self.prepare(&client).await?; 37 | let rows = client 38 | .execute_raw(&statement, &self.parameters) 39 | .await 40 | .map_err(Error::from)?; 41 | Ok(rows) 42 | } 43 | 44 | /// Execute this query and return the resulting values. 45 | pub async fn fetch(&self, client: &C) -> Result> 46 | where 47 | T: FromSqlRow, 48 | C: GenericClient + Sync, 49 | { 50 | let rows = self.query(client).await?; 51 | let values = T::from_row_multi(&rows).map_err(Error::from)?; 52 | Ok(values) 53 | } 54 | 55 | /// Execute this query and return the resulting value. This method will return an error if, not 56 | /// exactly one row was returned by the query. 57 | pub async fn fetch_one(&self, client: &C) -> Result 58 | where 59 | T: FromSqlRow, 60 | C: GenericClient + Sync, 61 | { 62 | let row = self.query_one(client).await?; 63 | let value = T::from_row(&row).map_err(Error::from)?; 64 | Ok(value) 65 | } 66 | 67 | /// Execute this query and return the resulting values as an asynchronous stream of values. 
68 | pub async fn fetch_streaming(&self, client: &C) -> Result>> 69 | where 70 | T: FromSqlRow, 71 | C: GenericClient + Sync, 72 | { 73 | let rows = self.query_streaming(client).await?; 74 | let values = rows.map(|row| { 75 | row.and_then(|row| { 76 | T::from_row(&row) 77 | .map_err(Error::Extract) 78 | .map_err(Into::into) 79 | }) 80 | }); 81 | Ok(values) 82 | } 83 | 84 | /// Execute this query and return the resulting rows. 85 | pub async fn query(&self, client: &C) -> Result> 86 | where 87 | C: GenericClient + Sync, 88 | { 89 | let statement = self.prepare(&client).await?; 90 | let rows = client 91 | .query_raw(&statement, &self.parameters) 92 | .await 93 | .map_err(Error::from)? 94 | .try_collect::>() 95 | .await 96 | .map_err(Error::from)?; 97 | Ok(rows) 98 | } 99 | 100 | /// Execute this query and return the resulting row. This method will return an error if, not 101 | /// exactly one row was returned by the query. 102 | pub async fn query_one(&self, client: &C) -> Result 103 | where 104 | C: GenericClient + Sync, 105 | { 106 | let statement = self.prepare(&client).await?; 107 | let rows = client 108 | .query_raw(&statement, &self.parameters) 109 | .await 110 | .map_err(Error::from)?; 111 | 112 | pin_mut!(rows); 113 | 114 | let row = match rows.try_next().await.map_err(Error::from)? { 115 | Some(row) => row, 116 | None => return Err(Error::NoRows.into()), 117 | }; 118 | 119 | if rows.try_next().await.map_err(Error::from)?.is_some() { 120 | return Err(Error::TooManyRows.into()); 121 | } 122 | 123 | Ok(row) 124 | } 125 | 126 | /// Execute this query and return the resulting values as an asynchronous stream of values. 
127 | pub async fn query_streaming(&self, client: &C) -> Result>> 128 | where 129 | C: GenericClient + Sync, 130 | { 131 | let statement = self.prepare(&client).await?; 132 | let rows = client 133 | .query_raw(&statement, &self.parameters) 134 | .await 135 | .map_err(Error::from)?; 136 | Ok(rows.map_err(Error::from).map_err(Into::into)) 137 | } 138 | } 139 | 140 | impl<'a> Query<'a> { 141 | async fn prepare(&self, client: &C) -> Result 142 | where 143 | C: GenericClient + Sync, 144 | { 145 | let result = match &self.sql { 146 | Sql::Static(text) => client.prepare_static(text).await, 147 | Sql::Dynamic(text) => client.prepare(&text).await, 148 | }; 149 | 150 | result.map_err(Error::Sql).map_err(Into::into) 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /postgres_query/src/extract.rs: -------------------------------------------------------------------------------- 1 | //! Extract typed values from rows. 2 | 3 | use postgres_types::FromSql; 4 | use postgres_types::WasNull; 5 | use std::collections::{BTreeSet, HashSet}; 6 | use std::error::Error as StdError; 7 | use std::fmt::{Display, Write}; 8 | use std::hash::Hash; 9 | use std::iter; 10 | use std::ops::Range; 11 | use thiserror::Error; 12 | use tokio_postgres::{error::Error as SqlError, row::RowIndex, Column}; 13 | 14 | /// An error that can occur while extracting values from a row. 
15 | #[derive(Debug, Error)] 16 | pub enum Error { 17 | #[error("{msg}")] 18 | Custom { msg: String }, 19 | 20 | #[error("invalid number of columns, found {found} but expected {expected}")] 21 | ColumnCount { found: usize, expected: usize }, 22 | 23 | #[error("failed to get column: `{index}` (columns were: {columns})")] 24 | SliceLookup { index: String, columns: String }, 25 | 26 | #[error("failed to split on: `{split}` (columns were: {columns})")] 27 | InvalidSplit { split: String, columns: String }, 28 | 29 | #[error( 30 | "failed to slice row on: `{start}..{end}` (len was: {len})", 31 | start = range.start, 32 | end = range.end 33 | )] 34 | SliceIndex { range: Range, len: usize }, 35 | 36 | /// An error occured within postgres itself. 37 | #[error("internal postgres error")] 38 | Sql(#[from] SqlError), 39 | } 40 | 41 | impl Error { 42 | /// Construct a new error message with a custom message. 43 | pub fn new(msg: D) -> Error 44 | where 45 | D: Display, 46 | { 47 | Error::Custom { 48 | msg: msg.to_string(), 49 | } 50 | } 51 | 52 | /// A soft error is an error that can be converted into an `Option::None`. 53 | fn is_soft(&self) -> bool { 54 | match self { 55 | Error::Sql(sql) => { 56 | let mut error: &dyn StdError = sql; 57 | loop { 58 | if let Some(WasNull) = error.downcast_ref() { 59 | break true; 60 | } 61 | 62 | match error.source() { 63 | Some(source) => error = source, 64 | None => break false, 65 | } 66 | } 67 | } 68 | 69 | _ => false, 70 | } 71 | } 72 | } 73 | 74 | mod private { 75 | pub mod row { 76 | pub trait Sealed {} 77 | } 78 | } 79 | 80 | /// Anything that provides a row-like interface. 81 | /// 82 | /// This trait is sealed and cannot be implemented for types outside of this crate. 83 | pub trait Row: private::row::Sealed { 84 | /// Return the name and type of each column. 85 | fn columns(&self) -> &[Column]; 86 | 87 | /// Attempt to get a cell in the row by the column name or index. 
88 | fn try_get<'a, I, T>(&'a self, index: I) -> Result 89 | where 90 | I: RowIndex + Display, 91 | T: FromSql<'a>; 92 | 93 | /// The number of values (columns) in the row. 94 | fn len(&self) -> usize { 95 | self.columns().len() 96 | } 97 | 98 | /// `true` if the value did not contain any values, `false` otherwise. 99 | fn is_empty(&self) -> bool { 100 | self.len() == 0 101 | } 102 | 103 | /// Attempt to get a cell in the row by the column name or index. 104 | /// 105 | /// # Panics 106 | /// 107 | /// - If no cell was found with the given index. 108 | fn get<'a, I, T>(&'a self, index: I) -> T 109 | where 110 | I: RowIndex + Display, 111 | T: FromSql<'a>, 112 | { 113 | match self.try_get::(index) { 114 | Ok(value) => value, 115 | Err(err) => panic!("failed to retrieve column: {}", err), 116 | } 117 | } 118 | 119 | /// Return a subslice of this row's columns. 120 | fn slice(&self, range: Range) -> Result, Error> 121 | where 122 | Self: Sized, 123 | { 124 | if range.end > self.len() { 125 | Err(Error::SliceIndex { 126 | range, 127 | len: self.len(), 128 | }) 129 | } else { 130 | let slice = RowSlice { row: self, range }; 131 | Ok(slice) 132 | } 133 | } 134 | } 135 | 136 | /// A contiguous subset of columns in a row. 137 | pub struct RowSlice<'a, R> 138 | where 139 | R: Row, 140 | { 141 | row: &'a R, 142 | range: Range, 143 | } 144 | 145 | /// Extract values from a row. 146 | /// 147 | /// May be derived for `struct`s using `#[derive(FromSqlRow)]`. 148 | /// 149 | /// # Example 150 | /// 151 | /// ``` 152 | /// # use postgres_query_macro::FromSqlRow; 153 | /// # use postgres_types::Date; 154 | /// #[derive(FromSqlRow)] 155 | /// struct Person { 156 | /// age: i32, 157 | /// name: String, 158 | /// birthday: Option>, 159 | /// } 160 | /// ``` 161 | pub trait FromSqlRow: Sized { 162 | /// Number of columns required to construct this type. 163 | /// 164 | /// IMPORTANT: if not set correctly, extractors which depend on this value may produce errors. 
165 | const COLUMN_COUNT: usize; 166 | 167 | /// Extract values from a single row. 168 | fn from_row(row: &R) -> Result 169 | where 170 | R: Row; 171 | 172 | /// Extract values from multiple rows. 173 | /// 174 | /// Implementors of this trait may override this method to enable optimizations not possible in 175 | /// [`from_row`] by, for example, only looking up the indices of columns with a specific name 176 | /// once. 177 | /// 178 | /// [`from_row`]: #tymethod.from_row 179 | fn from_row_multi(rows: &[R]) -> Result, Error> 180 | where 181 | R: Row, 182 | { 183 | rows.iter().map(Self::from_row).collect() 184 | } 185 | } 186 | 187 | /// For collections that can be built from single elements. 188 | /// 189 | /// Used by `#[derive(FromSqlRow)]` when a field is tagged with the attribute `#[row(merge)]`. 190 | pub trait Merge { 191 | /// The type of item being merged. 192 | type Item; 193 | 194 | /// Insert one item into this collection. 195 | fn insert(&mut self, item: Self::Item); 196 | } 197 | 198 | impl Merge for Vec { 199 | type Item = T; 200 | fn insert(&mut self, item: T) { 201 | self.push(item) 202 | } 203 | } 204 | 205 | impl Merge for HashSet 206 | where 207 | T: Hash + Eq, 208 | { 209 | type Item = T; 210 | fn insert(&mut self, item: T) { 211 | HashSet::insert(self, item); 212 | } 213 | } 214 | 215 | impl Merge for BTreeSet 216 | where 217 | T: Ord, 218 | { 219 | type Item = T; 220 | fn insert(&mut self, item: T) { 221 | BTreeSet::insert(self, item); 222 | } 223 | } 224 | 225 | impl private::row::Sealed for tokio_postgres::Row {} 226 | 227 | impl Row for tokio_postgres::Row { 228 | fn columns(&self) -> &[Column] { 229 | tokio_postgres::Row::columns(self) 230 | } 231 | 232 | fn try_get<'a, I, T>(&'a self, index: I) -> Result 233 | where 234 | I: RowIndex + Display, 235 | T: FromSql<'a>, 236 | { 237 | tokio_postgres::Row::try_get(self, index).map_err(Error::from) 238 | } 239 | 240 | fn len(&self) -> usize { 241 | tokio_postgres::Row::len(self) 242 | } 243 | 
244 | fn is_empty(&self) -> bool { 245 | tokio_postgres::Row::is_empty(self) 246 | } 247 | 248 | fn get<'a, I, T>(&'a self, index: I) -> T 249 | where 250 | I: RowIndex + Display, 251 | T: FromSql<'a>, 252 | { 253 | tokio_postgres::Row::get(self, index) 254 | } 255 | } 256 | 257 | impl private::row::Sealed for RowSlice<'_, R> where R: Row {} 258 | 259 | impl Row for RowSlice<'_, R> 260 | where 261 | R: Row, 262 | { 263 | fn columns(&self) -> &[Column] { 264 | &self.row.columns()[self.range.clone()] 265 | } 266 | 267 | fn try_get<'a, I, T>(&'a self, index: I) -> Result 268 | where 269 | I: RowIndex + Display, 270 | T: FromSql<'a>, 271 | { 272 | if let Some(index) = index.__idx(self.columns()) { 273 | self.row.try_get(self.range.start + index) 274 | } else { 275 | Err(Error::SliceLookup { 276 | index: index.to_string(), 277 | columns: format_columns(self.columns()), 278 | }) 279 | } 280 | } 281 | } 282 | 283 | impl RowSlice<'_, R> 284 | where 285 | R: Row, 286 | { 287 | /// Return a subslice of this row's columns. 288 | /// 289 | /// This is an optimized version of `Row::slice` which reduces the number of 290 | /// pointer-indirections. 291 | pub fn slice(&self, range: Range) -> Result, Error> 292 | where 293 | Self: Sized, 294 | { 295 | if range.end > self.range.end { 296 | Err(Error::SliceIndex { 297 | range, 298 | len: self.range.end, 299 | }) 300 | } else { 301 | let slice = RowSlice { 302 | row: self.row, 303 | range, 304 | }; 305 | Ok(slice) 306 | } 307 | } 308 | } 309 | 310 | /// Split a row's columns into multiple partitions based on some split-points. 
311 | /// 312 | /// # Split 313 | /// 314 | /// Given a list of column labels, a split is made right before the first column with a matching 315 | /// name following the previous split: 316 | /// 317 | /// ```text 318 | /// Labels: a, a, c, a 319 | /// Indices: 0 1 2 3 4 5 6 7 8 9 10 320 | /// Columns: a b c a b a b c b a c 321 | /// Splits: | | | | 322 | /// Partitions: + +---+ +-----+ +-+ +-+ 323 | /// Ranges: [0..0, 0..3, 3..7, 7..9, 9..11]` 324 | /// ``` 325 | /// 326 | /// The first partition always contains the leading columns (zero or more): 327 | /// 328 | /// ```text 329 | /// Labels: b, a 330 | /// Indices: 0 1 2 3 4 5 331 | /// Columns: d a b c a b 332 | /// Splits: | | 333 | /// Partitions: +-+ +-+ +-+ 334 | /// Ranges: [0..2, 2..4, 4..6] 335 | /// ``` 336 | /// 337 | /// # Errors 338 | /// 339 | /// Will return an error if the columns could not be split (ie. no column with a matching name was 340 | /// found in the remaining columns). 341 | pub fn split_columns_many<'a, S>( 342 | columns: &'a [Column], 343 | splits: &'a [S], 344 | ) -> impl Iterator, Error>> + 'a 345 | where 346 | S: AsRef, 347 | { 348 | let column_names = columns.iter().map(|col| col.name()); 349 | partition_many(column_names, splits.iter()).map(move |split| match split { 350 | SplitResult::Range(range) => Ok(range), 351 | SplitResult::NotFound { split, start } => Err(Error::InvalidSplit { 352 | split, 353 | columns: format_columns(&columns[start..]), 354 | }), 355 | }) 356 | } 357 | 358 | #[cfg_attr(test, derive(Debug, PartialEq))] 359 | enum SplitResult { 360 | NotFound { split: String, start: usize }, 361 | Range(Range), 362 | } 363 | 364 | fn partition_many<'a>( 365 | columns: impl Iterator + 'a> + 'a, 366 | splits: impl Iterator + 'a> + 'a, 367 | ) -> impl Iterator + 'a { 368 | let mut columns = columns.enumerate(); 369 | let mut splits = splits; 370 | 371 | let mut previous_end = 0; 372 | 373 | iter::from_fn(move || -> Option<_> { 374 | if let Some(split) = splits.next() { 375 | 
let split = split.as_ref(); 376 | if let Some((end, _)) = columns.find(|(_, name)| name.as_ref() == split) { 377 | let range = previous_end..end; 378 | previous_end = end; 379 | Some(SplitResult::Range(range)) 380 | } else { 381 | Some(SplitResult::NotFound { 382 | split: split.to_owned(), 383 | start: previous_end, 384 | }) 385 | } 386 | } else { 387 | let (last, _) = columns.by_ref().last()?; 388 | let len = last + 1; 389 | Some(SplitResult::Range(previous_end..len)) 390 | } 391 | }) 392 | } 393 | 394 | fn format_columns(columns: &[Column]) -> String { 395 | let mut total = String::with_capacity(16 * columns.len()); 396 | for col in columns { 397 | if !total.is_empty() { 398 | total.push_str(", "); 399 | } 400 | write!(total, "`{}`", col.name()).unwrap(); 401 | } 402 | total 403 | } 404 | 405 | mod from_row_sql_impls { 406 | use super::*; 407 | 408 | use std::rc::Rc; 409 | use std::sync::Arc; 410 | 411 | macro_rules! impl_from_row_for_tuple { 412 | (($($elem:ident),+)) => { 413 | impl<$($elem),+> FromSqlRow for ($($elem,)+) 414 | where $($elem: for<'a> FromSql<'a> + std::fmt::Display),+ 415 | { 416 | const COLUMN_COUNT: usize = impl_from_row_for_tuple!(@count ($($elem),*)); 417 | 418 | fn from_row(row: &R) -> Result 419 | where R: Row { 420 | if row.len() != Self::COLUMN_COUNT { 421 | Err(Error::ColumnCount { 422 | expected: Self::COLUMN_COUNT, 423 | found: row.len(), 424 | }) 425 | } else { 426 | let result = ( 427 | $( 428 | row.try_get::( 429 | impl_from_row_for_tuple!(@index $elem) 430 | )?, 431 | )+ 432 | ); 433 | 434 | Ok(result) 435 | } 436 | } 437 | } 438 | }; 439 | 440 | (@index A) => { 0 }; 441 | (@index B) => { 1 }; 442 | (@index C) => { 2 }; 443 | (@index D) => { 3 }; 444 | (@index E) => { 4 }; 445 | (@index F) => { 5 }; 446 | (@index G) => { 6 }; 447 | (@index H) => { 7 }; 448 | 449 | (@count ()) => { 0 }; 450 | (@count ($head:ident $(, $tail:ident)*)) => {{ 451 | 1 + impl_from_row_for_tuple!(@count ($($tail),*)) 452 | }}; 453 | } 454 | 455 | 
impl_from_row_for_tuple!((A)); 456 | impl_from_row_for_tuple!((A, B)); 457 | impl_from_row_for_tuple!((A, B, C)); 458 | impl_from_row_for_tuple!((A, B, C, D)); 459 | impl_from_row_for_tuple!((A, B, C, D, E)); 460 | impl_from_row_for_tuple!((A, B, C, D, E, F)); 461 | impl_from_row_for_tuple!((A, B, C, D, E, F, G)); 462 | impl_from_row_for_tuple!((A, B, C, D, E, F, G, H)); 463 | 464 | impl FromSqlRow for Option 465 | where 466 | T: FromSqlRow, 467 | { 468 | const COLUMN_COUNT: usize = T::COLUMN_COUNT; 469 | 470 | fn from_row(row: &R) -> Result 471 | where 472 | R: Row, 473 | { 474 | match T::from_row(row) { 475 | Ok(value) => Ok(Some(value)), 476 | Err(error) if error.is_soft() => Ok(None), 477 | Err(error) => Err(error), 478 | } 479 | } 480 | } 481 | 482 | impl FromSqlRow for Result 483 | where 484 | T: FromSqlRow, 485 | E: From, 486 | { 487 | const COLUMN_COUNT: usize = T::COLUMN_COUNT; 488 | 489 | fn from_row(row: &R) -> Result 490 | where 491 | R: Row, 492 | { 493 | match T::from_row(row) { 494 | Ok(value) => Ok(Ok(value)), 495 | Err(error) => Ok(Err(E::from(error))), 496 | } 497 | } 498 | } 499 | 500 | macro_rules! 
impl_from_row_for_wrapper { 501 | ($wrapper:ident, $constructor:expr) => { 502 | impl FromSqlRow for $wrapper 503 | where 504 | T: FromSqlRow, 505 | { 506 | const COLUMN_COUNT: usize = T::COLUMN_COUNT; 507 | 508 | fn from_row(row: &R) -> Result 509 | where 510 | R: Row, 511 | { 512 | let value = T::from_row(row)?; 513 | Ok($constructor(value)) 514 | } 515 | } 516 | }; 517 | } 518 | 519 | impl_from_row_for_wrapper!(Box, Box::new); 520 | impl_from_row_for_wrapper!(Rc, Rc::new); 521 | impl_from_row_for_wrapper!(Arc, Arc::new); 522 | } 523 | 524 | #[cfg(test)] 525 | mod tests { 526 | use super::*; 527 | 528 | fn split_chars_fallible<'a>( 529 | columns: &'a str, 530 | splits: &'a str, 531 | ) -> impl Iterator + 'a { 532 | let cols = columns.chars().map(|ch| ch.to_string()); 533 | let splits = splits.chars().map(|ch| ch.to_string()); 534 | partition_many(cols, splits) 535 | } 536 | 537 | fn split_chars<'a>( 538 | columns: &'a str, 539 | splits: &'a str, 540 | ) -> impl Iterator> + 'a { 541 | let cols = columns.chars().map(|ch| ch.to_string()); 542 | let splits = splits.chars().map(|ch| ch.to_string()); 543 | partition_many(cols, splits).map(move |split| match split { 544 | SplitResult::Range(range) => range, 545 | SplitResult::NotFound { split, start } => panic!( 546 | "failed to split {:?} on {:?}", 547 | columns.chars().skip(start).collect::(), 548 | split, 549 | ), 550 | }) 551 | } 552 | 553 | #[test] 554 | fn split_columns_many_no_excess() { 555 | let partitions = split_chars("abcabdab", "aaa").collect::>(); 556 | assert_eq!(partitions, vec![0..0, 0..3, 3..6, 6..8,]) 557 | } 558 | 559 | #[test] 560 | fn split_columns_many_leading_columns() { 561 | let partitions = split_chars("deabcabdab", "aaa").collect::>(); 562 | assert_eq!(partitions, vec![0..2, 2..5, 5..8, 8..10,]) 563 | } 564 | 565 | #[test] 566 | fn split_columns_many_too_many_splits() { 567 | let partitions = split_chars_fallible("abcabc", "aaa").collect::>(); 568 | assert_eq!( 569 | partitions, 570 | vec![ 
571 | SplitResult::Range(0..0), 572 | SplitResult::Range(0..3), 573 | SplitResult::NotFound { 574 | split: "a".to_owned(), 575 | start: 3, 576 | } 577 | ] 578 | ) 579 | } 580 | } 581 | -------------------------------------------------------------------------------- /postgres_query/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Helper macros and traits built around 2 | //! [tokio-postgres](https://docs.rs/tokio-postgres/0.5.1/tokio_postgres/index.html) to define 3 | //! queries with human readable parameters and return values. 4 | //! 5 | //! # Example 6 | //! 7 | //! ``` 8 | //! # use tokio_postgres::Client; 9 | //! # use postgres_query::{query, FromSqlRow, Result}; 10 | //! # fn connect() -> Client { unimplemented!() } 11 | //! # async fn foo() -> Result<()> { 12 | //! // Connect to the database 13 | //! let client: Client = connect(/* ... */); 14 | //! 15 | //! // Construct the query 16 | //! let query = query!( 17 | //! "SELECT age, name FROM people WHERE age >= $min_age", 18 | //! min_age = 18 19 | //! ); 20 | //! 21 | //! // Define the structure of the data returned from the query 22 | //! #[derive(FromSqlRow)] 23 | //! struct Person { 24 | //! age: i32, 25 | //! name: String, 26 | //! } 27 | //! 28 | //! // Execute the query 29 | //! let people: Vec = query.fetch(&client).await?; 30 | //! 31 | //! for person in people { 32 | //! println!("{} is {} years young", person.name, person.age); 33 | //! } 34 | //! # Ok(()) 35 | //! # } 36 | //! ``` 37 | //! 38 | //! # Queries 39 | //! 40 | //! The preferred way of constructing a new [`Query`] is through the [`query!`] macro. It uses a 41 | //! syntax similar to the `format!(...)` family of macros from the standard library. The first 42 | //! parameter is the SQL query and is always given as a string literal (this might be relaxed in the 43 | //! future). This string literal may contain parameter bindings on the form `$ident` where `ident` 44 | //! 
is any valid Rust identifier (`$abc`, `$value_123`, etc.). 45 | //! 46 | //! ``` 47 | //! # use postgres_query::query; 48 | //! let age = 42; 49 | //! let insert_person = query!( 50 | //! "INSERT INTO people VALUES ($age, $name)", 51 | //! name = "John Wick", // Binds "$name" to "John Wick" 52 | //! age, // Binds "$age" to the value of `age` 53 | //! ); 54 | //! ``` 55 | //! 56 | //! During compilation the query is converted into the format expected by PostgreSQL: parameter 57 | //! bindings are converted to using numbers ($1, $2, etc.) and the actual parameter values are put 58 | //! into a 1-indexed array. The code snippet above would be expanded into the following: 59 | //! 60 | //! ``` 61 | //! # use postgres_query::*; 62 | //! let age = 42; 63 | //! let insert_person = Query::new_static( 64 | //! "INSERT INTO people VALUES ($1, $2)", 65 | //! vec![&age, &"John Wick"], 66 | //! ); 67 | //! ``` 68 | //! 69 | //! 70 | //! ## Dynamic Queries 71 | //! 72 | //! If necessary, queries may be constructed from `&str`s at runtime instead of the usual 73 | //! compile-time string literals expected by the `query!` macro. This is achieved by using the 74 | //! [`query_dyn!`] macro instead. In addition to dynamic queries, parameter bindings may also be 75 | //! dynamically: 76 | //! 77 | //! ``` 78 | //! # use postgres_query::*; 79 | //! let mut sql = "SELECT * FROM people WHERE name = $name".to_string(); 80 | //! let mut bindings = Vec::new(); 81 | //! 82 | //! // Add a filter at runtime 83 | //! sql += " AND age > $min_age"; 84 | //! bindings.push(("min_age", &42 as Parameter)); 85 | //! 86 | //! let query: Result = query_dyn!( 87 | //! &sql, 88 | //! name = "John", 89 | //! ..bindings, 90 | //! ); 91 | //! ``` 92 | //! 93 | //! Using dynamic queries does introduce some errors that cannot be caught at runtime: such as some 94 | //! parameters in the query not having a matching binding. Because of this the value returned by the 95 | //! 
[`query_dyn!`] macro is not a `Query` but a `Result` which carries an error you must 96 | //! handle: 97 | //! 98 | //! ``` 99 | //! # use postgres_query::*; 100 | //! let mut sql = "SELECT * FROM people".to_string(); 101 | //! sql += " WHERE age <= $max_age AND name = $name"; 102 | //! 103 | //! let query: Result = query_dyn!( 104 | //! &sql, 105 | //! name = "John", 106 | //! // Forgot to bind the parameter `max_age`. 107 | //! // Will result in an error. 108 | //! ); 109 | //! 110 | //! assert!(query.is_err()); 111 | //! ``` 112 | //! 113 | //! 114 | //! # Data Extraction 115 | //! 116 | //! In addition to helping you define new queries this crate provides the [`FromSqlRow`] trait which 117 | //! makes it easy to extract typed values from the resulting rows. The easiest way to implement this 118 | //! trait for new `struct`s is to use the included [`derive(FromSqlRow)`] macro. 119 | //! 120 | //! - If used on a tuple struct, values will be extracted from the corresponding columns based on 121 | //! their position in the tuple. 122 | //! - If used on a stuct with named fields, values will be extracted from the column with the same 123 | //! name as the field. 124 | //! 125 | //! ``` 126 | //! # use postgres_query::*; 127 | //! #[derive(FromSqlRow)] 128 | //! struct TupleData(i32, String); 129 | //! 130 | //! #[derive(FromSqlRow)] 131 | //! struct NamedData { 132 | //! age: i32, 133 | //! name: String, 134 | //! }; 135 | //! ``` 136 | //! 137 | //! ## Multi-mapping 138 | //! 139 | //! If you query the same table multiple times it gets tedious to have to redefine structs with the 140 | //! same fields over and over. Preferably we would like to reuse the same definition multiple times. 141 | //! We can do this be utilizing "multi-mapping". 142 | //! 143 | //! 144 | //! ### Partitions 145 | //! 146 | //! Multi-mapping works by splitting the columns of rows returned by a query into multiple 147 | //! partitions (or slices). 
For example, if we had the query `SELECT books.*, authors.* FROM ...`, 148 | //! we would like to extract the data into two structs: `Book` and `Author`. We accomplish this by 149 | //! looking at the columns returned by the database and splitting them into partitions: 150 | //! 151 | //! ```text 152 | //! Columns: id, title, release_date, genre, id, name, birthyear 153 | //! Partitions: +------------Book-------------+ +------Author-----+ 154 | //! ``` 155 | //! 156 | //! 157 | //! ### Partitioning schemes 158 | //! 159 | //! There are two supported ways to partition a row: either we specify the number of columns 160 | //! required to populate each struct (in the example above: 4 columns for Book and 3 for author), or 161 | //! we split on the name of a column. The former should generally only be used when you know the 162 | //! number of columns isn't going to change. The latter is less prone to break provided you choose 163 | //! an appropriate column to split on (a good candidate is usually `id` as almost all tables have 164 | //! this as their first 165 | //! column). 166 | //! 167 | //! You choose which partitioning scheme you want to use by using the provided 168 | //! [attributes](./derive.FromSqlRow.html#attributes). In order to accomplish the partitioning in 169 | //! the example above we could split on the column name `id`: 170 | //! 171 | //! ``` 172 | //! # use postgres_query::FromSqlRow; 173 | //! #[derive(FromSqlRow)] 174 | //! struct Book { 175 | //! id: i32, 176 | //! title: String, 177 | //! release_date: String, 178 | //! genre: String, 179 | //! } 180 | //! 181 | //! #[derive(FromSqlRow)] 182 | //! struct Author { 183 | //! id: i32, 184 | //! name: String, 185 | //! birthyear: i32, 186 | //! } 187 | //! 188 | //! #[derive(FromSqlRow)] 189 | //! #[row(split)] 190 | //! struct BookAuthor { 191 | //! #[row(flatten, split = "id")] 192 | //! book: Book, 193 | //! #[row(flatten, split = "id")] 194 | //! author: Author, 195 | //! } 196 | //! 
``` 197 | //! 198 | //! Alternatively, we can make `Author` a part of the `Book` struct: 199 | //! 200 | //! ``` 201 | //! # use postgres_query::FromSqlRow; 202 | //! #[derive(FromSqlRow)] 203 | //! struct Author { 204 | //! id: i32, 205 | //! name: String, 206 | //! birthyear: i32, 207 | //! } 208 | //! 209 | //! #[derive(FromSqlRow)] 210 | //! #[row(split)] 211 | //! struct Book { 212 | //! #[row(split = "id")] 213 | //! id: i32, 214 | //! title: String, 215 | //! release_date: String, 216 | //! genre: String, 217 | //! 218 | //! #[row(flatten, split = "id")] 219 | //! author: Author, 220 | //! } 221 | //! ``` 222 | //! 223 | //! ### Many-to-one Relationships 224 | //! 225 | //! In the previous examples we had a `Book` that contained an `Author`. This is what is called a 226 | //! many-to-one relationship, since one book only has one author, but many books may share the same 227 | //! author (or so we assume anyway). What if you instead had `Author` an author that contained many 228 | //! `Book`s? We know that one author may write many books, so that is a one-to-many relationship. We 229 | //! can write an extractor for that case as well: 230 | //! 231 | //! ``` 232 | //! # use postgres_query::*; 233 | //! # use tokio_postgres::Client; 234 | //! # async fn foo() -> Result<()> { 235 | //! # let client: Client = unimplemented!(); 236 | //! #[derive(FromSqlRow)] 237 | //! #[row(split, group)] 238 | //! struct Author { 239 | //! #[row(split = "id", key)] 240 | //! id: i32, 241 | //! name: String, 242 | //! birthyear: i32, 243 | //! 244 | //! #[row(split = "id", merge)] 245 | //! books: Vec, 246 | //! } 247 | //! 248 | //! #[derive(FromSqlRow)] 249 | //! struct Book { 250 | //! id: i32, 251 | //! title: String, 252 | //! release_date: String, 253 | //! genre: String, 254 | //! } 255 | //! 256 | //! let authors: Vec = query!( 257 | //! "SELECT authors.*, books.* 258 | //! INNER JOIN books ON books.author = authors.id 259 | //! GROUP BY authors.id" 260 | //! ) 261 | //! 
.fetch(&client) 262 | //! .await?; 263 | //! # Ok(()) 264 | //! # } 265 | //! ``` 266 | //! 267 | //! See the section on [attributes](./derive.FromSqlRow.html#attributes) for a more advanced 268 | //! in-depth explanation of multi-mapping. 269 | //! 270 | //! 271 | //! # Caching queries 272 | //! 273 | //! From time to time you probably want to execute the same query multiple times, but with different 274 | //! parameters. In times like these we can decrease the load on the database by preparing our 275 | //! queries before executing them. By wrapping a client in a [`Caching`] struct this behaviour is 276 | //! automatically provided for all queries that originate from this crate: 277 | //! 278 | //! ``` 279 | //! # use tokio_postgres::Client; 280 | //! # use postgres_query::{query, Result, Caching}; 281 | //! # fn connect() -> Client { unimplemented!() } 282 | //! # async fn foo() -> Result<()> { 283 | //! // Connect to the database 284 | //! let client: Client = connect(/* ... */); 285 | //! 286 | //! // Wrap the client in a query cache 287 | //! let cached_client = Caching::new(client); 288 | //! 289 | //! for age in 0..100i32 { 290 | //! let query = query!("SELECT name, weight FROM people WHERE age = $age", age); 291 | //! 292 | //! // The query is prepared and cached the first time it's executed. 293 | //! // All subsequent fetches will use the cached Statement. 294 | //! let people: Vec<(String, i32)> = query.fetch(&cached_client).await?; 295 | //! 296 | //! /* Do something with people */ 297 | //! } 298 | //! # Ok(()) 299 | //! # } 300 | //! ``` 301 | //! 302 | //! [`Query`]: struct.Query.html 303 | //! [`query!`]: macro.query.html 304 | //! [`query_dyn!`]: macro.query_dyn.html 305 | //! [`FromSqlRow`]: extract/trait.FromSqlRow.html 306 | //! [`derive(FromSqlRow)`]: derive.FromSqlRow.html 307 | //! 
[`Caching`]: client/struct.Caching.html 308 | 309 | pub mod client; 310 | pub mod execute; 311 | pub mod extract; 312 | 313 | mod error; 314 | mod parse; 315 | 316 | use postgres_types::ToSql; 317 | use proc_macro_hack::proc_macro_hack; 318 | use std::ops::Deref; 319 | 320 | pub use crate::client::Caching; 321 | pub use crate::error::{Error, Result}; 322 | pub use crate::extract::FromSqlRow; 323 | 324 | /// Extract values from a row. 325 | /// 326 | /// - If used on a tuple struct, values will be extracted from the corresponding columns based on 327 | /// their position in the tuple. 328 | /// - If used on a stuct with named fields, values will be extracted from the column with the same 329 | /// name as the field. 330 | /// 331 | /// # Example 332 | /// 333 | /// ``` 334 | /// # use postgres_query::*; 335 | /// #[derive(FromSqlRow)] 336 | /// struct TupleData(i32, String); 337 | /// 338 | /// #[derive(FromSqlRow)] 339 | /// struct NamedData { 340 | /// age: i32, 341 | /// name: String, 342 | /// }; 343 | /// ``` 344 | /// 345 | /// 346 | /// # Attributes 347 | /// 348 | /// Data extraction can be customized by using the `#[row(...)]` attribute. Attributes can be 349 | /// separated into two categories, those which go on the container itself: 350 | /// 351 | /// - [`#[row(exact)]`](#rowexact) 352 | /// - [`#[row(split)]`](#rowsplit) 353 | /// - [`#[row(group)]`](#rowgroup) 354 | /// - [`#[row(hash)]`](#rowhash) 355 | /// 356 | /// and those which are placed on the container's fields: 357 | /// 358 | /// - [`#[row(rename = "...")]`](#rowrename--) 359 | /// - [`#[row(flatten)]`](#rowflatten) 360 | /// - [`#[row(stride = N)]`](#rowstride--n) 361 | /// - [`#[row(split = "...")]`](#rowsplit--) 362 | /// - [`#[row(key)]`](#rowkey) 363 | /// - [`#[row(merge)]`](#rowmerge) 364 | /// 365 | /// 366 | /// ## Container attributes 367 | /// 368 | /// These attributes are put on the struct itself. 
369 | /// 370 | /// 371 | /// ### `#[row(exact)]` 372 | /// 373 | /// [Partition](./index.html#multi-mapping) the row according to the number of columns matched by 374 | /// each group. 375 | /// 376 | /// Note that no order is forced upon fields within any group. In the example below, that means that 377 | /// even though the `generation` and `origin` fields are flipped relative to the query, the 378 | /// extraction will be successful: 379 | /// 380 | /// ``` 381 | /// # use postgres_query::{FromSqlRow, Result, query}; 382 | /// # use tokio_postgres::Client; 383 | /// # async fn foo() -> Result<()> { 384 | /// # let client: Client = unimplemented!(); 385 | /// #[derive(FromSqlRow)] 386 | /// #[row(exact)] 387 | /// struct Family { 388 | /// generation: i32, 389 | /// origin: String, 390 | /// #[row(flatten)] 391 | /// parent: Person, 392 | /// #[row(flatten)] 393 | /// child: Person, 394 | /// } 395 | /// 396 | /// #[derive(FromSqlRow)] 397 | /// struct Person { 398 | /// id: i32, 399 | /// name: String, 400 | /// } 401 | /// 402 | /// let family = query!( 403 | /// "SELECT 404 | /// 'Germany' as origin, 7 as generation, 405 | /// 1 as id, 'Bob' as name, 406 | /// 2 as id, 'Ike' as name" 407 | /// ) 408 | /// .fetch_one::(&client) 409 | /// .await?; 410 | /// # Ok(()) 411 | /// # } 412 | /// ``` 413 | /// 414 | /// ### `#[row(split)]` 415 | /// 416 | /// [Partition](./index.html#multi-mapping) the row according to the field's [split 417 | /// points](extract/fn.split_columns_many.html#split-points). 418 | /// 419 | /// Split points are introduced by using the [`#[row(split = "...")]`](#rowsplit---1) attribute on 420 | /// fields. 
421 | /// 422 | /// ``` 423 | /// # use postgres_query::{FromSqlRow, Result, query}; 424 | /// # use tokio_postgres::Client; 425 | /// # async fn foo() -> Result<()> { 426 | /// # let client: Client = unimplemented!(); 427 | /// #[derive(FromSqlRow)] 428 | /// #[row(split)] 429 | /// struct Family { 430 | /// generation: i32, 431 | /// origin: String, 432 | /// #[row(flatten, split = "id")] 433 | /// parent: Person, 434 | /// #[row(flatten, split = "id")] 435 | /// child: Person, 436 | /// } 437 | /// 438 | /// #[derive(FromSqlRow)] 439 | /// struct Person { 440 | /// id: i32, 441 | /// name: String, 442 | /// } 443 | /// 444 | /// let family = query!( 445 | /// "SELECT 446 | /// 'Germany' as origin, 7 as generation, 447 | /// 1 as id, 'Bob' as name, 448 | /// 2 as id, 'Ike' as name" 449 | /// ) 450 | /// .fetch_one::(&client) 451 | /// .await?; 452 | /// # Ok(()) 453 | /// # } 454 | /// ``` 455 | /// 456 | /// 457 | /// ### `#[row(group)]` 458 | /// 459 | /// Enables one-to-many mapping for the container. One-to-many mapping requires that at least one 460 | /// field has the `#[row(key)]` attribute and that one other field has the `#[row(merge)]` attribute. 461 | /// 462 | /// When extracting values from multiple rows, any two **adjacent** rows that are identical on their 463 | /// fields marked with `#[row(key)]` will have their fields tagged with `#[row(merge)]` merged. This 464 | /// means that in order to get the expected relation back, you may need to include a `GROUP BY` 465 | /// statement in your SQL query, hence the name `group`. 
466 | /// 467 | /// ``` 468 | /// # use postgres_query::*; 469 | /// # use tokio_postgres::Client; 470 | /// # async fn foo() -> Result<()> { 471 | /// # let client: Client = unimplemented!(); 472 | /// #[derive(Debug, FromSqlRow)] 473 | /// #[row(group)] 474 | /// struct Author { 475 | /// #[row(key)] 476 | /// name: String, 477 | /// 478 | /// #[row(merge)] 479 | /// books: Vec, 480 | /// } 481 | /// 482 | /// #[derive(Debug, FromSqlRow)] 483 | /// struct Book { 484 | /// title: String, 485 | /// } 486 | /// 487 | /// let authors = query!( 488 | /// "SELECT 'J.R.R. Tolkien' as name, 'The Fellowship of the Ring' as title 489 | /// UNION ALL SELECT 'J.R.R. Tolkien', 'The Two Towers' 490 | /// UNION ALL SELECT 'Andrzej Sapkowski', 'The Last Wish' 491 | /// UNION ALL SELECT 'J.R.R. Tolkien', 'Return of the King'") 492 | /// .fetch::(&client) 493 | /// .await?; 494 | /// 495 | /// assert_eq!(authors[0].name, "J.R.R. Tolkien"); 496 | /// assert_eq!(authors[0].books[0].title, "The Fellowship of the Ring"); 497 | /// assert_eq!(authors[0].books[1].title, "The Two Towers"); 498 | /// 499 | /// assert_eq!(authors[1].name, "Andrzej Sapkowski"); 500 | /// assert_eq!(authors[1].books[0].title, "The Last Wish"); 501 | /// 502 | /// assert_eq!(authors[2].name, "J.R.R. Tolkien"); 503 | /// assert_eq!(authors[2].books[0].title, "Return of the King"); 504 | /// # Ok(()) 505 | /// # } 506 | /// ``` 507 | /// 508 | /// 509 | /// ### `#[row(hash)]` 510 | /// 511 | /// Like `#[row(group)]`, but all previous rows are considered when merging. This is accomplished by 512 | /// using a `HashMap`, hence the name. 
This implies that all keys have to implement the `Hash` and 513 | /// `Eq` traits: 514 | /// 515 | /// ``` 516 | /// # use postgres_query::*; 517 | /// # use tokio_postgres::Client; 518 | /// # async fn foo() -> Result<()> { 519 | /// # let client: Client = unimplemented!(); 520 | /// #[derive(Debug, FromSqlRow)] 521 | /// #[row(hash)] 522 | /// struct Author { 523 | /// #[row(key)] 524 | /// name: String, 525 | /// 526 | /// #[row(merge)] 527 | /// books: Vec, 528 | /// } 529 | /// 530 | /// #[derive(Debug, FromSqlRow)] 531 | /// struct Book { 532 | /// title: String, 533 | /// } 534 | /// 535 | /// let authors = query!( 536 | /// "SELECT 'J.R.R. Tolkien' as name, 'The Fellowship of the Ring' as title 537 | /// UNION ALL SELECT 'J.R.R. Tolkien', 'The Two Towers' 538 | /// UNION ALL SELECT 'Andrzej Sapkowski', 'The Last Wish' 539 | /// UNION ALL SELECT 'J.R.R. Tolkien', 'Return of the King'") 540 | /// .fetch::(&client) 541 | /// .await?; 542 | /// 543 | /// assert_eq!(authors[0].name, "J.R.R. Tolkien"); 544 | /// assert_eq!(authors[0].books[0].title, "The Fellowship of the Ring"); 545 | /// assert_eq!(authors[0].books[1].title, "The Two Towers"); 546 | /// assert_eq!(authors[0].books[2].title, "Return of the King"); 547 | /// 548 | /// assert_eq!(authors[1].name, "Andrzej Sapkowski"); 549 | /// assert_eq!(authors[1].books[0].title, "The Last Wish"); 550 | /// # Ok(()) 551 | /// # } 552 | /// ``` 553 | /// 554 | /// ## Field attributes 555 | /// 556 | /// These attributes are put on the fields of a container. 557 | /// 558 | /// 559 | /// ### `#[row(rename = "...")]` 560 | /// 561 | /// Use a name other than that of the field when looking up the name of the column. 
562 | /// 563 | /// ``` 564 | /// # use postgres_query::FromSqlRow; 565 | /// #[derive(FromSqlRow)] 566 | /// struct Person { 567 | /// age: i32, 568 | /// // matches the column named "first_name" instead of "name" 569 | /// #[row(rename = "first_name")] 570 | /// name: String, 571 | /// } 572 | /// ``` 573 | /// 574 | /// ### `#[row(flatten)]` 575 | /// 576 | /// Flatten the contents of this field into its container by recursively calling `FromSqlRow` on the 577 | /// field's type. This removes one level of nesting: 578 | /// 579 | /// ``` 580 | /// # use postgres_query::{FromSqlRow, query, Result}; 581 | /// # use tokio_postgres::Client; 582 | /// # async fn foo() -> Result<()> { 583 | /// # let client: Client = unimplemented!(); 584 | /// #[derive(FromSqlRow)] 585 | /// struct Customer { 586 | /// id: i32, 587 | /// #[row(flatten)] 588 | /// info: Person, 589 | /// } 590 | /// 591 | /// #[derive(FromSqlRow)] 592 | /// struct Person { 593 | /// name: String, 594 | /// age: i32 595 | /// } 596 | /// 597 | /// let customer: Customer = query!("SELECT 14 as id, 'Bob' as name, 47 as age") 598 | /// .fetch_one(&client) 599 | /// .await?; 600 | /// 601 | /// assert_eq!(customer.id, 14); 602 | /// assert_eq!(customer.info.name, "Bob"); 603 | /// assert_eq!(customer.info.age, 47); 604 | /// # Ok(()) 605 | /// # } 606 | /// ``` 607 | /// 608 | /// ### `#[row(stride = N)]` 609 | /// 610 | /// Puts this field into a partition with exactly `N` columns. 
Only available when using the 611 | /// `#[row(exact)]` attribute on the container, 612 | /// 613 | /// ``` 614 | /// # use postgres_query::{FromSqlRow, query, Result}; 615 | /// # use tokio_postgres::Client; 616 | /// # async fn foo() -> Result<()> { 617 | /// # let client: Client = unimplemented!(); 618 | /// #[derive(Debug, FromSqlRow)] 619 | /// struct Person { 620 | /// id: i32, 621 | /// name: String, 622 | /// } 623 | /// 624 | /// #[derive(Debug, FromSqlRow)] 625 | /// #[row(exact)] 626 | /// struct Family { 627 | /// // Matches first 4 columns 628 | /// #[row(flatten, stride = 4)] 629 | /// parent: Person, 630 | /// // Matches last 3 columns 631 | /// #[row(flatten, stride = 3)] 632 | /// child: Person, 633 | /// } 634 | /// 635 | /// let family = query!( 636 | /// "SELECT 637 | /// 11 as generation, 638 | /// 1 as id, 'Bob' as name, 42 as age, 639 | /// 2 as id, 'Ike' as name, 14 as age" 640 | /// ) 641 | /// .fetch_one::(&client) 642 | /// .await?; 643 | /// 644 | /// assert_eq!(family.parent.id, 1); 645 | /// assert_eq!(family.parent.name, "Bob"); 646 | /// assert_eq!(family.child.id, 2); 647 | /// assert_eq!(family.child.name, "Ike"); 648 | /// # Ok(()) 649 | /// # } 650 | /// ``` 651 | /// 652 | /// ### `#[row(split = "...")]` 653 | /// 654 | /// Introduce an additional [split](extract/fn.split_columns_many.html#split-points) right 655 | /// before this field. Requires that the container has the `split` attribute as well. 656 | /// 657 | /// Intuitively this splits the row in two parts: every field before this attribute matches the 658 | /// columns before the split and every field afterwards matches the second remaining columns. 
659 | /// 660 | /// ``` 661 | /// # use postgres_query::{FromSqlRow}; 662 | /// #[derive(FromSqlRow)] 663 | /// #[row(split)] 664 | /// struct User { 665 | /// // `id` and `name` will only match the columns before `email` 666 | /// id: i32, 667 | /// name: String, 668 | /// #[row(split = "email")] 669 | /// // `email`, `address` and `shoe_size` will only 670 | /// // match the columns after and including `email` 671 | /// email: String, 672 | /// address: String, 673 | /// shoe_size: i32, 674 | /// } 675 | /// ``` 676 | /// 677 | /// Note that the first split always matches first occurence of that column. This can result in some 678 | /// subtle bugs: 679 | /// 680 | /// ``` 681 | /// # use postgres_query::{FromSqlRow, query}; 682 | /// #[derive(FromSqlRow)] 683 | /// #[row(split)] 684 | /// struct Family { 685 | /// #[row(flatten)] 686 | /// parent: Person, 687 | /// #[row(flatten, split = "id")] 688 | /// child: Person, 689 | /// } 690 | /// 691 | /// #[derive(FromSqlRow)] 692 | /// struct Person { 693 | /// name: String, 694 | /// age: i32 695 | /// } 696 | /// 697 | /// let query = query!("SELECT parent.*, child.* FROM ..."); 698 | /// 699 | /// // Imagine the query above results in the following columns: 700 | /// // 701 | /// // Columns: id, name, id, name 702 | /// // Splits: | 703 | /// // Partitions: +-parent-+ +-----child------+ 704 | /// ``` 705 | /// 706 | /// The split causes `parent` to match against all columns before the first `id`, ie. an empty 707 | /// partition. This would cause an error when executing the query. 
708 | /// 709 | /// A correct split would look like this: 710 | /// 711 | /// ``` 712 | /// # use postgres_query::{FromSqlRow, query}; 713 | /// # #[derive(FromSqlRow)] struct Person; 714 | /// #[derive(FromSqlRow)] 715 | /// #[row(split)] 716 | /// struct Family { 717 | /// #[row(flatten, split = "id")] 718 | /// parent: Person, 719 | /// #[row(flatten, split = "id")] 720 | /// child: Person, 721 | /// } 722 | /// ``` 723 | /// 724 | /// 725 | /// ### `#[row(key)]` 726 | /// 727 | /// Specifies this field to be a `key` field. `key` fields are compared against each other when 728 | /// extracting values from multiple rows. Rows are merged if the key fields in each row are 729 | /// identical. You may have multiple `key` fields within a single container, but none of them may 730 | /// have the `#[row(merge)]` attribute. Multiple `key` fields will be treated as a tuple in 731 | /// comparisons. 732 | /// 733 | /// 734 | /// ### `#[row(merge)]` 735 | /// 736 | /// Specifies this field to be a `merge` field. This requires that the field's type implements the 737 | /// [`Merge`] trait. When two rows have been deemed to be equal based on the `key` fields, the 738 | /// corresponding `merge` fields in those rows will be merged. You may specify multiple `merge` 739 | /// fields within one container, but none of them may have the `#[row(key)]` attribute. 740 | /// 741 | /// [`Merge`]: extract/trait.Merge.html 742 | pub use postgres_query_macro::FromSqlRow; 743 | 744 | /// Constructs a new query at compile-time. See also `query_dyn!`. 745 | /// 746 | /// # Usage 747 | /// 748 | /// This macro expands to an expression with the type `Query`. 749 | /// 750 | /// The first parameter is the SQL query and is always given as a string literal. This string 751 | /// literal may contain parameter bindings on the form `$ident` where `ident` is any valid Rust 752 | /// identifier (`$abc`, `$value_123`, etc.). The order of the parameters does not matter. 
753 | /// 754 | /// ``` 755 | /// # use postgres_query::query; 756 | /// let age = 42; 757 | /// let insert_person = query!( 758 | /// "INSERT INTO people VALUES ($age, $name)", 759 | /// name = "John Wick", // Binds "$name" to "John Wick" 760 | /// age, // Binds "$age" to the value of `age` 761 | /// ); 762 | /// ``` 763 | /// 764 | /// During compilation the query is converted into the format expected by PostgreSQL: parameter 765 | /// bindings are converted to using numbers (`$1`, `$2`, etc.) and the actual parameter values are 766 | /// put into a 1-indexed array. The code snippet above would be expanded into the following: 767 | /// 768 | /// ``` 769 | /// # use postgres_query::*; 770 | /// let age = 42; 771 | /// let insert_person = Query::new_static( 772 | /// "INSERT INTO people VALUES ($1, $2)", 773 | /// vec![&age, &"John Wick"], 774 | /// ); 775 | /// ``` 776 | #[macro_export] 777 | macro_rules! query { 778 | ($($tt:tt)*) => { 779 | $crate::__query_static!($($tt)*) 780 | }; 781 | } 782 | 783 | /// Constructs a new query dynamically at runtime. See also `query!`. 784 | /// 785 | /// # Usage 786 | /// 787 | /// This macro expands to an expression with the type `Result`. 788 | /// 789 | /// The first parameter is the SQL query and is always given as a `&str`. This string may contain 790 | /// parameter bindings on the form `$ident` where `ident` is any valid Rust identifier (`$abc`, 791 | /// `$value_123`, etc.). The order of the parameters does not matter. 
792 | /// 793 | /// ``` 794 | /// # use postgres_query::{query_dyn, Result}; 795 | /// # fn foo() -> Result<()> { 796 | /// // We can construct the actual query at runtime 797 | /// let mut sql = "INSERT INTO people VALUES".to_owned(); 798 | /// sql.push_str("($age, $name)"); 799 | /// 800 | /// let age = 42; 801 | /// 802 | /// let insert_person = query_dyn!( 803 | /// &sql, 804 | /// name = "John Wick", // Binds "$name" to "John Wick" 805 | /// age, // Binds "$age" to the value of `age` 806 | /// )?; 807 | /// # Ok(()) 808 | /// # } 809 | /// ``` 810 | /// 811 | /// The query and all the parameters are passed into `Query::parse`, so the above would be expanded 812 | /// into: 813 | /// 814 | /// ``` 815 | /// # use postgres_query::Query; 816 | /// // We can construct the actual query at runtime 817 | /// let mut sql = "INSERT INTO people VALUES".to_string(); 818 | /// sql.push_str("($age, $name)"); 819 | /// 820 | /// let age = 42; 821 | /// 822 | /// let insert_person = Query::parse( 823 | /// &sql, 824 | /// &[("name", &"John Wick"), ("age", &age)], 825 | /// ); 826 | /// ``` 827 | /// 828 | /// 829 | /// ## Dynamic Binding 830 | /// 831 | /// Optionally, you may also choose to include additional bindings at runtime by using the 832 | /// `..bindings` syntax. This is supported for any type that implements `IntoIterator`, ie. `Vec<(&str, Parameter)>`, `HashMap<&str, Parameter>`, `Option<(&str, 834 | /// Parameter)>`, iterators, and so on. 835 | /// 836 | /// Dynamic bindings may be mixed with static bindings: 837 | /// 838 | /// ``` 839 | /// # use postgres_query::{query_dyn, Parameter, Result}; 840 | /// # fn foo() -> Result<()> { 841 | /// let mut bindings = Vec::new(); 842 | /// 843 | /// // We use the `as Parameter` to please the type checker. 844 | /// // Alternatively, we could specify the type for bindings: `Vec<(&str, Parameter)>`. 
845 | /// bindings.push(("age", &42 as Parameter)); 846 | /// bindings.push(("name", &"John Wick" as Parameter)); 847 | /// 848 | /// let sql = "INSERT INTO people VALUES ($age, $name, $height)".to_string(); 849 | /// let insert_person = query_dyn!( 850 | /// &sql, 851 | /// height = 192, 852 | /// ..bindings, 853 | /// )?; 854 | /// # Ok(()) 855 | /// # } 856 | /// ``` 857 | /// 858 | /// 859 | /// # A larger example 860 | /// 861 | /// Let's say that we wanted to dynamically add filters to our query: 862 | /// 863 | /// ``` 864 | /// # use postgres_query::{query_dyn, Parameter, Query, Result}; 865 | /// # fn foo() -> Result<()> { 866 | /// // We have the query we want to execute 867 | /// let mut sql = "SELECT * FROM people".to_string(); 868 | /// 869 | /// // and some filters we got from the user. 870 | /// let age_filter: Option = Some(32); 871 | /// let name_filter: Option<&str> = None; 872 | /// 873 | /// // Then we dynamically build a list of filters and bindings to use: 874 | /// let mut filters = Vec::new(); 875 | /// let mut bindings = Vec::new(); 876 | /// 877 | /// // We add the filters as needed. 878 | /// if let Some(age) = age_filter.as_ref() { 879 | /// filters.push("age > $min_age"); 880 | /// bindings.push(("min_age", age as Parameter)); 881 | /// } 882 | /// 883 | /// if let Some(name) = name_filter.as_ref() { 884 | /// filters.push("name LIKE $name"); 885 | /// bindings.push(("name", name as Parameter)); 886 | /// } 887 | /// 888 | /// // And add them to the query. 889 | /// if filters.len() > 0 { 890 | /// sql += &format!(" WHERE {}", filters.join(" AND ")); 891 | /// } 892 | /// 893 | /// // Then we can use it as normal. 894 | /// let query: Query = query_dyn!(&sql, ..bindings)?; 895 | /// # Ok(()) 896 | /// # } 897 | /// ``` 898 | #[macro_export] 899 | macro_rules! 
query_dyn { 900 | ($($tt:tt)*) => { 901 | $crate::__query_dynamic!($($tt)*) 902 | }; 903 | } 904 | 905 | #[proc_macro_hack] 906 | #[doc(hidden)] 907 | pub use postgres_query_macro::{query_dynamic as __query_dynamic, query_static as __query_static}; 908 | 909 | /// A shorthand for types that can be treated as SQL parameters. 910 | /// 911 | /// A common use case for this type alias is when using dynamic bindings and you have to please the 912 | /// type checker: 913 | /// 914 | /// ``` 915 | /// # use postgres_query::{Parameter, query_dyn, Result}; 916 | /// # fn foo() -> Result<()> { 917 | /// let mut bindings = Vec::new(); 918 | /// 919 | /// // Without the `as Parameter` the compiler assumes the type to be `&i32`. 920 | /// bindings.push(("age", &32 as Parameter)); 921 | /// 922 | /// // Which would cause problems when adding something that is not an integer. 923 | /// bindings.push(("name", &"John" as Parameter)); 924 | /// 925 | /// let query = query_dyn!( 926 | /// "SELECT * FROM people WHERE age > $age AND name = $name", 927 | /// ..bindings 928 | /// )?; 929 | /// # Ok(()) 930 | /// # } 931 | /// ``` 932 | /// 933 | /// Alternatively we could just set the type on the container explicitly: 934 | /// 935 | /// ``` 936 | /// # use postgres_query::Parameter; 937 | /// let mut bindings: Vec<(&str, Parameter)> = Vec::new(); 938 | /// ``` 939 | pub type Parameter<'a> = &'a (dyn ToSql + Sync); 940 | 941 | /// A static query with dynamic parameters. 942 | /// 943 | /// # Usage 944 | /// 945 | /// ## Constructing 946 | /// 947 | /// The preferred way of constructing a [`Query`] is by using the [`query!`] and [`query_dyn!`] 948 | /// macros. 949 | /// 950 | /// You may also use the `Query::parse`, `Query::new_static` or `Query::new` methods. 951 | /// 952 | /// 953 | /// ## Executing 954 | /// 955 | /// When executing the query you have two options, either: 956 | /// 957 | /// 1. use the provided methods: `execute`, `fetch`, `query`, etc. 958 | /// 2. 
use the `sql` and `parameters` fields as arguments to the standard [`Client`] methods 959 | /// 960 | /// ``` 961 | /// # use tokio_postgres::{Client, Row}; 962 | /// # use postgres_query::{query, FromSqlRow, Result}; 963 | /// # fn connect() -> Client { unimplemented!() } 964 | /// # async fn foo() -> Result<(), Box> { 965 | /// #[derive(FromSqlRow)] 966 | /// struct Person { 967 | /// age: i32, 968 | /// name: String, 969 | /// } 970 | /// 971 | /// let client: Client = connect(/* ... */); 972 | /// let query = query!("SELECT age, name FROM people"); 973 | /// 974 | /// // Option 1 975 | /// let people: Vec = query.fetch(&client).await?; 976 | /// 977 | /// // Option 2 978 | /// let rows: Vec = client.query(query.sql(), query.parameters()).await?; 979 | /// let people: Vec = Person::from_row_multi(&rows)?; 980 | /// # Ok(()) 981 | /// # } 982 | /// ``` 983 | /// 984 | /// [`Query`]: struct.Query.html 985 | /// [`query!`]: macro.query.html 986 | /// [`query_dyn!`]: macro.query_dyn.html 987 | /// [`Client`]: https://docs.rs/tokio-postgres/0.5.1/tokio_postgres/struct.Client.html 988 | #[derive(Debug, Clone)] 989 | pub struct Query<'a> { 990 | sql: Sql, 991 | parameters: Vec>, 992 | } 993 | 994 | #[derive(Debug, Clone)] 995 | enum Sql { 996 | Static(&'static str), 997 | Dynamic(String), 998 | } 999 | 1000 | impl<'a> Query<'a> { 1001 | /// Create a new query an already prepared string. 1002 | /// 1003 | /// IMPORTANT: This does not allow you to pass named parameter bindings (`$name`, `$abc_123`, 1004 | /// etc.). For that behaviour, refer to the `query!` macro. Instead bindings and parameters are 1005 | /// given in the same format required by `tokio_postgres` (`$1`, `$2`, ...). 1006 | pub fn new(sql: String, parameters: Vec>) -> Query<'a> { 1007 | Query { 1008 | sql: Sql::Dynamic(sql), 1009 | parameters, 1010 | } 1011 | } 1012 | 1013 | /// Create a new query with a static query string. 
1014 | /// 1015 | /// IMPORTANT: This does not allow you to pass named parameter bindings (`$name`, `$abc_123`, 1016 | /// etc.), For that behaviour, refer to the `query_dyn!` macro. Instead bindings and parameters 1017 | /// are given in the same format required by `tokio_postgres` (`$1`, `$2`, ...). 1018 | pub fn new_static(sql: &'static str, parameters: Vec>) -> Query<'a> { 1019 | Query { 1020 | sql: Sql::Static(sql), 1021 | parameters, 1022 | } 1023 | } 1024 | 1025 | /// Parses a string that may contain parameter bindings on the form `$abc_123`. This is the same 1026 | /// function that is called when passing dynamically generated strings to the `query_dyn!` 1027 | /// macro. 1028 | /// 1029 | /// Because this is a function there will some runtime overhead unlike the `query!` macro which 1030 | /// has zero overhead when working with string literals. 1031 | pub fn parse(text: &str, bindings: &[(&str, Parameter<'a>)]) -> Result> { 1032 | let (sql, parameters) = parse::parse(text, bindings)?; 1033 | 1034 | Ok(Query { 1035 | sql: Sql::Dynamic(sql), 1036 | parameters, 1037 | }) 1038 | } 1039 | 1040 | /// Get this query as an SQL string. 1041 | pub fn sql(&'a self) -> &'a str { 1042 | &self.sql 1043 | } 1044 | 1045 | /// Get the parameters of this query in the order expected by the query returned by 1046 | /// `Query::sql`. 1047 | pub fn parameters(&'a self) -> &[Parameter<'a>] { 1048 | &self.parameters 1049 | } 1050 | } 1051 | 1052 | impl Deref for Sql { 1053 | type Target = str; 1054 | 1055 | fn deref(&self) -> &Self::Target { 1056 | match self { 1057 | Sql::Static(text) => text, 1058 | Sql::Dynamic(text) => &text, 1059 | } 1060 | } 1061 | } 1062 | 1063 | #[cfg(test)] 1064 | mod tests { 1065 | use super::*; 1066 | use crate::error::ParseError; 1067 | 1068 | macro_rules! 
is_match { 1069 | ($expr:expr, $pattern:pat) => { 1070 | match $expr { 1071 | $pattern => true, 1072 | _ => false, 1073 | } 1074 | }; 1075 | } 1076 | 1077 | #[test] 1078 | fn parse_query_without_bindings() { 1079 | let query = Query::parse("SELECT 123, 'abc'", &[]).unwrap(); 1080 | assert_eq!(query.sql(), "SELECT 123, 'abc'"); 1081 | } 1082 | 1083 | #[test] 1084 | fn parse_query_single_binding() { 1085 | let query = Query::parse("SELECT $number", &[("number", &123)]).unwrap(); 1086 | assert_eq!(query.sql(), "SELECT $1"); 1087 | } 1088 | 1089 | #[test] 1090 | fn parse_query_missing_identifier_eof() { 1091 | let query = Query::parse("SELECT $", &[]); 1092 | assert!(is_match!( 1093 | query.unwrap_err(), 1094 | Error::Parse(ParseError::EmptyIdentifier { found: None }) 1095 | )); 1096 | } 1097 | 1098 | #[test] 1099 | fn parse_query_missing_identifier() { 1100 | let query = Query::parse("SELECT $ FROM users", &[]); 1101 | assert!(is_match!( 1102 | query.unwrap_err(), 1103 | Error::Parse(ParseError::EmptyIdentifier { found: Some(' ') }) 1104 | )); 1105 | } 1106 | } 1107 | -------------------------------------------------------------------------------- /postgres_query/src/parse.rs: -------------------------------------------------------------------------------- 1 | use super::Parameter; 2 | use crate::error::{Error, ParseError, Result}; 3 | use std::fmt::Write; 4 | use std::iter::Peekable; 5 | 6 | pub fn parse<'a>( 7 | text: &str, 8 | bindings: &[(&str, Parameter<'a>)], 9 | ) -> Result<(String, Vec>)> { 10 | let mut sql = String::with_capacity(text.len()); 11 | let mut parameters = Vec::with_capacity(bindings.len()); 12 | let mut param_indices = vec![None; bindings.len()]; 13 | 14 | let mut chars = text.chars().peekable(); 15 | 16 | while let Some(ch) = chars.next() { 17 | if ch != '$' { 18 | sql.push(ch); 19 | } else if let Some('$') = chars.peek() { 20 | sql.push(chars.next().unwrap()); 21 | } else { 22 | let name = next_identifier(&mut chars)?; 23 | 24 | let argument = 
bindings 25 | .iter() 26 | .position(|(binding, _)| *binding == name) 27 | .ok_or_else(|| ParseError::UndefinedBinding { binding: name })?; 28 | 29 | let index = param_indices[argument].unwrap_or_else(|| { 30 | let (_, value) = bindings[argument]; 31 | parameters.push(value); 32 | let index = parameters.len(); 33 | param_indices[argument] = Some(index); 34 | index 35 | }); 36 | 37 | write!(sql, "${}", index).unwrap(); 38 | } 39 | } 40 | 41 | Ok((sql, parameters)) 42 | } 43 | 44 | fn next_identifier(chars: &mut Peekable>) -> Result { 45 | let mut name = String::new(); 46 | 47 | while let Some(&ch) = chars.peek() { 48 | if ch.is_ascii_alphanumeric() || ch == '_' { 49 | name.push(chars.next().unwrap()); 50 | } else { 51 | break; 52 | } 53 | } 54 | 55 | if name.is_empty() { 56 | let found = chars.peek().copied(); 57 | return Err(Error::from(ParseError::EmptyIdentifier { found })); 58 | } 59 | 60 | Ok(name) 61 | } 62 | -------------------------------------------------------------------------------- /postgres_query/tests/deadpool.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "deadpool")] 2 | 3 | use postgres_query::*; 4 | use deadpool_postgres::{Pool, Client, Config}; 5 | 6 | fn connect() -> Pool { 7 | let mut cfg = Config::new(); 8 | cfg.dbname = Some("postgres_query_test".to_string()); 9 | cfg.host = Some("localhost".to_string()); 10 | cfg.create_pool(tokio_postgres::NoTls).unwrap() 11 | } 12 | 13 | #[tokio::test] 14 | async fn simple_query() { 15 | let pool = connect(); 16 | let client: Client = pool.get().await.unwrap(); 17 | let query: Query = query_dyn!("SELECT 14").unwrap(); 18 | let res = query.fetch_one::<(i32,), _>(&client).await; 19 | } 20 | -------------------------------------------------------------------------------- /postgres_query/tests/execute.rs: -------------------------------------------------------------------------------- 1 | //! Validate that queries are executed as intended. 2 | //! 
3 | //! 4 | //! # Setup 5 | //! 6 | //! These tests require access to a PostgreSQL database. To run these tests it is recommended that 7 | //! you create a new user that has access to an empty database. By default these tests assume a user 8 | //! with the name `postgres_query_test`. Then, initialize the environment variable 9 | //! `POSTGRES_DB_CONFIG` to point to this new user (this variable uses the same format as 10 | //! `tokio_postgres::connect`). 11 | 12 | use anyhow::{anyhow, Error}; 13 | use postgres_query::{client::Caching, query, FromSqlRow}; 14 | use std::env; 15 | use tokio_postgres::Client; 16 | 17 | type Result = std::result::Result; 18 | 19 | /// Establish a new connection to the database 20 | async fn establish() -> Result { 21 | let config = env::var("POSTGRES_DB_CONFIG") 22 | .unwrap_or_else(|_| "user=postgres_query_test host=localhost".to_owned()); 23 | let (client, conn) = tokio_postgres::connect(&config, tokio_postgres::NoTls) 24 | .await 25 | .map_err(|e| { 26 | anyhow!( 27 | "failed to establish connection to database \ 28 | (have you set the POSTGRES_DB_CONFIG environment variable?): {}", 29 | e 30 | ) 31 | })?; 32 | 33 | tokio::spawn(async move { 34 | conn.await.expect("connection encountered an error"); 35 | }); 36 | 37 | Ok(client) 38 | } 39 | 40 | #[tokio::test] 41 | async fn simple_select() -> Result { 42 | let client = establish().await?; 43 | 44 | let query = query!("SELECT 14"); 45 | let row = query.query_one(&client).await?; 46 | let value: i32 = row.get(0); 47 | 48 | assert_eq!(value, 14); 49 | 50 | Ok(()) 51 | } 52 | 53 | #[tokio::test] 54 | async fn simple_select_fetch() -> Result { 55 | let client = establish().await?; 56 | 57 | let value: (i32,) = query!("SELECT 14").fetch_one(&client).await?; 58 | 59 | assert_eq!(value, (14,)); 60 | 61 | Ok(()) 62 | } 63 | 64 | #[tokio::test] 65 | async fn cached_fetch() -> Result { 66 | let client = establish().await?; 67 | let client = Caching::new(client); 68 | 69 | for _ in 0..10usize { 
70 | let query = query!("SELECT 'Myke', 31"); 71 | let (name, age): (String, i32) = query.fetch_one(&client).await?; 72 | 73 | assert_eq!(name, "Myke"); 74 | assert_eq!(age, 31); 75 | } 76 | 77 | Ok(()) 78 | } 79 | 80 | #[tokio::test] 81 | async fn fetch_named_struct() -> Result { 82 | let client = establish().await?; 83 | 84 | #[derive(FromSqlRow)] 85 | struct Person { 86 | age: i32, 87 | name: String, 88 | } 89 | 90 | let query = query!("SELECT 'Myke' as name, 31 as age"); 91 | let person: Person = query.fetch_one(&client).await?; 92 | 93 | assert_eq!(person.name, "Myke"); 94 | assert_eq!(person.age, 31); 95 | 96 | Ok(()) 97 | } 98 | 99 | #[tokio::test] 100 | async fn fetch_named_struct_rename() -> Result { 101 | let client = establish().await?; 102 | 103 | #[derive(FromSqlRow)] 104 | struct Person { 105 | #[row(rename = "name")] 106 | customer: String, 107 | age: i32, 108 | } 109 | 110 | let query = query!("SELECT 'Myke' as name, 31 as age"); 111 | let person: Person = query.fetch_one(&client).await?; 112 | 113 | assert_eq!(person.customer, "Myke"); 114 | assert_eq!(person.age, 31); 115 | 116 | Ok(()) 117 | } 118 | 119 | #[tokio::test] 120 | async fn fetch_named_struct_flattened() -> Result { 121 | let client = establish().await?; 122 | 123 | #[derive(FromSqlRow)] 124 | struct Person { 125 | name: String, 126 | age: i32, 127 | } 128 | 129 | #[derive(FromSqlRow)] 130 | struct Customer { 131 | id: i32, 132 | #[row(flatten)] 133 | info: Person, 134 | } 135 | 136 | let query = query!("SELECT 14 as id, 'Myke' as name, 31 as age"); 137 | let customer: Customer = query.fetch_one(&client).await?; 138 | 139 | assert_eq!(customer.info.name, "Myke"); 140 | assert_eq!(customer.info.age, 31); 141 | assert_eq!(customer.id, 14); 142 | 143 | Ok(()) 144 | } 145 | 146 | #[tokio::test] 147 | async fn cached_transaction() -> Result { 148 | let client = establish().await?; 149 | let mut client = Caching::new(client); 150 | 151 | let tx: Caching<_> = client.transaction().await?; 152 
| 153 | tx.into_inner().rollback().await?; 154 | 155 | Ok(()) 156 | } 157 | 158 | #[tokio::test] 159 | async fn fetch_joined_relations() -> Result { 160 | let mut client = establish().await?; 161 | let tx = client.transaction().await?; 162 | 163 | query!( 164 | "CREATE TABLE orders ( 165 | id SERIAL PRIMARY KEY, 166 | customer TEXT 167 | )" 168 | ) 169 | .execute(&tx) 170 | .await?; 171 | 172 | query!( 173 | "CREATE TABLE order_items ( 174 | order_id INTEGER REFERENCES orders(id), 175 | item TEXT NOT NULL 176 | )" 177 | ) 178 | .execute(&tx) 179 | .await?; 180 | 181 | #[derive(FromSqlRow)] 182 | struct OrderId(i32); 183 | 184 | let orders = query!( 185 | "INSERT INTO orders (customer) 186 | VALUES 187 | ('Emma'), 188 | ('Anna') 189 | RETURNING id", 190 | ) 191 | .fetch::(&tx) 192 | .await?; 193 | 194 | query!( 195 | "INSERT INTO order_items (order_id, item) 196 | VALUES 197 | ($emma, 'Hair dryer'), 198 | ($emma, 'Phone'), 199 | ($anna, 'Note book')", 200 | emma = orders[0].0, 201 | anna = orders[1].0, 202 | ) 203 | .execute(&tx) 204 | .await?; 205 | 206 | #[derive(Debug, PartialEq, FromSqlRow)] 207 | struct Order { 208 | customer: String, 209 | item: String, 210 | } 211 | 212 | let orders = query!( 213 | "SELECT 214 | customer, 215 | item 216 | FROM order_items 217 | INNER JOIN orders ON order_items.order_id = orders.id 218 | ORDER BY customer, item" 219 | ) 220 | .fetch::(&tx) 221 | .await?; 222 | 223 | assert_eq!(orders.len(), 3); 224 | 225 | assert_eq!(orders[0].customer, "Anna"); 226 | assert_eq!(orders[0].item, "Note book"); 227 | 228 | assert_eq!(orders[1].customer, "Emma"); 229 | assert_eq!(orders[1].item, "Hair dryer"); 230 | 231 | assert_eq!(orders[2].customer, "Emma"); 232 | assert_eq!(orders[2].item, "Phone"); 233 | 234 | Ok(()) 235 | } 236 | 237 | #[tokio::test] 238 | async fn multi_mapping_exact() -> Result { 239 | let mut client = establish().await?; 240 | let tx = client.transaction().await?; 241 | 242 | #[derive(Debug, FromSqlRow)] 243 | struct 
Person { 244 | id: i32, 245 | name: String, 246 | } 247 | 248 | #[derive(Debug, FromSqlRow)] 249 | #[row(exact)] 250 | struct Family { 251 | #[row(flatten)] 252 | parent: Person, 253 | #[row(flatten)] 254 | child: Person, 255 | } 256 | 257 | let family = query!( 258 | "SELECT 259 | 1 as id, 'Bob' as name, 260 | 2 as id, 'Ike' as name" 261 | ) 262 | .fetch_one::(&tx) 263 | .await?; 264 | 265 | assert_eq!(family.parent.id, 1); 266 | assert_eq!(family.parent.name, "Bob"); 267 | 268 | assert_eq!(family.child.id, 2); 269 | assert_eq!(family.child.name, "Ike"); 270 | 271 | Ok(()) 272 | } 273 | 274 | #[tokio::test] 275 | async fn multi_mapping_custom_stride() -> Result { 276 | let mut client = establish().await?; 277 | let tx = client.transaction().await?; 278 | 279 | #[derive(Debug, FromSqlRow)] 280 | struct Person { 281 | id: i32, 282 | name: String, 283 | } 284 | 285 | #[derive(Debug, FromSqlRow)] 286 | #[row(exact)] 287 | struct Family { 288 | #[row(flatten, stride = 4)] 289 | parent: Person, 290 | #[row(flatten, stride = 3)] 291 | child: Person, 292 | } 293 | 294 | let family = query!( 295 | "SELECT 296 | 11 as generation, 297 | 1 as id, 'Bob' as name, 42 as age, 298 | 2 as id, 'Ike' as name, 14 as age" 299 | ) 300 | .fetch_one::(&tx) 301 | .await?; 302 | 303 | assert_eq!(family.parent.id, 1); 304 | assert_eq!(family.parent.name, "Bob"); 305 | 306 | assert_eq!(family.child.id, 2); 307 | assert_eq!(family.child.name, "Ike"); 308 | 309 | Ok(()) 310 | } 311 | 312 | #[tokio::test] 313 | async fn multi_mapping_exact_mixed_fields() -> Result { 314 | let mut client = establish().await?; 315 | let tx = client.transaction().await?; 316 | 317 | #[derive(Debug, FromSqlRow)] 318 | struct Person { 319 | id: i32, 320 | name: String, 321 | } 322 | 323 | #[derive(Debug, FromSqlRow)] 324 | #[row(exact)] 325 | struct Family { 326 | generation: i32, 327 | origin: String, 328 | #[row(flatten)] 329 | parent: Person, 330 | #[row(flatten)] 331 | child: Person, 332 | } 333 | 334 | let 
family = query!( 335 | // Order shouldn't matter within one group 336 | "SELECT 337 | 'Germany' as origin, 7 as generation, 338 | 1 as id, 'Bob' as name, 339 | 2 as id, 'Ike' as name" 340 | ) 341 | .fetch_one::(&tx) 342 | .await?; 343 | 344 | assert_eq!(family.generation, 7); 345 | assert_eq!(family.origin, "Germany"); 346 | 347 | assert_eq!(family.parent.id, 1); 348 | assert_eq!(family.parent.name, "Bob"); 349 | 350 | assert_eq!(family.child.id, 2); 351 | assert_eq!(family.child.name, "Ike"); 352 | 353 | Ok(()) 354 | } 355 | 356 | #[tokio::test] 357 | async fn multi_mapping_excessive_colunms() -> Result { 358 | let mut client = establish().await?; 359 | let tx = client.transaction().await?; 360 | 361 | #[derive(Debug, FromSqlRow)] 362 | struct Person { 363 | id: i32, 364 | name: String, 365 | } 366 | 367 | #[derive(Debug, FromSqlRow)] 368 | #[row(split)] 369 | struct Family { 370 | #[row(flatten, split = "id")] 371 | grandparent: Person, 372 | #[row(flatten, split = "id")] 373 | parent: Person, 374 | #[row(flatten, split = "id")] 375 | child: Person, 376 | } 377 | 378 | let family = query!( 379 | "SELECT 380 | 0 as id, 'John' as name, 61 as age, 381 | 1 as id, 'Bob' as name, 32 as age, 382 | 2 as id, 'Ike' as name, 7 as age" 383 | ) 384 | .fetch_one::(&tx) 385 | .await?; 386 | 387 | assert_eq!(family.grandparent.id, 0); 388 | assert_eq!(family.grandparent.name, "John"); 389 | 390 | assert_eq!(family.parent.id, 1); 391 | assert_eq!(family.parent.name, "Bob"); 392 | 393 | assert_eq!(family.child.id, 2); 394 | assert_eq!(family.child.name, "Ike"); 395 | 396 | Ok(()) 397 | } 398 | 399 | #[tokio::test] 400 | async fn multi_mapping_leading_columns() -> Result { 401 | let mut client = establish().await?; 402 | let tx = client.transaction().await?; 403 | 404 | #[derive(Debug, FromSqlRow)] 405 | struct Person { 406 | id: i32, 407 | name: String, 408 | } 409 | 410 | #[derive(Debug, FromSqlRow)] 411 | #[row(split)] 412 | struct Family { 413 | generation: i32, 414 | 
#[row(flatten, split = "id")] 415 | grandparent: Person, 416 | #[row(flatten, split = "id")] 417 | parent: Person, 418 | #[row(flatten, split = "id")] 419 | child: Person, 420 | } 421 | 422 | let family = query!( 423 | "SELECT 424 | 8 as generation, 425 | 0 as id, 'John' as name, 61 as age, 426 | 1 as id, 'Bob' as name, 32 as age, 427 | 2 as id, 'Ike' as name, 7 as age" 428 | ) 429 | .fetch_one::(&tx) 430 | .await?; 431 | 432 | assert_eq!(family.generation, 8); 433 | 434 | assert_eq!(family.grandparent.id, 0); 435 | assert_eq!(family.grandparent.name, "John"); 436 | 437 | assert_eq!(family.parent.id, 1); 438 | assert_eq!(family.parent.name, "Bob"); 439 | 440 | assert_eq!(family.child.id, 2); 441 | assert_eq!(family.child.name, "Ike"); 442 | 443 | Ok(()) 444 | } 445 | 446 | #[tokio::test] 447 | async fn multi_mapping_mixed() -> Result { 448 | let mut client = establish().await?; 449 | let tx = client.transaction().await?; 450 | 451 | #[derive(Debug, FromSqlRow)] 452 | struct Person { 453 | id: i32, 454 | name: String, 455 | } 456 | 457 | #[derive(Debug, FromSqlRow)] 458 | #[row(split)] 459 | struct Family { 460 | generation: i32, 461 | #[row(flatten, split = "id")] 462 | grandparent: Person, 463 | age: i32, 464 | #[row(flatten, split = "id")] 465 | parent: Person, 466 | #[row(flatten, split = "id")] 467 | child: Person, 468 | } 469 | 470 | let family = query!( 471 | "SELECT 472 | 8 as generation, 473 | 0 as id, 'John' as name, 61 as age, 474 | 1 as id, 'Bob' as name, 32 as age, 475 | 2 as id, 'Ike' as name, 7 as age" 476 | ) 477 | .fetch_one::(&tx) 478 | .await?; 479 | 480 | assert_eq!(family.generation, 8); 481 | 482 | assert_eq!(family.grandparent.id, 0); 483 | assert_eq!(family.grandparent.name, "John"); 484 | assert_eq!(family.age, 61); 485 | 486 | assert_eq!(family.parent.id, 1); 487 | assert_eq!(family.parent.name, "Bob"); 488 | 489 | assert_eq!(family.child.id, 2); 490 | assert_eq!(family.child.name, "Ike"); 491 | 492 | Ok(()) 493 | } 494 | 495 | 
#[tokio::test] 496 | async fn multi_mapping_stacked_splits() -> Result { 497 | let mut client = establish().await?; 498 | let tx = client.transaction().await?; 499 | 500 | #[derive(Debug, FromSqlRow)] 501 | #[row(split)] 502 | struct Family { 503 | generation: i32, 504 | #[row(split = "id")] 505 | id: i32, 506 | #[row(split = "id")] 507 | #[row(split = "name")] 508 | name: String, 509 | #[row(split = "age")] 510 | age: i32, 511 | } 512 | 513 | let family = query!( 514 | // Each line represents a partition 515 | "SELECT 516 | 8 as generation, 517 | 0 as id, 'John' as name, 61 as age, 518 | 1 as id, 519 | 'Bob' as name, 520 | 32 as age, 2 as id, 'Ike' as name, 7 as age" 521 | ) 522 | .fetch_one::(&tx) 523 | .await?; 524 | 525 | assert_eq!(family.generation, 8); 526 | assert_eq!(family.id, 0); 527 | assert_eq!(family.name, "Bob"); 528 | assert_eq!(family.age, 32); 529 | 530 | Ok(()) 531 | } 532 | 533 | #[tokio::test] 534 | async fn multi_mapping_many_to_one_group() -> Result { 535 | let mut client = establish().await?; 536 | let tx = client.transaction().await?; 537 | 538 | #[derive(Debug, FromSqlRow)] 539 | #[row(group)] 540 | struct Author { 541 | #[row(key)] 542 | id: i32, 543 | name: String, 544 | 545 | #[row(merge)] 546 | books: Vec, 547 | } 548 | 549 | #[derive(Debug, FromSqlRow)] 550 | struct Book { 551 | title: String, 552 | } 553 | 554 | let authors = query!( 555 | " 556 | SELECT 1 as id, 'J.R.R. Tolkien' as name, 'The Fellowship of the Ring' as title 557 | UNION ALL 558 | SELECT 1 as id, 'J.R.R. Tolkien' as name, 'The Two Towers' as title 559 | UNION ALL 560 | SELECT 2 as id, 'Andrzej Sapkowski' as name, 'The Last Wish' as title 561 | UNION ALL 562 | SELECT 1 as id, 'J.R.R. 
Tolkien' as name, 'Return of the King' as title 563 | " 564 | ) 565 | .fetch::(&tx) 566 | .await?; 567 | 568 | assert_eq!(authors.len(), 3); 569 | 570 | let tolkien = &authors[0]; 571 | let andrzej = &authors[1]; 572 | let tolkien2 = &authors[2]; 573 | 574 | assert_eq!(tolkien.id, 1); 575 | assert_eq!(tolkien.name, "J.R.R. Tolkien"); 576 | assert_eq!(tolkien.books.len(), 2); 577 | assert_eq!(tolkien.books[0].title, "The Fellowship of the Ring"); 578 | assert_eq!(tolkien.books[1].title, "The Two Towers"); 579 | 580 | assert_eq!(andrzej.id, 2); 581 | assert_eq!(andrzej.name, "Andrzej Sapkowski"); 582 | assert_eq!(andrzej.books.len(), 1); 583 | assert_eq!(andrzej.books[0].title, "The Last Wish"); 584 | 585 | assert_eq!(tolkien2.id, 1); 586 | assert_eq!(tolkien2.name, "J.R.R. Tolkien"); 587 | assert_eq!(tolkien2.books.len(), 1); 588 | assert_eq!(tolkien2.books[0].title, "Return of the King"); 589 | 590 | Ok(()) 591 | } 592 | 593 | #[tokio::test] 594 | async fn multi_mapping_many_to_one_hash() -> Result { 595 | let mut client = establish().await?; 596 | let tx = client.transaction().await?; 597 | 598 | #[derive(Debug, FromSqlRow)] 599 | #[row(hash)] 600 | struct Author { 601 | #[row(key)] 602 | id: i32, 603 | name: String, 604 | 605 | #[row(merge)] 606 | books: Vec, 607 | } 608 | 609 | #[derive(Debug, FromSqlRow)] 610 | struct Book { 611 | title: String, 612 | } 613 | 614 | let authors = query!( 615 | " 616 | SELECT 1 as id, 'J.R.R. Tolkien' as name, 'The Fellowship of the Ring' as title 617 | UNION ALL 618 | SELECT 1 as id, 'J.R.R. Tolkien' as name, 'The Two Towers' as title 619 | UNION ALL 620 | SELECT 2 as id, 'Andrzej Sapkowski' as name, 'The Last Wish' as title 621 | UNION ALL 622 | SELECT 1 as id, 'J.R.R. 
Tolkien' as name, 'Return of the King' as title 623 | " 624 | ) 625 | .fetch::(&tx) 626 | .await?; 627 | 628 | assert_eq!(authors.len(), 2); 629 | 630 | let tolkien = &authors[0]; 631 | let andrzej = &authors[1]; 632 | 633 | assert_eq!(tolkien.id, 1); 634 | assert_eq!(tolkien.name, "J.R.R. Tolkien"); 635 | assert_eq!(tolkien.books.len(), 3); 636 | assert_eq!(tolkien.books[0].title, "The Fellowship of the Ring"); 637 | assert_eq!(tolkien.books[1].title, "The Two Towers"); 638 | assert_eq!(tolkien.books[2].title, "Return of the King"); 639 | 640 | assert_eq!(andrzej.id, 2); 641 | assert_eq!(andrzej.name, "Andrzej Sapkowski"); 642 | assert_eq!(andrzej.books.len(), 1); 643 | assert_eq!(andrzej.books[0].title, "The Last Wish"); 644 | 645 | Ok(()) 646 | } 647 | 648 | #[tokio::test] 649 | async fn multi_mapping_many_to_one_group_with_split() -> Result { 650 | let mut client = establish().await?; 651 | let tx = client.transaction().await?; 652 | 653 | #[derive(Debug, FromSqlRow)] 654 | #[row(split, group)] 655 | struct Author { 656 | #[row(split = "id")] 657 | #[row(key)] 658 | id: i32, 659 | name: String, 660 | 661 | #[row(split = "id")] 662 | #[row(merge)] 663 | books: Vec, 664 | } 665 | 666 | #[derive(Debug, FromSqlRow)] 667 | struct Book { 668 | id: i32, 669 | title: String, 670 | } 671 | 672 | let authors = query!( 673 | " 674 | SELECT 1 as id, 'J.R.R. Tolkien' as name, 1 as id, 'The Fellowship of the Ring' as title 675 | UNION ALL 676 | SELECT 1 as id, 'J.R.R. Tolkien' as name, 2 as id, 'The Two Towers' as title 677 | UNION ALL 678 | SELECT 2 as id, 'Andrzej Sapkowski' as name, 3 as id, 'The Last Wish' as title 679 | UNION ALL 680 | SELECT 1 as id, 'J.R.R. 
Tolkien' as name, 4 as id, 'Return of the King' as title 681 | " 682 | ) 683 | .fetch::(&tx) 684 | .await?; 685 | 686 | assert_eq!(authors.len(), 3); 687 | 688 | let tolkien = &authors[0]; 689 | let andrzej = &authors[1]; 690 | let tolkien2 = &authors[2]; 691 | 692 | assert_eq!(tolkien.id, 1); 693 | assert_eq!(tolkien.name, "J.R.R. Tolkien"); 694 | assert_eq!(tolkien.books.len(), 2); 695 | assert_eq!(tolkien.books[0].id, 1); 696 | assert_eq!(tolkien.books[0].title, "The Fellowship of the Ring"); 697 | assert_eq!(tolkien.books[1].id, 2); 698 | assert_eq!(tolkien.books[1].title, "The Two Towers"); 699 | 700 | assert_eq!(andrzej.id, 2); 701 | assert_eq!(andrzej.name, "Andrzej Sapkowski"); 702 | assert_eq!(andrzej.books.len(), 1); 703 | assert_eq!(andrzej.books[0].id, 3); 704 | assert_eq!(andrzej.books[0].title, "The Last Wish"); 705 | 706 | assert_eq!(tolkien2.books[0].id, 4); 707 | assert_eq!(tolkien2.books[0].title, "Return of the King"); 708 | 709 | Ok(()) 710 | } 711 | 712 | #[tokio::test] 713 | async fn parameter_list() -> Result { 714 | let mut client = establish().await?; 715 | let tx = client.transaction().await?; 716 | 717 | #[derive(FromSqlRow)] 718 | struct Id(i32); 719 | 720 | let filter: &[i32] = &[1, 3]; 721 | 722 | let query = query!( 723 | "select * from ( 724 | select 1 as id 725 | union all select 2 726 | union all select 3 727 | ) as X where id = any($ids)", 728 | ids = filter, 729 | ); 730 | 731 | let ids: Vec = query.fetch(&tx).await?; 732 | 733 | assert_eq!(ids.len(), 2); 734 | assert_eq!(ids[0].0, 1); 735 | assert_eq!(ids[1].0, 3); 736 | 737 | Ok(()) 738 | } 739 | 740 | #[tokio::test] 741 | async fn optional_flatten() -> Result { 742 | let mut client = establish().await?; 743 | let tx = client.transaction().await?; 744 | 745 | #[derive(FromSqlRow, Clone)] 746 | #[row(split)] 747 | struct Family { 748 | #[row(flatten, split = "id")] 749 | child: Person, 750 | #[row(flatten, split = "id")] 751 | father: Option, 752 | } 753 | 754 | 
#[derive(FromSqlRow, Clone)] 755 | struct Person { 756 | id: i32, 757 | name: String, 758 | } 759 | 760 | let families: Vec = query!( 761 | "SELECT 1 as id, 'Luke Skywalker' as name, 2 as id, 'Darth Vader' as name 762 | UNION ALL SELECT 2, 'Darth Vader', NULL, NULL" 763 | ) 764 | .fetch(&tx) 765 | .await?; 766 | 767 | let luke = families[0].clone(); 768 | let vader = families[1].clone(); 769 | 770 | assert_eq!(luke.child.id, 1); 771 | assert_eq!(luke.child.name, "Luke Skywalker"); 772 | assert_eq!(luke.father.as_ref().unwrap().id, 2); 773 | assert_eq!(luke.father.as_ref().unwrap().name, "Darth Vader"); 774 | 775 | assert_eq!(vader.child.id, 2); 776 | assert_eq!(vader.child.name, "Darth Vader"); 777 | assert!(vader.father.is_none()); 778 | 779 | Ok(()) 780 | } 781 | 782 | #[tokio::test] 783 | async fn optional_flatten_invalid_type() -> Result { 784 | let mut client = establish().await?; 785 | let tx = client.transaction().await?; 786 | 787 | #[derive(FromSqlRow, Clone)] 788 | #[row(split)] 789 | struct Family { 790 | #[row(flatten, split = "id")] 791 | child: Person, 792 | #[row(flatten, split = "id")] 793 | father: Option, 794 | } 795 | 796 | #[derive(FromSqlRow, Clone)] 797 | struct Person { 798 | id: i32, 799 | name: String, 800 | } 801 | 802 | let families = query!( 803 | "SELECT 1 as id, 'Luke Skywalker' as name, NULL as id, 'Darth Vader' as name 804 | UNION ALL SELECT 2, 'Darth Vader', 'a number', 'The Force'" 805 | ) 806 | .fetch::(&tx) 807 | .await; 808 | 809 | // 'a number' is not of the correct type, so this should fail 810 | assert!(families.is_err()); 811 | 812 | Ok(()) 813 | } 814 | 815 | #[tokio::test] 816 | async fn optional_flatten_nested_option() -> Result { 817 | let mut client = establish().await?; 818 | let tx = client.transaction().await?; 819 | 820 | #[derive(FromSqlRow, Clone)] 821 | #[row(split)] 822 | struct Family { 823 | #[row(flatten, split = "id")] 824 | child: Person, 825 | #[row(flatten, split = "id")] 826 | father: Option, 827 | } 828 
| 829 | #[derive(FromSqlRow, Clone)] 830 | struct Person { 831 | id: i32, 832 | name: Option, 833 | } 834 | 835 | let families: Vec = query!( 836 | "SELECT 1 as id, 'Luke Skywalker' as name, 2 as id, 'Darth Vader' as name 837 | UNION ALL SELECT 2, 'Darth Vader', 3, NULL" 838 | ) 839 | .fetch(&tx) 840 | .await?; 841 | 842 | let luke = families[0].clone(); 843 | let vader = families[1].clone(); 844 | 845 | assert_eq!(luke.child.id, 1); 846 | assert_eq!(luke.child.name.unwrap(), "Luke Skywalker"); 847 | let luke_father = luke.father.unwrap(); 848 | assert_eq!(luke_father.id, 2); 849 | assert_eq!(luke_father.name, Some("Darth Vader".into())); 850 | 851 | assert_eq!(vader.child.id, 2); 852 | assert_eq!(vader.child.name.unwrap(), "Darth Vader"); 853 | let vader_father = vader.father.unwrap(); 854 | assert_eq!(vader_father.id, 3); 855 | assert_eq!(vader_father.name, None); 856 | 857 | Ok(()) 858 | } 859 | -------------------------------------------------------------------------------- /postgres_query/tests/query_macro.rs: -------------------------------------------------------------------------------- 1 | use bytes::BytesMut; 2 | use postgres_query::*; 3 | use postgres_types::{IsNull, ToSql, Type}; 4 | 5 | #[test] 6 | fn text_only() { 7 | let query = query!("SELECT id, name FROM people"); 8 | 9 | assert_eq!(query.sql(), "SELECT id, name FROM people"); 10 | assert_params_eq(query.parameters(), &[]) 11 | } 12 | 13 | #[test] 14 | fn escape_dollar() { 15 | let query = query!("SELECT $$"); 16 | assert_eq!(query.sql(), "SELECT $"); 17 | assert_params_eq(query.parameters(), &[]) 18 | } 19 | 20 | #[test] 21 | fn parameter_substitution_implicit_name() { 22 | let age = 42; 23 | let query = query!("SELECT id, name FROM people WHERE age = $age", age); 24 | 25 | assert_eq!(query.sql(), "SELECT id, name FROM people WHERE age = $1"); 26 | assert_params_eq(query.parameters(), &[(&age, &Type::INT4)]) 27 | } 28 | 29 | #[test] 30 | fn parameter_substitution_explicit_name() { 31 | let query 
= query!("SELECT id, name FROM people WHERE age = $age", age = 42); 32 | 33 | assert_eq!(query.sql(), "SELECT id, name FROM people WHERE age = $1"); 34 | assert_params_eq(query.parameters(), &[(&42, &Type::INT4)]) 35 | } 36 | 37 | #[test] 38 | fn parameter_substitution_multiple_parameters() { 39 | let query = query!("$a $b $c", a = 42, b = "John Wick", c = Option::::None,); 40 | 41 | assert_eq!(query.sql(), "$1 $2 $3"); 42 | assert_params_eq( 43 | query.parameters(), 44 | &[ 45 | (&42, &Type::INT4), 46 | (&"John Wick", &Type::TEXT), 47 | (&Option::::None, &Type::INT4), 48 | ], 49 | ) 50 | } 51 | 52 | #[test] 53 | fn dynamic_query() { 54 | let filters = ["age > $min_age", "name LIKE $name"].join(" AND "); 55 | 56 | let query = query_dyn!( 57 | &format!("SELECT * FROM people WHERE {}", filters), 58 | min_age = 32, 59 | name = "%John%", 60 | ) 61 | .unwrap(); 62 | 63 | assert_eq!( 64 | query.sql(), 65 | "SELECT * FROM people WHERE age > $1 AND name LIKE $2" 66 | ); 67 | } 68 | 69 | #[test] 70 | fn dynamic_query_dynamic_bindings() -> Result<()> { 71 | let mut filters = Vec::new(); 72 | let mut bindings = Vec::<(&str, Parameter)>::new(); 73 | 74 | filters.push("age > $min_age"); 75 | bindings.push(("min_age", &32)); 76 | 77 | filters.push("name LIKE $name"); 78 | bindings.push(("name", &"%John%")); 79 | 80 | let filters = filters.join(" AND "); 81 | let sql = format!("SELECT * FROM people WHERE {}", filters); 82 | 83 | let query = query_dyn!(&sql, ..bindings).unwrap(); 84 | 85 | assert_eq!( 86 | query.sql(), 87 | "SELECT * FROM people WHERE age > $1 AND name LIKE $2" 88 | ); 89 | 90 | assert_params_eq( 91 | query.parameters(), 92 | &[(&32, &Type::INT4), (&"%John%", &Type::TEXT)], 93 | ); 94 | 95 | Ok(()) 96 | } 97 | 98 | fn assert_params_eq<'a>(a: &[&'a (dyn ToSql + Sync)], b: &[(&'a dyn ToSql, &'a Type)]) { 99 | assert_eq!(a.len(), b.len()); 100 | for (a, (b, ty)) in a.iter().copied().zip(b.iter().copied()) { 101 | sql_eq(a, b, ty); 102 | } 103 | } 104 | 105 | /// 
Check if two SQL values are of the same type and value 106 | fn sql_eq(a: &dyn ToSql, b: &dyn ToSql, ty: &Type) -> bool { 107 | let mut a_buffer = BytesMut::new(); 108 | let mut b_buffer = BytesMut::new(); 109 | 110 | let a_result = a.to_sql_checked(ty, &mut a_buffer); 111 | let b_result = b.to_sql_checked(ty, &mut b_buffer); 112 | 113 | let is_null = |null| match null { 114 | IsNull::Yes => true, 115 | IsNull::No => false, 116 | }; 117 | 118 | a_result.is_ok() 119 | && b_result.is_ok() 120 | && is_null(a_result.unwrap()) == is_null(b_result.unwrap()) 121 | && a_buffer == b_buffer 122 | } 123 | -------------------------------------------------------------------------------- /postgres_query_macro/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "postgres_query_macro" 3 | version = "0.3.3" 4 | authors = ["Christofer Nolander "] 5 | edition = "2018" 6 | description = "Write and execute SQL queries with ease" 7 | repository = "https://github.com/nolanderc/rust-postgres-macro" 8 | license = "MIT OR Apache-2.0" 9 | readme = "../README.md" 10 | 11 | [lib] 12 | proc-macro = true 13 | 14 | [dependencies] 15 | proc-macro-hack = "0.5.19" 16 | quote = "1.0.8" 17 | syn = { version = "1.0.58", features = ["full"] } 18 | proc-macro2 = "1.0.24" 19 | -------------------------------------------------------------------------------- /postgres_query_macro/src/from_sql_row.rs: -------------------------------------------------------------------------------- 1 | mod attrs; 2 | mod partition; 3 | mod validate; 4 | 5 | use attrs::{ContainerAttributes, FieldAttributes, MergeKind, PartitionKind}; 6 | use partition::partition_initializers; 7 | use proc_macro2::{Span, TokenStream}; 8 | use quote::*; 9 | use syn::{ 10 | spanned::Spanned, 11 | token::{Enum, Union}, 12 | Data, DataEnum, DataStruct, DataUnion, DeriveInput, Field, Fields, Ident, Result, Type, 13 | }; 14 | use validate::validate_properties; 15 | 16 | pub fn 
derive(input: DeriveInput) -> TokenStream { 17 | let ident = &input.ident; 18 | 19 | let Extractor { 20 | getters, 21 | locals, 22 | columns, 23 | merge, 24 | } = match extract_columns(&input) { 25 | Ok(columns) => columns, 26 | Err(e) => return e.to_compile_error(), 27 | }; 28 | 29 | let constructor = make_constructor(&input, locals); 30 | 31 | let multi = merge.map(|merge| make_merge(merge, &constructor, &getters)); 32 | 33 | let lib = lib!(); 34 | quote! { 35 | impl #lib::FromSqlRow for #ident { 36 | const COLUMN_COUNT: usize = #columns; 37 | 38 | fn from_row(__row: &R) -> Result 39 | where 40 | R: #lib::extract::Row 41 | { 42 | #getters 43 | Ok(#constructor) 44 | } 45 | 46 | #multi 47 | } 48 | } 49 | } 50 | 51 | fn make_constructor(input: &DeriveInput, locals: impl IntoIterator) -> TokenStream { 52 | let ident = &input.ident; 53 | 54 | let mut locals = locals.into_iter().map(|local| { 55 | let ident = local.ident; 56 | let lib = lib!(); 57 | match local.merge { 58 | None => (ident.clone(), quote! { #ident }), 59 | Some(base) => ( 60 | ident.clone(), 61 | quote! { 62 | { 63 | let mut collections = <#base as Default>::default(); 64 | #lib::extract::Merge::insert(&mut collections, #ident); 65 | collections 66 | } 67 | }, 68 | ), 69 | } 70 | }); 71 | 72 | match &input.data { 73 | Data::Struct(data) => match data.fields { 74 | Fields::Unnamed(_) => { 75 | let values = locals.map(|(_, value)| value); 76 | quote! { 77 | #ident ( #(#values),* ) 78 | } 79 | } 80 | Fields::Named(_) => { 81 | let fields = locals.map(|(ident, value)| quote! { #ident: #value }); 82 | quote! { 83 | #ident { #(#fields),* } 84 | } 85 | } 86 | Fields::Unit => { 87 | if locals.next().is_none() { 88 | quote! 
{ 89 | #ident 90 | } 91 | } else { 92 | unreachable!("Attempted to construct unit struct with fields"); 93 | } 94 | } 95 | }, 96 | _ => unreachable!(), 97 | } 98 | } 99 | 100 | fn make_merge(merge: Merge, constructor: &TokenStream, getters: &TokenStream) -> TokenStream { 101 | let lib = lib!(); 102 | 103 | let Merge { 104 | kind, 105 | keys, 106 | collections, 107 | } = merge; 108 | 109 | let key_idents = keys.iter().map(|(ident, _)| ident).collect::>(); 110 | let collection_idents = collections 111 | .iter() 112 | .map(|(ident, _)| ident) 113 | .collect::>(); 114 | 115 | let body = match kind { 116 | MergeKind::Group => { 117 | quote! { 118 | let mut __objects = Vec::::new(); 119 | for __row in __rows { 120 | #getters 121 | 122 | if let Some(__last) = __objects.last_mut() { 123 | if #(#key_idents == __last.#key_idents) && * { 124 | #( 125 | #lib::extract::Merge::insert( 126 | &mut __last.#collection_idents, 127 | #collection_idents 128 | ); 129 | )* 130 | } else { 131 | __objects.push(#constructor); 132 | } 133 | } else { 134 | __objects.push(#constructor); 135 | } 136 | } 137 | Ok(__objects) 138 | } 139 | } 140 | 141 | MergeKind::Hash => { 142 | let key_types = keys.iter().map(|(_, ty)| ty); 143 | 144 | quote! { 145 | let mut __objects = Vec::::new(); 146 | let mut __indices = ::std::collections::HashMap::<(#(#key_types,)*), usize>::new(); 147 | 148 | for __row in __rows { 149 | #getters 150 | 151 | let __key = (#(#key_idents,)*); 152 | 153 | if let Some(&__index) = __indices.get(&__key) { 154 | #( 155 | #lib::extract::Merge::insert( 156 | &mut __objects[__index].#collection_idents, 157 | #collection_idents 158 | ); 159 | )* 160 | } else { 161 | let __index = __objects.len(); 162 | __indices.insert(__key.clone(), __index); 163 | let (#(#key_idents,)*) = __key; 164 | __objects.push(#constructor); 165 | } 166 | } 167 | 168 | Ok(__objects) 169 | } 170 | } 171 | }; 172 | 173 | quote! 
{ 174 | fn from_row_multi(__rows: &[R]) -> Result, #lib::extract::Error> 175 | where 176 | R: #lib::extract::Row 177 | { 178 | #body 179 | } 180 | } 181 | } 182 | 183 | enum Index { 184 | Position, 185 | Flatten, 186 | Name(String), 187 | } 188 | 189 | struct Extractor { 190 | getters: TokenStream, 191 | locals: Vec, 192 | columns: TokenStream, 193 | merge: Option, 194 | } 195 | 196 | struct Local { 197 | ident: Ident, 198 | merge: Option, 199 | } 200 | 201 | struct Merge { 202 | kind: MergeKind, 203 | keys: Vec<(Ident, Type)>, 204 | collections: Vec<(Ident, Type)>, 205 | } 206 | 207 | struct Property { 208 | ident: Ident, 209 | ty: Type, 210 | attrs: FieldAttributes, 211 | index: Index, 212 | span: Span, 213 | field: Field, 214 | } 215 | 216 | fn extract_columns(input: &DeriveInput) -> Result { 217 | match &input.data { 218 | Data::Struct(data) => { 219 | let container = ContainerAttributes::from_attrs(&input.attrs)?; 220 | let props = extract_properties(&data)?; 221 | 222 | validate_properties(&container, &props)?; 223 | 224 | let columns = count_columns(&props); 225 | 226 | let merge = extract_merge(&container, &props); 227 | 228 | let (getters, locals) = if let Some(kind) = container.partition { 229 | partition_initializers(props, kind)? 230 | } else { 231 | let row = Ident::new("__row", Span::call_site()); 232 | field_initializers(&props, &row) 233 | }; 234 | 235 | Ok(Extractor { 236 | getters, 237 | locals, 238 | columns, 239 | merge, 240 | }) 241 | } 242 | Data::Enum(DataEnum { 243 | enum_token: Enum { span }, 244 | .. 245 | }) 246 | | Data::Union(DataUnion { 247 | union_token: Union { span, .. }, 248 | .. 
249 | }) => Err(err!( 250 | *span, 251 | "`FromSqlRow` may only be derived for `struct`s" 252 | )), 253 | } 254 | } 255 | 256 | fn extract_merge(container: &ContainerAttributes, props: &[Property]) -> Option { 257 | container.merge.map(|kind| Merge { 258 | kind: kind.value, 259 | keys: props 260 | .iter() 261 | .filter_map(|prop| match prop.attrs.key { 262 | Some(_) => Some((prop.ident.clone(), prop.ty.clone())), 263 | None => None, 264 | }) 265 | .collect(), 266 | collections: props 267 | .iter() 268 | .filter_map(|prop| match prop.attrs.merge { 269 | Some(_) => Some((prop.ident.clone(), prop.ty.clone())), 270 | None => None, 271 | }) 272 | .collect(), 273 | }) 274 | } 275 | 276 | fn extract_properties(data: &DataStruct) -> Result> { 277 | let mut props = Vec::new(); 278 | 279 | for (i, field) in data.fields.iter().enumerate() { 280 | let attrs = FieldAttributes::from_attrs(&field.attrs)?; 281 | 282 | let index = match &field.ident { 283 | _ if attrs.merge.is_some() => Index::Flatten, 284 | None => Index::Position, 285 | Some(_) if attrs.flatten => Index::Flatten, 286 | Some(name) => { 287 | if let Some(name) = attrs.rename.clone() { 288 | Index::Name(name) 289 | } else { 290 | Index::Name(name.to_string()) 291 | } 292 | } 293 | }; 294 | 295 | let ident = field 296 | .ident 297 | .clone() 298 | .unwrap_or_else(|| Ident::new(&format!("column_{}", i), Span::call_site())); 299 | 300 | let ty = if attrs.merge.is_some() { 301 | let base = &field.ty; 302 | let lib = lib!(); 303 | let qualifier = quote! { 304 | <#base as #lib::extract::Merge>::Item 305 | }; 306 | syn::parse2(qualifier)? 
307 | } else { 308 | field.ty.clone() 309 | }; 310 | 311 | props.push(Property { 312 | ident, 313 | ty, 314 | attrs, 315 | index, 316 | span: field.span(), 317 | field: field.clone(), 318 | }); 319 | } 320 | 321 | Ok(props) 322 | } 323 | 324 | fn field_initializers(props: &[Property], row: &Ident) -> (TokenStream, Vec) { 325 | let mut initializers = Vec::new(); 326 | let mut locals = Vec::new(); 327 | 328 | for (i, prop) in props.iter().enumerate() { 329 | let ident = &prop.ident; 330 | let ty = &prop.ty; 331 | let lib = lib!(); 332 | 333 | let getter = match &prop.index { 334 | Index::Position => quote! { 335 | #lib::extract::Row::try_get(#row, #i)? 336 | }, 337 | Index::Name(name) => quote! { 338 | #lib::extract::Row::try_get(#row, #name)? 339 | }, 340 | Index::Flatten => quote! { 341 | <#ty as #lib::FromSqlRow>::from_row(#row)? 342 | }, 343 | }; 344 | 345 | initializers.push(quote! { 346 | let #ident: #ty = #getter; 347 | }); 348 | 349 | let merge = prop.attrs.merge.map(|_| prop.field.ty.clone()); 350 | locals.push(Local { 351 | ident: ident.clone(), 352 | merge, 353 | }); 354 | } 355 | 356 | let initializers = quote! { 357 | #(#initializers)* 358 | }; 359 | 360 | (initializers, locals) 361 | } 362 | 363 | fn count_columns(props: &[Property]) -> TokenStream { 364 | let mut external = Vec::new(); 365 | let mut fields: usize = 0; 366 | 367 | for prop in props { 368 | match prop.index { 369 | Index::Position | Index::Name(_) => fields += 1, 370 | Index::Flatten => { 371 | let ty = &prop.ty; 372 | let lib = lib!(); 373 | let count = quote! { <#ty as #lib::FromSqlRow>::COLUMN_COUNT }; 374 | external.push(count); 375 | } 376 | } 377 | } 378 | 379 | quote! 
{ 380 | #fields #(+ #external)* 381 | } 382 | } 383 | -------------------------------------------------------------------------------- /postgres_query_macro/src/from_sql_row/attrs.rs: -------------------------------------------------------------------------------- 1 | use proc_macro2::Span; 2 | use std::fmt::Display; 3 | use std::ops::Deref; 4 | use std::str::FromStr; 5 | use syn::{spanned::Spanned, Attribute, Lit, Meta, NestedMeta, Result}; 6 | 7 | pub struct ContainerAttributes { 8 | pub partition: Option>, 9 | pub merge: Option>, 10 | } 11 | 12 | pub struct FieldAttributes { 13 | pub flatten: bool, 14 | pub rename: Option, 15 | pub splits: Vec>, 16 | pub stride: Option>, 17 | pub key: Option>, 18 | pub merge: Option>, 19 | } 20 | 21 | #[derive(Copy, Clone)] 22 | pub struct Attr { 23 | pub span: Span, 24 | pub value: T, 25 | } 26 | 27 | #[derive(Copy, Clone)] 28 | pub enum PartitionKind { 29 | Exact, 30 | Split, 31 | } 32 | 33 | #[derive(Copy, Clone)] 34 | pub enum MergeKind { 35 | Group, 36 | Hash, 37 | } 38 | 39 | impl Attr { 40 | pub fn new(span: impl Spanned, value: T) -> Self { 41 | Attr { 42 | span: span.span(), 43 | value, 44 | } 45 | } 46 | } 47 | 48 | impl Deref for Attr { 49 | type Target = T; 50 | fn deref(&self) -> &Self::Target { 51 | &self.value 52 | } 53 | } 54 | 55 | macro_rules! set_or_err { 56 | ($var:ident, $val:expr, $err:expr) => { 57 | if $var.is_some() { 58 | Err($err) 59 | } else { 60 | $var = Some($val); 61 | Ok(()) 62 | } 63 | }; 64 | } 65 | 66 | macro_rules! err_duplicate_attribute { 67 | ($item:expr, $attr:literal) => { 68 | err!($item, concat!("attribute specified twice: `", $attr, "`")) 69 | }; 70 | } 71 | 72 | macro_rules! err_multiple_partition { 73 | ($item:expr) => { 74 | err!( 75 | $item, 76 | "only one partitioning scheme may be specified (either `split` or `exact`)" 77 | ) 78 | }; 79 | } 80 | 81 | macro_rules! 
err_expected_variant { 82 | ($item:expr, $name:literal, [$($kind:ident),+]) => { 83 | err!( 84 | $item, 85 | concat!( 86 | "malformed attribute, expected ", 87 | err_expected_variant!(@concat: $name, [$($kind),+]) 88 | ) 89 | ) 90 | }; 91 | (@concat: $name:literal, [$head:ident, $mid:ident, $($tail:ident),+]) => { 92 | concat!( 93 | err_expected_variant!(@format: $name, $head), ", ", 94 | err_expected_variant!(@concat: $name, [$mid, $($tail),+]) 95 | ) 96 | }; 97 | (@concat: $name:literal, [$head:ident, $last:ident]) => { 98 | concat!( 99 | err_expected_variant!(@format: $name, $head), " or ", 100 | err_expected_variant!(@format: $name, $last) 101 | ) 102 | }; 103 | (@concat: $name:literal, [$head:ident]) => { 104 | err_expected_variant!(@format: $name, $head) 105 | }; 106 | (@format: $name:literal, Path) => { concat!("an identifier (`", $name, "`)") }; 107 | (@format: $name:literal, NameValue) => { concat!("key-value (`", $name, " = \"...\"`)") }; 108 | (@format: $name:literal, List) => { concat!("a list (`", $name, "(...)`)") }; 109 | } 110 | 111 | macro_rules! match_item { 112 | ( 113 | ($item:expr) { 114 | $( 115 | $ident:literal => { 116 | $( 117 | $meta:ident ($binding:pat) => $expr:expr 118 | ),+ $(,)? 119 | } 120 | ),* $(,)? 
121 | } 122 | ) => { 123 | match $item { 124 | $( 125 | item if item.path().is_ident($ident) => match item { 126 | $( 127 | $meta ($binding) => $expr, 128 | )+ 129 | _ => return Err(err_expected_variant!( 130 | item, 131 | $ident, 132 | [$($meta),+] 133 | )), 134 | }, 135 | )* 136 | item => return Err(err!(item, "unknown attribute")), 137 | } 138 | }; 139 | } 140 | 141 | impl ContainerAttributes { 142 | pub fn from_attrs<'a>( 143 | attrs: impl IntoIterator, 144 | ) -> Result { 145 | let items = attribute_items("row", attrs)?; 146 | 147 | let mut partition = None; 148 | let mut merge = None; 149 | 150 | for item in &items { 151 | use Meta::Path; 152 | 153 | match_item!((item) { 154 | "exact" => { 155 | Path(_) => { 156 | let kind = Attr::new(item, PartitionKind::Exact); 157 | set_or_err!(partition, kind, err_multiple_partition!(item))?; 158 | } 159 | }, 160 | "split" => { 161 | Path(_) => { 162 | let kind = Attr::new(item, PartitionKind::Split); 163 | set_or_err!(partition, kind, err_multiple_partition!(item))?; 164 | } 165 | }, 166 | "group" => { 167 | Path(_) => { 168 | let kind = Attr::new(item, MergeKind::Group); 169 | set_or_err!(merge, kind, err_multiple_partition!(item))?; 170 | } 171 | }, 172 | "hash" => { 173 | Path(_) => { 174 | let kind = Attr::new(item, MergeKind::Hash); 175 | set_or_err!(merge, kind, err_multiple_partition!(item))?; 176 | } 177 | }, 178 | }) 179 | } 180 | 181 | let container = ContainerAttributes { partition, merge }; 182 | 183 | Ok(container) 184 | } 185 | } 186 | 187 | impl FieldAttributes { 188 | pub fn from_attrs<'a>( 189 | attrs: impl IntoIterator, 190 | ) -> Result { 191 | let items = attribute_items("row", attrs)?; 192 | 193 | let mut flatten = None; 194 | let mut rename = None; 195 | let mut splits = Vec::new(); 196 | let mut stride = None; 197 | let mut key = None; 198 | let mut merge = None; 199 | 200 | for item in &items { 201 | use Meta::{NameValue, Path}; 202 | 203 | match_item!((item) { 204 | "flatten" => { 205 | Path(_) 
=> { 206 | set_or_err!(flatten, true, err_duplicate_attribute!(item, "flatten"))? 207 | } 208 | }, 209 | "rename" => { 210 | NameValue(pair) => { 211 | let text = lit_string(&pair.lit)?; 212 | set_or_err!(rename, text, err_duplicate_attribute!(item, "rename"))?; 213 | } 214 | }, 215 | "split" => { 216 | NameValue(pair) => { 217 | let text = lit_string(&pair.lit)?; 218 | splits.push(Attr::new(pair, text)); 219 | } 220 | }, 221 | "stride" => { 222 | NameValue(pair) => { 223 | let step = lit_int(&pair.lit)?; 224 | let step = Attr::new(pair, step); 225 | set_or_err!(stride, step, err_duplicate_attribute!(item, "stride"))? 226 | } 227 | }, 228 | "key" => { 229 | Path(_) => { 230 | let attr = Attr::new(item, ()); 231 | set_or_err!(key, attr, err_duplicate_attribute!(item, "key"))? 232 | } 233 | }, 234 | "merge" => { 235 | Path(_) => { 236 | let attr = Attr::new(item, ()); 237 | set_or_err!(merge, attr, err_duplicate_attribute!(item, "merge"))? 238 | } 239 | }, 240 | }) 241 | } 242 | 243 | let field = FieldAttributes { 244 | flatten: flatten.unwrap_or(false), 245 | rename, 246 | splits, 247 | stride, 248 | key, 249 | merge, 250 | }; 251 | 252 | Ok(field) 253 | } 254 | } 255 | 256 | fn attribute_items<'a>( 257 | name: &str, 258 | attrs: impl IntoIterator, 259 | ) -> Result> { 260 | let mut items = Vec::new(); 261 | 262 | for attr in attrs { 263 | if !attr.path.is_ident(name) { 264 | continue; 265 | } 266 | 267 | let meta = attr.parse_meta()?; 268 | let list = match meta { 269 | Meta::List(list) => list, 270 | _ => return Err(err!(attr, "expected list: #[row(...)]")), 271 | }; 272 | 273 | for inner in list.nested { 274 | match inner { 275 | NestedMeta::Lit(_) => return Err(err!(inner, "unexpected literal")), 276 | NestedMeta::Meta(item) => items.push(item), 277 | } 278 | } 279 | } 280 | 281 | Ok(items) 282 | } 283 | 284 | fn lit_string(lit: &Lit) -> Result { 285 | match lit { 286 | Lit::Str(text) => Ok(text.value()), 287 | _ => Err(err!(lit, "expected string literal")), 288 
| } 289 | } 290 | 291 | fn lit_int(lit: &Lit) -> Result 292 | where 293 | N: FromStr, 294 | N::Err: Display, 295 | { 296 | match lit { 297 | Lit::Int(int) => int.base10_parse(), 298 | _ => Err(err!(lit, "expected integer literal")), 299 | } 300 | } 301 | -------------------------------------------------------------------------------- /postgres_query_macro/src/from_sql_row/partition.rs: -------------------------------------------------------------------------------- 1 | use super::attrs::Attr; 2 | use super::{field_initializers, Index, Local, PartitionKind, Property}; 3 | use proc_macro2::{Span, TokenStream}; 4 | use quote::*; 5 | use std::mem; 6 | use syn::{Ident, Result}; 7 | 8 | struct ExactPartition { 9 | len: TokenStream, 10 | properties: Vec, 11 | } 12 | 13 | enum Split { 14 | Column(String), 15 | Group(Vec), 16 | } 17 | 18 | pub(super) fn partition_initializers( 19 | props: Vec, 20 | kind: Attr, 21 | ) -> Result<(TokenStream, Vec)> { 22 | match kind.value { 23 | PartitionKind::Exact => { 24 | let partitions = exact::partition(props)?; 25 | Ok(exact::initializers(partitions)) 26 | } 27 | PartitionKind::Split => { 28 | let splits = split::partition(props); 29 | 30 | let split_count = splits 31 | .iter() 32 | .filter(|split| match split { 33 | Split::Column(_) => true, 34 | _ => false, 35 | }) 36 | .count(); 37 | 38 | if split_count == 0 { 39 | return Err(err!( 40 | kind.span, 41 | "using split partitioning without any `#[row(split = \"...\")]` points" 42 | )); 43 | } 44 | 45 | Ok(split::initializers(splits)) 46 | } 47 | } 48 | } 49 | 50 | mod exact { 51 | use super::*; 52 | 53 | pub(super) fn partition(props: Vec) -> Result> { 54 | let mut partitions = Vec::new(); 55 | let mut props = props.into_iter().peekable(); 56 | 57 | let merge = |prop: &Property| match prop.index { 58 | Index::Position | Index::Name(_) => prop.attrs.stride.is_none(), 59 | _ => false, 60 | }; 61 | 62 | while let Some(prop) = props.next() { 63 | match prop { 64 | prop if 
prop.attrs.stride.is_some() => { 65 | let stride = prop.attrs.stride.unwrap().value; 66 | partitions.push(ExactPartition { 67 | len: quote! { #stride }, 68 | properties: vec![prop], 69 | }); 70 | } 71 | 72 | prop if merge(&prop) => { 73 | let mut properties = vec![prop]; 74 | 75 | while let Some(prop) = props.peek() { 76 | if merge(prop) { 77 | properties.push(props.next().unwrap()); 78 | } else { 79 | break; 80 | } 81 | } 82 | 83 | let len = properties.len(); 84 | partitions.push(ExactPartition { 85 | len: quote! { #len }, 86 | properties, 87 | }); 88 | } 89 | 90 | prop if is_match!(prop.index, Index::Flatten) => { 91 | let ty = &prop.ty; 92 | let lib = lib!(); 93 | let len = quote! { 94 | <#ty as #lib::FromSqlRow>::COLUMN_COUNT 95 | }; 96 | partitions.push(ExactPartition { 97 | len, 98 | properties: vec![prop], 99 | }); 100 | } 101 | 102 | _ => return Err(err!(prop.span, "failed to compute `stride` for field")), 103 | } 104 | } 105 | 106 | Ok(partitions) 107 | } 108 | 109 | pub(super) fn initializers(partitions: Vec) -> (TokenStream, Vec) { 110 | let mut getters = Vec::new(); 111 | let mut locals = Vec::new(); 112 | 113 | let mut previous_end = Ident::new("__begin", Span::call_site()); 114 | 115 | getters.push(quote! { let #previous_end = 0; }); 116 | 117 | for (i, partition) in partitions.into_iter().enumerate() { 118 | let end = Ident::new(&format!("__end_{}", i), Span::call_site()); 119 | let current = Ident::new(&format!("__slice_{}", i), Span::call_site()); 120 | let len = partition.len; 121 | 122 | let lib = lib!(); 123 | let advance = quote! { 124 | let #end = #previous_end + #len; 125 | let #current = #lib::extract::Row::slice(__row, #previous_end..#end)?; 126 | let #current = &#current; 127 | }; 128 | 129 | previous_end = end; 130 | 131 | let (initializers, idents) = field_initializers(&partition.properties, ¤t); 132 | 133 | locals.extend(idents); 134 | 135 | let getter = quote! 
{ 136 | #advance 137 | #initializers 138 | }; 139 | 140 | getters.push(getter); 141 | } 142 | 143 | let getters = quote! { 144 | #(#getters)* 145 | }; 146 | 147 | (getters, locals) 148 | } 149 | } 150 | 151 | mod split { 152 | use super::*; 153 | 154 | pub(super) fn partition(props: Vec) -> Vec { 155 | let mut splits = Vec::new(); 156 | let mut group = Vec::new(); 157 | 158 | for prop in props { 159 | let mut split_column = |name: String| { 160 | if !group.is_empty() { 161 | splits.push(Split::Group(mem::take(&mut group))); 162 | } 163 | splits.push(Split::Column(name)); 164 | }; 165 | 166 | for name in &prop.attrs.splits { 167 | split_column(name.value.clone()); 168 | } 169 | 170 | group.push(prop); 171 | } 172 | 173 | if !group.is_empty() { 174 | splits.push(Split::Group(group)) 175 | } 176 | 177 | splits 178 | } 179 | 180 | pub(super) fn initializers(layout: Vec) -> (TokenStream, Vec) { 181 | let mut fragments = Vec::new(); 182 | let mut locals = Vec::new(); 183 | 184 | let splits = layout.iter().filter_map(|kind| match kind { 185 | Split::Column(name) => Some(name.as_str()), 186 | _ => None, 187 | }); 188 | 189 | let partition_ident = |i| Ident::new(&format!("__partition_{}", i), Span::call_site()); 190 | let first_partition = partition_ident(0); 191 | 192 | let lib = lib!(); 193 | let row_trait = quote! { #lib::extract::Row }; 194 | 195 | fragments.push(quote! { 196 | let columns = #row_trait::columns(__row); 197 | let splits: &[&'static str] = &[#(#splits),*]; 198 | let mut splits = #lib::extract::split_columns_many(columns, &splits); 199 | }); 200 | 201 | let next_partition = quote! { 202 | #row_trait::slice(__row, splits.next().unwrap()?)? 203 | }; 204 | 205 | let advance = |partition: &Ident| { 206 | quote! 
{ 207 | let #partition = #next_partition; 208 | let #partition = &#partition; 209 | } 210 | }; 211 | 212 | fragments.push(advance(&first_partition)); 213 | 214 | let mut splits = 0; 215 | let mut partition = first_partition; 216 | 217 | for kind in layout.iter() { 218 | match kind { 219 | Split::Column(_) => { 220 | splits += 1; 221 | partition = partition_ident(splits); 222 | fragments.push(advance(&partition)); 223 | } 224 | Split::Group(props) => { 225 | let (initializers, idents) = field_initializers(&props, &partition); 226 | fragments.push(initializers); 227 | locals.extend(idents); 228 | } 229 | } 230 | } 231 | 232 | let getters = quote! { 233 | #(#fragments)* 234 | }; 235 | 236 | (getters, locals) 237 | } 238 | } 239 | -------------------------------------------------------------------------------- /postgres_query_macro/src/from_sql_row/validate.rs: -------------------------------------------------------------------------------- 1 | use super::{ContainerAttributes, PartitionKind, Property}; 2 | use syn::Result; 3 | 4 | pub(super) fn validate_properties( 5 | container: &ContainerAttributes, 6 | props: &[Property], 7 | ) -> Result<()> { 8 | check_split_in_non_split_container(container, props)?; 9 | check_stride_in_non_exact_container(container, props)?; 10 | 11 | check_merging_container_attributes(container, props)?; 12 | check_not_key_and_merge(props)?; 13 | 14 | Ok(()) 15 | } 16 | 17 | fn check_split_in_non_split_container( 18 | container: &ContainerAttributes, 19 | props: &[Property], 20 | ) -> Result<()> { 21 | let is_split = is_match!( 22 | container.partition.as_ref().map(|attr| &attr.value), 23 | Some(PartitionKind::Split) 24 | ); 25 | 26 | if is_split { 27 | Ok(()) 28 | } else { 29 | let split = props 30 | .iter() 31 | .flat_map(|prop| prop.attrs.splits.iter()) 32 | .next(); 33 | 34 | match split { 35 | None => Ok(()), 36 | Some(split) => Err(err!( 37 | split.span, 38 | "explicit `split` in a container without the `#[row(split)]` attribute" 39 | )), 
40 | } 41 | } 42 | } 43 | 44 | fn check_stride_in_non_exact_container( 45 | container: &ContainerAttributes, 46 | props: &[Property], 47 | ) -> Result<()> { 48 | let is_exact = is_match!( 49 | container.partition.as_ref().map(|attr| &attr.value), 50 | Some(PartitionKind::Exact) 51 | ); 52 | 53 | if is_exact { 54 | Ok(()) 55 | } else { 56 | let stride = props.iter().filter_map(|prop| prop.attrs.stride).next(); 57 | 58 | match stride { 59 | None => Ok(()), 60 | Some(stride) => Err(err!( 61 | stride.span, 62 | "explicit `stride` in a container without the `#[row(exact)]` attribute" 63 | )), 64 | } 65 | } 66 | } 67 | 68 | fn check_merging_container_attributes( 69 | container: &ContainerAttributes, 70 | props: &[Property], 71 | ) -> Result<()> { 72 | if let Some(merge) = container.merge.as_ref() { 73 | let has_key = props.iter().any(|prop| prop.attrs.key.is_some()); 74 | let has_merge = props.iter().any(|prop| prop.attrs.merge.is_some()); 75 | 76 | if !has_key { 77 | Err(err!( 78 | merge.span, 79 | "You need to specify at least one of the container's fields to be a key: `#[row(key)]`" 80 | )) 81 | } else if !has_merge { 82 | Err(err!( 83 | merge.span, 84 | "You need to specify at least one of the container's fields to be the merged field: `#[row(merge)]`" 85 | )) 86 | } else { 87 | Ok(()) 88 | } 89 | } else { 90 | let key = props.iter().find(|prop| prop.attrs.key.is_some()); 91 | match key { 92 | None => {}, 93 | Some(key) => return Err(err!( 94 | key.span, 95 | "`#[row(key)]` is only available in containers with the `#[row(group)]` or `#[row(hash)]` attributes" 96 | )), 97 | } 98 | 99 | let merge = props.iter().find(|prop| prop.attrs.merge.is_some()); 100 | match merge { 101 | None => Ok(()), 102 | Some(merge) => Err(err!( 103 | merge.span, 104 | "`#[row(merge)]` is only available in containers with the `#[row(group)]` or `#[row(hash)]` attributes" 105 | )), 106 | } 107 | } 108 | } 109 | 110 | fn check_not_key_and_merge(props: &[Property]) -> Result<()> { 111 | props 
112 | .iter() 113 | .map(|prop| match (prop.attrs.key, prop.attrs.merge) { 114 | (Some(key), Some(merge)) => Err(err!( 115 | key.span.join(merge.span).unwrap_or(key.span), 116 | "You cannot specify both `#[row(key)]` and `#[row(merge)]` on the same field" 117 | )), 118 | _ => Ok(()), 119 | }) 120 | .collect() 121 | } 122 | -------------------------------------------------------------------------------- /postgres_query_macro/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate proc_macro; 2 | 3 | #[macro_use] 4 | mod macros; 5 | 6 | mod from_sql_row; 7 | mod query; 8 | 9 | use proc_macro::TokenStream; 10 | use proc_macro_hack::proc_macro_hack; 11 | use syn::{parse_macro_input, DeriveInput}; 12 | 13 | #[proc_macro_hack] 14 | pub fn query_static(input: TokenStream) -> TokenStream { 15 | let query = parse_macro_input!(input as query::QueryInput); 16 | 17 | let output = match query.convert_to_struct_static() { 18 | Ok(output) => output, 19 | Err(e) => e.to_compile_error(), 20 | }; 21 | 22 | TokenStream::from(output) 23 | } 24 | 25 | #[proc_macro_hack] 26 | pub fn query_dynamic(input: TokenStream) -> TokenStream { 27 | let query = parse_macro_input!(input as query::QueryInput); 28 | 29 | let output = match query.convert_to_struct_dynamic() { 30 | Ok(output) => output, 31 | Err(e) => e.to_compile_error(), 32 | }; 33 | 34 | TokenStream::from(output) 35 | } 36 | 37 | #[proc_macro_derive(FromSqlRow, attributes(row))] 38 | pub fn from_sql_row(input: TokenStream) -> TokenStream { 39 | let input = parse_macro_input!(input as DeriveInput); 40 | let output = from_sql_row::derive(input); 41 | TokenStream::from(output) 42 | } 43 | -------------------------------------------------------------------------------- /postgres_query_macro/src/macros.rs: -------------------------------------------------------------------------------- 1 | macro_rules! 
err { 2 | ($item:expr, $msg:expr) => { 3 | syn::Error::new(syn::spanned::Spanned::span(&$item), $msg) 4 | }; 5 | ($item:expr, $msg:expr, $($tt:tt)*) => { 6 | syn::Error::new(syn::spanned::Spanned::span(&$item), format!($msg, $($tt)*)) 7 | }; 8 | } 9 | 10 | macro_rules! lib { 11 | () => {{ 12 | quote! { postgres_query } 13 | }}; 14 | } 15 | 16 | macro_rules! is_match { 17 | ($expr:expr, $pattern:pat) => { 18 | match $expr { 19 | $pattern => true, 20 | _ => false, 21 | } 22 | }; 23 | } 24 | -------------------------------------------------------------------------------- /postgres_query_macro/src/query.rs: -------------------------------------------------------------------------------- 1 | use proc_macro2::TokenStream; 2 | use quote::*; 3 | use std::fmt::Write; 4 | use syn::parse::{Parse, ParseStream}; 5 | use syn::punctuated::Punctuated; 6 | use syn::{ 7 | Expr, ExprAssign, ExprLit, ExprPath, ExprRange, ExprReference, Ident, Lit, LitStr, Path, 8 | PathArguments, RangeLimits, Result, Token, 9 | }; 10 | 11 | pub struct QueryInput { 12 | text: Expr, 13 | arguments: Vec, 14 | } 15 | 16 | enum Argument { 17 | Single { ident: Ident, value: Expr }, 18 | Dynamic { value: Expr }, 19 | } 20 | 21 | impl Parse for QueryInput { 22 | fn parse(input: ParseStream) -> Result { 23 | let mut parameters = Punctuated::::parse_terminated(input)?.into_iter(); 24 | 25 | let text = parameters 26 | .next() 27 | .ok_or_else(|| input.error("argument missing: expected SQL query"))?; 28 | 29 | let arguments: Vec<_> = parameters.map(expr_to_argument).collect::>()?; 30 | 31 | Ok(QueryInput { text, arguments }) 32 | } 33 | } 34 | 35 | impl QueryInput { 36 | pub fn convert_to_struct_static(self) -> Result { 37 | match self.text { 38 | Expr::Lit(ExprLit { 39 | lit: Lit::Str(text), 40 | .. 
41 | }) => { 42 | let arguments = self 43 | .arguments 44 | .into_iter() 45 | .map(|argument| match argument { 46 | Argument::Single { ident, value } => Ok((ident, value)), 47 | Argument::Dynamic { value } => Err(err!( 48 | value, 49 | "found dynamic binding (`..`) in static context, \ 50 | use `query_dyn!` if working with dynamic parameters" 51 | )), 52 | }) 53 | .collect::>>()?; 54 | 55 | let (sql, parameters) = parameter_substitution(text, arguments)?; 56 | 57 | let lib = lib!(); 58 | Ok(quote! { 59 | #lib::Query::new_static(#sql, vec![#(&#parameters),*]) 60 | }) 61 | } 62 | 63 | _ => Err(err!( 64 | self.text, 65 | "expected a string literal, \ 66 | use `query_dyn!` if working with dynamically generated strings" 67 | )), 68 | } 69 | } 70 | 71 | pub fn convert_to_struct_dynamic(self) -> Result { 72 | let mut simple = Vec::new(); 73 | let mut dynamic = Vec::new(); 74 | 75 | for argument in self.arguments { 76 | match argument { 77 | Argument::Single { ident, value } => { 78 | let name = ident.to_string(); 79 | simple.push(quote! { 80 | (#name, &#value) 81 | }); 82 | } 83 | Argument::Dynamic { value } => { 84 | dynamic.push(value); 85 | } 86 | } 87 | } 88 | 89 | let text = self.text; 90 | 91 | let lib = lib!(); 92 | let result = if dynamic.is_empty() { 93 | quote! { 94 | #lib::Query::parse(#text, &[#(#simple),*]) 95 | } 96 | } else { 97 | quote! 
{ 98 | { 99 | let mut parameters = Vec::<(&str, #lib::Parameter)>::with_capacity(16); 100 | parameters.extend_from_slice(&[#(#simple),*]); 101 | 102 | #( 103 | parameters.extend(#dynamic); 104 | )* 105 | 106 | #lib::Query::parse(#text, ¶meters) 107 | } 108 | } 109 | }; 110 | 111 | Ok(result) 112 | } 113 | } 114 | 115 | fn parameter_substitution( 116 | literal: LitStr, 117 | bindings: Vec<(Ident, Expr)>, 118 | ) -> Result<(String, Vec)> { 119 | let text = literal.value(); 120 | 121 | let mut sql = String::with_capacity(text.len()); 122 | let mut parameters = Vec::with_capacity(bindings.len()); 123 | let mut param_indices = vec![None; bindings.len()]; 124 | 125 | let mut chars = text.chars().enumerate().peekable(); 126 | 127 | let context = |i: usize| { 128 | let start = i.saturating_sub(16); 129 | text.chars().skip(start).take(32).collect::() 130 | }; 131 | 132 | while let Some((index, ch)) = chars.next() { 133 | if ch != '$' { 134 | sql.push(ch); 135 | } else if let Some((_, '$')) = chars.peek() { 136 | let (_, dollar) = chars.next().unwrap(); 137 | sql.push(dollar); 138 | } else { 139 | let mut name = String::new(); 140 | 141 | while let Some(&(_, ch)) = chars.peek() { 142 | if ch.is_ascii_alphanumeric() || ch == '_' { 143 | let (_, ch) = chars.next().unwrap(); 144 | name.push(ch); 145 | } else { 146 | break; 147 | } 148 | } 149 | 150 | if name.is_empty() { 151 | return Err(err!( 152 | literal, 153 | "expected an identifier, found {:?}. Dollar signs may be escaped: `$$`. \ 154 | Here: `{}`", 155 | chars.peek().map(|(_, ch)| *ch).unwrap_or('\0'), 156 | context(index), 157 | )); 158 | } 159 | 160 | let argument = bindings 161 | .iter() 162 | .position(|(binding, _)| *binding == name) 163 | .ok_or_else(|| { 164 | err!( 165 | literal, 166 | "could not find a binding with the name `{}`. 
Here: `{}`", 167 | name, 168 | context(index), 169 | ) 170 | })?; 171 | 172 | let index = param_indices[argument].unwrap_or_else(|| { 173 | let (_, value) = &bindings[argument]; 174 | parameters.push(value.clone()); 175 | let index = parameters.len(); 176 | param_indices[argument] = Some(index); 177 | index 178 | }); 179 | 180 | write!(sql, "${}", index).unwrap(); 181 | } 182 | } 183 | 184 | if let Some(index) = param_indices 185 | .into_iter() 186 | .position(|index: Option| index.is_none()) 187 | { 188 | let (ident, _) = &bindings[index]; 189 | Err(err!(ident, "unused argument")) 190 | } else { 191 | Ok((sql, parameters)) 192 | } 193 | } 194 | 195 | fn expr_to_argument(expr: Expr) -> Result { 196 | match expr { 197 | Expr::Assign(assign) => { 198 | let ExprAssign { left, right, .. } = assign; 199 | 200 | let ident = expr_as_ident(&left).ok_or_else(|| err!(left, "expected an identifier"))?; 201 | 202 | Ok(Argument::Single { 203 | ident: ident.clone(), 204 | value: *right, 205 | }) 206 | } 207 | 208 | Expr::Path(_) => { 209 | if let Some(ident) = expr_as_ident(&expr) { 210 | Ok(Argument::Single { 211 | ident: ident.clone(), 212 | value: expr, 213 | }) 214 | } else { 215 | Err(err!(expr, "expected an identifier")) 216 | } 217 | } 218 | 219 | Expr::Reference(ExprReference { 220 | expr: ref inner, .. 221 | }) => { 222 | if let Some(ident) = expr_as_ident(&inner) { 223 | Ok(Argument::Single { 224 | ident: ident.clone(), 225 | value: expr, 226 | }) 227 | } else { 228 | Err(err!(expr, "expected an identifier")) 229 | } 230 | } 231 | 232 | Expr::Range(ExprRange { 233 | from: None, 234 | limits: RangeLimits::HalfOpen(_), 235 | to: Some(expr), 236 | .. 
237 | }) => Ok(Argument::Dynamic { value: *expr }), 238 | 239 | _ => Err(err!( 240 | expr, 241 | "unexpected expression, expected either ``, ` = ` or `..`", 242 | )), 243 | } 244 | } 245 | 246 | fn path_is_ident(path: &Path) -> bool { 247 | path.leading_colon.is_none() 248 | && path.segments.len() == 1 249 | && match path.segments[0].arguments { 250 | PathArguments::None => true, 251 | _ => false, 252 | } 253 | } 254 | 255 | fn expr_as_ident(expr: &Expr) -> Option<&Ident> { 256 | match expr { 257 | Expr::Path(ExprPath { 258 | qself: None, path, .. 259 | }) if path_is_ident(&path) => Some(&path.segments[0].ident), 260 | _ => None, 261 | } 262 | } 263 | --------------------------------------------------------------------------------