├── examples ├── sync-wrapper │ ├── migrations │ │ ├── .keep │ │ └── 00000000000000_diesel_initial_setup │ │ │ ├── down.sql │ │ │ └── up.sql │ ├── diesel.toml │ ├── Cargo.toml │ └── src │ │ └── main.rs └── postgres │ ├── run-pending-migrations-with-rustls │ ├── migrations │ │ └── 2023-09-08-075742_dummy_migration │ │ │ ├── up.sql │ │ │ └── down.sql │ ├── Cargo.toml │ └── src │ │ └── main.rs │ ├── pipelining │ ├── src │ │ ├── rust_out │ │ └── main.rs │ └── Cargo.toml │ └── pooled-with-rustls │ ├── Cargo.toml │ └── src │ └── main.rs ├── .github ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── config.yml │ └── bug_report.md └── workflows │ └── ci.yml ├── .gitignore ├── LICENSE-MIT ├── tests ├── migrations.rs ├── notifications.rs ├── sync_wrapper.rs ├── pooling.rs ├── custom_types.rs ├── type_check.rs ├── transactions.rs ├── lib.rs └── instrumentation.rs ├── src ├── pg │ ├── serialize.rs │ ├── row.rs │ ├── error_helper.rs │ └── transaction_builder.rs ├── mysql │ ├── cancel_token.rs │ ├── error_helper.rs │ ├── serialize.rs │ └── row.rs ├── stmt_cache.rs ├── run_query_dsl │ └── utils.rs ├── pooled_connection │ ├── mobc.rs │ ├── bb8.rs │ ├── deadpool.rs │ └── mod.rs ├── sync_connection_wrapper │ └── sqlite.rs ├── deref_connection.rs ├── migrations.rs ├── doctest_setup.rs └── async_connection_wrapper.rs ├── Cargo.toml ├── CHANGELOG.md ├── README.md └── LICENSE-APACHE /examples/sync-wrapper/migrations/.keep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: [weiznich] 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- 
/.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | -------------------------------------------------------------------------------- /examples/sync-wrapper/migrations/00000000000000_diesel_initial_setup/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS users; -------------------------------------------------------------------------------- /examples/postgres/run-pending-migrations-with-rustls/migrations/2023-09-08-075742_dummy_migration/up.sql: -------------------------------------------------------------------------------- 1 | SELECT 1; -------------------------------------------------------------------------------- /examples/postgres/run-pending-migrations-with-rustls/migrations/2023-09-08-075742_dummy_migration/down.sql: -------------------------------------------------------------------------------- 1 | SELECT 0; -------------------------------------------------------------------------------- /examples/postgres/pipelining/src/rust_out: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weiznich/diesel_async/HEAD/examples/postgres/pipelining/src/rust_out -------------------------------------------------------------------------------- /examples/sync-wrapper/migrations/00000000000000_diesel_initial_setup/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT); 2 | 3 | INSERT INTO users(id, name) VALUES(123, 'hello world'); 4 | -------------------------------------------------------------------------------- /examples/sync-wrapper/diesel.toml: -------------------------------------------------------------------------------- 1 | # For documentation on how to configure this file, 2 | # see https://diesel.rs/guides/configuring-diesel-cli 3 | 4 | 
[print_schema] 5 | file = "src/schema.rs" 6 | custom_type_derives = ["diesel::query_builder::QueryId"] 7 | 8 | [migrations_directory] 9 | dir = "migrations" 10 | -------------------------------------------------------------------------------- /examples/postgres/pipelining/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pipelining" 3 | version = "0.1.0" 4 | edition = "2024" 5 | 6 | [dependencies] 7 | diesel-async = { version = "0.7.0", path = "../../../", features = ["bb8", "postgres"] } 8 | tokio = { version = "1.2.0", default-features = false, features = ["macros", "rt-multi-thread"] } 9 | 10 | [dependencies.diesel] 11 | version = "2.3.0" 12 | default-features = false 13 | -------------------------------------------------------------------------------- /examples/postgres/pooled-with-rustls/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pooled-with-rustls" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | diesel-async = { version = "0.7.0", path = "../../../", features = ["bb8", "postgres"] } 10 | futures-util = "0.3.21" 11 | rustls = "0.23.8" 12 | rustls-platform-verifier = "0.5.0" 13 | tokio = { version = "1.2.0", default-features = false, features = ["macros", "rt-multi-thread"] } 14 | tokio-postgres = "0.7.7" 15 | tokio-postgres-rustls = "0.13.0" 16 | 17 | 18 | [dependencies.diesel] 19 | version = "2.3.0" 20 | default-features = false 21 | -------------------------------------------------------------------------------- /examples/sync-wrapper/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sync-wrapper" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | diesel-async = { version = "0.7.0", path = "../../", features = ["sync-connection-wrapper", "async-connection-wrapper"] } 10 | futures-util = "0.3.21" 11 | tokio = { version = "1.2.0", default-features = false, features = ["macros", "rt-multi-thread"] } 12 | 13 | [dependencies.diesel] 14 | version = "2.3.0" 15 | default-features = false 16 | features = ["returning_clauses_for_sqlite_3_35"] 17 | 18 | [dependencies.diesel_migrations] 19 | version = "2.3.0" 20 | 21 | [features] 22 | default = ["sqlite"] 23 | sqlite = ["diesel-async/sqlite"] 24 | -------------------------------------------------------------------------------- /examples/postgres/run-pending-migrations-with-rustls/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "run-pending-migrations-with-rustls" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | diesel-async = { version = "0.7.0", path = "../../../", features = ["bb8", "postgres", "migrations"] } 10 | futures-util = "0.3.21" 11 | rustls = "0.23.8" 12 | rustls-platform-verifier = "0.5.0" 13 | tokio = { version = "1.2.0", default-features = false, features = ["macros", "rt-multi-thread"] } 14 | tokio-postgres = "0.7.7" 15 | tokio-postgres-rustls = "0.13.0" 16 | 17 | [dependencies.diesel] 18 | version = "2.3.0" 19 | default-features = false 20 | 21 | [dependencies.diesel_migrations] 22 | version = "2.3.0" 23 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | 2021-2022 Georg Semmler 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the 
"Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /tests/migrations.rs: -------------------------------------------------------------------------------- 1 | use diesel_async::AsyncMigrationHarness; 2 | use diesel_migrations::MigrationHarness; 3 | 4 | static SEQUENTIAL: std::sync::Mutex<()> = std::sync::Mutex::new(()); 5 | 6 | // These two tests are mostly smoke tests to verify 7 | // that the `AsyncMigrationHarness` actually implements 8 | // the necessary traits 9 | 10 | #[tokio::test(flavor = "multi_thread")] 11 | async fn plain_connection() { 12 | let _guard = SEQUENTIAL.lock().unwrap(); 13 | let conn = super::connection().await; 14 | let mut harness = AsyncMigrationHarness::from(conn); 15 | harness.applied_migrations().unwrap(); 16 | } 17 | 18 | #[cfg(feature = "deadpool")] 19 | #[tokio::test(flavor = "multi_thread")] 20 | async fn pool_connection() { 21 | use diesel_async::pooled_connection::deadpool::Pool; 22 | use diesel_async::pooled_connection::AsyncDieselConnectionManager; 23 | 
let _guard = SEQUENTIAL.lock().unwrap(); 24 | 25 | let db_url = std::env::var("DATABASE_URL").unwrap(); 26 | let config = AsyncDieselConnectionManager::::new(db_url); 27 | let pool = Pool::builder(config).build().unwrap(); 28 | let conn = pool.get().await.unwrap(); 29 | let mut harness = AsyncMigrationHarness::from(conn); 30 | harness.applied_migrations().unwrap(); 31 | } 32 | -------------------------------------------------------------------------------- /src/pg/serialize.rs: -------------------------------------------------------------------------------- 1 | use diesel::pg::PgTypeMetadata; 2 | use tokio_postgres::types::{private::BytesMut, IsNull, Type, WrongType}; 3 | 4 | #[derive(Debug)] 5 | pub(super) struct ToSqlHelper(pub(super) PgTypeMetadata, pub(super) Option>); 6 | 7 | impl tokio_postgres::types::ToSql for ToSqlHelper { 8 | fn to_sql( 9 | &self, 10 | _ty: &Type, 11 | out: &mut BytesMut, 12 | ) -> Result> 13 | where 14 | Self: Sized, 15 | { 16 | if let Some(ref bytes) = self.1 { 17 | out.extend_from_slice(bytes); 18 | Ok(IsNull::No) 19 | } else { 20 | Ok(IsNull::Yes) 21 | } 22 | } 23 | 24 | fn accepts(_ty: &Type) -> bool 25 | where 26 | Self: Sized, 27 | { 28 | // this should be called anymore 29 | true 30 | } 31 | 32 | fn to_sql_checked( 33 | &self, 34 | ty: &Type, 35 | out: &mut BytesMut, 36 | ) -> Result> { 37 | if Type::from_oid(self.0.oid()?) 38 | .map(|d| ty != &d) 39 | .unwrap_or(false) 40 | { 41 | return Err(Box::new(WrongType::new::(ty.clone()))); 42 | } 43 | self.to_sql(ty, out) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/mysql/cancel_token.rs: -------------------------------------------------------------------------------- 1 | use mysql_async::prelude::Query; 2 | use mysql_async::{Opts, OptsBuilder}; 3 | 4 | use crate::mysql::error_helper::ErrorHelper; 5 | 6 | /// The capability to request cancellation of in-progress queries on a 7 | /// connection. 
8 | #[derive(Clone)] 9 | pub struct MysqlCancelToken { 10 | pub(crate) opts: Opts, 11 | pub(crate) kill_id: u32, 12 | } 13 | 14 | impl MysqlCancelToken { 15 | /// Attempts to cancel the in-progress query on the connection associated 16 | /// with this `CancelToken`. 17 | /// 18 | /// The server provides no information about whether a cancellation attempt was successful or not. An error will 19 | /// only be returned if the client was unable to connect to the database. 20 | /// 21 | /// Cancellation is inherently racy. There is no guarantee that the 22 | /// cancellation request will reach the server before the query terminates 23 | /// normally, or that the connection associated with this token is still 24 | /// active. 25 | pub async fn cancel_query(&self) -> diesel::result::ConnectionResult<()> { 26 | let builder = OptsBuilder::from_opts(self.opts.clone()); 27 | 28 | let conn = mysql_async::Conn::new(builder).await.map_err(ErrorHelper)?; 29 | 30 | format!("KILL QUERY {};", self.kill_id) 31 | .ignore(conn) 32 | .await 33 | .map_err(ErrorHelper)?; 34 | 35 | Ok(()) 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /examples/postgres/pipelining/src/main.rs: -------------------------------------------------------------------------------- 1 | use diesel::prelude::*; 2 | use diesel_async::{AsyncConnection, AsyncPgConnection, RunQueryDsl}; 3 | 4 | diesel::table! 
{ 5 | users { 6 | id -> Integer, 7 | name -> Text, 8 | } 9 | } 10 | 11 | #[derive(HasQuery, Debug)] 12 | struct User { 13 | id: i32, 14 | name: String, 15 | } 16 | 17 | impl User { 18 | async fn load_all(mut conn: &AsyncPgConnection) -> QueryResult> { 19 | Self::query().load(&mut conn).await 20 | } 21 | 22 | async fn filter_by_id(mut conn: &AsyncPgConnection, id: i32) -> QueryResult> { 23 | Self::query() 24 | .find(id) 25 | .get_result(&mut conn) 26 | .await 27 | .optional() 28 | } 29 | } 30 | 31 | #[tokio::main] 32 | async fn main() -> Result<(), Box> { 33 | let db_url = std::env::var("DATABASE_URL").expect("Env var `DATABASE_URL` not set"); 34 | let mut conn = AsyncPgConnection::establish(&db_url).await?; 35 | 36 | let all_users = User::query().load(&mut conn); 37 | let single_user = User::query().find(1).get_result(&mut conn); 38 | 39 | let (all_users, single_user) = tokio::try_join!(all_users, single_user)?; 40 | println!("All users: {all_users:?}"); 41 | println!("Single user: {single_user:?}"); 42 | 43 | let (all_users, single_user) = 44 | tokio::try_join!(User::load_all(&conn), User::filter_by_id(&conn, 1))?; 45 | 46 | println!("All users: {all_users:?}"); 47 | println!("Single user: {single_user:?}"); 48 | 49 | Ok(()) 50 | } 51 | -------------------------------------------------------------------------------- /examples/postgres/run-pending-migrations-with-rustls/src/main.rs: -------------------------------------------------------------------------------- 1 | use diesel::{ConnectionError, ConnectionResult}; 2 | use diesel_async::{AsyncMigrationHarness, AsyncPgConnection}; 3 | use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; 4 | use futures_util::future::BoxFuture; 5 | use futures_util::FutureExt; 6 | use rustls::ClientConfig; 7 | use rustls_platform_verifier::ConfigVerifierExt; 8 | 9 | pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!(); 10 | 11 | #[tokio::main] 12 | async fn main() -> Result<(), Box> { 13 | // 
Should be in the form of postgres://user:password@localhost/database?sslmode=require 14 | let db_url = std::env::var("DATABASE_URL").expect("Env var `DATABASE_URL` not set"); 15 | 16 | let async_connection = establish_connection(db_url.as_str()).await?; 17 | 18 | let mut harness = AsyncMigrationHarness::new(async_connection); 19 | harness.run_pending_migrations(MIGRATIONS)?; 20 | let _async_connection = harness.into_inner(); 21 | 22 | Ok(()) 23 | } 24 | 25 | fn establish_connection(config: &str) -> BoxFuture<'_, ConnectionResult> { 26 | let fut = async { 27 | // We first set up the way we want rustls to work. 28 | let rustls_config = ClientConfig::with_platform_verifier(); 29 | let tls = tokio_postgres_rustls::MakeRustlsConnect::new(rustls_config); 30 | let (client, conn) = tokio_postgres::connect(config, tls) 31 | .await 32 | .map_err(|e| ConnectionError::BadConnection(e.to_string()))?; 33 | AsyncPgConnection::try_from_client_and_connection(client, conn).await 34 | }; 35 | fut.boxed() 36 | } 37 | -------------------------------------------------------------------------------- /tests/notifications.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "postgres")] 2 | #[tokio::test] 3 | async fn notifications_arrive() { 4 | use diesel_async::RunQueryDsl; 5 | use futures_util::{StreamExt, TryStreamExt}; 6 | 7 | let conn = &mut super::connection_without_transaction().await; 8 | 9 | diesel::sql_query("LISTEN test_notifications") 10 | .execute(conn) 11 | .await 12 | .unwrap(); 13 | 14 | diesel::sql_query("NOTIFY test_notifications, 'first'") 15 | .execute(conn) 16 | .await 17 | .unwrap(); 18 | 19 | diesel::sql_query("NOTIFY test_notifications, 'second'") 20 | .execute(conn) 21 | .await 22 | .unwrap(); 23 | 24 | let notifications = conn 25 | .notifications_stream() 26 | .take(2) 27 | .try_collect::>() 28 | .await 29 | .unwrap(); 30 | 31 | assert_eq!(2, notifications.len()); 32 | assert_eq!(notifications[0].channel, 
"test_notifications"); 33 | assert_eq!(notifications[1].channel, "test_notifications"); 34 | assert_eq!(notifications[0].payload, "first"); 35 | assert_eq!(notifications[1].payload, "second"); 36 | 37 | let next_notification = tokio::time::timeout( 38 | std::time::Duration::from_secs(1), 39 | std::pin::pin!(conn.notifications_stream()).next(), 40 | ) 41 | .await; 42 | 43 | assert!( 44 | next_notification.is_err(), 45 | "Got a next notification, while not expecting one: {next_notification:?}" 46 | ); 47 | 48 | diesel::sql_query("NOTIFY test_notifications") 49 | .execute(conn) 50 | .await 51 | .unwrap(); 52 | 53 | let next_notification = std::pin::pin!(conn.notifications_stream()).next().await; 54 | assert_eq!(next_notification.unwrap().unwrap().payload, ""); 55 | } 56 | -------------------------------------------------------------------------------- /src/mysql/error_helper.rs: -------------------------------------------------------------------------------- 1 | use diesel::{result::DatabaseErrorKind, ConnectionError}; 2 | use mysql_async::Error; 3 | 4 | pub(super) struct ErrorHelper(pub(super) Error); 5 | 6 | impl From for diesel::result::Error { 7 | fn from(ErrorHelper(e): ErrorHelper) -> Self { 8 | match e { 9 | Error::Server(e) => { 10 | let kind = match e.code { 11 | 1062 | 1586 | 1859 => DatabaseErrorKind::UniqueViolation, 12 | 1216 | 1217 | 1451 | 1452 | 1830 | 1834 => { 13 | DatabaseErrorKind::ForeignKeyViolation 14 | } 15 | 1792 => DatabaseErrorKind::ReadOnlyTransaction, 16 | 1048 | 1364 => DatabaseErrorKind::NotNullViolation, 17 | 3819 => DatabaseErrorKind::CheckViolation, 18 | _ => DatabaseErrorKind::Unknown, 19 | }; 20 | diesel::result::Error::DatabaseError(kind, Box::new(e.message) as _) 21 | } 22 | e => diesel::result::Error::DatabaseError( 23 | DatabaseErrorKind::Unknown, 24 | Box::new(e.to_string()) as _, 25 | ), 26 | } 27 | } 28 | } 29 | 30 | impl From for diesel::result::ConnectionError { 31 | fn from(ErrorHelper(e): ErrorHelper) -> Self { 32 | match 
e { 33 | Error::Driver(e) => ConnectionError::BadConnection(e.to_string()), 34 | Error::Io(e) => ConnectionError::BadConnection(e.to_string()), 35 | Error::Other(e) => ConnectionError::BadConnection(e.to_string()), 36 | Error::Server(_) => ConnectionError::CouldntSetupConfiguration(ErrorHelper(e).into()), 37 | Error::Url(e) => ConnectionError::InvalidConnectionUrl(e.to_string()), 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /src/stmt_cache.rs: -------------------------------------------------------------------------------- 1 | use diesel::connection::statement_cache::{MaybeCached, StatementCallbackReturnType}; 2 | use diesel::QueryResult; 3 | use futures_core::future::BoxFuture; 4 | use futures_util::future::Either; 5 | use futures_util::{FutureExt, TryFutureExt}; 6 | use std::future::{self, Future}; 7 | 8 | pub(crate) struct CallbackHelper(pub(crate) F); 9 | 10 | type PrepareFuture<'a, C, S> = Either< 11 | future::Ready, C)>>, 12 | BoxFuture<'a, QueryResult<(MaybeCached<'a, S>, C)>>, 13 | >; 14 | 15 | impl StatementCallbackReturnType for CallbackHelper 16 | where 17 | F: Future> + Send, 18 | S: 'static, 19 | { 20 | type Return<'a> = PrepareFuture<'a, C, S>; 21 | 22 | fn from_error<'a>(e: diesel::result::Error) -> Self::Return<'a> { 23 | Either::Left(future::ready(Err(e))) 24 | } 25 | 26 | fn map_to_no_cache<'a>(self) -> Self::Return<'a> 27 | where 28 | Self: 'a, 29 | { 30 | Either::Right( 31 | self.0 32 | .map_ok(|(stmt, conn)| (MaybeCached::CannotCache(stmt), conn)) 33 | .boxed(), 34 | ) 35 | } 36 | 37 | fn map_to_cache(stmt: &mut S, conn: C) -> Self::Return<'_> { 38 | Either::Left(future::ready(Ok((MaybeCached::Cached(stmt), conn)))) 39 | } 40 | 41 | fn register_cache<'a>( 42 | self, 43 | callback: impl FnOnce(S) -> &'a mut S + Send + 'a, 44 | ) -> Self::Return<'a> 45 | where 46 | Self: 'a, 47 | { 48 | Either::Right( 49 | self.0 50 | .map_ok(|(stmt, conn)| (MaybeCached::Cached(callback(stmt)), conn)) 
51 | .boxed(), 52 | ) 53 | } 54 | } 55 | 56 | pub(crate) struct QueryFragmentHelper { 57 | pub(crate) sql: String, 58 | pub(crate) safe_to_cache: bool, 59 | } 60 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | 13 | 14 | ## Setup 15 | 16 | ### Versions 17 | 18 | - **Rust:** 19 | - **Diesel:** 20 | - **Diesel_async:** 21 | - **Database:** 22 | - **Operating System** 23 | 24 | ### Feature Flags 25 | 26 | - **diesel:** 27 | - **diesel_async:** 28 | 29 | ## Problem Description 30 | 31 | 32 | ### What are you trying to accomplish? 33 | 34 | 35 | ### What is the expected output? 36 | 37 | 38 | ### What is the actual output? 39 | 40 | 41 | ### Are you seeing any additional errors? 42 | 43 | 44 | ### Steps to reproduce 45 | 46 | 53 | 54 | ## Checklist 55 | 56 | - [ ] I have already looked over the [issue tracker](https://github.com/weiznich/diesel_async/issues) for similar possible closed issues. 57 | 60 | - [ ] This issue can be reproduced on Rust's stable channel. 
(Your issue will be 61 | closed if this is not the case) 62 | - [ ] This issue can be reproduced without requiring a third party crate 63 | 64 | 69 | -------------------------------------------------------------------------------- /examples/postgres/pooled-with-rustls/src/main.rs: -------------------------------------------------------------------------------- 1 | use diesel::{ConnectionError, ConnectionResult}; 2 | use diesel_async::pooled_connection::bb8::Pool; 3 | use diesel_async::pooled_connection::AsyncDieselConnectionManager; 4 | use diesel_async::pooled_connection::ManagerConfig; 5 | use diesel_async::AsyncPgConnection; 6 | use futures_util::future::BoxFuture; 7 | use futures_util::FutureExt; 8 | use rustls::ClientConfig; 9 | use rustls_platform_verifier::ConfigVerifierExt; 10 | use std::time::Duration; 11 | 12 | #[tokio::main] 13 | async fn main() -> Result<(), Box> { 14 | let db_url = std::env::var("DATABASE_URL").expect("Env var `DATABASE_URL` not set"); 15 | 16 | let mut config = ManagerConfig::default(); 17 | config.custom_setup = Box::new(establish_connection); 18 | 19 | // First we have to construct a connection manager with our custom `establish_connection` 20 | // function 21 | let mgr = AsyncDieselConnectionManager::::new_with_config(db_url, config); 22 | // From that connection we can then create a pool, here given with some example settings. 23 | // 24 | // This creates a TLS configuration that's equivalent to `libpq'` `sslmode=verify-full`, which 25 | // means this will check whether the provided certificate is valid for the given database host. 
26 | // 27 | // `libpq` does not perform these checks by default (https://www.postgresql.org/docs/current/libpq-connect.html) 28 | // If you hit a TLS error while connecting to the database double check your certificates 29 | let pool = Pool::builder() 30 | .max_size(10) 31 | .min_idle(Some(5)) 32 | .max_lifetime(Some(Duration::from_secs(60 * 60 * 24))) 33 | .idle_timeout(Some(Duration::from_secs(60 * 2))) 34 | .build(mgr) 35 | .await?; 36 | 37 | // Now we can use our pool to run queries over a TLS-secured connection: 38 | let conn = pool.get().await?; 39 | let _ = conn; 40 | 41 | Ok(()) 42 | } 43 | 44 | fn establish_connection(config: &str) -> BoxFuture<'_, ConnectionResult> { 45 | let fut = async { 46 | // We first set up the way we want rustls to work. 47 | let rustls_config = ClientConfig::with_platform_verifier(); 48 | let tls = tokio_postgres_rustls::MakeRustlsConnect::new(rustls_config); 49 | let (client, conn) = tokio_postgres::connect(config, tls) 50 | .await 51 | .map_err(|e| ConnectionError::BadConnection(e.to_string()))?; 52 | 53 | AsyncPgConnection::try_from_client_and_connection(client, conn).await 54 | }; 55 | fut.boxed() 56 | } 57 | -------------------------------------------------------------------------------- /src/pg/row.rs: -------------------------------------------------------------------------------- 1 | use diesel::backend::Backend; 2 | use diesel::row::{Field, PartialRow, RowIndex, RowSealed}; 3 | use std::{error::Error, num::NonZeroU32}; 4 | use tokio_postgres::{types::Type, Row}; 5 | 6 | pub struct PgRow { 7 | row: Row, 8 | } 9 | 10 | impl PgRow { 11 | pub(super) fn new(row: Row) -> Self { 12 | Self { row } 13 | } 14 | } 15 | impl RowSealed for PgRow {} 16 | 17 | impl<'a> diesel::row::Row<'a, diesel::pg::Pg> for PgRow { 18 | type InnerPartialRow = Self; 19 | type Field<'b> 20 | = PgField<'b> 21 | where 22 | Self: 'b, 23 | 'a: 'b; 24 | 25 | fn field_count(&self) -> usize { 26 | self.row.len() 27 | } 28 | 29 | fn get<'b, I>(&'b self, idx: 
I) -> Option> 30 | where 31 | 'a: 'b, 32 | Self: diesel::row::RowIndex, 33 | { 34 | let idx = self.idx(idx)?; 35 | Some(PgField { 36 | row: &self.row, 37 | idx, 38 | }) 39 | } 40 | 41 | fn partial_row( 42 | &self, 43 | range: std::ops::Range, 44 | ) -> diesel::row::PartialRow<'_, Self::InnerPartialRow> { 45 | PartialRow::new(self, range) 46 | } 47 | } 48 | 49 | impl RowIndex for PgRow { 50 | fn idx(&self, idx: usize) -> Option { 51 | if idx < self.row.len() { 52 | Some(idx) 53 | } else { 54 | None 55 | } 56 | } 57 | } 58 | 59 | impl<'a> RowIndex<&'a str> for PgRow { 60 | fn idx(&self, idx: &'a str) -> Option { 61 | self.row.columns().iter().position(|c| c.name() == idx) 62 | } 63 | } 64 | 65 | pub struct PgField<'a> { 66 | row: &'a Row, 67 | idx: usize, 68 | } 69 | 70 | impl<'a> Field<'a, diesel::pg::Pg> for PgField<'a> { 71 | fn field_name(&self) -> Option<&str> { 72 | Some(self.row.columns()[self.idx].name()) 73 | } 74 | 75 | fn value(&self) -> Option<::RawValue<'_>> { 76 | let DieselFromSqlWrapper(value) = self.row.get(self.idx); 77 | value 78 | } 79 | } 80 | 81 | #[repr(transparent)] 82 | struct TyWrapper(Type); 83 | 84 | impl diesel::pg::TypeOidLookup for TyWrapper { 85 | fn lookup(&self) -> NonZeroU32 { 86 | NonZeroU32::new(self.0.oid()).unwrap() 87 | } 88 | } 89 | 90 | struct DieselFromSqlWrapper<'a>(Option>); 91 | 92 | impl<'a> tokio_postgres::types::FromSql<'a> for DieselFromSqlWrapper<'a> { 93 | fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { 94 | let ty = unsafe { &*(ty as *const Type as *const TyWrapper) }; 95 | Ok(DieselFromSqlWrapper(Some(diesel::pg::PgValue::new( 96 | raw, ty, 97 | )))) 98 | } 99 | 100 | fn accepts(ty: &Type) -> bool { 101 | ty.oid() != 0 102 | } 103 | 104 | fn from_sql_null(_ty: &Type) -> Result> { 105 | Ok(DieselFromSqlWrapper(None)) 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/pg/error_helper.rs: 
-------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | use std::sync::Arc; 3 | 4 | use diesel::ConnectionError; 5 | 6 | pub(super) struct ErrorHelper(pub(super) tokio_postgres::Error); 7 | 8 | impl From for ConnectionError { 9 | fn from(postgres_error: ErrorHelper) -> Self { 10 | ConnectionError::CouldntSetupConfiguration(postgres_error.into()) 11 | } 12 | } 13 | 14 | impl From for diesel::result::Error { 15 | fn from(ErrorHelper(postgres_error): ErrorHelper) -> Self { 16 | from_tokio_postgres_error(Arc::new(postgres_error)) 17 | } 18 | } 19 | 20 | pub(super) fn from_tokio_postgres_error( 21 | postgres_error: Arc, 22 | ) -> diesel::result::Error { 23 | use diesel::result::DatabaseErrorKind::*; 24 | use tokio_postgres::error::SqlState; 25 | 26 | match postgres_error.code() { 27 | Some(code) => { 28 | let kind = match *code { 29 | SqlState::UNIQUE_VIOLATION => UniqueViolation, 30 | SqlState::FOREIGN_KEY_VIOLATION => ForeignKeyViolation, 31 | SqlState::T_R_SERIALIZATION_FAILURE => SerializationFailure, 32 | SqlState::READ_ONLY_SQL_TRANSACTION => ReadOnlyTransaction, 33 | SqlState::NOT_NULL_VIOLATION => NotNullViolation, 34 | SqlState::CHECK_VIOLATION => CheckViolation, 35 | _ => Unknown, 36 | }; 37 | 38 | diesel::result::Error::DatabaseError( 39 | kind, 40 | Box::new(PostgresDbErrorWrapper( 41 | postgres_error 42 | .source() 43 | .and_then(|e| e.downcast_ref::().cloned()) 44 | .expect("It's a db error, because we've got a SQLState code above"), 45 | )) as _, 46 | ) 47 | } 48 | None => diesel::result::Error::DatabaseError( 49 | UnableToSendCommand, 50 | Box::new(postgres_error.to_string()), 51 | ), 52 | } 53 | } 54 | 55 | struct PostgresDbErrorWrapper(tokio_postgres::error::DbError); 56 | 57 | impl diesel::result::DatabaseErrorInformation for PostgresDbErrorWrapper { 58 | fn message(&self) -> &str { 59 | self.0.message() 60 | } 61 | 62 | fn details(&self) -> Option<&str> { 63 | self.0.detail() 64 | } 65 | 66 | fn 
hint(&self) -> Option<&str> { 67 | self.0.hint() 68 | } 69 | 70 | fn table_name(&self) -> Option<&str> { 71 | self.0.table() 72 | } 73 | 74 | fn column_name(&self) -> Option<&str> { 75 | self.0.column() 76 | } 77 | 78 | fn constraint_name(&self) -> Option<&str> { 79 | self.0.constraint() 80 | } 81 | 82 | fn statement_position(&self) -> Option { 83 | use tokio_postgres::error::ErrorPosition; 84 | self.0.position().and_then(|e| match *e { 85 | ErrorPosition::Original(position) | ErrorPosition::Internal { position, .. } => { 86 | position.try_into().ok() 87 | } 88 | }) 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /tests/sync_wrapper.rs: -------------------------------------------------------------------------------- 1 | use diesel::migration::Migration; 2 | use diesel::{Connection, IntoSql}; 3 | use diesel_async::async_connection_wrapper::AsyncConnectionWrapper; 4 | 5 | #[test] 6 | fn test_sync_wrapper() { 7 | use diesel::RunQueryDsl; 8 | 9 | // The runtime is required for the `sqlite` implementation to be able to use 10 | // `spawn_blocking()`. This is not required for `postgres` or `mysql`. 
11 | #[cfg(feature = "sqlite")] 12 | let rt = tokio::runtime::Builder::new_current_thread() 13 | .enable_io() 14 | .build() 15 | .unwrap(); 16 | 17 | #[cfg(feature = "sqlite")] 18 | let _guard = rt.enter(); 19 | 20 | let db_url = std::env::var("DATABASE_URL").unwrap(); 21 | let mut conn = AsyncConnectionWrapper::::establish(&db_url).unwrap(); 22 | 23 | let res = 24 | diesel::select(1.into_sql::()).get_result::(&mut conn); 25 | assert_eq!(Ok(1), res); 26 | } 27 | 28 | #[tokio::test] 29 | async fn test_sync_wrapper_async_query() { 30 | use diesel_async::{AsyncConnection, RunQueryDsl}; 31 | 32 | let db_url = std::env::var("DATABASE_URL").unwrap(); 33 | let conn = crate::TestConnection::establish(&db_url).await.unwrap(); 34 | let mut conn = AsyncConnectionWrapper::<_>::from(conn); 35 | 36 | let res = diesel::select(1.into_sql::()) 37 | .get_result::(&mut conn) 38 | .await; 39 | assert_eq!(Ok(1), res); 40 | } 41 | 42 | #[tokio::test] 43 | async fn test_sync_wrapper_under_runtime() { 44 | use diesel::RunQueryDsl; 45 | 46 | let db_url = std::env::var("DATABASE_URL").unwrap(); 47 | tokio::task::spawn_blocking(move || { 48 | let mut conn = AsyncConnectionWrapper::::establish(&db_url).unwrap(); 49 | 50 | let res = 51 | diesel::select(1.into_sql::()).get_result::(&mut conn); 52 | assert_eq!(Ok(1), res); 53 | }) 54 | .await 55 | .unwrap(); 56 | } 57 | 58 | #[test] 59 | fn check_run_migration() { 60 | use diesel_migrations::MigrationHarness; 61 | 62 | let db_url = std::env::var("DATABASE_URL").unwrap(); 63 | let migrations: Vec>> = Vec::new(); 64 | let mut conn = AsyncConnectionWrapper::::establish(&db_url).unwrap(); 65 | 66 | // just use `run_migrations` here because that's the easiest one without additional setup 67 | conn.run_migrations(&migrations).unwrap(); 68 | } 69 | 70 | #[tokio::test] 71 | async fn test_sync_wrapper_unwrap() { 72 | let db_url = std::env::var("DATABASE_URL").unwrap(); 73 | 74 | let conn = tokio::task::spawn_blocking(move || { 75 | use 
diesel::RunQueryDsl; 76 | 77 | let mut conn = AsyncConnectionWrapper::::establish(&db_url).unwrap(); 78 | let res = 79 | diesel::select(1.into_sql::()).get_result::(&mut conn); 80 | assert_eq!(Ok(1), res); 81 | conn 82 | }) 83 | .await 84 | .unwrap(); 85 | 86 | { 87 | use diesel_async::RunQueryDsl; 88 | 89 | let mut conn = conn.into_inner(); 90 | let res = diesel::select(1.into_sql::()) 91 | .get_result::(&mut conn) 92 | .await; 93 | assert_eq!(Ok(1), res); 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /tests/pooling.rs: -------------------------------------------------------------------------------- 1 | use super::{users, User}; 2 | use diesel::prelude::*; 3 | use diesel_async::RunQueryDsl; 4 | #[cfg(not(feature = "sqlite"))] 5 | use diesel_async::SaveChangesDsl; 6 | 7 | #[tokio::test] 8 | #[cfg(feature = "bb8")] 9 | async fn save_changes_bb8() { 10 | use diesel_async::pooled_connection::bb8::Pool; 11 | use diesel_async::pooled_connection::AsyncDieselConnectionManager; 12 | 13 | let db_url = std::env::var("DATABASE_URL").unwrap(); 14 | 15 | let config = AsyncDieselConnectionManager::::new(db_url); 16 | let pool = Pool::builder().max_size(1).build(config).await.unwrap(); 17 | 18 | let mut conn = pool.get().await.unwrap(); 19 | 20 | super::setup(&mut conn).await; 21 | 22 | diesel::insert_into(users::table) 23 | .values(users::name.eq("John")) 24 | .execute(&mut conn) 25 | .await 26 | .unwrap(); 27 | 28 | let u = users::table.first::(&mut conn).await.unwrap(); 29 | assert_eq!(u.name, "John"); 30 | 31 | #[cfg(not(feature = "sqlite"))] 32 | { 33 | let mut u = u; 34 | u.name = "Jane".into(); 35 | let u2: User = u.save_changes(&mut conn).await.unwrap(); 36 | 37 | assert_eq!(u2.name, "Jane"); 38 | } 39 | } 40 | 41 | #[tokio::test] 42 | #[cfg(feature = "deadpool")] 43 | async fn save_changes_deadpool() { 44 | use diesel_async::pooled_connection::deadpool::Pool; 45 | use 
diesel_async::pooled_connection::AsyncDieselConnectionManager; 46 | 47 | let db_url = std::env::var("DATABASE_URL").unwrap(); 48 | 49 | let config = AsyncDieselConnectionManager::::new(db_url); 50 | let pool = Pool::builder(config).max_size(1).build().unwrap(); 51 | 52 | let mut conn = pool.get().await.unwrap(); 53 | 54 | super::setup(&mut conn).await; 55 | 56 | diesel::insert_into(users::table) 57 | .values(users::name.eq("John")) 58 | .execute(&mut conn) 59 | .await 60 | .unwrap(); 61 | 62 | let u = users::table.first::(&mut conn).await.unwrap(); 63 | assert_eq!(u.name, "John"); 64 | 65 | #[cfg(not(feature = "sqlite"))] 66 | { 67 | let mut u = u; 68 | u.name = "Jane".into(); 69 | let u2: User = u.save_changes(&mut conn).await.unwrap(); 70 | 71 | assert_eq!(u2.name, "Jane"); 72 | } 73 | } 74 | 75 | #[tokio::test] 76 | #[cfg(feature = "mobc")] 77 | async fn save_changes_mobc() { 78 | use diesel_async::pooled_connection::mobc::Pool; 79 | use diesel_async::pooled_connection::AsyncDieselConnectionManager; 80 | 81 | let db_url = std::env::var("DATABASE_URL").unwrap(); 82 | 83 | let config = AsyncDieselConnectionManager::::new(db_url); 84 | let pool = Pool::new(config); 85 | 86 | let mut conn = pool.get().await.unwrap(); 87 | 88 | super::setup(&mut conn).await; 89 | 90 | diesel::insert_into(users::table) 91 | .values(users::name.eq("John")) 92 | .execute(&mut conn) 93 | .await 94 | .unwrap(); 95 | 96 | let u = users::table.first::(&mut conn).await.unwrap(); 97 | assert_eq!(u.name, "John"); 98 | 99 | #[cfg(not(feature = "sqlite"))] 100 | { 101 | let mut u = u; 102 | u.name = "Jane".into(); 103 | let u2: User = u.save_changes(&mut conn).await.unwrap(); 104 | 105 | assert_eq!(u2.name, "Jane"); 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/run_query_dsl/utils.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::pin::Pin; 3 | use 
std::task::{Context, Poll}; 4 | 5 | use diesel::QueryResult; 6 | use futures_core::{ready, TryFuture, TryStream}; 7 | use futures_util::{TryFutureExt, TryStreamExt}; 8 | 9 | // We use a custom future implementation here to erase some lifetimes 10 | // that otherwise need to be specified explicitly 11 | // 12 | // Specifying these lifetimes results in the compiler not being 13 | // able to look through the generic code and emit 14 | // lifetime errors for pipelined queries. See 15 | // https://github.com/weiznich/diesel_async/issues/249 for more context 16 | #[repr(transparent)] 17 | pub struct MapOk { 18 | future: futures_util::future::MapOk T>, 19 | } 20 | 21 | impl Future for MapOk 22 | where 23 | F: TryFuture, 24 | futures_util::future::MapOk T>: Future>, 25 | { 26 | type Output = Result; 27 | 28 | fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { 29 | unsafe { 30 | // SAFETY: This projects pinning to the only inner field, so it 31 | // should be safe 32 | self.map_unchecked_mut(|s| &mut s.future) 33 | } 34 | .poll(cx) 35 | } 36 | } 37 | 38 | impl MapOk { 39 | pub(crate) fn new(future: Fut, f: fn(Fut::Ok) -> T) -> Self { 40 | Self { 41 | future: future.map_ok(f), 42 | } 43 | } 44 | } 45 | 46 | // similar to `MapOk` above, this mainly exists to hide the lifetime 47 | #[repr(transparent)] 48 | pub struct AndThen { 49 | future: futures_util::future::AndThen F2>, 50 | } 51 | 52 | impl AndThen 53 | where 54 | Fut1: TryFuture, 55 | Fut2: TryFuture, 56 | { 57 | pub(crate) fn new(fut1: Fut1, f: fn(Fut1::Ok) -> Fut2) -> AndThen { 58 | Self { 59 | future: fut1.and_then(f), 60 | } 61 | } 62 | } 63 | 64 | impl Future for AndThen 65 | where 66 | F1: TryFuture, 67 | F2: TryFuture, 68 | { 69 | type Output = Result; 70 | 71 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 72 | unsafe { 73 | // SAFETY: This projects pinning to the only inner field, so it 74 | // should be safe 75 | self.map_unchecked_mut(|s| &mut s.future) 76 | } 77 | .poll(cx) 78 | } 79 | 
} 80 | 81 | /// Converts a stream into a future, only yielding the first element. 82 | /// Based on [`futures_util::stream::StreamFuture`]. 83 | pub struct LoadNext { 84 | stream: Option, 85 | } 86 | 87 | impl LoadNext { 88 | pub(crate) fn new(stream: St) -> Self { 89 | Self { 90 | stream: Some(stream), 91 | } 92 | } 93 | } 94 | 95 | impl Future for LoadNext 96 | where 97 | St: TryStream + Unpin, 98 | { 99 | type Output = QueryResult; 100 | 101 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 102 | let first = { 103 | let s = self.stream.as_mut().expect("polling LoadNext twice"); 104 | ready!(s.try_poll_next_unpin(cx)) 105 | }; 106 | self.stream = None; 107 | match first { 108 | Some(first) => Poll::Ready(first), 109 | None => Poll::Ready(Err(diesel::result::Error::NotFound)), 110 | } 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "diesel-async" 3 | version = "0.7.4" 4 | authors = ["Georg Semmler "] 5 | edition = "2021" 6 | autotests = false 7 | license = "MIT OR Apache-2.0" 8 | readme = "README.md" 9 | repository = "https://github.com/weiznich/diesel_async" 10 | keywords = ["orm", "database", "sql", "async"] 11 | categories = ["database"] 12 | description = "An async extension for Diesel the safe, extensible ORM and Query Builder" 13 | rust-version = "1.84.0" 14 | include = ["README.md", "src/**/*.rs", "Cargo.toml", "LICENSE-MIT", "LICENSE-APACHE"] 15 | 16 | [dependencies] 17 | futures-core = "0.3.17" 18 | futures-channel = { version = "0.3.17", default-features = false, features = [ 19 | "std", 20 | "sink", 21 | ], optional = true } 22 | futures-util = { version = "0.3.17", default-features = false, features = [ 23 | "alloc", 24 | "sink", 25 | ] } 26 | tokio-postgres = { version = "0.7.10", optional = true } 27 | tokio = { version = "1.26", optional = true } 28 | 
mysql_async = { version = "0.36.0", optional = true, default-features = false, features = [ 29 | "minimal-rust", 30 | ] } 31 | mysql_common = { version = "0.35.3", optional = true, default-features = false } 32 | 33 | bb8 = { version = "0.9", optional = true } 34 | async-trait = { version = "0.1.66", optional = true } 35 | deadpool = { version = "0.12", optional = true, default-features = false, features = [ 36 | "managed", 37 | ] } 38 | mobc = { version = ">=0.7,<0.10", optional = true } 39 | scoped-futures = { version = "0.1", features = ["std"] } 40 | 41 | [dependencies.diesel] 42 | version = "~2.3.0" 43 | default-features = false 44 | features = [ 45 | "i-implement-a-third-party-backend-and-opt-into-breaking-changes", 46 | ] 47 | 48 | [dependencies.diesel_migrations] 49 | version = "~2.3.0" 50 | optional = true 51 | 52 | [dev-dependencies] 53 | tokio = { version = "1.12.0", features = ["rt", "macros", "rt-multi-thread"] } 54 | cfg-if = "1" 55 | chrono = "0.4" 56 | assert_matches = "1.0.1" 57 | 58 | [dev-dependencies.diesel] 59 | version = "~2.3.0" 60 | default-features = false 61 | features = [ 62 | "chrono" 63 | ] 64 | 65 | [dev-dependencies.diesel_migrations] 66 | version = "2.3.0" 67 | 68 | [features] 69 | default = [] 70 | mysql = [ 71 | "diesel/mysql_backend", 72 | "mysql_async", 73 | "mysql_common", 74 | "futures-channel", 75 | "tokio", 76 | ] 77 | postgres = ["diesel/postgres_backend", "tokio-postgres", "tokio", "tokio/rt"] 78 | sqlite = ["diesel/sqlite", "sync-connection-wrapper"] 79 | sync-connection-wrapper = ["tokio/rt"] 80 | async-connection-wrapper = ["tokio/net", "tokio/rt"] 81 | migrations = ["diesel_migrations", "async-connection-wrapper", "tokio/rt-multi-thread"] 82 | pool = [] 83 | r2d2 = ["pool", "diesel/r2d2"] 84 | bb8 = ["pool", "dep:bb8"] 85 | mobc = ["pool", "dep:mobc", "dep:async-trait", "tokio/sync"] 86 | deadpool = ["pool", "dep:deadpool"] 87 | 88 | [[test]] 89 | name = "integration_tests" 90 | path = "tests/lib.rs" 91 | harness = true 
92 | 93 | [package.metadata.docs.rs] 94 | features = [ 95 | "postgres", 96 | "mysql", 97 | "sqlite", 98 | "deadpool", 99 | "bb8", 100 | "mobc", 101 | "async-connection-wrapper", 102 | "sync-connection-wrapper", 103 | "r2d2", 104 | "migrations", 105 | "tokio/macros", 106 | ] 107 | no-default-features = true 108 | rustdoc-args = ["--cfg", "docsrs", "-Z", "unstable-options", "--generate-link-to-definition"] 109 | 110 | [workspace] 111 | members = [ 112 | ".", 113 | "examples/postgres/pipelining", 114 | "examples/postgres/pooled-with-rustls", 115 | "examples/postgres/run-pending-migrations-with-rustls", 116 | "examples/sync-wrapper", 117 | ] 118 | -------------------------------------------------------------------------------- /src/pooled_connection/mobc.rs: -------------------------------------------------------------------------------- 1 | //! A pool implementation for `diesel-async` based on [`mobc`] 2 | //! 3 | //! ```rust 4 | //! # include!("../doctest_setup.rs"); 5 | //! use diesel::result::Error; 6 | //! use futures_util::FutureExt; 7 | //! use diesel_async::pooled_connection::AsyncDieselConnectionManager; 8 | //! use diesel_async::pooled_connection::mobc::Pool; 9 | //! use diesel_async::{RunQueryDsl, AsyncConnection}; 10 | //! 11 | //! # #[tokio::main(flavor = "current_thread")] 12 | //! # async fn main() { 13 | //! # run_test().await.unwrap(); 14 | //! # } 15 | //! # 16 | //! # #[cfg(feature = "postgres")] 17 | //! # fn get_config() -> AsyncDieselConnectionManager { 18 | //! # let db_url = database_url_from_env("PG_DATABASE_URL"); 19 | //! let config = AsyncDieselConnectionManager::::new(db_url); 20 | //! # config 21 | //! # } 22 | //! # 23 | //! # #[cfg(feature = "mysql")] 24 | //! # fn get_config() -> AsyncDieselConnectionManager { 25 | //! # let db_url = database_url_from_env("MYSQL_DATABASE_URL"); 26 | //! # let config = AsyncDieselConnectionManager::::new(db_url); 27 | //! # config 28 | //! # } 29 | //! # 30 | //! # #[cfg(feature = "sqlite")] 31 | //! 
# fn get_config() -> AsyncDieselConnectionManager> { 32 | //! # let db_url = database_url_from_env("SQLITE_DATABASE_URL"); 33 | //! # let config = AsyncDieselConnectionManager::>::new(db_url); 34 | //! # config 35 | //! # } 36 | //! # 37 | //! # async fn run_test() -> Result<(), Box> { 38 | //! # use schema::users::dsl::*; 39 | //! # let config = get_config(); 40 | //! # #[cfg(feature = "postgres")] 41 | //! let pool: Pool = Pool::new(config); 42 | //! # #[cfg(not(feature = "postgres"))] 43 | //! # let pool = Pool::new(config); 44 | //! let mut conn = pool.get().await?; 45 | //! # conn.begin_test_transaction(); 46 | //! # create_tables(&mut conn).await; 47 | //! # conn.begin_test_transaction(); 48 | //! let res = users.load::<(i32, String)>(&mut conn).await?; 49 | //! # Ok(()) 50 | //! # } 51 | //! ``` 52 | use super::{AsyncDieselConnectionManager, PoolError, PoolableConnection}; 53 | use diesel::query_builder::QueryFragment; 54 | use mobc::Manager; 55 | 56 | /// Type alias for using [`mobc::Pool`] with [`diesel-async`] 57 | /// 58 | /// 59 | /// This is **not** equal to [`mobc::Pool`]. 
It already uses the correct 60 | /// connection manager and expects only the connection type as generic argument 61 | pub type Pool = mobc::Pool>; 62 | 63 | /// Type alias for using [`mobc::Connection`] with [`diesel-async`] 64 | pub type PooledConnection = mobc::Connection>; 65 | 66 | /// Type alias for using [`mobc::Builder`] with [`diesel-async`] 67 | pub type Builder = mobc::Builder>; 68 | 69 | #[async_trait::async_trait] 70 | impl Manager for AsyncDieselConnectionManager 71 | where 72 | C: PoolableConnection + 'static, 73 | diesel::dsl::select>: 74 | crate::methods::ExecuteDsl, 75 | diesel::query_builder::SqlQuery: QueryFragment, 76 | { 77 | type Connection = C; 78 | 79 | type Error = PoolError; 80 | 81 | async fn connect(&self) -> Result { 82 | (self.manager_config.custom_setup)(&self.connection_url) 83 | .await 84 | .map_err(PoolError::ConnectionError) 85 | } 86 | 87 | async fn check(&self, mut conn: Self::Connection) -> Result { 88 | conn.ping(&self.manager_config.recycling_method) 89 | .await 90 | .map_err(PoolError::QueryError)?; 91 | Ok(conn) 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /src/mysql/serialize.rs: -------------------------------------------------------------------------------- 1 | use diesel::mysql::data_types::MysqlTime; 2 | use diesel::mysql::MysqlType; 3 | use diesel::mysql::MysqlValue; 4 | use diesel::QueryResult; 5 | use mysql_async::{Params, Value}; 6 | use std::convert::TryInto; 7 | 8 | pub(super) struct ToSqlHelper { 9 | pub(super) metadata: Vec, 10 | pub(super) binds: Vec>>, 11 | } 12 | 13 | fn to_value((metadata, bind): (MysqlType, Option>)) -> QueryResult { 14 | let cast_helper = |e| diesel::result::Error::SerializationError(Box::new(e)); 15 | let v = match bind { 16 | Some(bind) => match metadata { 17 | MysqlType::Tiny => Value::Int(i8::from_be_bytes([bind[0]]) as i64), 18 | MysqlType::Short => Value::Int(i16::from_ne_bytes(bind.try_into().unwrap()) as _), 19 | 
MysqlType::Long => Value::Int(i32::from_ne_bytes(bind.try_into().unwrap()) as _), 20 | MysqlType::LongLong => Value::Int(i64::from_ne_bytes(bind.try_into().unwrap())), 21 | 22 | MysqlType::UnsignedTiny => Value::UInt(bind[0] as _), 23 | MysqlType::UnsignedShort => { 24 | Value::UInt(u16::from_ne_bytes(bind.try_into().unwrap()) as _) 25 | } 26 | MysqlType::UnsignedLong => { 27 | Value::UInt(u32::from_ne_bytes(bind.try_into().unwrap()) as _) 28 | } 29 | MysqlType::UnsignedLongLong => { 30 | Value::UInt(u64::from_ne_bytes(bind.try_into().unwrap())) 31 | } 32 | MysqlType::Float => Value::Float(f32::from_ne_bytes(bind.try_into().unwrap())), 33 | MysqlType::Double => Value::Double(f64::from_ne_bytes(bind.try_into().unwrap())), 34 | 35 | MysqlType::Time => { 36 | let time: MysqlTime = diesel::deserialize::FromSql::< 37 | diesel::sql_types::Time, 38 | diesel::mysql::Mysql, 39 | >::from_sql(MysqlValue::new(&bind, metadata)) 40 | .expect("This does not fail"); 41 | Value::Time( 42 | time.neg, 43 | time.day, 44 | time.hour.try_into().map_err(cast_helper)?, 45 | time.minute.try_into().map_err(cast_helper)?, 46 | time.second.try_into().map_err(cast_helper)?, 47 | time.second_part.try_into().expect("Cast does not fail"), 48 | ) 49 | } 50 | MysqlType::Date | MysqlType::DateTime | MysqlType::Timestamp => { 51 | let time: MysqlTime = diesel::deserialize::FromSql::< 52 | diesel::sql_types::Timestamp, 53 | diesel::mysql::Mysql, 54 | >::from_sql(MysqlValue::new(&bind, metadata)) 55 | .expect("This does not fail"); 56 | Value::Date( 57 | time.year.try_into().map_err(cast_helper)?, 58 | time.month.try_into().map_err(cast_helper)?, 59 | time.day.try_into().map_err(cast_helper)?, 60 | time.hour.try_into().map_err(cast_helper)?, 61 | time.minute.try_into().map_err(cast_helper)?, 62 | time.second.try_into().map_err(cast_helper)?, 63 | time.second_part.try_into().expect("Cast does not fail"), 64 | ) 65 | } 66 | MysqlType::Numeric 67 | | MysqlType::Set 68 | | MysqlType::Enum 69 | | 
MysqlType::String 70 | | MysqlType::Blob => Value::Bytes(bind), 71 | MysqlType::Bit => unimplemented!(), 72 | _ => unreachable!(), 73 | }, 74 | None => Value::NULL, 75 | }; 76 | Ok(v) 77 | } 78 | 79 | impl TryFrom for Params { 80 | type Error = diesel::result::Error; 81 | 82 | fn try_from(ToSqlHelper { metadata, binds }: ToSqlHelper) -> Result { 83 | let values = metadata 84 | .into_iter() 85 | .zip(binds) 86 | .map(to_value) 87 | .collect::, Self::Error>>()?; 88 | Ok(Params::Positional(values)) 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /src/pooled_connection/bb8.rs: -------------------------------------------------------------------------------- 1 | //! A pool implementation for `diesel-async` based on [`bb8`] 2 | //! 3 | //! ```rust 4 | //! # include!("../doctest_setup.rs"); 5 | //! use diesel::result::Error; 6 | //! use futures_util::FutureExt; 7 | //! use diesel_async::pooled_connection::AsyncDieselConnectionManager; 8 | //! use diesel_async::pooled_connection::bb8::Pool; 9 | //! use diesel_async::{RunQueryDsl, AsyncConnection}; 10 | //! 11 | //! # #[tokio::main(flavor = "current_thread")] 12 | //! # async fn main() { 13 | //! # run_test().await.unwrap(); 14 | //! # } 15 | //! # 16 | //! # #[cfg(feature = "postgres")] 17 | //! # fn get_config() -> AsyncDieselConnectionManager { 18 | //! # let db_url = database_url_from_env("PG_DATABASE_URL"); 19 | //! let config = AsyncDieselConnectionManager::::new(db_url); 20 | //! # config 21 | //! # } 22 | //! # 23 | //! # #[cfg(feature = "mysql")] 24 | //! # fn get_config() -> AsyncDieselConnectionManager { 25 | //! # let db_url = database_url_from_env("MYSQL_DATABASE_URL"); 26 | //! # let config = AsyncDieselConnectionManager::::new(db_url); 27 | //! # config 28 | //! # } 29 | //! # 30 | //! # #[cfg(feature = "sqlite")] 31 | //! # fn get_config() -> AsyncDieselConnectionManager> { 32 | //! # let db_url = database_url_from_env("SQLITE_DATABASE_URL"); 33 | //! 
# let config = AsyncDieselConnectionManager::>::new(db_url); 34 | //! # config 35 | //! # } 36 | //! # 37 | //! # async fn run_test() -> Result<(), Box> { 38 | //! # use schema::users::dsl::*; 39 | //! # let config = get_config(); 40 | //! # #[cfg(feature = "postgres")] 41 | //! let pool: Pool = Pool::builder().build(config).await?; 42 | //! # #[cfg(not(feature = "postgres"))] 43 | //! # let pool = Pool::builder().build(config).await?; 44 | //! let mut conn = pool.get().await?; 45 | //! # conn.begin_test_transaction(); 46 | //! # create_tables(&mut conn).await; 47 | //! # #[cfg(feature = "mysql")] 48 | //! # conn.begin_test_transaction(); 49 | //! let res = users.load::<(i32, String)>(&mut conn).await?; 50 | //! # Ok(()) 51 | //! # } 52 | //! ``` 53 | use super::{AsyncDieselConnectionManager, PoolError, PoolableConnection}; 54 | use bb8::ManageConnection; 55 | use diesel::query_builder::QueryFragment; 56 | 57 | /// Type alias for using [`bb8::Pool`] with [`diesel-async`] 58 | /// 59 | /// This is **not** equal to [`bb8::Pool`]. 
It already uses the correct 60 | /// connection manager and expects only the connection type as generic argument 61 | pub type Pool = bb8::Pool>; 62 | /// Type alias for using [`bb8::PooledConnection`] with [`diesel-async`] 63 | pub type PooledConnection<'a, C> = bb8::PooledConnection<'a, AsyncDieselConnectionManager>; 64 | /// Type alias for using [`bb8::RunError`] with [`diesel-async`] 65 | pub type RunError = bb8::RunError; 66 | 67 | impl ManageConnection for AsyncDieselConnectionManager 68 | where 69 | C: PoolableConnection + 'static, 70 | diesel::dsl::select>: 71 | crate::methods::ExecuteDsl, 72 | diesel::query_builder::SqlQuery: QueryFragment, 73 | { 74 | type Connection = C; 75 | 76 | type Error = PoolError; 77 | 78 | async fn connect(&self) -> Result { 79 | (self.manager_config.custom_setup)(&self.connection_url) 80 | .await 81 | .map_err(PoolError::ConnectionError) 82 | } 83 | 84 | async fn is_valid(&self, conn: &mut Self::Connection) -> Result<(), Self::Error> { 85 | conn.ping(&self.manager_config.recycling_method) 86 | .await 87 | .map_err(PoolError::QueryError) 88 | } 89 | 90 | fn has_broken(&self, conn: &mut Self::Connection) -> bool { 91 | std::thread::panicking() || conn.is_broken() 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /examples/sync-wrapper/src/main.rs: -------------------------------------------------------------------------------- 1 | use diesel::prelude::*; 2 | use diesel::sqlite::{Sqlite, SqliteConnection}; 3 | use diesel_async::async_connection_wrapper::AsyncConnectionWrapper; 4 | use diesel_async::sync_connection_wrapper::SyncConnectionWrapper; 5 | use diesel_async::{AsyncConnection, RunQueryDsl}; 6 | use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; 7 | 8 | // ordinary diesel model setup 9 | 10 | table! 
{ 11 | users { 12 | id -> Integer, 13 | name -> Text, 14 | } 15 | } 16 | 17 | #[allow(dead_code)] 18 | #[derive(Debug, Queryable, QueryableByName, Selectable)] 19 | #[diesel(table_name = users)] 20 | struct User { 21 | id: i32, 22 | name: String, 23 | } 24 | 25 | const MIGRATIONS: EmbeddedMigrations = embed_migrations!(); 26 | 27 | type InnerConnection = SqliteConnection; 28 | 29 | type InnerDB = Sqlite; 30 | 31 | async fn establish(db_url: &str) -> ConnectionResult> { 32 | // It is necessary to specify the specific inner connection type because of inference issues 33 | SyncConnectionWrapper::::establish(db_url).await 34 | } 35 | 36 | async fn run_migrations(async_connection: A) -> Result<(), Box> 37 | where 38 | A: AsyncConnection + 'static, 39 | { 40 | let mut async_wrapper: AsyncConnectionWrapper = 41 | AsyncConnectionWrapper::from(async_connection); 42 | 43 | tokio::task::spawn_blocking(move || { 44 | async_wrapper.run_pending_migrations(MIGRATIONS).unwrap(); 45 | }) 46 | .await 47 | .map_err(|e| Box::new(e) as Box) 48 | } 49 | 50 | async fn transaction( 51 | async_conn: &mut SyncConnectionWrapper, 52 | old_name: &str, 53 | new_name: &str, 54 | ) -> Result, diesel::result::Error> { 55 | async_conn 56 | .transaction::, diesel::result::Error, _>(|c| { 57 | Box::pin(async { 58 | if old_name.is_empty() { 59 | Ok(Vec::new()) 60 | } else { 61 | diesel::update(users::table.filter(users::name.eq(old_name))) 62 | .set(users::name.eq(new_name)) 63 | .load(c) 64 | .await 65 | } 66 | }) 67 | }) 68 | .await 69 | } 70 | 71 | #[tokio::main] 72 | async fn main() -> Result<(), Box> { 73 | let db_url = std::env::var("DATABASE_URL").expect("Env var `DATABASE_URL` not set"); 74 | 75 | // create an async connection for the migrations 76 | let sync_wrapper: SyncConnectionWrapper = establish(&db_url).await?; 77 | run_migrations(sync_wrapper).await?; 78 | 79 | let mut sync_wrapper: SyncConnectionWrapper = establish(&db_url).await?; 80 | 81 | diesel::delete(users::table) 82 | 
.execute(&mut sync_wrapper) 83 | .await?; 84 | 85 | diesel::insert_into(users::table) 86 | .values((users::id.eq(3), users::name.eq("toto"))) 87 | .execute(&mut sync_wrapper) 88 | .await?; 89 | 90 | let data: Vec = users::table 91 | .select(User::as_select()) 92 | .load(&mut sync_wrapper) 93 | .await?; 94 | println!("{data:?}"); 95 | 96 | diesel::delete(users::table) 97 | .execute(&mut sync_wrapper) 98 | .await?; 99 | 100 | diesel::insert_into(users::table) 101 | .values((users::id.eq(1), users::name.eq("iLuke"))) 102 | .execute(&mut sync_wrapper) 103 | .await?; 104 | 105 | let data: Vec = users::table 106 | .filter(users::id.gt(0)) 107 | .or_filter(users::name.like("%Luke")) 108 | .select(User::as_select()) 109 | .load(&mut sync_wrapper) 110 | .await?; 111 | println!("{data:?}"); 112 | 113 | // a quick test to check if we correctly handle transactions 114 | let mut conn_a: SyncConnectionWrapper = establish(&db_url).await?; 115 | let mut conn_b: SyncConnectionWrapper = establish(&db_url).await?; 116 | 117 | let handle_1 = tokio::spawn(async move { 118 | loop { 119 | let changed = transaction(&mut conn_a, "iLuke", "JustLuke").await; 120 | println!("Changed {changed:?}"); 121 | std::thread::sleep(std::time::Duration::from_secs(1)); 122 | } 123 | }); 124 | 125 | let handle_2 = tokio::spawn(async move { 126 | loop { 127 | let changed = transaction(&mut conn_b, "JustLuke", "iLuke").await; 128 | println!("Changed {changed:?}"); 129 | std::thread::sleep(std::time::Duration::from_secs(1)); 130 | } 131 | }); 132 | 133 | let _ = handle_2.await; 134 | let _ = handle_1.await; 135 | 136 | Ok(()) 137 | } 138 | -------------------------------------------------------------------------------- /src/sync_connection_wrapper/sqlite.rs: -------------------------------------------------------------------------------- 1 | use diesel::connection::AnsiTransactionManager; 2 | use diesel::SqliteConnection; 3 | use scoped_futures::ScopedBoxFuture; 4 | 5 | use 
crate::sync_connection_wrapper::SyncTransactionManagerWrapper; 6 | use crate::TransactionManager; 7 | 8 | use super::SyncConnectionWrapper; 9 | 10 | impl SyncConnectionWrapper { 11 | /// Run a transaction with `BEGIN IMMEDIATE` 12 | /// 13 | /// This method will return an error if a transaction is already open. 14 | /// 15 | /// **WARNING:** Canceling the returned future does currently **not** 16 | /// close an already open transaction. You may end up with a connection 17 | /// containing a dangling transaction. 18 | /// 19 | /// # Example 20 | /// 21 | /// ```rust 22 | /// # include!("../doctest_setup.rs"); 23 | /// use diesel::result::Error; 24 | /// use scoped_futures::ScopedFutureExt; 25 | /// use diesel_async::{RunQueryDsl, AsyncConnection}; 26 | /// # 27 | /// # #[tokio::main(flavor = "current_thread")] 28 | /// # async fn main() { 29 | /// # run_test().await.unwrap(); 30 | /// # } 31 | /// # 32 | /// # async fn run_test() -> QueryResult<()> { 33 | /// # use schema::users::dsl::*; 34 | /// # let conn = &mut connection_no_transaction().await; 35 | /// conn.immediate_transaction(|conn| async move { 36 | /// diesel::insert_into(users) 37 | /// .values(name.eq("Ruby")) 38 | /// .execute(conn) 39 | /// .await?; 40 | /// 41 | /// let all_names = users.select(name).load::(conn).await?; 42 | /// assert_eq!(vec!["Sean", "Tess", "Ruby"], all_names); 43 | /// 44 | /// Ok(()) 45 | /// }.scope_boxed()).await 46 | /// # } 47 | /// ``` 48 | pub async fn immediate_transaction<'a, R, E, F>(&mut self, f: F) -> Result 49 | where 50 | F: for<'r> FnOnce(&'r mut Self) -> ScopedBoxFuture<'a, 'r, Result> + Send + 'a, 51 | E: From + Send + 'a, 52 | R: Send + 'a, 53 | { 54 | self.transaction_sql(f, "BEGIN IMMEDIATE").await 55 | } 56 | 57 | /// Run a transaction with `BEGIN EXCLUSIVE` 58 | /// 59 | /// This method will return an error if a transaction is already open. 
60 | /// 61 | /// **WARNING:** Canceling the returned future does currently **not** 62 | /// close an already open transaction. You may end up with a connection 63 | /// containing a dangling transaction. 64 | /// 65 | /// # Example 66 | /// 67 | /// ```rust 68 | /// # include!("../doctest_setup.rs"); 69 | /// use diesel::result::Error; 70 | /// use scoped_futures::ScopedFutureExt; 71 | /// use diesel_async::{RunQueryDsl, AsyncConnection}; 72 | /// # 73 | /// # #[tokio::main(flavor = "current_thread")] 74 | /// # async fn main() { 75 | /// # run_test().await.unwrap(); 76 | /// # } 77 | /// # 78 | /// # async fn run_test() -> QueryResult<()> { 79 | /// # use schema::users::dsl::*; 80 | /// # let conn = &mut connection_no_transaction().await; 81 | /// conn.exclusive_transaction(|conn| async move { 82 | /// diesel::insert_into(users) 83 | /// .values(name.eq("Ruby")) 84 | /// .execute(conn) 85 | /// .await?; 86 | /// 87 | /// let all_names = users.select(name).load::(conn).await?; 88 | /// assert_eq!(vec!["Sean", "Tess", "Ruby"], all_names); 89 | /// 90 | /// Ok(()) 91 | /// }.scope_boxed()).await 92 | /// # } 93 | /// ``` 94 | pub async fn exclusive_transaction<'a, R, E, F>(&mut self, f: F) -> Result 95 | where 96 | F: for<'r> FnOnce(&'r mut Self) -> ScopedBoxFuture<'a, 'r, Result> + Send + 'a, 97 | E: From + Send + 'a, 98 | R: Send + 'a, 99 | { 100 | self.transaction_sql(f, "BEGIN EXCLUSIVE").await 101 | } 102 | 103 | async fn transaction_sql<'a, R, E, F>(&mut self, f: F, sql: &'static str) -> Result 104 | where 105 | F: for<'r> FnOnce(&'r mut Self) -> ScopedBoxFuture<'a, 'r, Result> + Send + 'a, 106 | E: From + Send + 'a, 107 | R: Send + 'a, 108 | { 109 | self.spawn_blocking(|conn| AnsiTransactionManager::begin_transaction_sql(conn, sql)) 110 | .await?; 111 | 112 | match f(&mut *self).await { 113 | Ok(value) => { 114 | SyncTransactionManagerWrapper::::commit_transaction( 115 | &mut *self, 116 | ) 117 | .await?; 118 | Ok(value) 119 | } 120 | Err(e) => { 121 | 
SyncTransactionManagerWrapper::::rollback_transaction( 122 | &mut *self, 123 | ) 124 | .await?; 125 | Err(e) 126 | } 127 | } 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /src/pooled_connection/deadpool.rs: -------------------------------------------------------------------------------- 1 | //! A connection pool implementation for `diesel-async` based on [`deadpool`] 2 | //! 3 | //! ```rust 4 | //! # include!("../doctest_setup.rs"); 5 | //! use diesel::result::Error; 6 | //! use futures_util::FutureExt; 7 | //! use diesel_async::pooled_connection::AsyncDieselConnectionManager; 8 | //! use diesel_async::pooled_connection::deadpool::Pool; 9 | //! use diesel_async::{RunQueryDsl, AsyncConnection}; 10 | //! 11 | //! # #[tokio::main(flavor = "current_thread")] 12 | //! # async fn main() { 13 | //! # run_test().await.unwrap(); 14 | //! # } 15 | //! # 16 | //! # #[cfg(feature = "postgres")] 17 | //! # fn get_config() -> AsyncDieselConnectionManager { 18 | //! # let db_url = database_url_from_env("PG_DATABASE_URL"); 19 | //! let config = AsyncDieselConnectionManager::::new(db_url); 20 | //! # config 21 | //! # } 22 | //! # 23 | //! # #[cfg(feature = "mysql")] 24 | //! # fn get_config() -> AsyncDieselConnectionManager { 25 | //! # let db_url = database_url_from_env("MYSQL_DATABASE_URL"); 26 | //! # let config = AsyncDieselConnectionManager::::new(db_url); 27 | //! # config 28 | //! # } 29 | //! # 30 | //! # #[cfg(feature = "sqlite")] 31 | //! # fn get_config() -> AsyncDieselConnectionManager> { 32 | //! # let db_url = database_url_from_env("SQLITE_DATABASE_URL"); 33 | //! # let config = AsyncDieselConnectionManager::>::new(db_url); 34 | //! # config 35 | //! # } 36 | //! # 37 | //! # async fn run_test() -> Result<(), Box> { 38 | //! # use schema::users::dsl::*; 39 | //! # let config = get_config(); 40 | //! # #[cfg(feature = "postgres")] 41 | //! let pool: Pool = Pool::builder(config).build()?; 42 | //! 
# #[cfg(not(feature = "postgres"))] 43 | //! # let pool = Pool::builder(config).build()?; 44 | //! let mut conn = pool.get().await?; 45 | //! # conn.begin_test_transaction(); 46 | //! # create_tables(&mut conn).await; 47 | //! # conn.begin_test_transaction(); 48 | //! let res = users.load::<(i32, String)>(&mut conn).await?; 49 | //! # Ok(()) 50 | //! # } 51 | //! ``` 52 | use super::{AsyncDieselConnectionManager, PoolableConnection}; 53 | use deadpool::managed::Manager; 54 | use diesel::query_builder::QueryFragment; 55 | 56 | /// Type alias for using [`deadpool::managed::Pool`] with [`diesel-async`] 57 | /// 58 | /// This is **not** equal to [`deadpool::managed::Pool`]. It already uses the correct 59 | /// connection manager and expects only the connection type as generic argument 60 | pub type Pool = deadpool::managed::Pool>; 61 | /// Type alias for using [`deadpool::managed::PoolBuilder`] with [`diesel-async`] 62 | pub type PoolBuilder = deadpool::managed::PoolBuilder>; 63 | /// Type alias for using [`deadpool::managed::BuildError`] with [`diesel-async`] 64 | pub type BuildError = deadpool::managed::BuildError; 65 | /// Type alias for using [`deadpool::managed::PoolError`] with [`diesel-async`] 66 | pub type PoolError = deadpool::managed::PoolError; 67 | /// Type alias for using [`deadpool::managed::Object`] with [`diesel-async`] 68 | pub type Object = deadpool::managed::Object>; 69 | /// Type alias for using [`deadpool::managed::Hook`] with [`diesel-async`] 70 | pub type Hook = deadpool::managed::Hook>; 71 | /// Type alias for using [`deadpool::managed::HookError`] with [`diesel-async`] 72 | pub type HookError = deadpool::managed::HookError; 73 | 74 | impl Manager for AsyncDieselConnectionManager 75 | where 76 | C: PoolableConnection + Send + 'static, 77 | diesel::dsl::select>: 78 | crate::methods::ExecuteDsl, 79 | diesel::query_builder::SqlQuery: QueryFragment, 80 | { 81 | type Type = C; 82 | 83 | type Error = super::PoolError; 84 | 85 | async fn create(&self) 
-> Result { 86 | (self.manager_config.custom_setup)(&self.connection_url) 87 | .await 88 | .map_err(super::PoolError::ConnectionError) 89 | } 90 | 91 | async fn recycle( 92 | &self, 93 | obj: &mut Self::Type, 94 | _: &deadpool::managed::Metrics, 95 | ) -> deadpool::managed::RecycleResult { 96 | if std::thread::panicking() || obj.is_broken() { 97 | return Err(deadpool::managed::RecycleError::Message( 98 | "Broken connection".into(), 99 | )); 100 | } 101 | obj.ping(&self.manager_config.recycling_method) 102 | .await 103 | .map_err(super::PoolError::QueryError)?; 104 | Ok(()) 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/deref_connection.rs: -------------------------------------------------------------------------------- 1 | use crate::UpdateAndFetchResults; 2 | use crate::{AsyncConnection, AsyncConnectionCore, SimpleAsyncConnection, TransactionManager}; 3 | use diesel::associations::HasTable; 4 | use diesel::connection::CacheSize; 5 | use diesel::connection::Instrumentation; 6 | use diesel::QueryResult; 7 | use futures_util::future::BoxFuture; 8 | use std::ops::DerefMut; 9 | 10 | impl SimpleAsyncConnection for C 11 | where 12 | C: DerefMut + Send, 13 | C::Target: SimpleAsyncConnection + Send, 14 | { 15 | async fn batch_execute(&mut self, query: &str) -> diesel::QueryResult<()> { 16 | let conn = self.deref_mut(); 17 | conn.batch_execute(query).await 18 | } 19 | } 20 | 21 | impl AsyncConnectionCore for C 22 | where 23 | C: DerefMut + Send, 24 | C::Target: AsyncConnectionCore, 25 | { 26 | type ExecuteFuture<'conn, 'query> = 27 | ::ExecuteFuture<'conn, 'query>; 28 | type LoadFuture<'conn, 'query> = ::LoadFuture<'conn, 'query>; 29 | type Stream<'conn, 'query> = ::Stream<'conn, 'query>; 30 | type Row<'conn, 'query> = ::Row<'conn, 'query>; 31 | 32 | type Backend = ::Backend; 33 | 34 | fn load<'conn, 'query, T>(&'conn mut self, source: T) -> Self::LoadFuture<'conn, 'query> 35 | where 36 | T: 
diesel::query_builder::AsQuery + 'query, 37 | T::Query: diesel::query_builder::QueryFragment 38 | + diesel::query_builder::QueryId 39 | + 'query, 40 | { 41 | let conn = self.deref_mut(); 42 | conn.load(source) 43 | } 44 | 45 | fn execute_returning_count<'conn, 'query, T>( 46 | &'conn mut self, 47 | source: T, 48 | ) -> Self::ExecuteFuture<'conn, 'query> 49 | where 50 | T: diesel::query_builder::QueryFragment 51 | + diesel::query_builder::QueryId 52 | + 'query, 53 | { 54 | let conn = self.deref_mut(); 55 | conn.execute_returning_count(source) 56 | } 57 | } 58 | 59 | #[diagnostic::do_not_recommend] 60 | impl AsyncConnection for C 61 | where 62 | C: DerefMut + Send, 63 | C::Target: AsyncConnection, 64 | { 65 | type TransactionManager = 66 | PoolTransactionManager<::TransactionManager>; 67 | 68 | async fn establish(_database_url: &str) -> diesel::ConnectionResult { 69 | Err(diesel::result::ConnectionError::BadConnection( 70 | String::from("Cannot directly establish a pooled connection"), 71 | )) 72 | } 73 | 74 | fn transaction_state( 75 | &mut self, 76 | ) -> &mut >::TransactionStateData{ 77 | let conn = self.deref_mut(); 78 | conn.transaction_state() 79 | } 80 | 81 | async fn begin_test_transaction(&mut self) -> diesel::QueryResult<()> { 82 | self.deref_mut().begin_test_transaction().await 83 | } 84 | 85 | fn instrumentation(&mut self) -> &mut dyn Instrumentation { 86 | self.deref_mut().instrumentation() 87 | } 88 | 89 | fn set_instrumentation(&mut self, instrumentation: impl Instrumentation) { 90 | self.deref_mut().set_instrumentation(instrumentation); 91 | } 92 | 93 | fn set_prepared_statement_cache_size(&mut self, size: CacheSize) { 94 | self.deref_mut().set_prepared_statement_cache_size(size); 95 | } 96 | } 97 | 98 | #[doc(hidden)] 99 | #[allow(missing_debug_implementations)] 100 | pub struct PoolTransactionManager(std::marker::PhantomData); 101 | 102 | impl TransactionManager for PoolTransactionManager 103 | where 104 | C: DerefMut + Send, 105 | C::Target: 
AsyncConnection, 106 | TM: TransactionManager, 107 | { 108 | type TransactionStateData = TM::TransactionStateData; 109 | 110 | async fn begin_transaction(conn: &mut C) -> diesel::QueryResult<()> { 111 | TM::begin_transaction(&mut **conn).await 112 | } 113 | 114 | async fn rollback_transaction(conn: &mut C) -> diesel::QueryResult<()> { 115 | TM::rollback_transaction(&mut **conn).await 116 | } 117 | 118 | async fn commit_transaction(conn: &mut C) -> diesel::QueryResult<()> { 119 | TM::commit_transaction(&mut **conn).await 120 | } 121 | 122 | fn transaction_manager_status_mut( 123 | conn: &mut C, 124 | ) -> &mut diesel::connection::TransactionManagerStatus { 125 | TM::transaction_manager_status_mut(&mut **conn) 126 | } 127 | 128 | fn is_broken_transaction_manager(conn: &mut C) -> bool { 129 | TM::is_broken_transaction_manager(&mut **conn) 130 | } 131 | } 132 | 133 | impl UpdateAndFetchResults for Conn 134 | where 135 | Conn: DerefMut + Send, 136 | Changes: diesel::prelude::Identifiable + HasTable + Send, 137 | Conn::Target: UpdateAndFetchResults, 138 | { 139 | fn update_and_fetch<'conn, 'changes>( 140 | &'conn mut self, 141 | changeset: Changes, 142 | ) -> BoxFuture<'changes, QueryResult> 143 | where 144 | Changes: 'changes, 145 | 'conn: 'changes, 146 | Self: 'changes, 147 | { 148 | self.deref_mut().update_and_fetch(changeset) 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | 3 | All user visible changes to this project will be documented in this file. 
4 | This project adheres to [Semantic Versioning](http://semver.org/), as described 5 | for Rust libraries in [RFC #1105](https://github.com/rust-lang/rfcs/blob/master/text/1105-api-evolution.md) 6 | 7 | ## [Unreleased] 8 | 9 | ## [0.7.4] - 2025-11-07 10 | 11 | * Fixed an issue with dropping uncached mysql statements 12 | 13 | ## [0.7.3] - 2025-10-05 14 | 15 | * Another docs.rs build fix 16 | 17 | ## [0.7.2] - 2025-10-02 18 | 19 | * Fix versions in the Readme 20 | 21 | ## [0.7.1] - 2025-10-02 22 | 23 | * Fix the docs.rs build 24 | 25 | ## [0.7.0] - 2025-10-02 26 | 27 | * Support for diesel 2.3 28 | * Added support for running migrations via `AsyncMigrationHarness` 29 | * Improved ergonomics of using query pipelining with `AsyncPgConnection` 30 | * Added the ability to cancel queries using `AsyncMysqlConnection::cancel_token` 31 | 32 | ## [0.6.1] - 2025-07-03 33 | 34 | * Fix features for some dependencies 35 | 36 | ## [0.6.0] - 2025-07-02 37 | 38 | * Allow to control the statement cache size 39 | * Minimize dependencies features 40 | * Bump minimal supported mysql_async version to 0.36.0 41 | * Fixing a bug in how we tracked open transactions that could lead to dangling transactions in specific cases 42 | 43 | ## [0.5.2] - 2024-11-26 44 | 45 | * Fixed an issue around transaction cancellation that could lead to connection pools containing connections with dangling transactions 46 | 47 | ## [0.5.1] - 2024-11-01 48 | 49 | * Add crate feature `pool` for extending connection pool implementations through external crates 50 | * Implement `Deref` and `DerefMut` for `AsyncConnectionWrapper` to allow using it in an async context as well 51 | 52 | ## [0.5.0] - 2024-07-19 53 | 54 | * Added type `diesel_async::pooled_connection::mobc::PooledConnection` 55 | * MySQL/MariaDB now use `CLIENT_FOUND_ROWS` capability to allow consistent behaviour with PostgreSQL regarding return value of UPDATE commands.
56 | * The minimal supported rust version is now 1.78.0 57 | * Add a `SyncConnectionWrapper` type that turns a sync connection into an async one. This enables SQLite support for diesel-async 58 | * Add support for `diesel::connection::Instrumentation` to support logging and other instrumentation for any of the provided connection impls. 59 | * Bump minimal supported mysql_async version to 0.34 60 | 61 | ## [0.4.1] - 2023-09-01 62 | 63 | * Fixed feature flags for docs.rs 64 | 65 | ## [0.4.0] - 2023-09-01 66 | 67 | * Add a `AsyncConnectionWrapper` type to turn a `diesel_async::AsyncConnection` into a `diesel::Connection`. This might be used to execute migrations via `diesel_migrations`. 68 | * Add some connection pool configurations to specify how connections 69 | in the pool should be checked if they are still valid 70 | 71 | ## [0.3.2] - 2023-07-24 72 | 73 | * Fix `TinyInt` serialization 74 | * Check for open transactions before returning the connection to the pool 75 | 76 | ## [0.3.1] - 2023-06-07 77 | 78 | * Minor readme fixes 79 | * Add a missing `UpdateAndFetchResults` impl 80 | 81 | ## [0.3.0] - 2023-05-26 82 | 83 | * Compatibility with diesel 2.1 84 | 85 | ## [0.2.2] - 2023-04-14 86 | 87 | * Dependency updates for `mysql-async` to allow newer versions 88 | 89 | ## [0.2.1] - 2023-03-08 90 | 91 | * Dependency updates for `mobc` and `mysql-async` to allow newer versions as well 92 | * Extend the README 93 | * Improve the version constraint for diesel so that we do not end up using a newer 94 | diesel version that's incompatible 95 | 96 | ## [0.2.0] - 2022-12-16 97 | 98 | * [#38](https://github.com/weiznich/diesel_async/pull/38) Relax the requirements for borrowed captures in the transaction closure 99 | * [#41](https://github.com/weiznich/diesel_async/pull/41) Remove GAT workarounds from various traits (Raises the MSRV to 1.65) 100 | * [#42](https://github.com/weiznich/diesel_async/pull/42) Add an additional `AsyncDieselConnectionManager` constructor that allows 
to specify a custom connection setup method to allow setting up postgres TLS connections 101 | * Relicense the crate under the MIT or Apache 2.0 License 102 | 103 | ## [0.1.1] - 2022-10-19 104 | 105 | ### Fixes 106 | 107 | * Fix prepared statement leak for the mysql backend implementation 108 | 109 | ## 0.1.0 - 2022-09-27 110 | 111 | * Initial release 112 | 113 | [0.1.1]: https://github.com/weiznich/diesel_async/compare/v0.1.0...v0.1.1 114 | [0.2.0]: https://github.com/weiznich/diesel_async/compare/v0.1.1...v0.2.0 115 | [0.2.1]: https://github.com/weiznich/diesel_async/compare/v0.2.0...v0.2.1 116 | [0.2.2]: https://github.com/weiznich/diesel_async/compare/v0.2.1...v0.2.2 117 | [0.3.0]: https://github.com/weiznich/diesel_async/compare/v0.2.2...v0.3.0 118 | [0.3.1]: https://github.com/weiznich/diesel_async/compare/v0.3.0...v0.3.1 119 | [0.3.2]: https://github.com/weiznich/diesel_async/compare/v0.3.1...v0.3.2 120 | [0.4.0]: https://github.com/weiznich/diesel_async/compare/v0.3.2...v0.4.0 121 | [0.4.1]: https://github.com/weiznich/diesel_async/compare/v0.4.0...v0.4.1 122 | [0.5.0]: https://github.com/weiznich/diesel_async/compare/v0.4.1...v0.5.0 123 | [0.5.1]: https://github.com/weiznich/diesel_async/compare/v0.5.0...v0.5.1 124 | [0.5.2]: https://github.com/weiznich/diesel_async/compare/v0.5.1...v0.5.2 125 | [0.6.0]: https://github.com/weiznich/diesel_async/compare/v0.5.2...v0.6.0 126 | [0.6.1]: https://github.com/weiznich/diesel_async/compare/v0.6.0...v0.6.1 127 | [0.7.0]: https://github.com/weiznich/diesel_async/compare/v0.6.1...v0.7.0 128 | [0.7.1]: https://github.com/weiznich/diesel_async/compare/v0.7.0...v0.7.1 129 | [0.7.2]: https://github.com/weiznich/diesel_async/compare/v0.7.1...v0.7.2 130 | [0.7.3]: https://github.com/weiznich/diesel_async/compare/v0.7.2...v0.7.3 131 | [0.7.4]: https://github.com/weiznich/diesel_async/compare/v0.7.3...v0.7.4 132 | [Unreleased]: https://github.com/weiznich/diesel_async/compare/v0.7.4...main 133 | -------------------------------------------------------------------------------- /tests/custom_types.rs:
-------------------------------------------------------------------------------- 1 | use crate::connection; 2 | use diesel::deserialize::{self, FromSql, FromSqlRow}; 3 | use diesel::expression::{AsExpression, IntoSql}; 4 | use diesel::pg::{Pg, PgValue}; 5 | use diesel::query_builder::QueryId; 6 | use diesel::serialize::{self, IsNull, Output, ToSql}; 7 | use diesel::sql_types::{Array, Integer, SqlType}; 8 | use diesel::*; 9 | use diesel_async::{RunQueryDsl, SimpleAsyncConnection}; 10 | use std::io::Write; 11 | 12 | table! { 13 | use diesel::sql_types::*; 14 | use super::MyType; 15 | custom_types { 16 | id -> Integer, 17 | custom_enum -> MyType, 18 | } 19 | } 20 | 21 | #[derive(SqlType, QueryId)] 22 | #[diesel(postgres_type(name = "my_type"))] 23 | pub struct MyType; 24 | 25 | #[derive(Debug, PartialEq, FromSqlRow, AsExpression)] 26 | #[diesel(sql_type = MyType)] 27 | pub enum MyEnum { 28 | Foo, 29 | Bar, 30 | } 31 | 32 | impl ToSql for MyEnum { 33 | fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { 34 | match *self { 35 | MyEnum::Foo => out.write_all(b"foo")?, 36 | MyEnum::Bar => out.write_all(b"bar")?, 37 | } 38 | Ok(IsNull::No) 39 | } 40 | } 41 | 42 | impl FromSql for MyEnum { 43 | fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { 44 | match bytes.as_bytes() { 45 | b"foo" => Ok(MyEnum::Foo), 46 | b"bar" => Ok(MyEnum::Bar), 47 | _ => Err("Unrecognized enum variant".into()), 48 | } 49 | } 50 | } 51 | 52 | #[derive(Insertable, Queryable, Identifiable, Debug, PartialEq)] 53 | #[diesel(table_name = custom_types)] 54 | struct HasCustomTypes { 55 | id: i32, 56 | custom_enum: MyEnum, 57 | } 58 | 59 | #[tokio::test(flavor = "multi_thread", worker_threads = 1)] 60 | async fn custom_types_round_trip() { 61 | let data = vec![ 62 | HasCustomTypes { 63 | id: 1, 64 | custom_enum: MyEnum::Foo, 65 | }, 66 | HasCustomTypes { 67 | id: 2, 68 | custom_enum: MyEnum::Bar, 69 | }, 70 | ]; 71 | let connection = &mut connection().await; 72 | 73 | 
connection 74 | .batch_execute( 75 | r#" 76 | CREATE TYPE my_type AS ENUM ('foo', 'bar'); 77 | CREATE TABLE custom_types ( 78 | id SERIAL PRIMARY KEY, 79 | custom_enum my_type NOT NULL 80 | ); 81 | "#, 82 | ) 83 | .await 84 | .unwrap(); 85 | 86 | // Try encoding arrays to test type metadata lookup 87 | let selected = select(( 88 | vec![MyEnum::Foo].into_sql::>(), 89 | vec![0i32].into_sql::>(), 90 | vec![MyEnum::Bar].into_sql::>(), 91 | )) 92 | .get_result::<(Vec, Vec, Vec)>(connection) 93 | .await 94 | .unwrap(); 95 | assert_eq!((vec![MyEnum::Foo], vec![0], vec![MyEnum::Bar]), selected); 96 | 97 | let inserted = insert_into(custom_types::table) 98 | .values(&data) 99 | .get_results(connection) 100 | .await 101 | .unwrap(); 102 | assert_eq!(data, inserted); 103 | } 104 | 105 | table! { 106 | use diesel::sql_types::*; 107 | use super::MyTypeInCustomSchema; 108 | custom_schema.custom_types_with_custom_schema { 109 | id -> Integer, 110 | custom_enum -> MyTypeInCustomSchema, 111 | } 112 | } 113 | 114 | #[derive(SqlType, QueryId)] 115 | #[diesel(postgres_type(name = "my_type", schema = "custom_schema"))] 116 | pub struct MyTypeInCustomSchema; 117 | 118 | #[derive(Debug, PartialEq, FromSqlRow, AsExpression)] 119 | #[diesel(sql_type = MyTypeInCustomSchema)] 120 | pub enum MyEnumInCustomSchema { 121 | Foo, 122 | Bar, 123 | } 124 | 125 | impl ToSql for MyEnumInCustomSchema { 126 | fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { 127 | match *self { 128 | MyEnumInCustomSchema::Foo => out.write_all(b"foo")?, 129 | MyEnumInCustomSchema::Bar => out.write_all(b"bar")?, 130 | } 131 | Ok(IsNull::No) 132 | } 133 | } 134 | 135 | impl FromSql for MyEnumInCustomSchema { 136 | fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { 137 | match bytes.as_bytes() { 138 | b"foo" => Ok(MyEnumInCustomSchema::Foo), 139 | b"bar" => Ok(MyEnumInCustomSchema::Bar), 140 | _ => Err("Unrecognized enum variant".into()), 141 | } 142 | } 143 | } 144 | 145 | 
#[derive(Insertable, Queryable, Identifiable, Debug, PartialEq)] 146 | #[diesel(table_name = custom_types_with_custom_schema)] 147 | struct HasCustomTypesInCustomSchema { 148 | id: i32, 149 | custom_enum: MyEnumInCustomSchema, 150 | } 151 | 152 | #[tokio::test(flavor = "multi_thread", worker_threads = 1)] 153 | async fn custom_types_in_custom_schema_round_trip() { 154 | let data = vec![ 155 | HasCustomTypesInCustomSchema { 156 | id: 1, 157 | custom_enum: MyEnumInCustomSchema::Foo, 158 | }, 159 | HasCustomTypesInCustomSchema { 160 | id: 2, 161 | custom_enum: MyEnumInCustomSchema::Bar, 162 | }, 163 | ]; 164 | let connection = &mut connection().await; 165 | connection 166 | .batch_execute( 167 | r#" 168 | CREATE SCHEMA IF NOT EXISTS custom_schema; 169 | CREATE TYPE custom_schema.my_type AS ENUM ('foo', 'bar'); 170 | CREATE TABLE custom_schema.custom_types_with_custom_schema ( 171 | id SERIAL PRIMARY KEY, 172 | custom_enum custom_schema.my_type NOT NULL 173 | ); 174 | "#, 175 | ) 176 | .await 177 | .unwrap(); 178 | 179 | // Try encoding arrays to test type metadata lookup 180 | let selected = select(( 181 | vec![MyEnumInCustomSchema::Foo].into_sql::>(), 182 | vec![0i32].into_sql::>(), 183 | vec![MyEnumInCustomSchema::Bar].into_sql::>(), 184 | )) 185 | .get_result::<( 186 | Vec, 187 | Vec, 188 | Vec, 189 | )>(connection) 190 | .await 191 | .unwrap(); 192 | assert_eq!( 193 | ( 194 | vec![MyEnumInCustomSchema::Foo], 195 | vec![0], 196 | vec![MyEnumInCustomSchema::Bar] 197 | ), 198 | selected 199 | ); 200 | 201 | let inserted = insert_into(custom_types_with_custom_schema::table) 202 | .values(&data) 203 | .get_results(connection) 204 | .await 205 | .unwrap(); 206 | assert_eq!(data, inserted); 207 | } 208 | -------------------------------------------------------------------------------- /src/migrations.rs: -------------------------------------------------------------------------------- 1 | use diesel::migration::{Migration, MigrationVersion, Result}; 2 | 3 | use 
crate::async_connection_wrapper::AsyncConnectionWrapper; 4 | use crate::AsyncConnection; 5 | 6 | /// A diesel-migration [`MigrationHarness`](diesel_migrations::MigrationHarness) to run migrations 7 | /// via an [`AsyncConnection`](crate::AsyncConnection) 8 | /// 9 | /// Internally this harness is using [`tokio::task::block_in_place`] and [`AsyncConnectionWrapper`] 10 | /// to utilize sync Diesel's migration infrastructure. For most applications this shouldn't 11 | /// be problematic as migrations are usually run at application startup and most applications 12 | /// default to using the multithreaded tokio runtime. In turn this also means that you cannot use 13 | /// this migration harness if you use the current thread variant of the tokio runtime or if 14 | /// you run migrations in a very special setup (e.g. by using [`tokio::select!`] or [`tokio::join!`] 15 | /// on a future produced by running the migrations). Consider manually constructing a blocking task via 16 | /// [`tokio::task::spawn_blocking`] instead.
17 | /// 18 | /// ## Example 19 | /// 20 | /// ```no_run 21 | /// # include!("doctest_setup.rs"); 22 | /// # async fn run_test() -> Result<(), Box>{ 23 | /// use diesel_async::AsyncMigrationHarness; 24 | /// use diesel_migrations::{FileBasedMigrations, MigrationHarness}; 25 | /// 26 | /// let mut connection = connection_no_data().await; 27 | /// 28 | /// // Alternatively use `diesel_migrations::embed_migrations!()` 29 | /// // to get a list of migrations 30 | /// let migrations = FileBasedMigrations::find_migrations_directory()?; 31 | /// 32 | /// let mut harness = AsyncMigrationHarness::new(connection); 33 | /// harness.run_pending_migrations(migrations)?; 34 | /// // get back the connection from the harness 35 | /// let connection = harness.into_inner(); 36 | /// # Ok(()) 37 | /// # } 38 | /// # #[tokio::main] 39 | /// # async fn main() -> Result<(), Box> { 40 | /// # run_test().await?; 41 | /// # Ok(()) 42 | /// # } 43 | /// ``` 44 | /// 45 | /// ## Example with pool 46 | /// 47 | /// ```no_run 48 | /// # include!("doctest_setup.rs"); 49 | /// # #[cfg(feature = "deadpool")] 50 | /// # use diesel_async::pooled_connection::AsyncDieselConnectionManager; 51 | /// # 52 | /// # #[cfg(all(feature = "postgres", feature = "deadpool"))] 53 | /// # fn get_config() -> AsyncDieselConnectionManager { 54 | /// # let db_url = database_url_from_env("PG_DATABASE_URL"); 55 | /// let config = AsyncDieselConnectionManager::::new(db_url); 56 | /// # config 57 | /// # } 58 | /// # 59 | /// # #[cfg(all(feature = "mysql", feature = "deadpool"))] 60 | /// # fn get_config() -> AsyncDieselConnectionManager { 61 | /// # let db_url = database_url_from_env("MYSQL_DATABASE_URL"); 62 | /// # let config = AsyncDieselConnectionManager::::new(db_url); 63 | /// # config 64 | /// # } 65 | /// # 66 | /// # #[cfg(all(feature = "sqlite", feature = "deadpool"))] 67 | /// # fn get_config() -> AsyncDieselConnectionManager> { 68 | /// # let db_url = database_url_from_env("SQLITE_DATABASE_URL"); 69 | /// # 
let config = AsyncDieselConnectionManager::>::new(db_url); 70 | /// # config 71 | /// # } 72 | /// # #[cfg(feature = "deadpool")] 73 | /// # async fn run_test() -> Result<(), Box>{ 74 | /// use diesel_async::pooled_connection::deadpool::Pool; 75 | /// use diesel_async::AsyncMigrationHarness; 76 | /// use diesel_migrations::{FileBasedMigrations, MigrationHarness}; 77 | /// 78 | /// // Alternatively use `diesel_migrations::embed_migrations!()` 79 | /// // to get a list of migrations 80 | /// let migrations = FileBasedMigrations::find_migrations_directory()?; 81 | /// 82 | /// let pool = Pool::builder(get_config()).build()?; 83 | /// let mut harness = AsyncMigrationHarness::new(pool.get().await?); 84 | /// harness.run_pending_migrations(migrations)?; 85 | /// # Ok(()) 86 | /// # } 87 | /// 88 | /// # #[cfg(not(feature = "deadpool"))] 89 | /// # async fn run_test() -> Result<(), Box> 90 | /// # { 91 | /// # Ok(()) 92 | /// # } 93 | /// # 94 | /// # #[tokio::main] 95 | /// # async fn main() -> Result<(), Box> { 96 | /// # run_test().await?; 97 | /// # Ok(()) 98 | /// # } 99 | /// ``` 100 | pub struct AsyncMigrationHarness { 101 | conn: AsyncConnectionWrapper, 102 | } 103 | 104 | impl AsyncMigrationHarness 105 | where 106 | C: AsyncConnection, 107 | { 108 | /// Construct a new `AsyncMigrationHarness` from a given connection 109 | pub fn new(connection: C) -> Self { 110 | Self { 111 | conn: AsyncConnectionWrapper::from(connection), 112 | } 113 | } 114 | 115 | /// Return the connection stored inside this instance of `AsyncMigrationHarness` 116 | pub fn into_inner(self) -> C { 117 | self.conn.into_inner() 118 | } 119 | } 120 | 121 | impl From for AsyncMigrationHarness 122 | where 123 | C: AsyncConnection, 124 | { 125 | fn from(value: C) -> Self { 126 | AsyncMigrationHarness::new(value) 127 | } 128 | } 129 | 130 | impl diesel_migrations::MigrationHarness for AsyncMigrationHarness 131 | where 132 | C: AsyncConnection, 133 | AsyncConnectionWrapper: 134 | diesel::Connection + 
diesel_migrations::MigrationHarness, 135 | { 136 | fn run_migration( 137 | &mut self, 138 | migration: &dyn Migration, 139 | ) -> Result> { 140 | tokio::task::block_in_place(|| { 141 | diesel_migrations::MigrationHarness::run_migration(&mut self.conn, migration) 142 | }) 143 | } 144 | 145 | fn revert_migration( 146 | &mut self, 147 | migration: &dyn Migration, 148 | ) -> Result> { 149 | tokio::task::block_in_place(|| { 150 | diesel_migrations::MigrationHarness::revert_migration(&mut self.conn, migration) 151 | }) 152 | } 153 | 154 | fn applied_migrations(&mut self) -> Result>> { 155 | tokio::task::block_in_place(|| { 156 | diesel_migrations::MigrationHarness::applied_migrations(&mut self.conn) 157 | }) 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # An async interface for diesel 2 | 3 | Diesel gets rid of the boilerplate for database interaction and eliminates 4 | runtime errors without sacrificing performance. It takes full advantage of 5 | Rust's type system to create a low overhead query builder that "feels like 6 | Rust." 7 | 8 | Diesel-async provides an async implementation of diesel's connection implementations 9 | and any method that may issue a query. It is designed as a pure async drop-in replacement 10 | for the corresponding diesel methods. Similar to diesel the crate is designed in a way 11 | that allows third party crates to extend the existing infrastructure and even provide 12 | their own connection implementations. 13 | 14 | Supported databases: 15 | 16 | 1. PostgreSQL 17 | 2. MySQL 18 | 3. SQLite (via `SyncConnectionWrapper`) 19 | 20 | ## Usage 21 | 22 | ### Simple usage 23 | 24 | Diesel-async is designed to work in combination with diesel, not to replace diesel. For this it 25 | provides a drop-in replacement for diesel functionality that actually interacts with the database.
25 | 26 | A normal project should use a setup similar to the following one: 27 | 28 | ```toml 29 | [dependencies] 30 | diesel = "2.3.0" # no backend features need to be enabled 31 | diesel-async = { version = "0.7.0", features = ["postgres"] } 32 | ``` 33 | 34 | This allows to import the relevant traits from both crates: 35 | 36 | ```rust 37 | use diesel::prelude::*; 38 | use diesel_async::{RunQueryDsl, AsyncConnection, AsyncPgConnection}; 39 | 40 | // ordinary diesel model setup 41 | 42 | table! { 43 | users { 44 | id -> Integer, 45 | name -> Text, 46 | } 47 | } 48 | 49 | #[derive(Queryable, Selectable)] 50 | #[diesel(table_name = users)] 51 | struct User { 52 | id: i32, 53 | name: String, 54 | } 55 | 56 | // create an async connection 57 | let mut connection = AsyncPgConnection::establish(&std::env::var("DATABASE_URL")?).await?; 58 | 59 | // use ordinary diesel query dsl to construct your query 60 | let data: Vec = users::table 61 | .filter(users::id.gt(0)) 62 | .or_filter(users::name.like("%Luke")) 63 | .select(User::as_select()) 64 | // execute the query via the provided 65 | // async `diesel_async::RunQueryDsl` 66 | .load(&mut connection) 67 | .await?; 68 | ``` 69 | 70 | ### Async Transaction Support 71 | 72 | Diesel-async provides an ergonomic interface to wrap several statements into a shared 73 | database transaction. Such transactions are automatically rolled back as soon as 74 | the inner closure returns an error 75 | 76 | ``` rust 77 | connection.transaction::<_, diesel::result::Error, _>(|conn| async move { 78 | diesel::insert_into(users::table) 79 | .values(users::name.eq("Ruby")) 80 | .execute(conn) 81 | .await?; 82 | 83 | let all_names = users::table.select(users::name).load::(conn).await?; 84 | Ok(()) 85 | }.scope_boxed() 86 | ).await?; 87 | ``` 88 | 89 | ### Streaming Query Support 90 | 91 | Beside loading data directly into a vector, diesel-async also supports returning a 92 | value stream for each query. 
This allows to process data from the database while they 93 | are still received. 94 | 95 | ```rust 96 | // use ordinary diesel query dsl to construct your query 97 | let data: impl Stream> = users::table 98 | .filter(users::id.gt(0)) 99 | .or_filter(users::name.like("%Luke")) 100 | .select(User::as_select()) 101 | // execute the query via the provided 102 | // async `diesel_async::RunQueryDsl` 103 | .load_stream(&mut connection) 104 | .await?; 105 | 106 | ``` 107 | 108 | ### Built-in Connection Pooling Support 109 | 110 | Diesel-async provides built-in support for several connection pooling crates. This includes support 111 | for: 112 | 113 | * [deadpool](https://crates.io/crates/deadpool) 114 | * [bb8](https://crates.io/crates/bb8) 115 | * [mobc](https://crates.io/crates/mobc) 116 | 117 | #### Deadpool 118 | 119 | ``` rust 120 | use diesel_async::pooled_connection::AsyncDieselConnectionManager; 121 | use diesel_async::pooled_connection::deadpool::Pool; 122 | use diesel_async::RunQueryDsl; 123 | 124 | // create a new connection pool with the default config 125 | let config = AsyncDieselConnectionManager::::new(std::env::var("DATABASE_URL")?); 126 | let pool = Pool::builder(config).build()?; 127 | 128 | // checkout a connection from the pool 129 | let mut conn = pool.get().await?; 130 | 131 | // use the connection as ordinary diesel-async connection 132 | let res = users::table.select(User::as_select()).load::(&mut conn).await?; 133 | ``` 134 | 135 | #### BB8 136 | 137 | ``` rust 138 | use diesel_async::pooled_connection::AsyncDieselConnectionManager; 139 | use diesel_async::pooled_connection::bb8::Pool; 140 | use diesel_async::RunQueryDsl; 141 | 142 | // create a new connection pool with the default config 143 | let config = AsyncDieselConnectionManager::::new(std::env::var("DATABASE_URL")?); 144 | let pool = Pool::builder().build(config).await?; 145 | 146 | // checkout a connection from the pool 147 | let mut conn = pool.get().await?; 148 | 149 | // use the 
connection as ordinary diesel-async connection 150 | let res = users::table.select(User::as_select()).load::(&mut conn).await?; 151 | ``` 152 | 153 | #### Mobc 154 | 155 | ``` rust 156 | use diesel_async::pooled_connection::AsyncDieselConnectionManager; 157 | use diesel_async::pooled_connection::mobc::Pool; 158 | use diesel_async::RunQueryDsl; 159 | 160 | // create a new connection pool with the default config 161 | let config = AsyncDieselConnectionManager::::new(std::env::var("DATABASE_URL")?); 162 | let pool = Pool::new(config); 163 | 164 | // checkout a connection from the pool 165 | let mut conn = pool.get().await?; 166 | 167 | // use the connection as ordinary diesel-async connection 168 | let res = users::table.select(User::as_select()).load::(&mut conn).await?; 169 | ``` 170 | 171 | ## Diesel-Async with Secure Database 172 | 173 | In the event of using this crate with a `sslmode=require` flag, it will be necessary to build a TLS cert. 174 | There is an example provided for doing this using the `rustls` crate in the `postgres` examples folder. 175 | 176 | ## Crate Feature Flags 177 | 178 | Diesel-async offers several configurable features: 179 | 180 | * `postgres`: Enables the implementation of `AsyncPgConnection` 181 | * `mysql`: Enables the implementation of `AsyncMysqlConnection` 182 | * `deadpool`: Enables support for the `deadpool` connection pool implementation 183 | * `bb8`: Enables support for the `bb8` connection pool implementation 184 | * `mobc`: Enables support for the `mobc` connection pool implementation 185 | 186 | By default no features are enabled. 187 | 188 | ## Code of conduct 189 | 190 | Anyone who interacts with Diesel in any space, including but not limited to 191 | this GitHub repository, must follow our [code of conduct](https://github.com/diesel-rs/diesel/blob/master/code_of_conduct.md). 
192 | 193 | ## License 194 | 195 | Licensed under either of these: 196 | 197 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 198 | https://www.apache.org/licenses/LICENSE-2.0) 199 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or 200 | https://opensource.org/licenses/MIT) 201 | 202 | ### Contributing 203 | 204 | Contributions are explicitly welcome. Please consider opening a [discussion](https://github.com/weiznich/diesel_async/discussions/categories/ideas) 205 | with your idea first, to discuss possible designs. 206 | 207 | Unless you explicitly state otherwise, any contribution you intentionally submit 208 | for inclusion in the work, as defined in the Apache-2.0 license, shall be 209 | dual-licensed as above, without any additional terms or conditions. 210 | 211 | -------------------------------------------------------------------------------- /src/pooled_connection/mod.rs: -------------------------------------------------------------------------------- 1 | //! This module contains support using diesel-async with 2 | //! various async rust connection pooling solutions 3 | //! 4 | //! See the concrete pool implementations for examples: 5 | //! * [deadpool](self::deadpool) 6 | //! * [bb8](self::bb8) 7 | //! * [mobc](self::mobc) 8 | use crate::{AsyncConnection, TransactionManager}; 9 | use diesel::QueryResult; 10 | use futures_core::future::BoxFuture; 11 | use futures_util::FutureExt; 12 | use std::borrow::Cow; 13 | use std::fmt; 14 | use std::future::Future; 15 | 16 | #[cfg(feature = "bb8")] 17 | pub mod bb8; 18 | #[cfg(feature = "deadpool")] 19 | pub mod deadpool; 20 | #[cfg(feature = "mobc")] 21 | pub mod mobc; 22 | 23 | /// The error used when managing connections with `deadpool`. 
24 | #[derive(Debug)] 25 | pub enum PoolError { 26 | /// An error occurred establishing the connection 27 | ConnectionError(diesel::result::ConnectionError), 28 | 29 | /// An error occurred pinging the database 30 | QueryError(diesel::result::Error), 31 | } 32 | 33 | impl fmt::Display for PoolError { 34 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 35 | match *self { 36 | PoolError::ConnectionError(ref e) => e.fmt(f), 37 | PoolError::QueryError(ref e) => e.fmt(f), 38 | } 39 | } 40 | } 41 | 42 | impl std::error::Error for PoolError {} 43 | 44 | /// Type of the custom setup closure passed to [`ManagerConfig::custom_setup`] 45 | pub type SetupCallback = 46 | Box BoxFuture> + Send + Sync>; 47 | 48 | /// Type of the recycle check callback for the [`RecyclingMethod::CustomFunction`] variant 49 | pub type RecycleCheckCallback = dyn Fn(&mut C) -> BoxFuture> + Send + Sync; 50 | 51 | /// Possible methods of how a connection is recycled. 52 | #[derive(Default)] 53 | pub enum RecyclingMethod { 54 | /// Only check for open transactions when recycling existing connections. 55 | /// Unless you have special needs this is a safe choice. 56 | /// 57 | /// If the database connection is closed you will receive an error at the first place 58 | /// where you actually try to use the connection 59 | Fast, 60 | /// In addition to checking for open transactions a test query is executed 61 | /// 62 | /// This is slower, but guarantees that the database connection is ready to be used.
63 | #[default] 64 | Verified, 65 | /// Like `Verified` but with a custom query 66 | CustomQuery(Cow<'static, str>), 67 | /// Like `Verified` but with a custom callback that allows to perform more checks 68 | /// 69 | /// The connection is only recycled if the callback returns `Ok(())` 70 | CustomFunction(Box>), 71 | } 72 | 73 | impl fmt::Debug for RecyclingMethod { 74 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 75 | match self { 76 | Self::Fast => write!(f, "Fast"), 77 | Self::Verified => write!(f, "Verified"), 78 | Self::CustomQuery(arg0) => f.debug_tuple("CustomQuery").field(arg0).finish(), 79 | Self::CustomFunction(_) => f.debug_tuple("CustomFunction").finish(), 80 | } 81 | } 82 | } 83 | 84 | /// Configuration object for a Manager. 85 | /// 86 | /// This makes it possible to specify which [`RecyclingMethod`] 87 | /// should be used when retrieving existing objects from the `Pool` 88 | /// and it allows to provide a custom setup function. 89 | #[non_exhaustive] 90 | pub struct ManagerConfig { 91 | /// Method of how a connection is recycled. See [RecyclingMethod]. 92 | pub recycling_method: RecyclingMethod, 93 | /// Construct a new connection manger 94 | /// with a custom setup procedure 95 | /// 96 | /// This can be used to for example establish a SSL secured 97 | /// postgres connection 98 | pub custom_setup: SetupCallback, 99 | } 100 | 101 | impl Default for ManagerConfig 102 | where 103 | C: AsyncConnection + 'static, 104 | { 105 | fn default() -> Self { 106 | Self { 107 | recycling_method: Default::default(), 108 | custom_setup: Box::new(|url| C::establish(url).boxed()), 109 | } 110 | } 111 | } 112 | 113 | /// An connection manager for use with diesel-async. 
114 | /// 115 | /// See the concrete pool implementations for examples: 116 | /// * [deadpool](self::deadpool) 117 | /// * [bb8](self::bb8) 118 | /// * [mobc](self::mobc) 119 | #[allow(dead_code)] 120 | pub struct AsyncDieselConnectionManager { 121 | connection_url: String, 122 | manager_config: ManagerConfig, 123 | } 124 | 125 | impl fmt::Debug for AsyncDieselConnectionManager { 126 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 127 | write!( 128 | f, 129 | "AsyncDieselConnectionManager<{}>", 130 | std::any::type_name::() 131 | ) 132 | } 133 | } 134 | 135 | impl AsyncDieselConnectionManager 136 | where 137 | C: AsyncConnection + 'static, 138 | { 139 | /// Returns a new connection manager, 140 | /// which establishes connections to the given database URL. 141 | #[must_use] 142 | pub fn new(connection_url: impl Into) -> Self 143 | where 144 | C: AsyncConnection + 'static, 145 | { 146 | Self::new_with_config(connection_url, Default::default()) 147 | } 148 | 149 | /// Returns a new connection manager, 150 | /// which establishes connections with the given database URL 151 | /// and that uses the specified configuration 152 | #[must_use] 153 | pub fn new_with_config( 154 | connection_url: impl Into, 155 | manager_config: ManagerConfig, 156 | ) -> Self { 157 | Self { 158 | connection_url: connection_url.into(), 159 | manager_config, 160 | } 161 | } 162 | } 163 | 164 | #[doc(hidden)] 165 | pub trait PoolableConnection: AsyncConnection { 166 | /// Check if a connection is still valid 167 | /// 168 | /// The default implementation will perform a check based on the provided 169 | /// recycling method variant 170 | fn ping( 171 | &mut self, 172 | config: &RecyclingMethod, 173 | ) -> impl Future> + Send 174 | where 175 | for<'a> Self: 'a, 176 | diesel::dsl::select>: 177 | crate::methods::ExecuteDsl, 178 | diesel::query_builder::SqlQuery: crate::methods::ExecuteDsl, 179 | { 180 | use crate::run_query_dsl::RunQueryDsl; 181 | use diesel::IntoSql; 182 | 183 | async 
move { 184 | match config { 185 | RecyclingMethod::Fast => Ok(()), 186 | RecyclingMethod::Verified => { 187 | diesel::select(1_i32.into_sql::()) 188 | .execute(self) 189 | .await 190 | .map(|_| ()) 191 | } 192 | RecyclingMethod::CustomQuery(query) => diesel::sql_query(query.as_ref()) 193 | .execute(self) 194 | .await 195 | .map(|_| ()), 196 | RecyclingMethod::CustomFunction(c) => c(self).await, 197 | } 198 | } 199 | } 200 | 201 | /// Checks if the connection is broken and should not be reused 202 | /// 203 | /// This method should return only contain a fast non-blocking check 204 | /// if the connection is considered to be broken or not. See 205 | /// [ManageConnection::has_broken] for details. 206 | /// 207 | /// The default implementation uses 208 | /// [TransactionManager::is_broken_transaction_manager]. 209 | fn is_broken(&mut self) -> bool { 210 | Self::TransactionManager::is_broken_transaction_manager(self) 211 | } 212 | } 213 | -------------------------------------------------------------------------------- /tests/type_check.rs: -------------------------------------------------------------------------------- 1 | use crate::{connection, TestConnection}; 2 | use diesel::deserialize::FromSqlRow; 3 | use diesel::expression::{AsExpression, ValidGrouping}; 4 | use diesel::prelude::*; 5 | use diesel::query_builder::{NoFromClause, QueryFragment, QueryId}; 6 | use diesel::sql_types::{self, HasSqlType, SingleValue}; 7 | use diesel_async::{AsyncConnectionCore, RunQueryDsl}; 8 | use std::fmt::Debug; 9 | 10 | async fn type_check(conn: &mut TestConnection, value: T) 11 | where 12 | T: Clone 13 | + AsExpression 14 | + FromSqlRow::Backend> 15 | + Send 16 | + PartialEq 17 | + Debug 18 | + Clone 19 | + 'static, 20 | T::Expression: ValidGrouping<()> 21 | + SelectableExpression 22 | + QueryFragment<::Backend> 23 | + QueryId 24 | + Send, 25 | ::Backend: HasSqlType, 26 | ST: SingleValue, 27 | { 28 | let res = diesel::select(value.clone().into_sql()) 29 | .get_result::(conn) 30 | 
.await; 31 | 32 | assert_eq!(Ok(value), res); 33 | } 34 | 35 | #[tokio::test] 36 | async fn check_small_int() { 37 | let conn = &mut connection().await; 38 | type_check::<_, sql_types::SmallInt>(conn, 1_i16).await; 39 | type_check::<_, sql_types::SmallInt>(conn, 1_i16).await; 40 | type_check::<_, sql_types::SmallInt>(conn, i16::MIN).await; 41 | type_check::<_, sql_types::SmallInt>(conn, i16::MAX).await; 42 | } 43 | 44 | #[tokio::test] 45 | async fn check_int() { 46 | let conn = &mut connection().await; 47 | type_check::<_, sql_types::Integer>(conn, 1_i32).await; 48 | type_check::<_, sql_types::Integer>(conn, -1_i32).await; 49 | type_check::<_, sql_types::Integer>(conn, i32::MIN).await; 50 | type_check::<_, sql_types::Integer>(conn, i32::MAX).await; 51 | } 52 | 53 | #[tokio::test] 54 | async fn check_big_int() { 55 | let conn = &mut connection().await; 56 | type_check::<_, sql_types::BigInt>(conn, 1_i64).await; 57 | type_check::<_, sql_types::BigInt>(conn, -1_i64).await; 58 | type_check::<_, sql_types::BigInt>(conn, i64::MIN).await; 59 | type_check::<_, sql_types::BigInt>(conn, i64::MAX).await; 60 | } 61 | 62 | #[cfg(feature = "mysql")] 63 | #[tokio::test] 64 | async fn check_tiny_int() { 65 | let conn = &mut connection().await; 66 | type_check::<_, sql_types::TinyInt>(conn, 1_i8).await; 67 | type_check::<_, sql_types::TinyInt>(conn, -1_i8).await; 68 | type_check::<_, sql_types::TinyInt>(conn, i8::MIN).await; 69 | type_check::<_, sql_types::TinyInt>(conn, i8::MAX).await; 70 | 71 | // test case for https://github.com/weiznich/diesel_async/issues/91 72 | let res = diesel::dsl::sql::("SELECT -1 = ") 73 | .bind::(-1) 74 | .get_result::(conn) 75 | .await 76 | .unwrap(); 77 | assert!(res); 78 | } 79 | 80 | #[cfg(feature = "mysql")] 81 | #[tokio::test] 82 | async fn check_unsigned_tiny_int() { 83 | let conn = &mut connection().await; 84 | type_check::<_, sql_types::Unsigned>(conn, 1_u8).await; 85 | type_check::<_, sql_types::Unsigned>(conn, u8::MIN).await; 86 | 
type_check::<_, sql_types::Unsigned>(conn, u8::MAX).await; 87 | } 88 | 89 | #[cfg(feature = "mysql")] 90 | #[tokio::test] 91 | async fn check_unsigned_small_int() { 92 | let conn = &mut connection().await; 93 | type_check::<_, sql_types::Unsigned>(conn, 1_u16).await; 94 | type_check::<_, sql_types::Unsigned>(conn, u16::MIN).await; 95 | type_check::<_, sql_types::Unsigned>(conn, u16::MAX).await; 96 | } 97 | 98 | #[cfg(feature = "mysql")] 99 | #[tokio::test] 100 | async fn check_unsigned_int() { 101 | let conn = &mut connection().await; 102 | type_check::<_, sql_types::Unsigned>(conn, 1_u32).await; 103 | type_check::<_, sql_types::Unsigned>(conn, u32::MIN).await; 104 | type_check::<_, sql_types::Unsigned>(conn, u32::MAX).await; 105 | } 106 | 107 | #[cfg(feature = "mysql")] 108 | #[tokio::test] 109 | async fn check_unsigned_big_int() { 110 | let conn = &mut connection().await; 111 | type_check::<_, sql_types::Unsigned>(conn, 1_u64).await; 112 | type_check::<_, sql_types::Unsigned>(conn, u64::MIN).await; 113 | type_check::<_, sql_types::Unsigned>(conn, u64::MAX).await; 114 | } 115 | 116 | #[tokio::test] 117 | async fn check_bool() { 118 | let conn = &mut connection().await; 119 | type_check::<_, sql_types::Bool>(conn, false).await; 120 | type_check::<_, sql_types::Bool>(conn, false).await; 121 | } 122 | 123 | #[tokio::test] 124 | async fn check_f32() { 125 | let conn = &mut connection().await; 126 | type_check::<_, sql_types::Float4>(conn, 1.0_f32).await; 127 | type_check::<_, sql_types::Float4>(conn, f32::MIN_POSITIVE).await; 128 | type_check::<_, sql_types::Float4>(conn, f32::MIN).await; 129 | type_check::<_, sql_types::Float4>(conn, f32::MAX).await; 130 | } 131 | 132 | #[tokio::test] 133 | async fn check_f64() { 134 | let conn = &mut connection().await; 135 | type_check::<_, sql_types::Float8>(conn, 1.0_f64).await; 136 | type_check::<_, sql_types::Float8>(conn, f64::MIN_POSITIVE).await; 137 | type_check::<_, sql_types::Float8>(conn, f64::MIN).await; 138 | 
type_check::<_, sql_types::Float8>(conn, f64::MAX).await; 139 | } 140 | 141 | #[tokio::test] 142 | async fn check_string() { 143 | let conn = &mut connection().await; 144 | type_check::<_, sql_types::Text>(conn, String::from("Test")).await; 145 | type_check::<_, sql_types::Text>(conn, String::new()).await; 146 | type_check::<_, sql_types::Text>(conn, String::from("üöä")).await; 147 | } 148 | 149 | #[tokio::test] 150 | async fn check_option() { 151 | let conn = &mut connection().await; 152 | type_check::<_, sql_types::Nullable>(conn, Some(42)).await; 153 | type_check::<_, sql_types::Nullable>(conn, None::).await; 154 | 155 | type_check::<_, sql_types::Nullable>(conn, Some(String::new())).await; 156 | type_check::<_, sql_types::Nullable>(conn, None::).await; 157 | } 158 | 159 | #[tokio::test] 160 | async fn test_blob() { 161 | let conn = &mut connection().await; 162 | type_check::<_, sql_types::Blob>(conn, b"foo".to_vec()).await; 163 | type_check::<_, sql_types::Blob>(conn, Vec::new()).await; 164 | } 165 | 166 | #[tokio::test] 167 | async fn test_timestamp() { 168 | let conn = &mut connection().await; 169 | type_check::<_, sql_types::Timestamp>( 170 | conn, 171 | chrono::NaiveDateTime::new( 172 | chrono::NaiveDate::from_ymd_opt(2021, 9, 27).unwrap(), 173 | chrono::NaiveTime::from_hms_milli_opt(17, 44, 23, 0).unwrap(), 174 | ), 175 | ) 176 | .await; 177 | } 178 | 179 | #[tokio::test] 180 | async fn test_date() { 181 | let conn = &mut connection().await; 182 | type_check::<_, sql_types::Date>(conn, chrono::NaiveDate::from_ymd_opt(2021, 9, 27).unwrap()) 183 | .await; 184 | } 185 | 186 | #[tokio::test] 187 | async fn test_time() { 188 | let conn = &mut connection().await; 189 | type_check::<_, sql_types::Time>( 190 | conn, 191 | chrono::NaiveTime::from_hms_milli_opt(17, 44, 23, 0).unwrap(), 192 | ) 193 | .await; 194 | } 195 | 196 | #[cfg(feature = "mysql")] 197 | #[tokio::test] 198 | async fn test_datetime() { 199 | let conn = &mut connection().await; 200 | 
type_check::<_, sql_types::Datetime>( 201 | conn, 202 | chrono::NaiveDateTime::new( 203 | chrono::NaiveDate::from_ymd_opt(2021, 9, 30).unwrap(), 204 | chrono::NaiveTime::from_hms_milli_opt(12, 6, 42, 0).unwrap(), 205 | ), 206 | ) 207 | .await; 208 | } 209 | -------------------------------------------------------------------------------- /tests/transactions.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "postgres")] 2 | #[tokio::test] 3 | async fn concurrent_serializable_transactions_behave_correctly() { 4 | use diesel::prelude::*; 5 | use diesel_async::RunQueryDsl; 6 | use std::sync::Arc; 7 | use tokio::sync::Barrier; 8 | 9 | table! { 10 | users3 { 11 | id -> Integer, 12 | } 13 | } 14 | 15 | // create an async connection 16 | let mut conn = super::connection_without_transaction().await; 17 | 18 | let mut conn1 = super::connection_without_transaction().await; 19 | 20 | diesel::sql_query("CREATE TABLE IF NOT EXISTS users3 (id int);") 21 | .execute(&mut conn) 22 | .await 23 | .unwrap(); 24 | 25 | let barrier_1 = Arc::new(Barrier::new(2)); 26 | let barrier_2 = Arc::new(Barrier::new(2)); 27 | let barrier_3 = Arc::new(Barrier::new(2)); 28 | let barrier_1_for_tx1 = barrier_1.clone(); 29 | let barrier_1_for_tx2 = barrier_1.clone(); 30 | let barrier_2_for_tx1 = barrier_2.clone(); 31 | let barrier_2_for_tx2 = barrier_2.clone(); 32 | let barrier_3_for_tx1 = barrier_3.clone(); 33 | let barrier_3_for_tx2 = barrier_3.clone(); 34 | 35 | let mut tx = conn.build_transaction().serializable().read_write(); 36 | 37 | let res = tx.run(|conn| { 38 | Box::pin(async { 39 | users3::table.select(users3::id).load::(conn).await?; 40 | 41 | barrier_1_for_tx1.wait().await; 42 | diesel::insert_into(users3::table) 43 | .values(users3::id.eq(1)) 44 | .execute(conn) 45 | .await?; 46 | barrier_3_for_tx1.wait().await; 47 | barrier_2_for_tx1.wait().await; 48 | 49 | Ok::<_, diesel::result::Error>(()) 50 | }) 51 | }); 52 | 53 | let mut tx1 = 
conn1.build_transaction().serializable().read_write(); 54 | 55 | let res1 = async { 56 | let res = tx1 57 | .run(|conn| { 58 | Box::pin(async { 59 | users3::table.select(users3::id).load::(conn).await?; 60 | 61 | barrier_1_for_tx2.wait().await; 62 | diesel::insert_into(users3::table) 63 | .values(users3::id.eq(1)) 64 | .execute(conn) 65 | .await?; 66 | barrier_3_for_tx2.wait().await; 67 | 68 | Ok::<_, diesel::result::Error>(()) 69 | }) 70 | }) 71 | .await; 72 | barrier_2_for_tx2.wait().await; 73 | res 74 | }; 75 | 76 | let (res, res1) = tokio::join!(res, res1); 77 | let _ = diesel::sql_query("DROP TABLE users3") 78 | .execute(&mut conn1) 79 | .await; 80 | 81 | assert!( 82 | res1.is_ok(), 83 | "Expected the second transaction to be succussfull, but got an error: {:?}", 84 | res1.unwrap_err() 85 | ); 86 | 87 | assert!(res.is_err(), "Expected the first transaction to fail"); 88 | let err = res.unwrap_err(); 89 | assert!( 90 | matches!( 91 | &err, 92 | diesel::result::Error::DatabaseError( 93 | diesel::result::DatabaseErrorKind::SerializationFailure, 94 | _ 95 | ) 96 | ), 97 | "Expected an serialization failure but got another error: {err:?}" 98 | ); 99 | 100 | let mut tx = conn.build_transaction(); 101 | 102 | let res = tx 103 | .run(|_| Box::pin(async { Ok::<_, diesel::result::Error>(()) })) 104 | .await; 105 | 106 | assert!( 107 | res.is_ok(), 108 | "Expect transaction to run fine but got an error: {:?}", 109 | res.unwrap_err() 110 | ); 111 | } 112 | 113 | #[cfg(feature = "postgres")] 114 | #[tokio::test] 115 | async fn commit_with_serialization_failure_already_ends_transaction() { 116 | use diesel::prelude::*; 117 | use diesel_async::{AsyncConnection, RunQueryDsl}; 118 | use std::sync::Arc; 119 | use tokio::sync::Barrier; 120 | 121 | table! 
{ 122 | users4 { 123 | id -> Integer, 124 | } 125 | } 126 | 127 | // create an async connection 128 | let mut conn = super::connection_without_transaction().await; 129 | 130 | struct A(Vec<&'static str>); 131 | impl diesel::connection::Instrumentation for A { 132 | fn on_connection_event(&mut self, event: diesel::connection::InstrumentationEvent<'_>) { 133 | if let diesel::connection::InstrumentationEvent::StartQuery { query, .. } = event { 134 | let q = query.to_string(); 135 | let q = q.split_once(' ').map(|(a, _)| a).unwrap_or(&q); 136 | 137 | if matches!(q, "BEGIN" | "COMMIT" | "ROLLBACK") { 138 | assert_eq!(q, self.0.pop().unwrap()); 139 | } 140 | } 141 | } 142 | } 143 | conn.set_instrumentation(A(vec!["COMMIT", "BEGIN", "COMMIT", "BEGIN"])); 144 | 145 | let mut conn1 = super::connection_without_transaction().await; 146 | 147 | diesel::sql_query("CREATE TABLE IF NOT EXISTS users4 (id int);") 148 | .execute(&mut conn) 149 | .await 150 | .unwrap(); 151 | 152 | let barrier_1 = Arc::new(Barrier::new(2)); 153 | let barrier_2 = Arc::new(Barrier::new(2)); 154 | let barrier_3 = Arc::new(Barrier::new(2)); 155 | let barrier_1_for_tx1 = barrier_1.clone(); 156 | let barrier_1_for_tx2 = barrier_1.clone(); 157 | let barrier_2_for_tx1 = barrier_2.clone(); 158 | let barrier_2_for_tx2 = barrier_2.clone(); 159 | let barrier_3_for_tx1 = barrier_3.clone(); 160 | let barrier_3_for_tx2 = barrier_3.clone(); 161 | 162 | let mut tx = conn.build_transaction().serializable().read_write(); 163 | 164 | let res = tx.run(|conn| { 165 | Box::pin(async { 166 | users4::table.select(users4::id).load::(conn).await?; 167 | 168 | barrier_1_for_tx1.wait().await; 169 | diesel::insert_into(users4::table) 170 | .values(users4::id.eq(1)) 171 | .execute(conn) 172 | .await?; 173 | barrier_3_for_tx1.wait().await; 174 | barrier_2_for_tx1.wait().await; 175 | 176 | Ok::<_, diesel::result::Error>(()) 177 | }) 178 | }); 179 | 180 | let mut tx1 = conn1.build_transaction().serializable().read_write(); 181 | 182 
| let res1 = async { 183 | let res = tx1 184 | .run(|conn| { 185 | Box::pin(async { 186 | users4::table.select(users4::id).load::(conn).await?; 187 | 188 | barrier_1_for_tx2.wait().await; 189 | diesel::insert_into(users4::table) 190 | .values(users4::id.eq(1)) 191 | .execute(conn) 192 | .await?; 193 | barrier_3_for_tx2.wait().await; 194 | 195 | Ok::<_, diesel::result::Error>(()) 196 | }) 197 | }) 198 | .await; 199 | barrier_2_for_tx2.wait().await; 200 | res 201 | }; 202 | 203 | let (res, res1) = tokio::join!(res, res1); 204 | let _ = diesel::sql_query("DROP TABLE users4") 205 | .execute(&mut conn1) 206 | .await; 207 | 208 | assert!( 209 | res1.is_ok(), 210 | "Expected the second transaction to be succussfull, but got an error: {:?}", 211 | res1.unwrap_err() 212 | ); 213 | 214 | assert!(res.is_err(), "Expected the first transaction to fail"); 215 | let err = res.unwrap_err(); 216 | assert!( 217 | matches!( 218 | &err, 219 | diesel::result::Error::DatabaseError( 220 | diesel::result::DatabaseErrorKind::SerializationFailure, 221 | _ 222 | ) 223 | ), 224 | "Expected an serialization failure but got another error: {err:?}" 225 | ); 226 | 227 | let mut tx = conn.build_transaction(); 228 | 229 | let res = tx 230 | .run(|_| Box::pin(async { Ok::<_, diesel::result::Error>(()) })) 231 | .await; 232 | 233 | assert!( 234 | res.is_ok(), 235 | "Expect transaction to run fine but got an error: {:?}", 236 | res.unwrap_err() 237 | ); 238 | } 239 | -------------------------------------------------------------------------------- /src/mysql/row.rs: -------------------------------------------------------------------------------- 1 | use diesel::backend::Backend; 2 | use diesel::mysql::data_types::{MysqlTime, MysqlTimestampType}; 3 | use diesel::mysql::{Mysql, MysqlType, MysqlValue}; 4 | use diesel::row::{PartialRow, RowIndex, RowSealed}; 5 | use mysql_async::consts::{ColumnFlags, ColumnType}; 6 | use mysql_async::{Column, Row, Value}; 7 | use std::borrow::Cow; 8 | 9 | pub struct 
MysqlRow(pub(super) Row); 10 | 11 | impl mysql_async::prelude::FromRow for MysqlRow { 12 | fn from_row_opt(row: Row) -> Result 13 | where 14 | Self: Sized, 15 | { 16 | Ok(Self(row)) 17 | } 18 | } 19 | 20 | impl RowIndex for MysqlRow { 21 | fn idx(&self, idx: usize) -> Option { 22 | if idx < self.0.columns_ref().len() { 23 | Some(idx) 24 | } else { 25 | None 26 | } 27 | } 28 | } 29 | 30 | impl<'a> RowIndex<&'a str> for MysqlRow { 31 | fn idx(&self, idx: &'a str) -> Option { 32 | self.0.columns().iter().position(|c| c.name_str() == idx) 33 | } 34 | } 35 | 36 | impl RowSealed for MysqlRow {} 37 | 38 | impl<'a> diesel::row::Row<'a, Mysql> for MysqlRow { 39 | type InnerPartialRow = Self; 40 | type Field<'b> 41 | = MysqlField<'b> 42 | where 43 | Self: 'b, 44 | 'a: 'b; 45 | 46 | fn field_count(&self) -> usize { 47 | self.0.columns_ref().len() 48 | } 49 | 50 | fn get<'b, I>(&'b self, idx: I) -> Option> 51 | where 52 | 'a: 'b, 53 | Self: diesel::row::RowIndex, 54 | { 55 | let idx = diesel::row::RowIndex::idx(self, idx)?; 56 | let value = self.0.as_ref(idx)?; 57 | let column = &self.0.columns_ref()[idx]; 58 | let buffer = match value { 59 | Value::NULL => None, 60 | Value::Bytes(b) => { 61 | // deserialize gets the length prepended, so we just use that buffer 62 | // directly 63 | Some(Cow::Borrowed(b as &[_])) 64 | } 65 | Value::Time(neg, day, hour, minute, second, second_part) => { 66 | let date = MysqlTime::new( 67 | 0, 68 | 0, 69 | *day as _, 70 | *hour as _, 71 | *minute as _, 72 | *second as _, 73 | *second_part as _, 74 | *neg as _, 75 | MysqlTimestampType::MYSQL_TIMESTAMP_TIME, 76 | 0, 77 | ); 78 | let buffer = unsafe { 79 | let ptr = &date as *const MysqlTime as *const u8; 80 | let slice = std::slice::from_raw_parts(ptr, std::mem::size_of::()); 81 | slice.to_vec() 82 | }; 83 | Some(Cow::Owned(buffer)) 84 | } 85 | Value::Date(year, month, day, hour, minute, second, second_part) => { 86 | let date = MysqlTime::new( 87 | *year as _, 88 | *month as _, 89 | *day as _, 90 
| *hour as _, 91 | *minute as _, 92 | *second as _, 93 | *second_part as _, 94 | false, 95 | MysqlTimestampType::MYSQL_TIMESTAMP_DATETIME, 96 | 0, 97 | ); 98 | let buffer = unsafe { 99 | let ptr = &date as *const MysqlTime as *const u8; 100 | let slice = std::slice::from_raw_parts(ptr, std::mem::size_of::()); 101 | slice.to_vec() 102 | }; 103 | Some(Cow::Owned(buffer)) 104 | } 105 | _t => { 106 | let mut buffer = Vec::with_capacity( 107 | value 108 | .bin_len() 109 | .try_into() 110 | .expect("Failed to cast byte size to usize"), 111 | ); 112 | mysql_common::proto::MySerialize::serialize(value, &mut buffer); 113 | Some(Cow::Owned(buffer)) 114 | } 115 | }; 116 | let field = MysqlField { 117 | value: buffer, 118 | column, 119 | name: column.name_str(), 120 | }; 121 | Some(field) 122 | } 123 | 124 | fn partial_row(&self, range: std::ops::Range) -> PartialRow<'_, Self::InnerPartialRow> { 125 | PartialRow::new(self, range) 126 | } 127 | } 128 | 129 | pub struct MysqlField<'a> { 130 | value: Option>, 131 | column: &'a Column, 132 | name: Cow<'a, str>, 133 | } 134 | 135 | impl diesel::row::Field<'_, Mysql> for MysqlField<'_> { 136 | fn field_name(&self) -> Option<&str> { 137 | Some(&*self.name) 138 | } 139 | 140 | fn value(&self) -> Option<::RawValue<'_>> { 141 | self.value.as_ref().map(|v| { 142 | MysqlValue::new( 143 | v, 144 | convert_type(self.column.column_type(), self.column.flags()), 145 | ) 146 | }) 147 | } 148 | } 149 | 150 | fn convert_type(column_type: ColumnType, column_flags: ColumnFlags) -> MysqlType { 151 | match column_type { 152 | ColumnType::MYSQL_TYPE_NEWDECIMAL | ColumnType::MYSQL_TYPE_DECIMAL => MysqlType::Numeric, 153 | ColumnType::MYSQL_TYPE_TINY if column_flags.contains(ColumnFlags::UNSIGNED_FLAG) => { 154 | MysqlType::UnsignedTiny 155 | } 156 | ColumnType::MYSQL_TYPE_TINY => MysqlType::Tiny, 157 | ColumnType::MYSQL_TYPE_YEAR | ColumnType::MYSQL_TYPE_SHORT 158 | if column_flags.contains(ColumnFlags::UNSIGNED_FLAG) => 159 | { 160 | 
MysqlType::UnsignedShort 161 | } 162 | ColumnType::MYSQL_TYPE_YEAR | ColumnType::MYSQL_TYPE_SHORT => MysqlType::Short, 163 | ColumnType::MYSQL_TYPE_INT24 | ColumnType::MYSQL_TYPE_LONG 164 | if column_flags.contains(ColumnFlags::UNSIGNED_FLAG) => 165 | { 166 | MysqlType::UnsignedLong 167 | } 168 | ColumnType::MYSQL_TYPE_INT24 | ColumnType::MYSQL_TYPE_LONG => MysqlType::Long, 169 | ColumnType::MYSQL_TYPE_LONGLONG if column_flags.contains(ColumnFlags::UNSIGNED_FLAG) => { 170 | MysqlType::UnsignedLongLong 171 | } 172 | ColumnType::MYSQL_TYPE_LONGLONG => MysqlType::LongLong, 173 | ColumnType::MYSQL_TYPE_FLOAT => MysqlType::Float, 174 | ColumnType::MYSQL_TYPE_DOUBLE => MysqlType::Double, 175 | 176 | ColumnType::MYSQL_TYPE_TIMESTAMP => MysqlType::Timestamp, 177 | ColumnType::MYSQL_TYPE_DATE => MysqlType::Date, 178 | ColumnType::MYSQL_TYPE_TIME => MysqlType::Time, 179 | ColumnType::MYSQL_TYPE_DATETIME => MysqlType::DateTime, 180 | ColumnType::MYSQL_TYPE_BIT => MysqlType::Bit, 181 | ColumnType::MYSQL_TYPE_JSON => MysqlType::String, 182 | 183 | ColumnType::MYSQL_TYPE_VAR_STRING 184 | | ColumnType::MYSQL_TYPE_STRING 185 | | ColumnType::MYSQL_TYPE_TINY_BLOB 186 | | ColumnType::MYSQL_TYPE_MEDIUM_BLOB 187 | | ColumnType::MYSQL_TYPE_LONG_BLOB 188 | | ColumnType::MYSQL_TYPE_BLOB 189 | if column_flags.contains(ColumnFlags::ENUM_FLAG) => 190 | { 191 | MysqlType::Enum 192 | } 193 | ColumnType::MYSQL_TYPE_VAR_STRING 194 | | ColumnType::MYSQL_TYPE_STRING 195 | | ColumnType::MYSQL_TYPE_TINY_BLOB 196 | | ColumnType::MYSQL_TYPE_MEDIUM_BLOB 197 | | ColumnType::MYSQL_TYPE_LONG_BLOB 198 | | ColumnType::MYSQL_TYPE_BLOB 199 | if column_flags.contains(ColumnFlags::SET_FLAG) => 200 | { 201 | MysqlType::Set 202 | } 203 | 204 | ColumnType::MYSQL_TYPE_VAR_STRING 205 | | ColumnType::MYSQL_TYPE_STRING 206 | | ColumnType::MYSQL_TYPE_TINY_BLOB 207 | | ColumnType::MYSQL_TYPE_MEDIUM_BLOB 208 | | ColumnType::MYSQL_TYPE_LONG_BLOB 209 | | ColumnType::MYSQL_TYPE_BLOB 210 | if 
column_flags.contains(ColumnFlags::BINARY_FLAG) => 211 | { 212 | MysqlType::Blob 213 | } 214 | 215 | ColumnType::MYSQL_TYPE_VAR_STRING 216 | | ColumnType::MYSQL_TYPE_STRING 217 | | ColumnType::MYSQL_TYPE_TINY_BLOB 218 | | ColumnType::MYSQL_TYPE_MEDIUM_BLOB 219 | | ColumnType::MYSQL_TYPE_LONG_BLOB 220 | | ColumnType::MYSQL_TYPE_BLOB => MysqlType::String, 221 | 222 | ColumnType::MYSQL_TYPE_NULL 223 | | ColumnType::MYSQL_TYPE_NEWDATE 224 | | ColumnType::MYSQL_TYPE_VARCHAR 225 | | ColumnType::MYSQL_TYPE_TIMESTAMP2 226 | | ColumnType::MYSQL_TYPE_DATETIME2 227 | | ColumnType::MYSQL_TYPE_TIME2 228 | | ColumnType::MYSQL_TYPE_TYPED_ARRAY 229 | | ColumnType::MYSQL_TYPE_UNKNOWN 230 | | ColumnType::MYSQL_TYPE_ENUM 231 | | ColumnType::MYSQL_TYPE_SET 232 | | ColumnType::MYSQL_TYPE_VECTOR 233 | | ColumnType::MYSQL_TYPE_GEOMETRY => { 234 | unimplemented!("Hit an unsupported type: {:?}", column_type) 235 | } 236 | } 237 | } 238 | -------------------------------------------------------------------------------- /tests/lib.rs: -------------------------------------------------------------------------------- 1 | use diesel::prelude::{ExpressionMethods, OptionalExtension, QueryDsl}; 2 | use diesel::QueryResult; 3 | use diesel_async::*; 4 | use scoped_futures::ScopedFutureExt; 5 | use std::fmt::Debug; 6 | 7 | #[cfg(feature = "postgres")] 8 | mod custom_types; 9 | mod instrumentation; 10 | #[cfg(feature = "migrations")] 11 | mod migrations; 12 | mod notifications; 13 | #[cfg(any(feature = "bb8", feature = "deadpool", feature = "mobc"))] 14 | mod pooling; 15 | #[cfg(feature = "async-connection-wrapper")] 16 | mod sync_wrapper; 17 | mod transactions; 18 | mod type_check; 19 | 20 | async fn transaction_test>( 21 | conn: &mut C, 22 | ) -> QueryResult<()> { 23 | let res = conn 24 | .transaction::(|conn| { 25 | async move { 26 | let users: Vec = users::table.load(conn).await?; 27 | assert_eq!(&users[0].name, "John Doe"); 28 | assert_eq!(&users[1].name, "Jane Doe"); 29 | 30 | let user: Option = 
users::table.find(42).first(conn).await.optional()?; 31 | assert_eq!(user, None::); 32 | 33 | let res = conn 34 | .transaction::<_, diesel::result::Error, _>(|conn| { 35 | async move { 36 | diesel::insert_into(users::table) 37 | .values(users::name.eq("Dave")) 38 | .execute(conn) 39 | .await?; 40 | let count = users::table.count().get_result::(conn).await?; 41 | assert_eq!(count, 3); 42 | Ok(()) 43 | } 44 | .scope_boxed() 45 | }) 46 | .await; 47 | assert!(res.is_ok()); 48 | let count = users::table.count().get_result::(conn).await?; 49 | assert_eq!(count, 3); 50 | 51 | let res = diesel::insert_into(users::table) 52 | .values(users::name.eq("Eve")) 53 | .execute(conn) 54 | .await?; 55 | 56 | assert_eq!(res, 1, "Insert in transaction returned wrong result"); 57 | let count = users::table.count().get_result::(conn).await?; 58 | assert_eq!(count, 4); 59 | 60 | Err(diesel::result::Error::RollbackTransaction) 61 | } 62 | .scope_boxed() 63 | }) 64 | .await; 65 | assert_eq!( 66 | res, 67 | Err(diesel::result::Error::RollbackTransaction), 68 | "Failed to rollback transaction" 69 | ); 70 | 71 | let count = users::table.count().get_result::(conn).await?; 72 | assert_eq!(count, 2, "user got committed, but transaction rolled back"); 73 | 74 | Ok(()) 75 | } 76 | 77 | diesel::table! 
{ 78 | users { 79 | id -> Integer, 80 | name -> Text, 81 | } 82 | } 83 | 84 | #[derive( 85 | diesel::Queryable, 86 | diesel::Selectable, 87 | Debug, 88 | PartialEq, 89 | diesel::AsChangeset, 90 | diesel::Identifiable, 91 | )] 92 | struct User { 93 | id: i32, 94 | name: String, 95 | } 96 | 97 | #[cfg(feature = "mysql")] 98 | type TestConnection = AsyncMysqlConnection; 99 | #[cfg(feature = "postgres")] 100 | type TestConnection = AsyncPgConnection; 101 | #[cfg(feature = "sqlite")] 102 | type TestConnection = 103 | sync_connection_wrapper::SyncConnectionWrapper; 104 | 105 | #[allow(dead_code)] 106 | type TestBackend = ::Backend; 107 | 108 | #[tokio::test] 109 | async fn test_basic_insert_and_load() -> QueryResult<()> { 110 | let conn = &mut connection().await; 111 | // Insertion split into 2 since Sqlite batch insert isn't supported for diesel_async yet 112 | let res = diesel::insert_into(users::table) 113 | .values(users::name.eq("John Doe")) 114 | .execute(conn) 115 | .await; 116 | assert_eq!(res, Ok(1), "User count does not match"); 117 | let res = diesel::insert_into(users::table) 118 | .values(users::name.eq("Jane Doe")) 119 | .execute(conn) 120 | .await; 121 | assert_eq!(res, Ok(1), "User count does not match"); 122 | let users = users::table.load::(conn).await?; 123 | assert_eq!(&users[0].name, "John Doe", "User name [0] does not match"); 124 | assert_eq!(&users[1].name, "Jane Doe", "User name [1] does not match"); 125 | 126 | transaction_test(conn).await?; 127 | 128 | Ok(()) 129 | } 130 | 131 | #[cfg(feature = "postgres")] 132 | diesel::define_sql_function!(fn pg_sleep(interval: diesel::sql_types::Double)); 133 | 134 | #[cfg(feature = "postgres")] 135 | #[tokio::test] 136 | async fn postgres_cancel_token() { 137 | use std::time::Duration; 138 | 139 | use diesel::result::{DatabaseErrorKind, Error}; 140 | 141 | let conn = &mut connection().await; 142 | 143 | let token = conn.cancel_token(); 144 | 145 | // execute a query that runs for a long time 146 | let 
long_running_query = diesel::select(pg_sleep(5.0)).execute(conn); 147 | 148 | // execute the query elsewhere... 149 | let task = tokio::spawn(async move { 150 | long_running_query 151 | .await 152 | .expect_err("query should have been canceled.") 153 | }); 154 | 155 | // let the task above have some time to actually start... 156 | tokio::time::sleep(Duration::from_millis(500)).await; 157 | 158 | // invoke the cancellation token. 159 | token.cancel_query(tokio_postgres::NoTls).await.unwrap(); 160 | 161 | // make sure the query task resulted in a cancellation error 162 | let err = task.await.unwrap(); 163 | match err { 164 | Error::DatabaseError(DatabaseErrorKind::Unknown, v) 165 | if v.message() == "canceling statement due to user request" => {} 166 | _ => panic!("unexpected error: {err:?}"), 167 | } 168 | } 169 | 170 | #[cfg(feature = "mysql")] 171 | #[tokio::test] 172 | async fn mysql_cancel_token() { 173 | use diesel::result::{DatabaseErrorKind, Error}; 174 | use std::time::Duration; 175 | 176 | let (sender, receiver) = tokio::sync::oneshot::channel(); 177 | 178 | // execute a long-running query in a separate future 179 | let query_future = async move { 180 | let conn = &mut connection().await; 181 | let token = conn.cancel_token(); 182 | 183 | // send the token back to the main thread via a oneshot channel 184 | sender 185 | .send(token) 186 | .unwrap_or_else(|_| panic!("couldn't send token")); 187 | 188 | diesel::dsl::sql::("SELECT SLEEP(5)") 189 | .get_result::(conn) 190 | .await 191 | }; 192 | let cancel_future = async move { 193 | // wait for the cancellation token to be sent 194 | if let Ok(token) = receiver.await { 195 | // give the query time to start before invoking the token 196 | tokio::time::sleep(Duration::from_millis(500)).await; 197 | token.cancel_query().await.unwrap(); 198 | } else { 199 | panic!("Failed to receive cancel token"); 200 | } 201 | }; 202 | 203 | let (task, _) = tokio::join!(query_future, cancel_future); 204 | 205 | // make sure the 
query task resulted in a cancellation error or a return value of 1: 206 | match task { 207 | Err(Error::DatabaseError(DatabaseErrorKind::Unknown, v)) 208 | if v.message() == "Query execution was interrupted" => {} 209 | Err(e) => panic!("unexpected error: {:?}", e), 210 | // mysql 8.4 returns 1 from a canceled sleep instead of an error 211 | Ok(1) => {} 212 | Ok(_) => panic!("query completed successfully without cancellation"), 213 | } 214 | } 215 | 216 | #[cfg(feature = "postgres")] 217 | async fn setup(connection: &mut TestConnection) { 218 | diesel::sql_query( 219 | "CREATE TEMPORARY TABLE users ( 220 | id SERIAL PRIMARY KEY, 221 | name VARCHAR NOT NULL 222 | )", 223 | ) 224 | .execute(connection) 225 | .await 226 | .unwrap(); 227 | } 228 | 229 | #[cfg(feature = "sqlite")] 230 | async fn setup(connection: &mut TestConnection) { 231 | diesel::sql_query( 232 | "CREATE TEMPORARY TABLE users ( 233 | id INTEGER PRIMARY KEY, 234 | name TEXT NOT NULL 235 | )", 236 | ) 237 | .execute(connection) 238 | .await 239 | .unwrap(); 240 | } 241 | 242 | #[cfg(feature = "mysql")] 243 | async fn setup(connection: &mut TestConnection) { 244 | diesel::sql_query( 245 | "CREATE TEMPORARY TABLE users ( 246 | id INTEGER PRIMARY KEY AUTO_INCREMENT, 247 | name TEXT NOT NULL 248 | ) CHARACTER SET utf8mb4", 249 | ) 250 | .execute(connection) 251 | .await 252 | .unwrap(); 253 | } 254 | 255 | async fn connection() -> TestConnection { 256 | let mut conn = connection_without_transaction().await; 257 | if cfg!(feature = "postgres") { 258 | // postgres allows to modify the schema inside of a transaction 259 | conn.begin_test_transaction().await.unwrap(); 260 | } 261 | setup(&mut conn).await; 262 | if cfg!(feature = "mysql") || cfg!(feature = "sqlite") { 263 | // mysql does not allow this and does even automatically close 264 | // any open transaction. 
As of this we open a transaction **after** 265 | // we setup the schema 266 | conn.begin_test_transaction().await.unwrap(); 267 | } 268 | conn 269 | } 270 | 271 | async fn connection_without_transaction() -> TestConnection { 272 | let db_url = std::env::var("DATABASE_URL").unwrap(); 273 | TestConnection::establish(&db_url).await.unwrap() 274 | } 275 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: 3 | types: [opened, synchronize, reopened] 4 | push: 5 | branches: 6 | - main 7 | - 0.7.x 8 | - 0.6.x 9 | - 0.5.x 10 | - 0.3.x 11 | - 0.4.x 12 | - 0.2.x 13 | 14 | name: CI Tests 15 | 16 | # See: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#concurrency. 17 | # This will ensure that only one commit will be running tests at a time on each PR. 18 | concurrency: 19 | group: ${{ github.ref }}-${{ github.workflow }} 20 | cancel-in-progress: true 21 | 22 | jobs: 23 | check_and_test: 24 | name: Check 25 | strategy: 26 | fail-fast: false 27 | matrix: 28 | rust: ["stable"] 29 | backend: ["postgres", "mysql", "sqlite"] 30 | os: 31 | [ 32 | ubuntu-latest, 33 | macos-15-intel, 34 | macos-15, 35 | windows-latest, 36 | ubuntu-22.04-arm, 37 | ] 38 | include: 39 | - rust: "beta" 40 | backend: "postgres" 41 | os: "ubuntu-latest" 42 | - rust: "beta" 43 | backend: "sqlite" 44 | os: "ubuntu-latest" 45 | - rust: "beta" 46 | backend: "mysql" 47 | os: "ubuntu-latest" 48 | - rust: "nightly" 49 | backend: "postgres" 50 | os: "ubuntu-latest" 51 | - rust: "nightly" 52 | backend: "sqlite" 53 | os: "ubuntu-latest" 54 | - rust: "nightly" 55 | backend: "mysql" 56 | os: "ubuntu-latest" 57 | runs-on: ${{ matrix.os }} 58 | steps: 59 | - name: Checkout sources 60 | uses: actions/checkout@v4 61 | 62 | - name: Cache cargo registry 63 | uses: Swatinem/rust-cache@v2 64 | with: 65 | key: ${{ runner.os }}-${{ 
matrix.backend }}-cargo-${{ hashFiles('**/Cargo.toml') }} 66 | 67 | - name: Set environment variables 68 | shell: bash 69 | if: matrix.backend == 'mysql' 70 | run: | 71 | echo "RUST_TEST_THREADS=1" >> $GITHUB_ENV 72 | 73 | - name: Set environment variables 74 | shell: bash 75 | if: matrix.backend == 'postgres' && matrix.os == 'windows-latest' 76 | run: | 77 | echo "AWS_LC_SYS_NO_ASM=1" >> $GITHUB_ENV 78 | 79 | - name: Set environment variables 80 | shell: bash 81 | if: matrix.rust != 'nightly' 82 | run: | 83 | echo "RUSTFLAGS=-D warnings" >> $GITHUB_ENV 84 | echo "RUSTDOCFLAGS=-D warnings" >> $GITHUB_ENV 85 | 86 | - uses: ilammy/setup-nasm@v1 87 | if: matrix.backend == 'postgres' && matrix.os == 'windows-latest' 88 | 89 | - name: Install postgres (Linux) 90 | if: runner.os == 'Linux' && matrix.backend == 'postgres' 91 | run: | 92 | sudo apt-get update 93 | sudo apt-get install -y postgresql 94 | echo "host all all 127.0.0.1/32 md5" | sudo tee -a /etc/postgresql/10/main/pg_hba.conf 95 | sudo service postgresql restart && sleep 3 96 | sudo -u postgres psql -c "ALTER USER postgres PASSWORD 'postgres';" 97 | sudo service postgresql restart && sleep 3 98 | echo "DATABASE_URL=postgres://postgres:postgres@localhost/" >> $GITHUB_ENV 99 | 100 | - name: Install mysql (Linux) 101 | if: runner.os == 'Linux' && matrix.backend == 'mysql' 102 | run: | 103 | sudo systemctl start mysql.service 104 | mysql -e "create database diesel_test; create database diesel_unit_test; grant all on \`diesel_%\`.* to 'root'@'localhost';" -uroot -proot 105 | echo "DATABASE_URL=mysql://root:root@localhost/diesel_test" >> $GITHUB_ENV 106 | 107 | - name: Install sqlite (Linux) 108 | if: runner.os == 'Linux' && matrix.backend == 'sqlite' 109 | run: | 110 | sudo apt-get update 111 | sudo apt-get install libsqlite3-dev 112 | echo "DATABASE_URL=:memory:" >> $GITHUB_ENV 113 | 114 | - name: Install postgres (MacOS) 115 | if: runner.os == 'macOS' && matrix.backend == 'postgres' 116 | run: | 117 | brew install postgresql@14
118 | brew services restart postgresql@14 119 | sleep 3 120 | createuser -s postgres 121 | echo "DATABASE_URL=postgres://postgres@localhost/" >> $GITHUB_ENV 122 | 123 | - name: Install sqlite (MacOS) 124 | if: runner.os == 'macOS' && matrix.backend == 'sqlite' 125 | run: | 126 | brew install sqlite 127 | echo "DATABASE_URL=:memory:" >> $GITHUB_ENV 128 | 129 | - name: Install mysql (MacOS Intel) 130 | if: matrix.os == 'macos-15-intel' && matrix.backend == 'mysql' 131 | run: | 132 | brew install mariadb@11.4 133 | /usr/local/opt/mariadb@11.4/bin/mysql_install_db 134 | /usr/local/opt/mariadb@11.4/bin/mysql.server start 135 | sleep 3 136 | /usr/local/opt/mariadb@11.4/bin/mysqladmin -u runner password diesel 137 | /usr/local/opt/mariadb@11.4/bin/mysql -e "create database diesel_test; create database diesel_unit_test; grant all on \`diesel_%\`.* to 'runner'@'localhost';" -urunner 138 | echo "DATABASE_URL=mysql://runner:diesel@localhost/diesel_test" >> $GITHUB_ENV 139 | 140 | - name: Install mysql (MacOS M1) 141 | if: matrix.os == 'macos-15' && matrix.backend == 'mysql' 142 | run: | 143 | brew install mariadb@11.4 144 | ls /opt/homebrew/opt/mariadb@11.4 145 | /opt/homebrew/opt/mariadb@11.4/bin/mysql_install_db 146 | /opt/homebrew/opt/mariadb@11.4/bin/mysql.server start 147 | sleep 3 148 | /opt/homebrew/opt/mariadb@11.4/bin/mysqladmin -u runner password diesel 149 | /opt/homebrew/opt/mariadb@11.4/bin/mysql -e "create database diesel_test; create database diesel_unit_test; grant all on \`diesel_%\`.* to 'runner'@'localhost';" -urunner 150 | echo "DATABASE_URL=mysql://runner:diesel@localhost/diesel_test" >> $GITHUB_ENV 151 | 152 | - name: Install postgres (Windows) 153 | if: runner.os == 'Windows' && matrix.backend == 'postgres' 154 | shell: bash 155 | run: | 156 | choco install postgresql15 --force --params '/Password:root' 157 | echo "DATABASE_URL=postgres://postgres:root@localhost/" >> $GITHUB_ENV 158 | 159 | - name: Install mysql (Windows) 160 | if: runner.os == 
'Windows' && matrix.backend == 'mysql' 161 | shell: cmd 162 | run: | 163 | choco install mysql 164 | "C:\tools\mysql\current\bin\mysql" -e "create database diesel_test; create database diesel_unit_test; grant all on `diesel_%`.* to 'root'@'localhost';" -uroot 165 | 166 | - name: Set variables for mysql (Windows) 167 | if: runner.os == 'Windows' && matrix.backend == 'mysql' 168 | shell: bash 169 | run: | 170 | echo "DATABASE_URL=mysql://root@localhost/diesel_test" >> $GITHUB_ENV 171 | 172 | - name: Install sqlite (Windows) 173 | if: runner.os == 'Windows' && matrix.backend == 'sqlite' 174 | shell: cmd 175 | run: | 176 | choco install sqlite 177 | cd /D C:\ProgramData\chocolatey\lib\SQLite\tools 178 | call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat" 179 | lib /machine:x64 /def:sqlite3.def /out:sqlite3.lib 180 | 181 | - name: Set variables for sqlite (Windows) 182 | if: runner.os == 'Windows' && matrix.backend == 'sqlite' 183 | shell: bash 184 | run: | 185 | echo "C:\ProgramData\chocolatey\lib\SQLite\tools" >> $GITHUB_PATH 186 | echo "SQLITE3_LIB_DIR=C:\ProgramData\chocolatey\lib\SQLite\tools" >> $GITHUB_ENV 187 | echo "DATABASE_URL=:memory:" >> $GITHUB_ENV 188 | 189 | - name: Install rust toolchain 190 | uses: dtolnay/rust-toolchain@master 191 | with: 192 | toolchain: ${{ matrix.rust }} 193 | - name: Rust version check 194 | run: cargo +${{ matrix.rust }} version 195 | 196 | - name: Test diesel_async 197 | run: cargo +${{ matrix.rust }} test --manifest-path Cargo.toml --no-default-features --features "${{ matrix.backend }} deadpool bb8 mobc async-connection-wrapper migrations" 198 | 199 | - name: Run examples (Postgres) 200 | if: matrix.backend == 'postgres' 201 | run: | 202 | cargo +${{ matrix.rust }} check --manifest-path examples/postgres/pooled-with-rustls/Cargo.toml 203 | cargo +${{ matrix.rust }} check --manifest-path examples/postgres/run-pending-migrations-with-rustls/Cargo.toml 204 | 205 | - name: Run examples 
(Sqlite) 206 | if: matrix.backend == 'sqlite' 207 | run: | 208 | cargo +${{ matrix.rust }} check --manifest-path examples/sync-wrapper/Cargo.toml 209 | 210 | rustfmt_and_clippy: 211 | name: Check rustfmt style && run clippy 212 | runs-on: ubuntu-latest 213 | steps: 214 | - uses: actions/checkout@v4 215 | - uses: dtolnay/rust-toolchain@stable 216 | with: 217 | components: clippy, rustfmt 218 | - name: Cache cargo registry 219 | uses: Swatinem/rust-cache@v2 220 | with: 221 | key: clippy-cargo-${{ hashFiles('**/Cargo.toml') }} 222 | 223 | - name: Remove potential newer clippy.toml from dependencies 224 | run: | 225 | cargo update 226 | cargo fetch 227 | find ~/.cargo/registry -iname "*clippy.toml" -delete 228 | 229 | - name: Run clippy 230 | run: cargo +stable clippy --all --all-features 231 | 232 | - name: Check formating 233 | run: cargo +stable fmt --all -- --check 234 | minimal_rust_version: 235 | name: Check Minimal supported rust version (1.86.0) 236 | runs-on: ubuntu-latest 237 | steps: 238 | - uses: actions/checkout@v4 239 | - uses: dtolnay/rust-toolchain@1.86.0 240 | - uses: dtolnay/rust-toolchain@nightly 241 | - uses: taiki-e/install-action@cargo-hack 242 | - uses: taiki-e/install-action@cargo-minimal-versions 243 | - name: Check diesel-async 244 | # cannot test mysql yet as that crate 245 | # has broken min-version dependencies 246 | # cannot test sqlite yet as that crate 247 | # as broken min-version dependencies as well 248 | run: cargo +1.86.0 minimal-versions check -p diesel-async --features "postgres bb8 deadpool mobc" 249 | all_features_build: 250 | name: Check all feature combination build 251 | runs-on: ubuntu-latest 252 | steps: 253 | - uses: actions/checkout@v4 254 | - uses: dtolnay/rust-toolchain@stable 255 | - uses: taiki-e/install-action@cargo-hack 256 | - name: Check feature combinations 257 | run: cargo hack check --feature-powerset --no-dev-deps --depth 2 258 | -------------------------------------------------------------------------------- 
/LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | Copyright 2015-2021 Sean Griffin, 2018-2021 Diesel Core Team 179 | 180 | Licensed under the Apache License, Version 2.0 (the "License"); 181 | you may not use this file except in compliance with the License. 
182 | You may obtain a copy of the License at 183 | 184 | http://www.apache.org/licenses/LICENSE-2.0 185 | 186 | Unless required by applicable law or agreed to in writing, software 187 | distributed under the License is distributed on an "AS IS" BASIS, 188 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 189 | See the License for the specific language governing permissions and 190 | limitations under the License. 191 | -------------------------------------------------------------------------------- /tests/instrumentation.rs: -------------------------------------------------------------------------------- 1 | use crate::users; 2 | use crate::TestConnection; 3 | use assert_matches::assert_matches; 4 | use diesel::connection::InstrumentationEvent; 5 | use diesel::query_builder::AsQuery; 6 | use diesel::QueryResult; 7 | use diesel_async::AsyncConnection; 8 | use diesel_async::AsyncConnectionCore; 9 | use diesel_async::SimpleAsyncConnection; 10 | use std::num::NonZeroU32; 11 | use std::sync::Arc; 12 | use std::sync::Mutex; 13 | 14 | async fn connection_with_sean_and_tess_in_users_table() -> TestConnection { 15 | super::connection().await 16 | } 17 | 18 | #[derive(Debug, PartialEq)] 19 | enum Event { 20 | StartQuery { query: String }, 21 | CacheQuery { sql: String }, 22 | FinishQuery { query: String, error: Option<()> }, 23 | BeginTransaction { depth: NonZeroU32 }, 24 | CommitTransaction { depth: NonZeroU32 }, 25 | RollbackTransaction { depth: NonZeroU32 }, 26 | } 27 | 28 | impl From<InstrumentationEvent<'_>> for Event { 29 | fn from(value: InstrumentationEvent<'_>) -> Self { 30 | match value { 31 | InstrumentationEvent::StartEstablishConnection { .. } => unreachable!(), 32 | InstrumentationEvent::FinishEstablishConnection { .. } => unreachable!(), 33 | InstrumentationEvent::StartQuery { query, .. } => Event::StartQuery { 34 | query: query.to_string(), 35 | }, 36 | InstrumentationEvent::CacheQuery { sql, ..
} => Event::CacheQuery { 37 | sql: sql.to_owned(), 38 | }, 39 | InstrumentationEvent::FinishQuery { query, error, .. } => Event::FinishQuery { 40 | query: query.to_string(), 41 | error: error.map(|_| ()), 42 | }, 43 | InstrumentationEvent::BeginTransaction { depth, .. } => { 44 | Event::BeginTransaction { depth } 45 | } 46 | InstrumentationEvent::CommitTransaction { depth, .. } => { 47 | Event::CommitTransaction { depth } 48 | } 49 | InstrumentationEvent::RollbackTransaction { depth, .. } => { 50 | Event::RollbackTransaction { depth } 51 | } 52 | _ => unreachable!(), 53 | } 54 | } 55 | } 56 | 57 | async fn setup_test_case() -> (Arc<Mutex<Vec<Event>>>, TestConnection) { 58 | setup_test_case_with_connection(connection_with_sean_and_tess_in_users_table().await) 59 | } 60 | 61 | fn setup_test_case_with_connection( 62 | mut conn: TestConnection, 63 | ) -> (Arc<Mutex<Vec<Event>>>, TestConnection) { 64 | let events = Arc::new(Mutex::new(Vec::<Event>::new())); 65 | let events_to_check = events.clone(); 66 | conn.set_instrumentation(move |event: InstrumentationEvent<'_>| { 67 | events.lock().unwrap().push(event.into()); 68 | }); 69 | assert_eq!(events_to_check.lock().unwrap().len(), 0); 70 | (events_to_check, conn) 71 | } 72 | 73 | #[tokio::test] 74 | async fn check_events_are_emitted_for_batch_execute() { 75 | let (events_to_check, mut conn) = setup_test_case().await; 76 | conn.batch_execute("select 1").await.unwrap(); 77 | 78 | let events = events_to_check.lock().unwrap(); 79 | assert_eq!(events.len(), 2); 80 | assert_eq!( 81 | events[0], 82 | Event::StartQuery { 83 | query: String::from("select 1") 84 | } 85 | ); 86 | assert_eq!( 87 | events[1], 88 | Event::FinishQuery { 89 | query: String::from("select 1"), 90 | error: None, 91 | } 92 | ); 93 | } 94 | 95 | #[tokio::test] 96 | async fn check_events_are_emitted_for_execute_returning_count() { 97 | let (events_to_check, mut conn) = setup_test_case().await; 98 | conn.execute_returning_count(users::table.as_query()) 99 | .await 100 | .unwrap(); 101 | let events =
events_to_check.lock().unwrap(); 102 | assert_eq!(events.len(), 3, "{events:?}"); 103 | assert_matches!(events[0], Event::StartQuery { .. }); 104 | assert_matches!(events[1], Event::CacheQuery { .. }); 105 | assert_matches!(events[2], Event::FinishQuery { .. }); 106 | } 107 | 108 | #[tokio::test] 109 | async fn check_events_are_emitted_for_load() { 110 | let (events_to_check, mut conn) = setup_test_case().await; 111 | let _ = AsyncConnectionCore::load(&mut conn, users::table.as_query()) 112 | .await 113 | .unwrap(); 114 | let events = events_to_check.lock().unwrap(); 115 | assert_eq!(events.len(), 3, "{events:?}"); 116 | assert_matches!(events[0], Event::StartQuery { .. }); 117 | assert_matches!(events[1], Event::CacheQuery { .. }); 118 | assert_matches!(events[2], Event::FinishQuery { .. }); 119 | } 120 | 121 | #[tokio::test] 122 | async fn check_events_are_emitted_for_execute_returning_count_does_not_contain_cache_for_uncached_queries( 123 | ) { 124 | let (events_to_check, mut conn) = setup_test_case().await; 125 | conn.execute_returning_count(diesel::sql_query("select 1")) 126 | .await 127 | .unwrap(); 128 | let events = events_to_check.lock().unwrap(); 129 | assert_eq!(events.len(), 2, "{events:?}"); 130 | assert_matches!(events[0], Event::StartQuery { .. }); 131 | assert_matches!(events[1], Event::FinishQuery { .. }); 132 | } 133 | 134 | #[tokio::test] 135 | async fn check_events_are_emitted_for_load_does_not_contain_cache_for_uncached_queries() { 136 | let (events_to_check, mut conn) = setup_test_case().await; 137 | let _ = AsyncConnectionCore::load(&mut conn, diesel::sql_query("select 1")) 138 | .await 139 | .unwrap(); 140 | let events = events_to_check.lock().unwrap(); 141 | assert_eq!(events.len(), 2, "{events:?}"); 142 | assert_matches!(events[0], Event::StartQuery { .. }); 143 | assert_matches!(events[1], Event::FinishQuery { .. 
}); 144 | } 145 | 146 | #[tokio::test] 147 | async fn check_events_are_emitted_for_execute_returning_count_does_contain_error_for_failures() { 148 | let (events_to_check, mut conn) = setup_test_case().await; 149 | let _ = conn 150 | .execute_returning_count(diesel::sql_query("invalid")) 151 | .await; 152 | let events = events_to_check.lock().unwrap(); 153 | assert_eq!(events.len(), 2, "{events:?}"); 154 | assert_matches!(events[0], Event::StartQuery { .. }); 155 | assert_matches!(events[1], Event::FinishQuery { error: Some(_), .. }); 156 | } 157 | 158 | #[tokio::test] 159 | async fn check_events_are_emitted_for_load_does_contain_error_for_failures() { 160 | let (events_to_check, mut conn) = setup_test_case().await; 161 | let _ = AsyncConnectionCore::load(&mut conn, diesel::sql_query("invalid")).await; 162 | let events = events_to_check.lock().unwrap(); 163 | assert_eq!(events.len(), 2, "{events:?}"); 164 | assert_matches!(events[0], Event::StartQuery { .. }); 165 | assert_matches!(events[1], Event::FinishQuery { error: Some(_), .. }); 166 | } 167 | 168 | #[tokio::test] 169 | async fn check_events_are_emitted_for_execute_returning_count_repeat_does_not_repeat_cache() { 170 | let (events_to_check, mut conn) = setup_test_case().await; 171 | conn.execute_returning_count(users::table.as_query()) 172 | .await 173 | .unwrap(); 174 | conn.execute_returning_count(users::table.as_query()) 175 | .await 176 | .unwrap(); 177 | let events = events_to_check.lock().unwrap(); 178 | assert_eq!(events.len(), 5, "{events:?}"); 179 | assert_matches!(events[0], Event::StartQuery { .. }); 180 | assert_matches!(events[1], Event::CacheQuery { .. }); 181 | assert_matches!(events[2], Event::FinishQuery { .. }); 182 | assert_matches!(events[3], Event::StartQuery { .. }); 183 | assert_matches!(events[4], Event::FinishQuery { .. 
}); 184 | } 185 | 186 | #[tokio::test] 187 | async fn check_events_are_emitted_for_load_repeat_does_not_repeat_cache() { 188 | let (events_to_check, mut conn) = setup_test_case().await; 189 | let _ = AsyncConnectionCore::load(&mut conn, users::table.as_query()) 190 | .await 191 | .unwrap(); 192 | let _ = AsyncConnectionCore::load(&mut conn, users::table.as_query()) 193 | .await 194 | .unwrap(); 195 | let events = events_to_check.lock().unwrap(); 196 | assert_eq!(events.len(), 5, "{events:?}"); 197 | assert_matches!(events[0], Event::StartQuery { .. }); 198 | assert_matches!(events[1], Event::CacheQuery { .. }); 199 | assert_matches!(events[2], Event::FinishQuery { .. }); 200 | assert_matches!(events[3], Event::StartQuery { .. }); 201 | assert_matches!(events[4], Event::FinishQuery { .. }); 202 | } 203 | 204 | #[tokio::test] 205 | async fn check_events_transaction() { 206 | let (events_to_check, mut conn) = setup_test_case().await; 207 | conn.transaction(|_conn| Box::pin(async { QueryResult::Ok(()) })) 208 | .await 209 | .unwrap(); 210 | let events = events_to_check.lock().unwrap(); 211 | assert_eq!(events.len(), 6, "{events:?}"); 212 | assert_matches!(events[0], Event::BeginTransaction { .. }); 213 | assert_matches!(events[1], Event::StartQuery { .. }); 214 | assert_matches!(events[2], Event::FinishQuery { .. }); 215 | assert_matches!(events[3], Event::CommitTransaction { .. }); 216 | assert_matches!(events[4], Event::StartQuery { .. }); 217 | assert_matches!(events[5], Event::FinishQuery { .. }); 218 | } 219 | 220 | #[tokio::test] 221 | async fn check_events_transaction_error() { 222 | let (events_to_check, mut conn) = setup_test_case().await; 223 | let _ = conn 224 | .transaction(|_conn| { 225 | Box::pin(async { QueryResult::<()>::Err(diesel::result::Error::RollbackTransaction) }) 226 | }) 227 | .await; 228 | let events = events_to_check.lock().unwrap(); 229 | assert_eq!(events.len(), 6, "{events:?}"); 230 | assert_matches!(events[0], Event::BeginTransaction { .. 
}); 231 | assert_matches!(events[1], Event::StartQuery { .. }); 232 | assert_matches!(events[2], Event::FinishQuery { .. }); 233 | assert_matches!(events[3], Event::RollbackTransaction { .. }); 234 | assert_matches!(events[4], Event::StartQuery { .. }); 235 | assert_matches!(events[5], Event::FinishQuery { .. }); 236 | } 237 | 238 | #[tokio::test] 239 | async fn check_events_transaction_nested() { 240 | let (events_to_check, mut conn) = setup_test_case().await; 241 | conn.transaction(|conn| { 242 | Box::pin(async move { 243 | conn.transaction(|_conn| Box::pin(async { QueryResult::Ok(()) })) 244 | .await 245 | }) 246 | }) 247 | .await 248 | .unwrap(); 249 | let events = events_to_check.lock().unwrap(); 250 | assert_eq!(events.len(), 12, "{events:?}"); 251 | assert_matches!(events[0], Event::BeginTransaction { .. }); 252 | assert_matches!(events[1], Event::StartQuery { .. }); 253 | assert_matches!(events[2], Event::FinishQuery { .. }); 254 | assert_matches!(events[3], Event::BeginTransaction { .. }); 255 | assert_matches!(events[4], Event::StartQuery { .. }); 256 | assert_matches!(events[5], Event::FinishQuery { .. }); 257 | assert_matches!(events[6], Event::CommitTransaction { .. }); 258 | assert_matches!(events[7], Event::StartQuery { .. }); 259 | assert_matches!(events[8], Event::FinishQuery { .. }); 260 | assert_matches!(events[9], Event::CommitTransaction { .. }); 261 | assert_matches!(events[10], Event::StartQuery { .. }); 262 | assert_matches!(events[11], Event::FinishQuery { .. 
}); 263 | } 264 | 265 | #[cfg(feature = "postgres")] 266 | #[tokio::test] 267 | async fn check_events_transaction_builder() { 268 | use crate::connection_without_transaction; 269 | use diesel::result::Error; 270 | use scoped_futures::ScopedFutureExt; 271 | 272 | let (events_to_check, mut conn) = 273 | setup_test_case_with_connection(connection_without_transaction().await); 274 | conn.build_transaction() 275 | .run(|_tx| async move { Ok::<(), Error>(()) }.scope_boxed()) 276 | .await 277 | .unwrap(); 278 | let events = events_to_check.lock().unwrap(); 279 | assert_eq!(events.len(), 6, "{events:?}"); 280 | assert_matches!(events[0], Event::BeginTransaction { .. }); 281 | assert_matches!(events[1], Event::StartQuery { .. }); 282 | assert_matches!(events[2], Event::FinishQuery { .. }); 283 | assert_matches!(events[3], Event::CommitTransaction { .. }); 284 | assert_matches!(events[4], Event::StartQuery { .. }); 285 | assert_matches!(events[5], Event::FinishQuery { .. }); 286 | } 287 | -------------------------------------------------------------------------------- /src/doctest_setup.rs: -------------------------------------------------------------------------------- 1 | #[allow(unused_imports)] 2 | use diesel::prelude::{ 3 | AsChangeset, ExpressionMethods, Identifiable, IntoSql, QueryDsl, QueryResult, Queryable, 4 | QueryableByName, 5 | }; 6 | 7 | cfg_if::cfg_if! 
{ 8 | if #[cfg(feature = "postgres")] { 9 | use diesel_async::AsyncPgConnection; 10 | #[allow(dead_code)] 11 | type DB = diesel::pg::Pg; 12 | #[allow(dead_code)] 13 | type DbConnection = AsyncPgConnection; 14 | 15 | fn database_url() -> String { 16 | database_url_from_env("PG_DATABASE_URL") 17 | } 18 | 19 | async fn connection_no_transaction() -> AsyncPgConnection { 20 | use diesel_async::AsyncConnection; 21 | let connection_url = database_url(); 22 | AsyncPgConnection::establish(&connection_url).await.unwrap() 23 | } 24 | 25 | async fn connection_no_data() -> AsyncPgConnection { 26 | use diesel_async::AsyncConnection; 27 | let mut connection = connection_no_transaction().await; 28 | connection.begin_test_transaction().await.unwrap(); 29 | connection 30 | } 31 | 32 | async fn create_tables(connection: &mut AsyncPgConnection) { 33 | use diesel_async::RunQueryDsl; 34 | diesel::sql_query("CREATE TEMPORARY TABLE users ( 35 | id SERIAL PRIMARY KEY, 36 | name VARCHAR NOT NULL 37 | )").execute(connection).await.unwrap(); 38 | diesel::sql_query( 39 | "INSERT INTO users (name) VALUES ('Sean'), ('Tess')" 40 | ).execute(connection).await.unwrap(); 41 | 42 | diesel::sql_query( 43 | "CREATE TEMPORARY TABLE animals ( 44 | id SERIAL PRIMARY KEY, 45 | species VARCHAR NOT NULL, 46 | legs INTEGER NOT NULL, 47 | name VARCHAR 48 | )").execute(connection).await.unwrap(); 49 | diesel::sql_query( 50 | "INSERT INTO animals (species, legs, name) VALUES 51 | ('dog', 4, 'Jack'), 52 | ('spider', 8, null)" 53 | ).execute(connection) 54 | .await.unwrap(); 55 | 56 | diesel::sql_query( 57 | "CREATE TEMPORARY TABLE posts ( 58 | id SERIAL PRIMARY KEY, 59 | user_id INTEGER NOT NULL, 60 | title VARCHAR NOT NULL 61 | )").execute(connection).await.unwrap(); 62 | diesel::sql_query( 63 | "INSERT INTO posts (user_id, title) VALUES 64 | (1, 'My first post'), 65 | (1, 'About Rust'), 66 | (2, 'My first post too')").execute(connection).await.unwrap(); 67 | 68 | diesel::sql_query("CREATE TEMPORARY TABLE 
comments ( 69 | id SERIAL PRIMARY KEY, 70 | post_id INTEGER NOT NULL, 71 | body VARCHAR NOT NULL 72 | )").execute(connection).await.unwrap(); 73 | diesel::sql_query("INSERT INTO comments (post_id, body) VALUES 74 | (1, 'Great post'), 75 | (2, 'Yay! I am learning Rust'), 76 | (3, 'I enjoyed your post')").execute(connection).await.unwrap(); 77 | 78 | diesel::sql_query("CREATE TEMPORARY TABLE brands ( 79 | id SERIAL PRIMARY KEY, 80 | color VARCHAR NOT NULL DEFAULT 'Green', 81 | accent VARCHAR DEFAULT 'Blue' 82 | )").execute(connection).await.unwrap(); 83 | } 84 | 85 | #[allow(dead_code)] 86 | async fn establish_connection() -> AsyncPgConnection { 87 | let mut connection = connection_no_data().await; 88 | create_tables(&mut connection).await; 89 | connection 90 | } 91 | } else if #[cfg(feature = "mysql")] { 92 | use diesel_async::AsyncMysqlConnection; 93 | #[allow(dead_code)] 94 | type DB = diesel::mysql::Mysql; 95 | #[allow(dead_code)] 96 | type DbConnection = AsyncMysqlConnection; 97 | 98 | fn database_url() -> String { 99 | database_url_from_env("MYSQL_UNIT_TEST_DATABASE_URL") 100 | } 101 | 102 | async fn connection_no_data() -> AsyncMysqlConnection { 103 | use diesel_async::AsyncConnection; 104 | let connection_url = database_url(); 105 | AsyncMysqlConnection::establish(&connection_url).await.unwrap() 106 | } 107 | 108 | async fn create_tables(connection: &mut AsyncMysqlConnection) { 109 | use diesel_async::RunQueryDsl; 110 | use diesel_async::AsyncConnection; 111 | diesel::sql_query("CREATE TEMPORARY TABLE IF NOT EXISTS users ( 112 | id INTEGER PRIMARY KEY AUTO_INCREMENT, 113 | name TEXT NOT NULL 114 | ) CHARACTER SET utf8mb4").execute(connection).await.unwrap(); 115 | 116 | 117 | diesel::sql_query("CREATE TEMPORARY TABLE IF NOT EXISTS animals ( 118 | id INTEGER PRIMARY KEY AUTO_INCREMENT, 119 | species TEXT NOT NULL, 120 | legs INTEGER NOT NULL, 121 | name TEXT 122 | ) CHARACTER SET utf8mb4").execute(connection).await.unwrap(); 123 | 124 | 
diesel::sql_query("CREATE TEMPORARY TABLE IF NOT EXISTS posts ( 125 | id INTEGER PRIMARY KEY AUTO_INCREMENT, 126 | user_id INTEGER NOT NULL, 127 | title TEXT NOT NULL 128 | ) CHARACTER SET utf8mb4").execute(connection).await.unwrap(); 129 | 130 | diesel::sql_query("CREATE TEMPORARY TABLE IF NOT EXISTS comments ( 131 | id INTEGER PRIMARY KEY AUTO_INCREMENT, 132 | post_id INTEGER NOT NULL, 133 | body TEXT NOT NULL 134 | ) CHARACTER SET utf8mb4").execute(connection).await.unwrap(); 135 | diesel::sql_query("CREATE TEMPORARY TABLE IF NOT EXISTS brands ( 136 | id INTEGER PRIMARY KEY AUTO_INCREMENT, 137 | color VARCHAR(255) NOT NULL DEFAULT 'Green', 138 | accent VARCHAR(255) DEFAULT 'Blue' 139 | )").execute(connection).await.unwrap(); 140 | 141 | connection.begin_test_transaction().await.unwrap(); 142 | diesel::sql_query("INSERT INTO users (name) VALUES ('Sean'), ('Tess')").execute(connection).await.unwrap(); 143 | diesel::sql_query("INSERT INTO posts (user_id, title) VALUES 144 | (1, 'My first post'), 145 | (1, 'About Rust'), 146 | (2, 'My first post too')").execute(connection).await.unwrap(); 147 | diesel::sql_query("INSERT INTO comments (post_id, body) VALUES 148 | (1, 'Great post'), 149 | (2, 'Yay! 
I am learning Rust'), 150 | (3, 'I enjoyed your post')").execute(connection).await.unwrap(); 151 | diesel::sql_query("INSERT INTO animals (species, legs, name) VALUES 152 | ('dog', 4, 'Jack'), 153 | ('spider', 8, null)").execute(connection).await.unwrap(); 154 | 155 | } 156 | 157 | #[allow(dead_code)] 158 | async fn establish_connection() -> AsyncMysqlConnection { 159 | let mut connection = connection_no_data().await; 160 | create_tables(&mut connection).await; 161 | 162 | 163 | connection 164 | } 165 | } else if #[cfg(feature = "sqlite")] { 166 | use diesel_async::sync_connection_wrapper::SyncConnectionWrapper; 167 | use diesel::sqlite::SqliteConnection; 168 | #[allow(dead_code)] 169 | type DB = diesel::sqlite::Sqlite; 170 | #[allow(dead_code)] 171 | type DbConnection = SyncConnectionWrapper; 172 | 173 | fn database_url() -> String { 174 | database_url_from_env("SQLITE_DATABASE_URL") 175 | } 176 | 177 | async fn connection_no_data() -> SyncConnectionWrapper { 178 | use diesel_async::AsyncConnection; 179 | let connection_url = database_url(); 180 | SyncConnectionWrapper::::establish(&connection_url).await.unwrap() 181 | } 182 | 183 | async fn create_tables(connection: &mut SyncConnectionWrapper) { 184 | use diesel_async::RunQueryDsl; 185 | use diesel_async::AsyncConnection; 186 | diesel::sql_query("CREATE TEMPORARY TABLE IF NOT EXISTS users ( 187 | id INTEGER PRIMARY KEY, 188 | name TEXT NOT NULL 189 | )").execute(connection).await.unwrap(); 190 | 191 | 192 | diesel::sql_query("CREATE TEMPORARY TABLE IF NOT EXISTS animals ( 193 | id INTEGER PRIMARY KEY, 194 | species TEXT NOT NULL, 195 | legs INTEGER NOT NULL, 196 | name TEXT 197 | )").execute(connection).await.unwrap(); 198 | 199 | diesel::sql_query("CREATE TEMPORARY TABLE IF NOT EXISTS posts ( 200 | id INTEGER PRIMARY KEY, 201 | user_id INTEGER NOT NULL, 202 | title TEXT NOT NULL 203 | )").execute(connection).await.unwrap(); 204 | 205 | diesel::sql_query("CREATE TEMPORARY TABLE IF NOT EXISTS comments ( 206 | id 
INTEGER PRIMARY KEY, 207 | post_id INTEGER NOT NULL, 208 | body TEXT NOT NULL 209 | )").execute(connection).await.unwrap(); 210 | diesel::sql_query("CREATE TEMPORARY TABLE IF NOT EXISTS brands ( 211 | id INTEGER PRIMARY KEY, 212 | color VARCHAR(255) NOT NULL DEFAULT 'Green', 213 | accent VARCHAR(255) DEFAULT 'Blue' 214 | )").execute(connection).await.unwrap(); 215 | 216 | diesel::sql_query("INSERT INTO users (name) VALUES ('Sean'), ('Tess')").execute(connection).await.unwrap(); 217 | diesel::sql_query("INSERT INTO posts (user_id, title) VALUES 218 | (1, 'My first post'), 219 | (1, 'About Rust'), 220 | (2, 'My first post too')").execute(connection).await.unwrap(); 221 | diesel::sql_query("INSERT INTO comments (post_id, body) VALUES 222 | (1, 'Great post'), 223 | (2, 'Yay! I am learning Rust'), 224 | (3, 'I enjoyed your post')").execute(connection).await.unwrap(); 225 | diesel::sql_query("INSERT INTO animals (species, legs, name) VALUES 226 | ('dog', 4, 'Jack'), 227 | ('spider', 8, null)").execute(connection).await.unwrap(); 228 | 229 | } 230 | 231 | #[allow(dead_code)] 232 | async fn establish_connection() -> SyncConnectionWrapper { 233 | use diesel_async::AsyncConnection; 234 | 235 | let mut connection = connection_no_data().await; 236 | connection.begin_test_transaction().await.unwrap(); 237 | create_tables(&mut connection).await; 238 | connection 239 | } 240 | 241 | async fn connection_no_transaction() -> SyncConnectionWrapper { 242 | use diesel_async::AsyncConnection; 243 | 244 | let mut connection = SyncConnectionWrapper::::establish(":memory:").await.unwrap(); 245 | create_tables(&mut connection).await; 246 | connection 247 | } 248 | 249 | } else { 250 | compile_error!( 251 | "At least one backend must be used to test this crate.\n \ 252 | Pass argument `--features \"\"` with one or more of the following backends, \ 253 | 'mysql', 'postgres', or 'sqlite'. \n\n \ 254 | ex. 
cargo test --features \"mysql postgres sqlite\"\n" 255 | ); 256 | } 257 | } 258 | 259 | fn database_url_from_env(backend_specific_env_var: &str) -> String { 260 | use std::env; 261 | 262 | env::var(backend_specific_env_var) 263 | .or_else(|_| env::var("DATABASE_URL")) 264 | .expect("DATABASE_URL must be set in order to run tests") 265 | } 266 | 267 | mod schema { 268 | use diesel::prelude::*; 269 | 270 | table! { 271 | animals { 272 | id -> Integer, 273 | species -> VarChar, 274 | legs -> Integer, 275 | name -> Nullable, 276 | } 277 | } 278 | 279 | table! { 280 | comments { 281 | id -> Integer, 282 | post_id -> Integer, 283 | body -> VarChar, 284 | } 285 | } 286 | 287 | table! { 288 | posts { 289 | id -> Integer, 290 | user_id -> Integer, 291 | title -> VarChar, 292 | } 293 | } 294 | 295 | table! { 296 | users { 297 | id -> Integer, 298 | name -> VarChar, 299 | } 300 | } 301 | 302 | #[cfg(not(feature = "sqlite"))] 303 | table! { 304 | brands { 305 | id -> Integer, 306 | color -> VarChar, 307 | accent -> Nullable, 308 | } 309 | } 310 | 311 | joinable!(posts -> users (user_id)); 312 | allow_tables_to_appear_in_same_query!(animals, comments, posts, users); 313 | } 314 | -------------------------------------------------------------------------------- /src/async_connection_wrapper.rs: -------------------------------------------------------------------------------- 1 | //! This module contains an wrapper type 2 | //! that provides a [`diesel::Connection`] 3 | //! implementation for types that implement 4 | //! [`crate::AsyncConnection`]. Using this type 5 | //! might be useful for the following usecases: 6 | //! 7 | //! * Executing migrations on application startup 8 | //! * Using a pure rust diesel connection implementation 9 | //! as replacement for the existing connection 10 | //! 
implementations provided by diesel 11 | 12 | use futures_core::Stream; 13 | use futures_util::StreamExt; 14 | use std::future::Future; 15 | use std::pin::Pin; 16 | 17 | /// This is a helper trait that allows to customize the 18 | /// async runtime used to execute futures as part of the 19 | /// [`AsyncConnectionWrapper`] type. By default a 20 | /// tokio runtime is used. 21 | pub trait BlockOn { 22 | /// This function should allow to execute a 23 | /// given future to get the result 24 | fn block_on(&self, f: F) -> F::Output 25 | where 26 | F: Future; 27 | 28 | /// This function should be used to construct 29 | /// a new runtime instance 30 | fn get_runtime() -> Self; 31 | } 32 | 33 | /// A helper type that wraps an [`AsyncConnection`][crate::AsyncConnection] to 34 | /// provide a sync [`diesel::Connection`] implementation. 35 | /// 36 | /// Internally this wrapper type will use `block_on` to wait for 37 | /// the execution of futures from the inner connection. This implies you 38 | /// cannot use functions of this type in a scope with an already existing 39 | /// tokio runtime. If you are in a situation where you want to use this 40 | /// connection wrapper in the scope of an existing tokio runtime (for example 41 | /// for running migrations via `diesel_migration`) you need to wrap 42 | /// the relevant code block into a `tokio::task::spawn_blocking` task. 
43 | /// 44 | /// # Examples 45 | /// 46 | /// ```rust,no_run 47 | /// # include!("doctest_setup.rs"); 48 | /// use schema::users; 49 | /// use diesel_async::async_connection_wrapper::AsyncConnectionWrapper; 50 | /// # 51 | /// # fn main() -> Result<(), Box> { 52 | /// use diesel::prelude::{RunQueryDsl, Connection}; 53 | /// # let database_url = database_url(); 54 | /// let mut conn = AsyncConnectionWrapper::::establish(&database_url)?; 55 | /// 56 | /// let all_users = users::table.load::<(i32, String)>(&mut conn)?; 57 | /// # assert_eq!(all_users.len(), 0); 58 | /// # Ok(()) 59 | /// # } 60 | /// ``` 61 | /// 62 | /// If you are in the scope of an existing tokio runtime you need to use 63 | /// `tokio::task::spawn_blocking` to encapsulate the blocking tasks 64 | /// ```rust,no_run 65 | /// # include!("doctest_setup.rs"); 66 | /// use schema::users; 67 | /// use diesel_async::async_connection_wrapper::AsyncConnectionWrapper; 68 | /// 69 | /// async fn some_async_fn() { 70 | /// # let database_url = database_url(); 71 | /// // need to use `spawn_blocking` to execute 72 | /// // a blocking task in the scope of an existing runtime 73 | /// let res = tokio::task::spawn_blocking(move || { 74 | /// use diesel::prelude::{RunQueryDsl, Connection}; 75 | /// let mut conn = AsyncConnectionWrapper::::establish(&database_url)?; 76 | /// 77 | /// let all_users = users::table.load::<(i32, String)>(&mut conn)?; 78 | /// # assert_eq!(all_users.len(), 0); 79 | /// Ok::<_, Box>(()) 80 | /// }).await; 81 | /// 82 | /// # res.unwrap().unwrap(); 83 | /// } 84 | /// 85 | /// # #[tokio::main] 86 | /// # async fn main() { 87 | /// # some_async_fn().await; 88 | /// # } 89 | /// ``` 90 | #[cfg(feature = "tokio")] 91 | pub type AsyncConnectionWrapper = 92 | self::implementation::AsyncConnectionWrapper; 93 | 94 | /// A helper type that wraps an [`crate::AsyncConnectionWrapper`] to 95 | /// provide a sync [`diesel::Connection`] implementation. 
96 | /// 97 | /// Internally this wrapper type will use `block_on` to wait for 98 | /// the execution of futures from the inner connection. 99 | #[cfg(not(feature = "tokio"))] 100 | pub use self::implementation::AsyncConnectionWrapper; 101 | 102 | pub(crate) mod implementation { 103 | use diesel::connection::{CacheSize, Instrumentation, SimpleConnection}; 104 | use std::ops::{Deref, DerefMut}; 105 | 106 | use super::*; 107 | 108 | pub struct AsyncConnectionWrapper { 109 | inner: C, 110 | runtime: B, 111 | } 112 | 113 | impl From for AsyncConnectionWrapper 114 | where 115 | C: crate::AsyncConnection, 116 | B: BlockOn + Send, 117 | { 118 | fn from(inner: C) -> Self { 119 | Self { 120 | inner, 121 | runtime: B::get_runtime(), 122 | } 123 | } 124 | } 125 | 126 | impl AsyncConnectionWrapper 127 | where 128 | C: crate::AsyncConnection, 129 | { 130 | /// Consumes the [`AsyncConnectionWrapper`] returning the wrapped inner 131 | /// [`AsyncConnection`]. 132 | pub fn into_inner(self) -> C { 133 | self.inner 134 | } 135 | } 136 | 137 | impl Deref for AsyncConnectionWrapper { 138 | type Target = C; 139 | 140 | fn deref(&self) -> &Self::Target { 141 | &self.inner 142 | } 143 | } 144 | 145 | impl DerefMut for AsyncConnectionWrapper { 146 | fn deref_mut(&mut self) -> &mut Self::Target { 147 | &mut self.inner 148 | } 149 | } 150 | 151 | impl diesel::connection::SimpleConnection for AsyncConnectionWrapper 152 | where 153 | C: crate::SimpleAsyncConnection, 154 | B: BlockOn, 155 | { 156 | fn batch_execute(&mut self, query: &str) -> diesel::QueryResult<()> { 157 | let f = self.inner.batch_execute(query); 158 | self.runtime.block_on(f) 159 | } 160 | } 161 | 162 | impl diesel::connection::ConnectionSealed for AsyncConnectionWrapper {} 163 | 164 | impl diesel::connection::Connection for AsyncConnectionWrapper 165 | where 166 | C: crate::AsyncConnection, 167 | B: BlockOn + Send, 168 | { 169 | type Backend = C::Backend; 170 | 171 | type TransactionManager = 
AsyncConnectionWrapperTransactionManagerWrapper; 172 | 173 | fn establish(database_url: &str) -> diesel::ConnectionResult { 174 | let runtime = B::get_runtime(); 175 | let f = C::establish(database_url); 176 | let inner = runtime.block_on(f)?; 177 | Ok(Self { inner, runtime }) 178 | } 179 | 180 | fn execute_returning_count(&mut self, source: &T) -> diesel::QueryResult 181 | where 182 | T: diesel::query_builder::QueryFragment + diesel::query_builder::QueryId, 183 | { 184 | let f = self.inner.execute_returning_count(source); 185 | self.runtime.block_on(f) 186 | } 187 | 188 | fn transaction_state( 189 | &mut self, 190 | ) -> &mut >::TransactionStateData{ 191 | self.inner.transaction_state() 192 | } 193 | 194 | fn instrumentation(&mut self) -> &mut dyn Instrumentation { 195 | self.inner.instrumentation() 196 | } 197 | 198 | fn set_instrumentation(&mut self, instrumentation: impl Instrumentation) { 199 | self.inner.set_instrumentation(instrumentation); 200 | } 201 | 202 | fn set_prepared_statement_cache_size(&mut self, size: CacheSize) { 203 | self.inner.set_prepared_statement_cache_size(size) 204 | } 205 | } 206 | 207 | impl diesel::connection::LoadConnection for AsyncConnectionWrapper 208 | where 209 | C: crate::AsyncConnection, 210 | B: BlockOn + Send, 211 | { 212 | type Cursor<'conn, 'query> 213 | = AsyncCursorWrapper<'conn, C::Stream<'conn, 'query>, B> 214 | where 215 | Self: 'conn; 216 | 217 | type Row<'conn, 'query> 218 | = C::Row<'conn, 'query> 219 | where 220 | Self: 'conn; 221 | 222 | fn load<'conn, 'query, T>( 223 | &'conn mut self, 224 | source: T, 225 | ) -> diesel::QueryResult> 226 | where 227 | T: diesel::query_builder::Query 228 | + diesel::query_builder::QueryFragment 229 | + diesel::query_builder::QueryId 230 | + 'query, 231 | Self::Backend: diesel::expression::QueryMetadata, 232 | { 233 | let f = self.inner.load(source); 234 | let stream = self.runtime.block_on(f)?; 235 | 236 | Ok(AsyncCursorWrapper { 237 | stream: Box::pin(stream), 238 | runtime: 
&self.runtime, 239 | }) 240 | } 241 | } 242 | 243 | pub struct AsyncCursorWrapper<'a, S, B> { 244 | stream: Pin>, 245 | runtime: &'a B, 246 | } 247 | 248 | impl Iterator for AsyncCursorWrapper<'_, S, B> 249 | where 250 | S: Stream, 251 | B: BlockOn, 252 | { 253 | type Item = S::Item; 254 | 255 | fn next(&mut self) -> Option { 256 | let f = self.stream.next(); 257 | self.runtime.block_on(f) 258 | } 259 | } 260 | 261 | pub struct AsyncConnectionWrapperTransactionManagerWrapper; 262 | 263 | impl diesel::connection::TransactionManager> 264 | for AsyncConnectionWrapperTransactionManagerWrapper 265 | where 266 | C: crate::AsyncConnection, 267 | B: BlockOn + Send, 268 | { 269 | type TransactionStateData = 270 | >::TransactionStateData; 271 | 272 | fn begin_transaction(conn: &mut AsyncConnectionWrapper) -> diesel::QueryResult<()> { 273 | let f = >::begin_transaction( 274 | &mut conn.inner, 275 | ); 276 | conn.runtime.block_on(f) 277 | } 278 | 279 | fn rollback_transaction( 280 | conn: &mut AsyncConnectionWrapper, 281 | ) -> diesel::QueryResult<()> { 282 | let f = >::rollback_transaction( 283 | &mut conn.inner, 284 | ); 285 | conn.runtime.block_on(f) 286 | } 287 | 288 | fn commit_transaction(conn: &mut AsyncConnectionWrapper) -> diesel::QueryResult<()> { 289 | let f = >::commit_transaction( 290 | &mut conn.inner, 291 | ); 292 | conn.runtime.block_on(f) 293 | } 294 | 295 | fn transaction_manager_status_mut( 296 | conn: &mut AsyncConnectionWrapper, 297 | ) -> &mut diesel::connection::TransactionManagerStatus { 298 | >::transaction_manager_status_mut( 299 | &mut conn.inner, 300 | ) 301 | } 302 | 303 | fn is_broken_transaction_manager(conn: &mut AsyncConnectionWrapper) -> bool { 304 | >::is_broken_transaction_manager( 305 | &mut conn.inner, 306 | ) 307 | } 308 | } 309 | 310 | #[cfg(feature = "r2d2")] 311 | impl diesel::r2d2::R2D2Connection for AsyncConnectionWrapper 312 | where 313 | B: BlockOn, 314 | Self: diesel::Connection, 315 | C: crate::AsyncConnection::Backend> 316 | + 
crate::pooled_connection::PoolableConnection 317 | + 'static, 318 | diesel::dsl::select>: 319 | crate::methods::ExecuteDsl, 320 | diesel::query_builder::SqlQuery: crate::methods::ExecuteDsl, 321 | { 322 | fn ping(&mut self) -> diesel::QueryResult<()> { 323 | let fut = crate::pooled_connection::PoolableConnection::ping( 324 | &mut self.inner, 325 | &crate::pooled_connection::RecyclingMethod::Verified, 326 | ); 327 | self.runtime.block_on(fut) 328 | } 329 | 330 | fn is_broken(&mut self) -> bool { 331 | crate::pooled_connection::PoolableConnection::is_broken(&mut self.inner) 332 | } 333 | } 334 | 335 | impl diesel::migration::MigrationConnection for AsyncConnectionWrapper 336 | where 337 | B: BlockOn, 338 | Self: diesel::Connection, 339 | { 340 | fn setup(&mut self) -> diesel::QueryResult { 341 | self.batch_execute(diesel::migration::CREATE_MIGRATIONS_TABLE) 342 | .map(|()| 0) 343 | } 344 | } 345 | 346 | #[cfg(feature = "tokio")] 347 | pub struct Tokio { 348 | handle: Option, 349 | runtime: Option, 350 | } 351 | 352 | #[cfg(feature = "tokio")] 353 | impl BlockOn for Tokio { 354 | fn block_on(&self, f: F) -> F::Output 355 | where 356 | F: Future, 357 | { 358 | if let Some(handle) = &self.handle { 359 | handle.block_on(f) 360 | } else if let Some(runtime) = &self.runtime { 361 | runtime.block_on(f) 362 | } else { 363 | unreachable!() 364 | } 365 | } 366 | 367 | fn get_runtime() -> Self { 368 | if let Ok(handle) = tokio::runtime::Handle::try_current() { 369 | Self { 370 | handle: Some(handle), 371 | runtime: None, 372 | } 373 | } else { 374 | let runtime = tokio::runtime::Builder::new_current_thread() 375 | .enable_io() 376 | .build() 377 | .unwrap(); 378 | Self { 379 | handle: None, 380 | runtime: Some(runtime), 381 | } 382 | } 383 | } 384 | } 385 | } 386 | -------------------------------------------------------------------------------- /src/pg/transaction_builder.rs: -------------------------------------------------------------------------------- 1 | use 
crate::{AnsiTransactionManager, AsyncConnection, TransactionManager}; 2 | use diesel::backend::Backend; 3 | use diesel::pg::Pg; 4 | use diesel::query_builder::{AstPass, QueryBuilder, QueryFragment}; 5 | use diesel::QueryResult; 6 | use scoped_futures::ScopedBoxFuture; 7 | 8 | /// Used to build a transaction, specifying additional details. 9 | /// 10 | /// This struct is returned by [`AsyncPgConnection::build_transaction`]. 11 | /// See the documentation for methods on this struct for usage examples. 12 | /// See [the PostgreSQL documentation for `SET TRANSACTION`][pg-docs] 13 | /// for details on the behavior of each option. 14 | /// 15 | /// [`AsyncPgConnection::build_transaction`]: super::AsyncPgConnection::build_transaction() 16 | /// [pg-docs]: https://www.postgresql.org/docs/current/static/sql-set-transaction.html 17 | #[must_use = "Transaction builder does nothing unless you call `run` on it"] 18 | #[cfg(feature = "postgres")] 19 | pub struct TransactionBuilder<'a, C> { 20 | connection: &'a mut C, 21 | isolation_level: Option, 22 | read_mode: Option, 23 | deferrable: Option, 24 | } 25 | 26 | impl<'a, C> TransactionBuilder<'a, C> 27 | where 28 | C: AsyncConnection, 29 | { 30 | pub(crate) fn new(connection: &'a mut C) -> Self { 31 | Self { 32 | connection, 33 | isolation_level: None, 34 | read_mode: None, 35 | deferrable: None, 36 | } 37 | } 38 | 39 | /// Makes the transaction `READ ONLY` 40 | /// 41 | /// # Example 42 | /// 43 | /// ```rust 44 | /// # include!("../doctest_setup.rs"); 45 | /// # use diesel::sql_query; 46 | /// use diesel_async::RunQueryDsl; 47 | /// # 48 | /// # #[tokio::main(flavor = "current_thread")] 49 | /// # async fn main() { 50 | /// # run_test().await.unwrap(); 51 | /// # } 52 | /// # 53 | /// # diesel::table! 
{ 54 | /// # users_for_read_only { 55 | /// # id -> Integer, 56 | /// # name -> Text, 57 | /// # } 58 | /// # } 59 | /// # 60 | /// # async fn run_test() -> QueryResult<()> { 61 | /// # use users_for_read_only::table as users; 62 | /// # use users_for_read_only::columns::*; 63 | /// # let conn = &mut connection_no_transaction().await; 64 | /// # sql_query("CREATE TABLE IF NOT EXISTS users_for_read_only ( 65 | /// # id SERIAL PRIMARY KEY, 66 | /// # name TEXT NOT NULL 67 | /// # )").execute(conn).await?; 68 | /// conn.build_transaction() 69 | /// .read_only() 70 | /// .run::<_, diesel::result::Error, _>(|conn| Box::pin(async move { 71 | /// let read_attempt = users.select(name).load::(conn).await; 72 | /// assert!(read_attempt.is_ok()); 73 | /// 74 | /// let write_attempt = diesel::insert_into(users) 75 | /// .values(name.eq("Ruby")) 76 | /// .execute(conn) 77 | /// .await; 78 | /// assert!(write_attempt.is_err()); 79 | /// 80 | /// Ok(()) 81 | /// }) as _).await?; 82 | /// # sql_query("DROP TABLE users_for_read_only").execute(conn).await?; 83 | /// # Ok(()) 84 | /// # } 85 | /// ``` 86 | pub fn read_only(mut self) -> Self { 87 | self.read_mode = Some(ReadMode::ReadOnly); 88 | self 89 | } 90 | 91 | /// Makes the transaction `READ WRITE` 92 | /// 93 | /// This is the default, unless you've changed the 94 | /// `default_transaction_read_only` configuration parameter. 
95 | /// 96 | /// # Example 97 | /// 98 | /// ```rust 99 | /// # include!("../doctest_setup.rs"); 100 | /// # use diesel::result::Error::RollbackTransaction; 101 | /// # use diesel::sql_query; 102 | /// use diesel_async::RunQueryDsl; 103 | /// 104 | /// # 105 | /// # #[tokio::main(flavor = "current_thread")] 106 | /// # async fn main() { 107 | /// # assert_eq!(run_test().await, Err(RollbackTransaction)); 108 | /// # } 109 | /// # 110 | /// # async fn run_test() -> QueryResult<()> { 111 | /// # use schema::users::dsl::*; 112 | /// # let conn = &mut connection_no_transaction().await; 113 | /// conn.build_transaction() 114 | /// .read_write() 115 | /// .run(|conn| Box::pin( async move { 116 | /// # sql_query("CREATE TABLE IF NOT EXISTS users ( 117 | /// # id SERIAL PRIMARY KEY, 118 | /// # name TEXT NOT NULL 119 | /// # )").execute(conn).await?; 120 | /// let read_attempt = users.select(name).load::(conn).await; 121 | /// assert!(read_attempt.is_ok()); 122 | /// 123 | /// let write_attempt = diesel::insert_into(users) 124 | /// .values(name.eq("Ruby")) 125 | /// .execute(conn) 126 | /// .await; 127 | /// assert!(write_attempt.is_ok()); 128 | /// 129 | /// # Err(RollbackTransaction) 130 | /// # /* 131 | /// Ok(()) 132 | /// # */ 133 | /// }) as _) 134 | /// .await 135 | /// # } 136 | /// ``` 137 | pub fn read_write(mut self) -> Self { 138 | self.read_mode = Some(ReadMode::ReadWrite); 139 | self 140 | } 141 | 142 | /// Makes the transaction `DEFERRABLE` 143 | /// 144 | /// # Example 145 | /// 146 | /// ```rust 147 | /// # include!("../doctest_setup.rs"); 148 | /// # 149 | /// # #[tokio::main(flavor = "current_thread")] 150 | /// # async fn main() { 151 | /// # run_test().await.unwrap(); 152 | /// # } 153 | /// # 154 | /// # async fn run_test() -> QueryResult<()> { 155 | /// # use schema::users::dsl::*; 156 | /// # let conn = &mut connection_no_transaction().await; 157 | /// conn.build_transaction() 158 | /// .deferrable() 159 | /// .run(|conn| Box::pin(async { Ok(()) 
})) 160 | /// .await 161 | /// # } 162 | /// ``` 163 | pub fn deferrable(mut self) -> Self { 164 | self.deferrable = Some(Deferrable::Deferrable); 165 | self 166 | } 167 | 168 | /// Makes the transaction `NOT DEFERRABLE` 169 | /// 170 | /// This is the default, unless you've changed the 171 | /// `default_transaction_deferrable` configuration parameter. 172 | /// 173 | /// # Example 174 | /// 175 | /// ```rust 176 | /// # include!("../doctest_setup.rs"); 177 | /// # 178 | /// # #[tokio::main(flavor = "current_thread")] 179 | /// # async fn main() { 180 | /// # run_test().await.unwrap(); 181 | /// # } 182 | /// # 183 | /// # async fn run_test() -> QueryResult<()> { 184 | /// # use schema::users::dsl::*; 185 | /// # let conn = &mut connection_no_transaction().await; 186 | /// conn.build_transaction() 187 | /// .not_deferrable() 188 | /// .run(|conn| Box::pin(async { Ok(()) }) as _) 189 | /// .await 190 | /// # } 191 | /// ``` 192 | pub fn not_deferrable(mut self) -> Self { 193 | self.deferrable = Some(Deferrable::NotDeferrable); 194 | self 195 | } 196 | 197 | /// Makes the transaction `ISOLATION LEVEL READ COMMITTED` 198 | /// 199 | /// This is the default, unless you've changed the 200 | /// `default_transaction_isolation_level` configuration parameter. 
201 | /// 202 | /// # Example 203 | /// 204 | /// ```rust 205 | /// # include!("../doctest_setup.rs"); 206 | /// # 207 | /// # #[tokio::main(flavor = "current_thread")] 208 | /// # async fn main() { 209 | /// # run_test().await.unwrap(); 210 | /// # } 211 | /// # 212 | /// # async fn run_test() -> QueryResult<()> { 213 | /// # use schema::users::dsl::*; 214 | /// # let conn = &mut connection_no_transaction().await; 215 | /// conn.build_transaction() 216 | /// .read_committed() 217 | /// .run(|conn| Box::pin(async { Ok(()) }) as _) 218 | /// .await 219 | /// # } 220 | /// ``` 221 | pub fn read_committed(mut self) -> Self { 222 | self.isolation_level = Some(IsolationLevel::ReadCommitted); 223 | self 224 | } 225 | 226 | /// Makes the transaction `ISOLATION LEVEL REPEATABLE READ` 227 | /// 228 | /// # Example 229 | /// 230 | /// ```rust 231 | /// # include!("../doctest_setup.rs"); 232 | /// # 233 | /// # #[tokio::main(flavor = "current_thread")] 234 | /// # async fn main() { 235 | /// # run_test().await.unwrap(); 236 | /// # } 237 | /// # 238 | /// # async fn run_test() -> QueryResult<()> { 239 | /// # use schema::users::dsl::*; 240 | /// # let conn = &mut connection_no_transaction().await; 241 | /// conn.build_transaction() 242 | /// .repeatable_read() 243 | /// .run(|conn| Box::pin(async { Ok(()) }) as _) 244 | /// .await 245 | /// # } 246 | /// ``` 247 | pub fn repeatable_read(mut self) -> Self { 248 | self.isolation_level = Some(IsolationLevel::RepeatableRead); 249 | self 250 | } 251 | 252 | /// Makes the transaction `ISOLATION LEVEL SERIALIZABLE` 253 | /// 254 | /// # Example 255 | /// 256 | /// ```rust 257 | /// # include!("../doctest_setup.rs"); 258 | /// # 259 | /// # #[tokio::main(flavor = "current_thread")] 260 | /// # async fn main() { 261 | /// # run_test().await.unwrap(); 262 | /// # } 263 | /// # 264 | /// # async fn run_test() -> QueryResult<()> { 265 | /// # use schema::users::dsl::*; 266 | /// # let conn = &mut connection_no_transaction().await; 267 | 
/// conn.build_transaction() 268 | /// .serializable() 269 | /// .run(|conn| Box::pin(async { Ok(()) }) as _) 270 | /// .await 271 | /// # } 272 | /// ``` 273 | pub fn serializable(mut self) -> Self { 274 | self.isolation_level = Some(IsolationLevel::Serializable); 275 | self 276 | } 277 | 278 | /// Runs the given function inside of the transaction 279 | /// with the parameters given to this builder. 280 | /// 281 | /// Returns an error if the connection is already inside a transaction, 282 | /// or if the transaction fails to commit or rollback 283 | /// 284 | /// If the transaction fails to commit due to a `SerializationFailure` or a 285 | /// `ReadOnlyTransaction` a rollback will be attempted. If the rollback succeeds, 286 | /// the original error will be returned, otherwise the error generated by the rollback 287 | /// will be returned. In the second case the connection should be considered broken 288 | /// as it contains a uncommitted unabortable open transaction. 289 | pub async fn run<'b, T, E, F>(&mut self, f: F) -> Result 290 | where 291 | F: for<'r> FnOnce(&'r mut C) -> ScopedBoxFuture<'b, 'r, Result> + Send + 'a, 292 | T: 'b, 293 | E: From + 'b, 294 | { 295 | let mut query_builder = ::QueryBuilder::default(); 296 | self.to_sql(&mut query_builder, &Pg)?; 297 | let sql = query_builder.finish(); 298 | 299 | AnsiTransactionManager::begin_transaction_sql(&mut *self.connection, &sql).await?; 300 | match f(&mut *self.connection).await { 301 | Ok(value) => { 302 | AnsiTransactionManager::commit_transaction(&mut *self.connection).await?; 303 | Ok(value) 304 | } 305 | Err(e) => { 306 | AnsiTransactionManager::rollback_transaction(&mut *self.connection).await?; 307 | Err(e) 308 | } 309 | } 310 | } 311 | } 312 | 313 | impl QueryFragment for TransactionBuilder<'_, C> { 314 | fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> { 315 | out.push_sql("BEGIN TRANSACTION"); 316 | if let Some(ref isolation_level) = self.isolation_level { 317 | 
isolation_level.walk_ast(out.reborrow())?;
        }
        if let Some(ref read_mode) = self.read_mode {
            read_mode.walk_ast(out.reborrow())?;
        }
        if let Some(ref deferrable) = self.deferrable {
            deferrable.walk_ast(out.reborrow())?;
        }
        Ok(())
    }
}

/// Transaction isolation levels accepted by `BEGIN TRANSACTION` on PostgreSQL.
#[derive(Debug, Clone, Copy)]
enum IsolationLevel {
    ReadCommitted,
    RepeatableRead,
    Serializable,
}

impl QueryFragment<Pg> for IsolationLevel {
    fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> {
        // Leading space joins this clause to the preceding `BEGIN TRANSACTION`.
        out.push_sql(" ISOLATION LEVEL ");
        match *self {
            IsolationLevel::ReadCommitted => out.push_sql("READ COMMITTED"),
            IsolationLevel::RepeatableRead => out.push_sql("REPEATABLE READ"),
            IsolationLevel::Serializable => out.push_sql("SERIALIZABLE"),
        }
        Ok(())
    }
}

/// Whether the transaction allows writes (`READ WRITE`) or not (`READ ONLY`).
#[derive(Debug, Clone, Copy)]
enum ReadMode {
    ReadOnly,
    ReadWrite,
}

impl QueryFragment<Pg> for ReadMode {
    fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> {
        match *self {
            ReadMode::ReadOnly => out.push_sql(" READ ONLY"),
            ReadMode::ReadWrite => out.push_sql(" READ WRITE"),
        }
        Ok(())
    }
}

/// PostgreSQL `DEFERRABLE` property (only meaningful for
/// `SERIALIZABLE READ ONLY` transactions).
#[derive(Debug, Clone, Copy)]
enum Deferrable {
    Deferrable,
    NotDeferrable,
}

impl QueryFragment<Pg> for Deferrable {
    fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> {
        match *self {
            Deferrable::Deferrable => out.push_sql(" DEFERRABLE"),
            Deferrable::NotDeferrable => out.push_sql(" NOT DEFERRABLE"),
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_transaction_builder_generates_correct_sql() {
        macro_rules!
assert_sql { 387 | ($query:expr, $sql:expr) => { 388 | let mut query_builder = ::QueryBuilder::default(); 389 | $query.to_sql(&mut query_builder, &Pg).unwrap(); 390 | let sql = query_builder.finish(); 391 | assert_eq!(sql, $sql); 392 | }; 393 | } 394 | 395 | let database_url = 396 | dbg!(std::env::var("DATABASE_URL") 397 | .expect("DATABASE_URL must be set in order to run tests")); 398 | let mut conn = crate::AsyncPgConnection::establish(&database_url) 399 | .await 400 | .unwrap(); 401 | 402 | assert_sql!(conn.build_transaction(), "BEGIN TRANSACTION"); 403 | assert_sql!( 404 | conn.build_transaction().read_only(), 405 | "BEGIN TRANSACTION READ ONLY" 406 | ); 407 | assert_sql!( 408 | conn.build_transaction().read_write(), 409 | "BEGIN TRANSACTION READ WRITE" 410 | ); 411 | assert_sql!( 412 | conn.build_transaction().deferrable(), 413 | "BEGIN TRANSACTION DEFERRABLE" 414 | ); 415 | assert_sql!( 416 | conn.build_transaction().not_deferrable(), 417 | "BEGIN TRANSACTION NOT DEFERRABLE" 418 | ); 419 | assert_sql!( 420 | conn.build_transaction().read_committed(), 421 | "BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED" 422 | ); 423 | assert_sql!( 424 | conn.build_transaction().repeatable_read(), 425 | "BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ" 426 | ); 427 | assert_sql!( 428 | conn.build_transaction().serializable(), 429 | "BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE" 430 | ); 431 | assert_sql!( 432 | conn.build_transaction() 433 | .serializable() 434 | .deferrable() 435 | .read_only(), 436 | "BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE" 437 | ); 438 | } 439 | } 440 | --------------------------------------------------------------------------------