├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE ├── README.md ├── migration_test └── 01_data.sql ├── rustfmt.toml ├── src ├── command_executor.rs ├── lib.rs ├── pg_access.rs ├── pg_commands.rs ├── pg_enums.rs ├── pg_errors.rs ├── pg_fetch.rs ├── pg_types.rs ├── pg_unpack.rs └── postgres.rs └── tests ├── common.rs ├── migration_tokio.rs └── postgres_tokio.rs /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | /data_test/ 13 | /data/ 14 | .idea 15 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # v0.9.0 2 | ___ 3 | - Updated libraries 4 | - Updated to the latest postgresql versions 5 | - Fixed doc example code 6 | - Adjusted feature flags 7 | 8 | # v0.6.5 9 | ___ 10 | ### Fix 11 | The errors introduced in v0.6.2 - v0.6.4 have been fixed. 12 | Big thanks to nicoulaj for his contribution. 13 | 14 | # v0.6.2 - v0.6.4 15 | ___ 16 | ### Important notes 17 | Fixes for the Windows OS console introduced some errors on other platforms. 18 | Please use a newer version to prevent getting those errors. 19 | 20 | # v0.6.0 21 | ___ 22 | ### Feature 23 | - Timeout can now be disabled through setting PgSettings{.., timeout: None} 24 | 25 | ### Breaking Changes 26 | - PgSettings timeout attribute has been changed to Option (described above) 27 | 28 | # v0.5.4 29 | ___ 30 | ### Restructuring 31 | - Extracted command execution 32 | 33 | # v0.5.3 34 | ___ 35 | ### Fix 36 | - Fixed: Concurrent PgEmbed instances trying to acquire pg resources simultaneously 37 | 38 | # v0.5.2 39 | ___ 40 | ### Fix 41 | - Password was created at the wrong destination 42 | - Stopping db on drop fix 43 | 44 | # v0.5.1 45 | ___ 46 | ### Fix 47 | - **PgEmbed**'s ***stop_db()*** did not execute on drop 48 | - Multiple concurrent **PgEmbed** instances each tried to download the same resources during setup 49 | 50 | # v0.5.0 51 | ___ 52 | ### Feature 53 | > - Caching postgresql binaries 54 | > 55 | > Removed **executables_dir** attribute from **PgSettings** 56 | > 57 | > The downloaded postgresql binaries are now cached in the following directories: 58 | > 59 | > - On Linux: 60 | > 61 | > **$XDG_CACHE_HOME/pg-embed** 62 | > 63 | > or 64 | > 65 | > **$HOME/.cache/pg-embed** 66 | > - On Windows: 67 | > 68 | > **{FOLDERID_LocalAppData}/pg-embed** 69 | > - On MacOS: 70 | > 71 | > **$HOME/Library/Caches/pg-embed** 72 | > 73 | > Binaries are downloaded only if no cached binaries are found 74 | > - Cleaner logging 75 | > 76 | > Logging is now done with the **log** crate. 77 | > 78 | > In order to produce log output, a logger implementation compatible with the facade has to be used. 79 | > 80 | > See https://crates.io/crates/log for detailed info 81 | > 82 | > 83 | ### Breaking changes 84 | **PgSettings** ***executables_dir*** attribute has been removed (*described above*). 
85 | 86 | ### Thanks 87 | ❤️ - Big thanks to **nicoulaj** for his contribution 88 | 89 | # v0.4.3 90 | ___ 91 | - migrator fix 92 | 93 | # v0.4.2 94 | ___ 95 | - updated documentation 96 | 97 | # v0.4.1 98 | ___ 99 | - updated documentation 100 | 101 | # v0.4.0 102 | ___ 103 | ### Fix 104 | - changed file path vars from String to PathBuf 105 | - password authentication 106 | 107 | ### Feature 108 | > - added authentication methods to **PgSettings** 109 | > 110 | > Setting the **auth_method** property of **PgSettings** 111 | > to one of the following values will determine the authentication 112 | > method: 113 | > 114 | > - **PgAuthMethod::Plain** 115 | > 116 | > Plain-Text password 117 | > - **PgAuthMethod::Md5** 118 | > 119 | > Md5 password hash 120 | > 121 | > - **PgAuthMethod::ScramSha256** 122 | > 123 | > Sha256 password hash 124 | > 125 | > 126 | 127 | ### Breaking changes 128 | **PgSettings** has a new property called **auth_method** (*described above*). 129 | 130 | This property has to be set. 131 | 132 | # v0.3.2 133 | ___ 134 | ### Fix 135 | - documentation updates 136 | 137 | # v0.3.0 138 | ___ 139 | ### Feature 140 | > - added cargo features 141 | > 142 | > **rt_tokio** (*build with tokio async runtime and without sqlx db migration support*) 143 | > 144 | > **rt_tokio_migrate** (*build with tokio async runtime and sqlx db migration support*) 145 | 146 | # v0.2.3 147 | ___ 148 | ### Dependencies 149 | - added sqlx 150 | 151 | ### Fix 152 | - added start timeout 153 | 154 | ### Feature 155 | - added PgEmbed::create_database(name) 156 | 157 | # v0.2.2 158 | ___ 159 | 160 | ### Features 161 | - added port setting to PgSettings 162 | 163 | # v0.2.0 164 | ___ 165 | 166 | - switched from async-std to tokio 167 | - switched from surf to reqwest 168 | 169 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pg-embed" 3 | version = "0.9.0" 4 | license = "MIT/Apache-2.0" 5 | readme = "README.md" 6 | repository = "https://github.com/faokunega/pg-embed" 7 | documentation = "https://docs.rs/pg-embed" 8 | description = "Run a Postgresql database locally on Linux, MacOS or Windows as part of another Rust application or test." 
9 | edition = "2021" 10 | keywords = ["database", "postgres", "postgresql", "embedded", "server"] 11 | categories = [ 12 | "database", 13 | "database-implementations", 14 | "development-tools", 15 | "asynchronous", 16 | ] 17 | authors = ["Franz-Aliu Okunega "] 18 | 19 | [features] 20 | default = ["rt_tokio_migrate"] 21 | # for now only rt_tokio or rt_tokio_migrate can be used 22 | rt_tokio = ["tokio", "reqwest"] 23 | rt_tokio_migrate = ["tokio", "reqwest", "sqlx"] 24 | 25 | [dependencies] 26 | reqwest = { version = "0.12.12", optional = true } 27 | tokio = { version = "1.43.0", features = ["full"], optional = true } 28 | futures = "0.3" 29 | thiserror = "2.0" 30 | # Waiting for https://github.com/JoyMoe/archiver-rs/pull/6 31 | archiver-rs = { git = "https://github.com/gz/archiver-rs.git", branch = "patch-1" } 32 | sqlx = { version = "0.8", features = ["runtime-tokio-rustls", "postgres", "migrate"], optional = true } 33 | log = "0.4" 34 | dirs = "6.0" 35 | bytes = "1.10" 36 | lazy_static = "1.5" 37 | async-trait = "0.1" 38 | 39 | [dev-dependencies] 40 | serial_test = "3.2" 41 | env_logger = "0.11" 42 | 43 | [[test]] 44 | name = "migration_tokio" 45 | path = "tests/migration_tokio.rs" 46 | required-features = ["rt_tokio_migrate"] 47 | 48 | [[test]] 49 | name = "postgres_tokio1" 50 | path = "tests/postgres_tokio.rs" 51 | required-features = ["rt_tokio"] 52 | 53 | [[test]] 54 | name = "postgres_tokio2" 55 | path = "tests/postgres_tokio.rs" 56 | required-features = ["rt_tokio_migrate"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Franz-Aliu Okunega 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pg-embed 2 | 3 | [![Crates.io](https://img.shields.io/crates/v/pg-embed)](http://crates.io/crates/pg-embed) 4 | [![Docs.rs](https://docs.rs/pg-embed/badge.svg)](https://docs.rs/pg-embed) 5 | [![Crates.io](https://img.shields.io/crates/d/pg-embed)](http://crates.io/crates/pg-embed) 6 | [![Crates.io](https://img.shields.io/crates/l/pg-embed)](https://github.com/faokunega/pg-embed/blob/master/LICENSE) 7 | 8 | Run a Postgresql database locally on Linux, MacOS or Windows as part of another Rust application or test. 
9 | 10 | The currently supported async runtime for **pg-embed** is [tokio](https://crates.io/crates/tokio). 11 | 12 | # Usage 13 | 14 | - Add pg-embed to your Cargo.toml 15 | 16 | *Library without sqlx migration support* 17 | 18 | ```toml 19 | # Cargo.toml 20 | [dependencies] 21 | pg-embed = { version = "0.9", default-features = false, features = ["rt_tokio"] } 22 | ``` 23 | 24 | *Library with sqlx migration support* 25 | 26 | ```toml 27 | # Cargo.toml 28 | [dependencies] 29 | pg-embed = "0.9" 30 | ``` 31 | 32 | 33 | # Examples 34 | 35 | ```rust 36 | use pg_embed::postgres::{PgSettings, PgEmbed}; 37 | use pg_embed::pg_fetch::{PgFetchSettings, PG_V17}; 38 | use pg_embed::pg_enums::PgAuthMethod; 39 | use std::time::Duration; 40 | use std::path::PathBuf; 41 | 42 | #[tokio::main] 43 | async fn main() -> Result<(), pg_embed::pg_errors::PgEmbedError> { 44 | /// Postgresql settings 45 | let pg_settings = PgSettings { 46 | // Where to store the postgresql database 47 | database_dir: PathBuf::from("data/db"), 48 | port: 5432, 49 | user: "postgres".to_string(), 50 | password: "password".to_string(), 51 | // authentication method 52 | auth_method: PgAuthMethod::Plain, 53 | // If persistent is false clean up files and directories on drop, otherwise keep them 54 | persistent: false, 55 | // duration to wait before terminating process execution 56 | // pg_ctl start/stop and initdb timeout 57 | // if set to None the process will not be terminated 58 | timeout: Some(Duration::from_secs(15)), 59 | // If migration sql scripts need to be run, the directory containing those scripts can be 60 | // specified here with `Some(PathBuf(path_to_dir)), otherwise `None` to run no migrations. 61 | // To enable migrations view the **Usage** section for details 62 | migration_dir: None, 63 | }; 64 | 65 | /// Postgresql binaries download settings 66 | let fetch_settings = PgFetchSettings { 67 | version: PG_V17, 68 | ..Default::default() 69 | }; 70 | 71 | // Use an async block that returns `Result` 72 | // Create a new instance 73 | let mut pg = PgEmbed::new(pg_settings, fetch_settings).await?; 74 | 75 | // Download, unpack, create password file and database cluster 76 | pg.setup().await?; 77 | 78 | // start postgresql database 79 | pg.start_db().await?; 80 | 81 | // create a new database 82 | // to enable migrations view the [Usage] section for details 83 | pg.create_database("database_name").await?; 84 | 85 | // drop a database 86 | // to enable migrations view [Usage] for details 87 | pg.drop_database("database_name").await?; 88 | 89 | // get the base postgresql uri 90 | // `postgres://{username}:{password}@localhost:{port}` 91 | let pg_uri: &str = &pg.db_uri; 92 | 93 | // get a postgresql database uri 94 | // `postgres://{username}:{password}@localhost:{port}/{specified_database_name}` 95 | let pg_db_uri: String = pg.full_db_uri("database_name"); 96 | 97 | // check database existence 98 | // to enable migrations view [Usage] for details 99 | pg.database_exists("database_name").await?; 100 | 101 | // run migration sql scripts 102 | // to enable migrations view [Usage] for details 103 | pg.migrate("database_name").await?; 104 | 105 | // stop postgresql database 106 | pg.stop_db().await?; 107 | 108 | // Return success 109 | println!("PostgreSQL setup completed successfully!"); 110 | Ok(()) 111 | } 112 | ``` 113 | ## Info 114 | 115 | The downloaded postgresql binaries are cached in the following directories: 116 | 117 | - On Linux: 118 | 119 | `$XDG_CACHE_HOME/pg-embed` 120 | 121 | or 122 | 123 | `$HOME/.cache/pg-embed` 
124 | - On Windows: 125 | 126 | `{FOLDERID_LocalAppData}/pg-embed` 127 | - On MacOS: 128 | 129 | `$HOME/Library/Caches/pg-embed` 130 | 131 | 132 | ## Recent Breaking Changes 133 | 134 | pg-embed follows semantic versioning, so breaking changes should only happen upon major version bumps. The only 135 | exception to this rule is breaking changes that happen due to implementation that was deemed to be a bug, security 136 | concerns, or it can be reasonably proved to affect no code. For the full details, 137 | see [CHANGELOG.md](https://github.com/faokunega/pg-embed/blob/master/CHANGELOG.md). 138 | 139 | ## License 140 | 141 | pg-embed is licensed under the MIT license. Please read 142 | the [LICENSE-MIT](https://github.com/faokunega/pg-embed/blob/master/LICENSE) file in this repository for more 143 | information. 144 | 145 | # Notes 146 | 147 | Reliant on the great work being done 148 | by [zonkyio/embedded-postgres-binaries](https://github.com/zonkyio/embedded-postgres-binaries) in order to fetch 149 | precompiled binaries 150 | from [Maven](https://mvnrepository.com/artifact/io.zonky.test.postgres/embedded-postgres-binaries-bom). 151 | 152 | -------------------------------------------------------------------------------- /migration_test/01_data.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS testing 2 | ( 3 | id BIGSERIAL PRIMARY KEY, 4 | description TEXT NOT NULL, 5 | done BOOLEAN NOT NULL DEFAULT FALSE 6 | ); -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | combine_control_expr = false 2 | control_brace_style = "ClosingNextLine" 3 | fn_single_line = true 4 | force_multiline_blocks = true 5 | format_strings = true 6 | max_width = 100 7 | overflow_delimited_expr = true 8 | reorder_impl_items = true 9 | struct_field_align_threshold = 20 10 | use_field_init_shorthand = true 11 | wrap_comments = true -------------------------------------------------------------------------------- /src/command_executor.rs: -------------------------------------------------------------------------------- 1 | //! 2 | //! Process command creation and execution 3 | //! 
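//! A minimal usage sketch (hedged: the executable path and arguments below are hypothetical and
//! for illustration only; the status/error/process types come from `pg_enums` and `pg_errors`
//! elsewhere in this crate): build an executor with `new` and drive it with `execute`, optionally
//! bounding its run time.
//!
//! ```rust,ignore
//! use std::ffi::OsStr;
//! use tokio::time::Duration;
//!
//! use pg_embed::command_executor::{AsyncCommand, AsyncCommandExecutor};
//! use pg_embed::pg_enums::{PgProcessType, PgServerStatus};
//! use pg_embed::pg_errors::PgEmbedError;
//!
//! async fn run_initdb() -> Result<PgServerStatus, PgEmbedError> {
//!     // Hypothetical executable path and argument list.
//!     let exe = OsStr::new("/path/to/bin/initdb");
//!     let args = ["-D", "/path/to/data"];
//!     // Type parameters: status type, error type, process type.
//!     let mut executor = AsyncCommandExecutor::<PgServerStatus, PgEmbedError, PgProcessType>::new(
//!         exe,
//!         args,
//!         PgProcessType::InitDb,
//!     )?;
//!     // `None` disables the timeout; `Some(duration)` aborts the run after `duration`.
//!     executor.execute(Some(Duration::from_secs(15))).await
//! }
//! ```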
4 | use std::error::Error; 5 | use std::ffi::OsStr; 6 | use std::marker; 7 | use std::process::Stdio; 8 | 9 | use async_trait::async_trait; 10 | use log; 11 | use tokio::io::{AsyncBufReadExt, AsyncRead, BufReader}; 12 | use tokio::process::Child; 13 | use tokio::sync::mpsc::{Receiver, Sender}; 14 | use tokio::time::Duration; 15 | 16 | /// 17 | /// Output logging type 18 | /// 19 | #[derive(Debug)] 20 | pub enum LogType { 21 | Info, 22 | Error, 23 | } 24 | 25 | /// 26 | /// Child process status 27 | /// 28 | pub trait ProcessStatus<T, E> 29 | where 30 | E: Error + Send, 31 | Self: Send, 32 | { 33 | /// process entry status 34 | fn status_entry(&self) -> T; 35 | /// process exit status 36 | fn status_exit(&self) -> T; 37 | /// process error type 38 | fn error_type(&self) -> E; 39 | /// wrap error 40 | fn wrap_error<F: Error + Send + Sync + 'static>(&self, error: F, message: Option<String>) -> E; 41 | } 42 | 43 | /// 44 | /// Logging data 45 | /// 46 | #[derive(Debug)] 47 | pub struct LogOutputData { 48 | line: String, 49 | log_type: LogType, 50 | } 51 | 52 | /// 53 | /// Async command trait 54 | /// 55 | #[async_trait] 56 | pub trait AsyncCommand<S, E, P> 57 | where 58 | E: Error + Send, 59 | P: ProcessStatus<S, E> + Send, 60 | Self: Sized, 61 | { 62 | /// 63 | /// Create a new async command 64 | /// 65 | fn new<A, B>(executable_path: &OsStr, args: A, process_type: P) -> Result<Self, E> 66 | where 67 | A: IntoIterator<Item = B>, 68 | B: AsRef<OsStr>; 69 | /// 70 | /// Execute command 71 | /// 72 | /// When timeout is Some(duration) the process execution will be timed out after duration, 73 | /// if set to None the process execution will not be timed out. 74 | /// 75 | async fn execute(&mut self, timeout: Option<Duration>) -> Result<S, E>; 76 | } 77 | 78 | /// 79 | /// Process command 80 | /// 81 | pub struct AsyncCommandExecutor<S, E, P> 82 | where 83 | S: Send, 84 | E: Error + Send, 85 | P: ProcessStatus<S, E>, 86 | Self: Send, 87 | { 88 | /// Process command 89 | _command: tokio::process::Command, 90 | /// Process child 91 | process: Child, 92 | /// Process type 93 | process_type: P, 94 | _marker_s: marker::PhantomData<S>, 95 | _marker_e: marker::PhantomData<E>, 96 | } 97 | 98 | impl<S, E, P> AsyncCommandExecutor<S, E, P> 99 | where 100 | S: Send, 101 | E: Error + Send, 102 | P: ProcessStatus<S, E> + Send, 103 | { 104 | /// Initialize command 105 | fn init(command: &mut tokio::process::Command, process_type: &P) -> Result<Child, E> { 106 | command 107 | .stdout(Stdio::piped()) 108 | .stderr(Stdio::piped()) 109 | .spawn() 110 | .map_err(|_| process_type.error_type()) 111 | } 112 | 113 | /// Generate a command 114 | fn generate_command<A, B>(executable_path: &OsStr, args: A) -> tokio::process::Command 115 | where 116 | A: IntoIterator<Item = B>, 117 | B: AsRef<OsStr>, 118 | { 119 | let mut command = tokio::process::Command::new(executable_path); 120 | command.args(args); 121 | command 122 | } 123 | 124 | /// Handle process output 125 | async fn handle_output<R: AsyncRead + Unpin>(data: R, sender: Sender<LogOutputData>) -> () { 126 | let mut lines = BufReader::new(data).lines(); 127 | while let Some(line) = lines.next_line().await.expect("error handling output") { 128 | let io_data = LogOutputData { 129 | line, 130 | log_type: LogType::Info, 131 | }; 132 | sender 133 | .send(io_data) 134 | .await 135 | .expect("error sending log output data"); 136 | } 137 | } 138 | 139 | /// Log process output 140 | async fn log_output(mut receiver: Receiver<LogOutputData>) -> () { 141 | while let Some(data) = receiver.recv().await { 142 | match data.log_type { 143 | LogType::Info => { 144 | log::info!("{}", data.line); 145 | } 146 | LogType::Error => { 147 | log::error!("{}", data.line); 148 | } 149 | } 150 | } 151 | } 152 | 153 | /// Run
process 154 | async fn run_process(&mut self) -> Result<S, E> { 155 | let exit_status = self 156 | .process 157 | .wait() 158 | .await 159 | .map_err(|e| self.process_type.wrap_error(e, None))?; 160 | if exit_status.success() { 161 | Ok(self.process_type.status_exit()) 162 | } else { 163 | Err(self.process_type.error_type()) 164 | } 165 | } 166 | 167 | #[cfg(not(target_os = "windows"))] 168 | async fn command_execution(&mut self) -> Result<S, E> { 169 | let (sender, receiver) = tokio::sync::mpsc::channel::<LogOutputData>(1000); 170 | let res = self.run_process().await; 171 | let stdout = self.process.stdout.take().unwrap(); 172 | let stderr = self.process.stderr.take().unwrap(); 173 | let tx = sender.clone(); 174 | let _ = tokio::task::spawn(async { Self::handle_output(stdout, tx).await }); 175 | let _ = tokio::task::spawn(async { Self::handle_output(stderr, sender).await }); 176 | let _ = tokio::task::spawn(async { Self::log_output(receiver).await }); 177 | res 178 | } 179 | 180 | #[cfg(target_os = "windows")] 181 | async fn command_execution(&mut self) -> Result<S, E> { 182 | //TODO: find another way to use stderr on windows 183 | // let (sender, receiver) = tokio::sync::mpsc::channel::<LogOutputData>(1000); 184 | let res = self.run_process().await; 185 | // let stdout = self.process.stdout.take().unwrap(); 186 | // let stderr = self.process.stderr.take().unwrap(); 187 | // let tx = sender.clone(); 188 | // let _ = tokio::task::spawn(async { Self::handle_output(stdout, tx).await }); 189 | // let _ = tokio::task::spawn(async { Self::handle_output(stderr, sender).await }); 190 | // let _ = tokio::task::spawn(async { Self::log_output(receiver).await }); 191 | res 192 | } 193 | } 194 | 195 | #[async_trait] 196 | impl<S, E, P> AsyncCommand<S, E, P> for AsyncCommandExecutor<S, E, P> 197 | where 198 | S: Send, 199 | E: Error + Send, 200 | P: ProcessStatus<S, E> + Send, 201 | { 202 | fn new<A, B>(executable_path: &OsStr, args: A, process_type: P) -> Result<Self, E> 203 | where 204 | A: IntoIterator<Item = B>, 205 | B: AsRef<OsStr>, 206 | { 207 | let mut _command = Self::generate_command(executable_path, args); 208 | let process = Self::init(&mut _command, &process_type)?; 209 | Ok(AsyncCommandExecutor { 210 | _command, 211 | process, 212 | process_type, 213 | _marker_s: Default::default(), 214 | _marker_e: Default::default(), 215 | }) 216 | } 217 | 218 | async fn execute(&mut self, timeout: Option<Duration>) -> Result<S, E> { 219 | match timeout { 220 | None => self.command_execution().await, 221 | Some(duration) => tokio::time::timeout(duration, self.command_execution()) 222 | .await 223 | .map_err(|e| { 224 | self.process_type 225 | .wrap_error(e, Some(String::from("timed out"))) 226 | })?, 227 | } 228 | } 229 | } 230 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! 2 | //! # pg-embed 3 | //! 4 | //! [![Crates.io](https://img.shields.io/crates/v/pg-embed)](http://crates.io/crates/pg-embed) 5 | //! [![Docs.rs](https://docs.rs/pg-embed/badge.svg)](https://docs.rs/pg-embed) 6 | //! [![Crates.io](https://img.shields.io/crates/d/pg-embed)](http://crates.io/crates/pg-embed) 7 | //! [![Crates.io](https://img.shields.io/crates/l/pg-embed)](https://github.com/faokunega/pg-embed/blob/master/LICENSE) 8 | //! 9 | //! Run a Postgresql database locally on Linux, MacOS or Windows as part of another Rust application or test. 10 | //! 11 | //! The currently supported async runtime for **pg-embed** is [tokio](https://crates.io/crates/tokio). 12 | //! 13 | //!
Support for [async-std](https://crates.io/crates/async-std) and [actix](https://crates.io/crates/actix) is planned 14 | //! and will be available soon. 15 | //! 16 | //! # Usage 17 | //! 18 | //! - Add pg-embed to your Cargo.toml 19 | //! 20 | //! *Library without sqlx migration support* 21 | //! 22 | //! ```toml 23 | //! # Cargo.toml 24 | //! [dependencies] 25 | //! pg-embed = { version = "0.9", default-features = false, features = ["rt_tokio"] } 26 | //! ``` 27 | //! 28 | //! *Library with sqlx migration support* 29 | //! 30 | //! ```toml 31 | //! # Cargo.toml 32 | //! [dependencies] 33 | //! pg-embed = "0.9" 34 | //! ``` 35 | //! 36 | //! 37 | //! # Examples 38 | //! 39 | //! ``` 40 | //! use pg_embed::postgres::{PgSettings, PgEmbed}; 41 | //! use pg_embed::pg_fetch::{PgFetchSettings, PG_V17}; 42 | //! use pg_embed::pg_enums::PgAuthMethod; 43 | //! use std::time::Duration; 44 | //! use std::path::PathBuf; 45 | //! 46 | //! #[tokio::main] 47 | //! async fn main() -> Result<(), pg_embed::pg_errors::PgEmbedError> { 48 | //! /// Postgresql settings 49 | //! let pg_settings = PgSettings { 50 | //! // Where to store the postgresql database 51 | //! database_dir: PathBuf::from("data/db"), 52 | //! port: 5432, 53 | //! user: "postgres".to_string(), 54 | //! password: "password".to_string(), 55 | //! // authentication method 56 | //! auth_method: PgAuthMethod::Plain, 57 | //! // If persistent is false clean up files and directories on drop, otherwise keep them 58 | //! persistent: false, 59 | //! // duration to wait before terminating process execution 60 | //! // pg_ctl start/stop and initdb timeout 61 | //! // if set to None the process will not be terminated 62 | //! timeout: Some(Duration::from_secs(15)), 63 | //! // If migration sql scripts need to be run, the directory containing those scripts can be 64 | //! // specified here with `Some(PathBuf(path_to_dir)), otherwise `None` to run no migrations. 65 | //! // To enable migrations view the **Usage** section for details 66 | //! migration_dir: None, 67 | //! }; 68 | //! 69 | //! /// Postgresql binaries download settings 70 | //! let fetch_settings = PgFetchSettings { 71 | //! version: PG_V17, 72 | //! ..Default::default() 73 | //! }; 74 | //! 75 | //! // Use an async block that returns `Result` 76 | //! // Create a new instance 77 | //! let mut pg = PgEmbed::new(pg_settings, fetch_settings).await?; 78 | //! 79 | //! // Download, unpack, create password file and database cluster 80 | //! pg.setup().await?; 81 | //! 82 | //! // start postgresql database 83 | //! pg.start_db().await?; 84 | //! 85 | //! // create a new database 86 | //! // to enable migrations view the [Usage] section for details 87 | //! pg.create_database("database_name").await?; 88 | //! 89 | //! // drop a database 90 | //! // to enable migrations view [Usage] for details 91 | //! pg.drop_database("database_name").await?; 92 | //! 93 | //! // get the base postgresql uri 94 | //! // `postgres://{username}:{password}@localhost:{port}` 95 | //! let pg_uri: &str = &pg.db_uri; 96 | //! 97 | //! // get a postgresql database uri 98 | //! // `postgres://{username}:{password}@localhost:{port}/{specified_database_name}` 99 | //! let pg_db_uri: String = pg.full_db_uri("database_name"); 100 | //! 101 | //! // check database existence 102 | //! // to enable migrations view [Usage] for details 103 | //! pg.database_exists("database_name").await?; 104 | //! 105 | //! // run migration sql scripts 106 | //! // to enable migrations view [Usage] for details 107 | //! 
pg.migrate("database_name").await?; 108 | //! 109 | //! // stop postgresql database 110 | //! pg.stop_db().await?; 111 | //! 112 | //! // Return success 113 | //! println!("PostgreSQL setup completed successfully!"); 114 | //! Ok(()) 115 | //! } 116 | //! ``` 117 | //! ## Info 118 | //! 119 | //! The downloaded postgresql binaries are cached in the following directories: 120 | //! 121 | //! - On Linux: 122 | //! 123 | //! `$XDG_CACHE_HOME/pg-embed` 124 | //! 125 | //! or 126 | //! 127 | //! `$HOME/.cache/pg-embed` 128 | //! - On Windows: 129 | //! 130 | //! `{FOLDERID_LocalAppData}/pg-embed` 131 | //! - On MacOS: 132 | //! 133 | //! `$HOME/Library/Caches/pg-embed` 134 | //! 135 | //! 136 | //! ## Recent Breaking Changes 137 | //! 138 | //! pg-embed follows semantic versioning, so breaking changes should only happen upon major version bumps. The only exception to this rule is breaking changes that happen due to implementation that was deemed to be a bug, security concerns, or it can be reasonably proved to affect no code. For the full details, see [CHANGELOG.md](https://github.com/faokunega/pg-embed/blob/master/CHANGELOG.md). 139 | //! 140 | //! 141 | //! 142 | //! ## License 143 | //! 144 | //! pg-embed is licensed under the MIT license. Please read the [LICENSE-MIT](https://github.com/faokunega/pg-embed/blob/master/LICENSE) file in this repository for more information. 145 | //! 146 | //! # Notes 147 | //! 148 | //! Reliant on the great work being done by [zonkyio/embedded-postgres-binaries](https://github.com/zonkyio/embedded-postgres-binaries) in order to fetch precompiled binaries from [Maven](https://mvnrepository.com/artifact/io.zonky.test.postgres/embedded-postgres-binaries-bom). 149 | //! 150 | 151 | extern crate dirs; 152 | #[macro_use] 153 | extern crate lazy_static; 154 | #[cfg(not(any(feature = "rt_tokio_migrate", feature = "rt_tokio",)))] 155 | compile_error!("one of the features ['rt_tokio_migrate', 'rt_tokio'] must be enabled"); 156 | 157 | pub mod command_executor; 158 | pub mod pg_access; 159 | pub mod pg_commands; 160 | pub mod pg_enums; 161 | pub mod pg_errors; 162 | pub mod pg_fetch; 163 | pub mod pg_types; 164 | pub mod pg_unpack; 165 | pub mod postgres; 166 | -------------------------------------------------------------------------------- /src/pg_access.rs: -------------------------------------------------------------------------------- 1 | //! 2 | //! Cache postgresql files, access to executables, clean up files 3 | //! 4 | 5 | use std::cell::Cell; 6 | use std::collections::HashMap; 7 | use std::path::{Path, PathBuf}; 8 | use std::sync::Arc; 9 | 10 | use futures::TryFutureExt; 11 | use tokio::io::AsyncWriteExt; 12 | use tokio::sync::Mutex; 13 | 14 | use crate::pg_enums::{OperationSystem, PgAcquisitionStatus}; 15 | use crate::pg_errors::{PgEmbedError, PgEmbedErrorType}; 16 | use crate::pg_fetch::PgFetchSettings; 17 | use crate::pg_types::{PgCommandSync, PgResult}; 18 | use crate::pg_unpack; 19 | 20 | lazy_static! { 21 | /// 22 | /// Stores the paths to the cache directories while acquiring the related postgres binaries 23 | /// 24 | /// Used to prevent simultaneous downloads and unpacking of the same binaries 25 | /// while executing multiple PgEmbed instances concurrently. 
26 | /// 27 | static ref ACQUIRED_PG_BINS: Arc>> = 28 | Arc::new(Mutex::new(HashMap::with_capacity(5))); 29 | } 30 | 31 | const PG_EMBED_CACHE_DIR_NAME: &'static str = "pg-embed"; 32 | const PG_VERSION_FILE_NAME: &'static str = "PG_VERSION"; 33 | 34 | /// 35 | /// Access to pg_ctl, initdb, database directory and cache directory 36 | /// 37 | pub struct PgAccess { 38 | /// Cache directory path 39 | pub cache_dir: PathBuf, 40 | /// Database directory path 41 | pub database_dir: PathBuf, 42 | /// Postgresql pg_ctl executable path 43 | pub pg_ctl_exe: PathBuf, 44 | /// Postgresql initdb executable path 45 | pub init_db_exe: PathBuf, 46 | /// Password file path 47 | pub pw_file_path: PathBuf, 48 | /// Postgresql binaries zip file path 49 | pub zip_file_path: PathBuf, 50 | /// Postgresql database version file 51 | /// used for internal checks 52 | pg_version_file: PathBuf, 53 | /// Fetch settings 54 | fetch_settings: PgFetchSettings, 55 | } 56 | 57 | impl PgAccess { 58 | /// 59 | /// Create a new instance 60 | /// 61 | /// Directory structure for cached postgresql binaries will be created 62 | /// 63 | pub async fn new( 64 | fetch_settings: &PgFetchSettings, 65 | database_dir: &PathBuf, 66 | ) -> Result { 67 | // cache directory 68 | let cache_dir = Self::create_cache_dir_structure(&fetch_settings).await?; 69 | Self::create_db_dir_structure(database_dir).await?; 70 | // pg_ctl executable 71 | let mut pg_ctl = cache_dir.clone(); 72 | pg_ctl.push("bin/pg_ctl"); 73 | // initdb executable 74 | let mut init_db = cache_dir.clone(); 75 | init_db.push("bin/initdb"); 76 | // postgres zip file 77 | let mut zip_file_path = cache_dir.clone(); 78 | let platform = fetch_settings.platform(); 79 | let file_name = format!("{}-{}.zip", platform, &fetch_settings.version.0); 80 | zip_file_path.push(file_name); 81 | // password file 82 | let mut pw_file = database_dir.clone(); 83 | pw_file.set_extension("pwfile"); 84 | // postgres version file 85 | let mut pg_version_file = database_dir.clone(); 86 | pg_version_file.push(PG_VERSION_FILE_NAME); 87 | 88 | Ok(PgAccess { 89 | cache_dir, 90 | database_dir: database_dir.clone(), 91 | pg_ctl_exe: pg_ctl, 92 | init_db_exe: init_db, 93 | pw_file_path: pw_file, 94 | zip_file_path, 95 | pg_version_file, 96 | fetch_settings: fetch_settings.clone(), 97 | }) 98 | } 99 | 100 | /// 101 | /// Create directory structure for cached postgresql executables 102 | /// 103 | /// Returns PathBuf(cache_directory) on success, an error otherwise 104 | /// 105 | async fn create_cache_dir_structure(fetch_settings: &PgFetchSettings) -> PgResult { 106 | let cache_dir = dirs::cache_dir().ok_or_else(|| PgEmbedError { 107 | error_type: PgEmbedErrorType::InvalidPgUrl, 108 | source: None, 109 | message: None, 110 | })?; 111 | let os_string = match fetch_settings.operating_system { 112 | OperationSystem::Darwin | OperationSystem::Windows | OperationSystem::Linux => { 113 | fetch_settings.operating_system.to_string() 114 | } 115 | OperationSystem::AlpineLinux => { 116 | format!("arch_{}", fetch_settings.operating_system.to_string()) 117 | } 118 | }; 119 | let pg_path = format!( 120 | "{}/{}/{}/{}", 121 | PG_EMBED_CACHE_DIR_NAME, 122 | os_string, 123 | fetch_settings.architecture.to_string(), 124 | fetch_settings.version.0 125 | ); 126 | let mut cache_pg_embed = cache_dir.clone(); 127 | cache_pg_embed.push(pg_path); 128 | tokio::fs::create_dir_all(&cache_pg_embed) 129 | .map_err(|e| PgEmbedError { 130 | error_type: PgEmbedErrorType::DirCreationError, 131 | source: Some(Box::new(e)), 132 | message: None, 
133 | }) 134 | .await?; 135 | Ok(cache_pg_embed) 136 | } 137 | 138 | async fn create_db_dir_structure(db_dir: &PathBuf) -> PgResult<()> { 139 | tokio::fs::create_dir_all(db_dir) 140 | .map_err(|e| PgEmbedError { 141 | error_type: PgEmbedErrorType::DirCreationError, 142 | source: Some(Box::new(e)), 143 | message: None, 144 | }) 145 | .await 146 | } 147 | 148 | /// 149 | /// Download and unpack postgres binaries 150 | /// 151 | pub async fn maybe_acquire_postgres(&self) -> PgResult<()> { 152 | let mut lock = ACQUIRED_PG_BINS.lock().await; 153 | 154 | if self.pg_executables_cached().await? { 155 | return Ok(()); 156 | } 157 | 158 | lock.insert(self.cache_dir.clone(), PgAcquisitionStatus::InProgress); 159 | let pg_bin_data = self.fetch_settings.fetch_postgres().await?; 160 | self.write_pg_zip(&pg_bin_data).await?; 161 | log::debug!( 162 | "Unpacking postgres binaries {} {}", 163 | self.zip_file_path.display(), 164 | self.cache_dir.display() 165 | ); 166 | pg_unpack::unpack_postgres(&self.zip_file_path, &self.cache_dir).await?; 167 | 168 | lock.insert(self.cache_dir.clone(), PgAcquisitionStatus::Finished); 169 | Ok(()) 170 | } 171 | 172 | /// 173 | /// Check if postgresql executables are already cached 174 | /// 175 | pub async fn pg_executables_cached(&self) -> PgResult { 176 | Self::path_exists(self.init_db_exe.as_path()).await 177 | } 178 | 179 | /// 180 | /// Check if database files exist 181 | /// 182 | pub async fn db_files_exist(&self) -> PgResult { 183 | Ok(self.pg_executables_cached().await? 184 | && Self::path_exists(self.pg_version_file.as_path()).await?) 185 | } 186 | 187 | /// 188 | /// Check if database version file exists 189 | /// 190 | pub async fn pg_version_file_exists(db_dir: &PathBuf) -> PgResult { 191 | let mut pg_version_file = db_dir.clone(); 192 | pg_version_file.push(PG_VERSION_FILE_NAME); 193 | let file_exists = if let Ok(_) = tokio::fs::File::open(pg_version_file.as_path()).await { 194 | true 195 | } else { 196 | false 197 | }; 198 | Ok(file_exists) 199 | } 200 | 201 | /// 202 | /// Check if file path exists 203 | /// 204 | async fn path_exists(file: &Path) -> PgResult { 205 | if let Ok(_) = tokio::fs::File::open(file).await { 206 | Ok(true) 207 | } else { 208 | Ok(false) 209 | } 210 | } 211 | 212 | /// 213 | /// Check postgresql acquisition status 214 | /// 215 | pub async fn acquisition_status(&self) -> PgAcquisitionStatus { 216 | let lock = ACQUIRED_PG_BINS.lock().await; 217 | let acquisition_status = lock.get(&self.cache_dir); 218 | match acquisition_status { 219 | None => PgAcquisitionStatus::Undefined, 220 | Some(status) => *status, 221 | } 222 | } 223 | 224 | /// 225 | /// Write pg binaries zip to postgresql cache directory 226 | /// 227 | async fn write_pg_zip(&self, bytes: &[u8]) -> PgResult<()> { 228 | let mut file: tokio::fs::File = tokio::fs::File::create(&self.zip_file_path.as_path()) 229 | .map_err(|e| PgEmbedError { 230 | error_type: PgEmbedErrorType::WriteFileError, 231 | source: Some(Box::new(e)), 232 | message: None, 233 | }) 234 | .await?; 235 | file.write_all(&bytes) 236 | .map_err(|e| PgEmbedError { 237 | error_type: PgEmbedErrorType::WriteFileError, 238 | source: Some(Box::new(e)), 239 | message: None, 240 | }) 241 | .await?; 242 | file.sync_data() 243 | .map_err(|e| PgEmbedError { 244 | error_type: PgEmbedErrorType::WriteFileError, 245 | source: Some(Box::new(e)), 246 | message: None, 247 | }) 248 | .await?; 249 | Ok(()) 250 | } 251 | 252 | /// 253 | /// Clean up created files and directories. 
254 | /// 255 | /// Remove created directories containing the database and the password file. 256 | /// 257 | pub fn clean(&self) -> PgResult<()> { 258 | // not using tokio::fs async methods because clean() is called on drop 259 | std::fs::remove_dir_all(self.database_dir.as_path()).map_err(|e| PgEmbedError { 260 | error_type: PgEmbedErrorType::PgCleanUpFailure, 261 | source: Some(Box::new(e)), 262 | message: None, 263 | })?; 264 | std::fs::remove_file(self.pw_file_path.as_path()).map_err(|e| PgEmbedError { 265 | error_type: PgEmbedErrorType::PgCleanUpFailure, 266 | source: Some(Box::new(e)), 267 | message: None, 268 | })?; 269 | Ok(()) 270 | } 271 | 272 | /// 273 | /// Purge postgresql executables 274 | /// 275 | /// Remove all cached postgresql executables 276 | /// 277 | pub async fn purge() -> PgResult<()> { 278 | let mut cache_dir = dirs::cache_dir().ok_or_else(|| PgEmbedError { 279 | error_type: PgEmbedErrorType::ReadFileError, 280 | source: None, 281 | message: Some(String::from("cache dir error")), 282 | })?; 283 | cache_dir.push(PG_EMBED_CACHE_DIR_NAME); 284 | let _ = tokio::fs::remove_dir_all(cache_dir.as_path()) 285 | .map_err(|e| PgEmbedError { 286 | error_type: PgEmbedErrorType::PgPurgeFailure, 287 | source: Some(Box::new(e)), 288 | message: None, 289 | }) 290 | .await; 291 | Ok(()) 292 | } 293 | 294 | /// 295 | /// Clean up database directory and password file 296 | /// 297 | pub async fn clean_up(database_dir: PathBuf, pw_file: PathBuf) -> PgResult<()> { 298 | tokio::fs::remove_dir_all(database_dir.as_path()) 299 | .await 300 | .map_err(|e| PgEmbedError { 301 | error_type: PgEmbedErrorType::PgCleanUpFailure, 302 | source: Some(Box::new(e)), 303 | message: None, 304 | })?; 305 | 306 | tokio::fs::remove_file(pw_file.as_path()) 307 | .await 308 | .map_err(|e| PgEmbedError { 309 | error_type: PgEmbedErrorType::PgCleanUpFailure, 310 | source: Some(Box::new(e)), 311 | message: None, 312 | }) 313 | } 314 | 315 | /// 316 | /// Create a database password file 317 | /// 318 | /// Returns `Ok(())` on success, otherwise returns an error. 319 | /// 320 | pub async fn create_password_file(&self, password: &[u8]) -> PgResult<()> { 321 | let mut file: tokio::fs::File = tokio::fs::File::create(self.pw_file_path.as_path()) 322 | .map_err(|e| PgEmbedError { 323 | error_type: PgEmbedErrorType::WriteFileError, 324 | source: Some(Box::new(e)), 325 | message: None, 326 | }) 327 | .await?; 328 | let _ = file 329 | .write(password) 330 | .map_err(|e| PgEmbedError { 331 | error_type: PgEmbedErrorType::WriteFileError, 332 | source: Some(Box::new(e)), 333 | message: None, 334 | }) 335 | .await?; 336 | Ok(()) 337 | } 338 | 339 | /// 340 | /// Create synchronous pg_ctl stop command 341 | /// 342 | pub fn stop_db_command_sync(&self, database_dir: &PathBuf) -> PgCommandSync { 343 | let pg_ctl_executable = self.pg_ctl_exe.to_str().unwrap(); 344 | let mut command = Box::new(Cell::new(std::process::Command::new(pg_ctl_executable))); 345 | command 346 | .get_mut() 347 | .args(&["stop", "-w", "-D", database_dir.to_str().unwrap()]); 348 | command 349 | } 350 | } 351 | -------------------------------------------------------------------------------- /src/pg_commands.rs: -------------------------------------------------------------------------------- 1 | //! 2 | //! Create postgres command executor 3 | //! 4 | //! Command executors for initdb, pg_ctl start, pg_ctl stop 5 | //! 
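//! A short sketch of the intended call pattern (the paths below are hypothetical; in this crate
//! the executable and database locations are normally supplied by `PgAccess`): build an executor
//! for a pg_ctl action, then await `execute` with an optional timeout.
//!
//! ```rust,ignore
//! use std::path::PathBuf;
//! use tokio::time::Duration;
//!
//! use pg_embed::command_executor::AsyncCommand;
//! use pg_embed::pg_commands::PgCommand;
//! use pg_embed::pg_errors::PgEmbedError;
//!
//! async fn start_and_stop() -> Result<(), PgEmbedError> {
//!     let pg_ctl = PathBuf::from("/path/to/bin/pg_ctl"); // hypothetical
//!     let database_dir = PathBuf::from("/path/to/data"); // hypothetical
//!     let port: u16 = 5432;
//!
//!     let mut start = PgCommand::start_db_executor(&pg_ctl, &database_dir, &port)?;
//!     start.execute(Some(Duration::from_secs(15))).await?;
//!
//!     let mut stop = PgCommand::stop_db_executor(&pg_ctl, &database_dir)?;
//!     stop.execute(Some(Duration::from_secs(15))).await?;
//!     Ok(())
//! }
//! ```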
6 | use std::path::PathBuf; 7 | 8 | use crate::command_executor::{AsyncCommand, AsyncCommandExecutor}; 9 | use crate::pg_enums::{PgAuthMethod, PgProcessType, PgServerStatus}; 10 | use crate::pg_errors::PgEmbedError; 11 | use crate::pg_types::PgResult; 12 | 13 | /// 14 | /// Postgres command executors 15 | /// 16 | pub struct PgCommand {} 17 | 18 | impl PgCommand { 19 | /// 20 | /// Create initdb command 21 | /// 22 | pub fn init_db_executor( 23 | init_db_exe: &PathBuf, 24 | database_dir: &PathBuf, 25 | pw_file_path: &PathBuf, 26 | user: &str, 27 | auth_method: &PgAuthMethod, 28 | ) -> PgResult<AsyncCommandExecutor<PgServerStatus, PgEmbedError, PgProcessType>> { 29 | let init_db_executable = init_db_exe.as_os_str(); 30 | let password_file_arg = format!("--pwfile={}", pw_file_path.to_str().unwrap()); 31 | let auth_host = match auth_method { 32 | PgAuthMethod::Plain => "password", 33 | PgAuthMethod::MD5 => "md5", 34 | PgAuthMethod::ScramSha256 => "scram-sha-256", 35 | }; 36 | let args = [ 37 | "-A", 38 | auth_host, 39 | "-U", 40 | user, 41 | // The postgres-tokio driver uses utf8 encoding, however on windows 42 | // if -E is not specified WIN1252 encoding is chosen by default 43 | // which can lead to encoding errors like this: 44 | // 45 | // ERROR: character with byte sequence 0xe0 0xab 0x87 in encoding 46 | // "UTF8" has no equivalent in encoding "WIN1252" 47 | "-E=UTF8", 48 | "-D", 49 | database_dir.to_str().unwrap(), 50 | &password_file_arg, 51 | ]; 52 | 53 | let command_executor = 54 | AsyncCommandExecutor::<PgServerStatus, PgEmbedError, PgProcessType>::new( 55 | init_db_executable, 56 | args, 57 | PgProcessType::InitDb, 58 | )?; 59 | 60 | Ok(command_executor) 61 | } 62 | 63 | /// 64 | /// Create pg_ctl start command 65 | /// 66 | pub fn start_db_executor( 67 | pg_ctl_exe: &PathBuf, 68 | database_dir: &PathBuf, 69 | port: &u16, 70 | ) -> PgResult<AsyncCommandExecutor<PgServerStatus, PgEmbedError, PgProcessType>> { 71 | let pg_ctl_executable = pg_ctl_exe.as_os_str(); 72 | let port_arg = format!("-F -p {}", port.to_string()); 73 | let args = [ 74 | "-o", 75 | &port_arg, 76 | "start", 77 | "-w", 78 | "-D", 79 | database_dir.to_str().unwrap(), 80 | ]; 81 | let command_executor = 82 | AsyncCommandExecutor::<PgServerStatus, PgEmbedError, PgProcessType>::new( 83 | pg_ctl_executable, 84 | args, 85 | PgProcessType::StartDb, 86 | )?; 87 | 88 | Ok(command_executor) 89 | } 90 | 91 | /// 92 | /// Create pg_ctl stop command 93 | /// 94 | pub fn stop_db_executor( 95 | pg_ctl_exe: &PathBuf, 96 | database_dir: &PathBuf, 97 | ) -> PgResult<AsyncCommandExecutor<PgServerStatus, PgEmbedError, PgProcessType>> { 98 | let pg_ctl_executable = pg_ctl_exe.as_os_str(); 99 | let args = ["stop", "-w", "-D", database_dir.to_str().unwrap()]; 100 | let command_executor = 101 | AsyncCommandExecutor::<PgServerStatus, PgEmbedError, PgProcessType>::new( 102 | pg_ctl_executable, 103 | args, 104 | PgProcessType::StopDb, 105 | )?; 106 | 107 | Ok(command_executor) 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /src/pg_enums.rs: -------------------------------------------------------------------------------- 1 | //! 2 | //! Enums 3 | //! 4 | 5 | use std::error::Error; 6 | 7 | use crate::command_executor::ProcessStatus; 8 | use crate::pg_errors::{PgEmbedError, PgEmbedErrorType}; 9 | 10 | /// 11 | /// Postgresql authentication method 12 | /// 13 | /// Choose between plain password, md5 or scram_sha_256 authentication.
14 | /// Scram_sha_256 authentication is only available on postgresql versions >= 11 15 | /// 16 | pub enum PgAuthMethod { 17 | /// plain-text 18 | Plain, 19 | /// md5 20 | MD5, 21 | /// scram_sha_256 22 | ScramSha256, 23 | } 24 | 25 | /// 26 | /// Postgresql server status 27 | /// 28 | #[derive(Debug, Clone, Copy, PartialEq)] 29 | pub enum PgServerStatus { 30 | /// Postgres uninitialized 31 | Uninitialized, 32 | /// Initialization process running 33 | Initializing, 34 | /// Initialization process finished 35 | Initialized, 36 | /// Postgres server process starting 37 | Starting, 38 | /// Postgres server process started 39 | Started, 40 | /// Postgres server process stopping 41 | Stopping, 42 | /// Postgres server process stopped 43 | Stopped, 44 | /// Postgres failure 45 | Failure, 46 | } 47 | 48 | /// 49 | /// Postgesql process type 50 | /// 51 | /// Used internally for distinguishing processes being executed 52 | /// 53 | pub enum PgProcessType { 54 | /// initdb process 55 | InitDb, 56 | /// pg_ctl start process 57 | StartDb, 58 | /// pg_ctl stop process 59 | StopDb, 60 | } 61 | 62 | impl ProcessStatus for PgProcessType { 63 | fn status_entry(&self) -> PgServerStatus { 64 | match self { 65 | PgProcessType::InitDb => PgServerStatus::Initializing, 66 | PgProcessType::StartDb => PgServerStatus::Starting, 67 | PgProcessType::StopDb => PgServerStatus::Stopping, 68 | } 69 | } 70 | 71 | fn status_exit(&self) -> PgServerStatus { 72 | match self { 73 | PgProcessType::InitDb => PgServerStatus::Initialized, 74 | PgProcessType::StartDb => PgServerStatus::Started, 75 | PgProcessType::StopDb => PgServerStatus::Stopped, 76 | } 77 | } 78 | 79 | fn error_type(&self) -> PgEmbedError { 80 | match self { 81 | PgProcessType::InitDb => PgEmbedError { 82 | error_type: PgEmbedErrorType::PgInitFailure, 83 | source: None, 84 | message: None, 85 | }, 86 | PgProcessType::StartDb => PgEmbedError { 87 | error_type: PgEmbedErrorType::PgStartFailure, 88 | source: None, 89 | message: None, 90 | }, 91 | PgProcessType::StopDb => PgEmbedError { 92 | error_type: PgEmbedErrorType::PgStopFailure, 93 | source: None, 94 | message: None, 95 | }, 96 | } 97 | } 98 | 99 | fn wrap_error( 100 | &self, 101 | error: E, 102 | message: Option, 103 | ) -> PgEmbedError { 104 | PgEmbedError { 105 | error_type: PgEmbedErrorType::PgError, 106 | source: Some(Box::new(error)), 107 | message, 108 | } 109 | } 110 | } 111 | 112 | impl ToString for PgProcessType { 113 | fn to_string(&self) -> String { 114 | match self { 115 | PgProcessType::InitDb => "initdb".to_string(), 116 | PgProcessType::StartDb => "start".to_string(), 117 | PgProcessType::StopDb => "stop".to_string(), 118 | } 119 | } 120 | } 121 | 122 | /// The operation systems enum 123 | #[derive(Debug, PartialEq, Copy, Clone)] 124 | pub enum OperationSystem { 125 | Darwin, 126 | Windows, 127 | Linux, 128 | AlpineLinux, 129 | } 130 | 131 | impl ToString for OperationSystem { 132 | fn to_string(&self) -> String { 133 | match &self { 134 | OperationSystem::Darwin => "darwin".to_string(), 135 | OperationSystem::Windows => "windows".to_string(), 136 | OperationSystem::Linux => "linux".to_string(), 137 | OperationSystem::AlpineLinux => "linux".to_string(), 138 | } 139 | } 140 | } 141 | 142 | impl Default for OperationSystem { 143 | fn default() -> Self { 144 | #[cfg(not(any(target_os = "linux", target_os = "windows")))] 145 | { 146 | OperationSystem::Darwin 147 | } 148 | 149 | #[cfg(target_os = "linux")] 150 | { 151 | OperationSystem::Linux 152 | } 153 | 154 | #[cfg(target_os = "windows")] 155 
| { 156 | OperationSystem::Windows 157 | } 158 | } 159 | } 160 | 161 | /// The cpu architectures enum 162 | #[derive(Debug, PartialEq, Copy, Clone)] 163 | pub enum Architecture { 164 | Amd64, 165 | I386, 166 | Arm32v6, 167 | Arm32v7, 168 | Arm64v8, 169 | Ppc64le, 170 | } 171 | 172 | impl ToString for Architecture { 173 | fn to_string(&self) -> String { 174 | match &self { 175 | Architecture::Amd64 => "amd64".to_string(), 176 | Architecture::I386 => "i386".to_string(), 177 | Architecture::Arm32v6 => "arm32v6".to_string(), 178 | Architecture::Arm32v7 => "arm32v7".to_string(), 179 | Architecture::Arm64v8 => "arm64v8".to_string(), 180 | Architecture::Ppc64le => "ppc64le".to_string(), 181 | } 182 | } 183 | } 184 | 185 | impl Default for Architecture { 186 | fn default() -> Self { 187 | #[cfg(not(any( 188 | target_arch = "x86", 189 | target_arch = "arm", 190 | target_arch = "aarch64", 191 | target_arch = "powerpc64" 192 | )))] 193 | { 194 | Architecture::Amd64 195 | } 196 | 197 | #[cfg(target_arch = "x86")] 198 | { 199 | Architecture::I386 200 | } 201 | 202 | #[cfg(target_arch = "arm")] 203 | { 204 | Architecture::Arm32v7 205 | } 206 | 207 | #[cfg(target_arch = "aarch64")] 208 | { 209 | Architecture::Arm64v8 210 | } 211 | 212 | #[cfg(target_arch = "powerpc64")] 213 | { 214 | Architecture::Ppc64le 215 | } 216 | } 217 | } 218 | 219 | /// The postgresql binaries acquisition status 220 | #[derive(Copy, Clone, PartialEq)] 221 | pub enum PgAcquisitionStatus { 222 | /// Acquiring postgresql binaries 223 | InProgress, 224 | /// Finished acquiring postgresql binaries 225 | Finished, 226 | /// No acquisition 227 | Undefined, 228 | } 229 | -------------------------------------------------------------------------------- /src/pg_errors.rs: -------------------------------------------------------------------------------- 1 | //! 2 | //! Errors 3 | //! 
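//! Every failure is reported as a `PgEmbedError` carrying a matchable `error_type`, an optional
//! boxed `source` and an optional `message`. A small, purely illustrative sketch of branching on
//! the error type (the retry policy here is an assumption, not part of this crate):
//!
//! ```rust,ignore
//! use pg_embed::pg_errors::{PgEmbedError, PgEmbedErrorType};
//!
//! fn is_retryable(err: &PgEmbedError) -> bool {
//!     match err.error_type {
//!         // Network or timeout failures are often worth retrying.
//!         PgEmbedErrorType::DownloadFailure | PgEmbedErrorType::PgTimedOutError => true,
//!         // Everything else is treated as fatal in this sketch.
//!         _ => false,
//!     }
//! }
//! ```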
4 | use std::error::Error; 5 | 6 | use std::fmt; 7 | use std::fmt::Formatter; 8 | use thiserror::Error; 9 | 10 | /// 11 | /// PgEmbed errors 12 | #[derive(Error, Debug)] 13 | pub struct PgEmbedError { 14 | pub error_type: PgEmbedErrorType, 15 | pub source: Option>, 16 | pub message: Option, 17 | } 18 | 19 | impl fmt::Display for PgEmbedError { 20 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 21 | write!( 22 | f, 23 | "error_type: {:?}\nsource: \n{:?}\nmessage: \n{:?}\n", 24 | self.error_type, self.source, self.message 25 | ) 26 | } 27 | } 28 | 29 | /// 30 | /// Common pg_embed errors, independent from features used 31 | /// 32 | #[derive(Debug, PartialEq)] 33 | pub enum PgEmbedErrorType { 34 | /// Invalid postgresql binaries download url 35 | InvalidPgUrl, 36 | /// Invalid postgresql binaries package 37 | InvalidPgPackage, 38 | /// Could not write file 39 | WriteFileError, 40 | /// Could not read file 41 | ReadFileError, 42 | /// Could not create directory 43 | DirCreationError, 44 | /// Failed to unpack postgresql binaries 45 | UnpackFailure, 46 | /// Postgresql could not be started 47 | PgStartFailure, 48 | /// Postgresql could not be stopped 49 | PgStopFailure, 50 | /// Postgresql could not be initialized 51 | PgInitFailure, 52 | /// Clean up error 53 | PgCleanUpFailure, 54 | /// Purging error 55 | PgPurgeFailure, 56 | /// Buffer read error 57 | PgBufferReadError, 58 | /// Lock error 59 | PgLockError, 60 | /// Child process error 61 | PgProcessError, 62 | /// Timed out error 63 | PgTimedOutError, 64 | /// Task join error 65 | PgTaskJoinError, 66 | /// Error wrapper 67 | PgError, 68 | /// Postgresql binaries download failure 69 | DownloadFailure, 70 | /// Request response bytes convertion failure 71 | ConversionFailure, 72 | /// Channel send error 73 | SendFailure, 74 | /// sqlx query error 75 | SqlQueryError, 76 | /// migration error 77 | MigrationError, 78 | } 79 | -------------------------------------------------------------------------------- /src/pg_fetch.rs: -------------------------------------------------------------------------------- 1 | //! 2 | //! Fetch postgresql binaries 3 | //! 4 | //! Download and unpack postgresql binaries 5 | //! 
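//! A brief sketch of selecting the binaries to fetch: override only the fields you care about and
//! keep the defaults for the rest (Maven repository host, host operating system and architecture).
//!
//! ```rust,ignore
//! use pg_embed::pg_fetch::{PgFetchSettings, PG_V16};
//!
//! let fetch_settings = PgFetchSettings {
//!     version: PG_V16,
//!     ..Default::default()
//! };
//! // The platform string is part of the download path, e.g. "linux-amd64" on x86_64 Linux.
//! println!("platform: {}", fetch_settings.platform());
//! ```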
6 | 7 | use bytes::Bytes; 8 | use futures::TryFutureExt; 9 | use reqwest::Response; 10 | 11 | use crate::pg_enums::{Architecture, OperationSystem}; 12 | use crate::pg_errors::{PgEmbedError, PgEmbedErrorType}; 13 | use crate::pg_types::PgResult; 14 | 15 | /// Postgresql version struct (simple version wrapper) 16 | #[derive(Debug, Copy, Clone)] 17 | pub struct PostgresVersion(pub &'static str); 18 | /// Latest postgres version 17 19 | pub const PG_V17: PostgresVersion = PostgresVersion("17.2.0"); 20 | /// Latest postgres version 16 21 | pub const PG_V16: PostgresVersion = PostgresVersion("16.6.0"); 22 | /// Latest postgres version 15 23 | pub const PG_V15: PostgresVersion = PostgresVersion("15.9.0"); 24 | /// Latest postgres version 14 25 | pub const PG_V14: PostgresVersion = PostgresVersion("14.15.0"); 26 | /// Latest postgres version 13 27 | pub const PG_V13: PostgresVersion = PostgresVersion("13.18.0"); 28 | /// Latest postgres version 12 29 | pub const PG_V12: PostgresVersion = PostgresVersion("12.22.0"); 30 | /// Latest pstgres version 11 31 | pub const PG_V11: PostgresVersion = PostgresVersion("11.22.1"); 32 | /// Latest postgres version 10 33 | pub const PG_V10: PostgresVersion = PostgresVersion("10.23.0"); 34 | 35 | /// Settings that determine the postgres binary to be fetched 36 | #[derive(Debug, Clone)] 37 | pub struct PgFetchSettings { 38 | /// The repository host 39 | pub host: String, 40 | /// The operation system 41 | pub operating_system: OperationSystem, 42 | /// The cpu architecture 43 | pub architecture: Architecture, 44 | /// The postgresql version 45 | pub version: PostgresVersion, 46 | } 47 | 48 | impl Default for PgFetchSettings { 49 | fn default() -> Self { 50 | PgFetchSettings { 51 | host: "https://repo1.maven.org".to_string(), 52 | operating_system: OperationSystem::default(), 53 | architecture: Architecture::default(), 54 | version: PG_V13, 55 | } 56 | } 57 | } 58 | 59 | impl PgFetchSettings { 60 | /// The platform string (*needed to determine the download path*) 61 | pub fn platform(&self) -> String { 62 | let os = self.operating_system.to_string(); 63 | let arch = if self.operating_system == OperationSystem::AlpineLinux { 64 | format!("{}-{}", self.architecture.to_string(), "alpine") 65 | } else { 66 | self.architecture.to_string() 67 | }; 68 | format!("{}-{}", os, arch) 69 | } 70 | 71 | /// 72 | /// Fetch postgres binaries 73 | /// 74 | /// Returns the data of the downloaded binary in an `Ok([u8])` on success, otherwise returns an error. 
75 | /// 76 | pub async fn fetch_postgres(&self) -> PgResult<Bytes> { 77 | let platform = &self.platform(); 78 | let version = self.version.0; 79 | let download_url = format!( 80 | "{}/maven2/io/zonky/test/postgres/embedded-postgres-binaries-{}/{}/embedded-postgres-binaries-{}-{}.jar", 81 | &self.host, 82 | &platform, 83 | version, 84 | &platform, 85 | version); 86 | 87 | let response: Response = reqwest::get(download_url) 88 | .map_err(|e| PgEmbedError { 89 | error_type: PgEmbedErrorType::DownloadFailure, 90 | source: Some(Box::new(e)), 91 | message: None, 92 | }) 93 | .await?; 94 | 95 | let content: Bytes = response 96 | .bytes() 97 | .map_err(|e| PgEmbedError { 98 | error_type: PgEmbedErrorType::ConversionFailure, 99 | source: Some(Box::new(e)), 100 | message: None, 101 | }) 102 | .await?; 103 | 104 | log::debug!("Downloaded {} bytes", content.len()); 105 | log::trace!( 106 | "First 1024 bytes: {:?}", 107 | &String::from_utf8_lossy(&content[..1024]) 108 | ); 109 | 110 | Ok(content) 111 | } 112 | } 113 | 114 | #[cfg(test)] 115 | mod tests { 116 | use super::*; 117 | 118 | #[tokio::test] 119 | async fn fetch_postgres() -> Result<(), PgEmbedError> { 120 | let pg_settings = PgFetchSettings::default(); 121 | pg_settings.fetch_postgres().await?; 122 | Ok(()) 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /src/pg_types.rs: -------------------------------------------------------------------------------- 1 | use crate::pg_errors::PgEmbedError; 2 | use std::cell::Cell; 3 | 4 | pub type PgResult<T> = Result<T, PgEmbedError>; 5 | pub type PgCommandSync = Box<Cell<std::process::Command>>; 6 | -------------------------------------------------------------------------------- /src/pg_unpack.rs: -------------------------------------------------------------------------------- 1 | //! 2 | //! Unpack postgresql binaries 3 | //! 4 | use std::path::PathBuf; 5 | 6 | use archiver_rs::{Archive, Compressed}; 7 | use futures::TryFutureExt; 8 | 9 | use crate::pg_errors::{PgEmbedError, PgEmbedErrorType}; 10 | use crate::pg_types::PgResult; 11 | 12 | /// 13 | /// Unzip the postgresql txz file 14 | /// 15 | /// Returns `Ok(PathBuf)` with the file path of the unzipped txz archive on success, otherwise returns an error. 16 | /// 17 | fn unzip_txz(zip_file_path: &PathBuf, cache_dir: &PathBuf) -> Result<PathBuf, PgEmbedError> { 18 | let mut zip = archiver_rs::Zip::open(zip_file_path.as_path()).map_err(|e| PgEmbedError { 19 | error_type: PgEmbedErrorType::ReadFileError, 20 | source: Some(Box::new(e)), 21 | message: Some(format!( 22 | "Could not read zip file {}", 23 | zip_file_path.display() 24 | )), 25 | })?; 26 | let file_name = zip 27 | .files() 28 | .map_err(|e| PgEmbedError { 29 | error_type: PgEmbedErrorType::UnpackFailure, 30 | source: Some(Box::new(e)), 31 | message: None, 32 | })?
33 | .into_iter() 34 | .find(|name| name.ends_with(".txz")); 35 | match file_name { 36 | Some(file_name) => { 37 | // decompress zip 38 | let mut target_path = cache_dir.clone(); 39 | target_path.push(&file_name); 40 | zip.extract_single(&target_path.as_path(), file_name.clone()) 41 | .map_err(|e| PgEmbedError { 42 | error_type: PgEmbedErrorType::UnpackFailure, 43 | source: Some(Box::new(e)), 44 | message: None, 45 | })?; 46 | Ok(target_path) 47 | } 48 | None => Err(PgEmbedError { 49 | error_type: PgEmbedErrorType::InvalidPgPackage, 50 | source: None, 51 | message: Some(String::from("no postgresql txz in zip")), 52 | }), 53 | } 54 | } 55 | 56 | /// 57 | /// Decompress the postgresql txz file 58 | /// 59 | /// Returns `Ok(PathBuf(tar_file_path))` (*the file path to the postgresql tar file*) on success, otherwise returns an error. 60 | /// 61 | fn decompress_xz(file_path: &PathBuf) -> Result { 62 | let mut xz = archiver_rs::Xz::open(file_path.as_path()).map_err(|e| PgEmbedError { 63 | error_type: PgEmbedErrorType::ReadFileError, 64 | source: Some(Box::new(e)), 65 | message: None, 66 | })?; 67 | // rename file path suffix from .txz to .tar 68 | let target_path = file_path.with_extension(".tar"); 69 | xz.decompress(&target_path.as_path()) 70 | .map_err(|e| PgEmbedError { 71 | error_type: PgEmbedErrorType::UnpackFailure, 72 | source: Some(Box::new(e)), 73 | message: None, 74 | })?; 75 | Ok(target_path) 76 | } 77 | 78 | /// 79 | /// Unpack the postgresql tar file 80 | /// 81 | /// Returns `Ok(())` on success, otherwise returns an error. 82 | /// 83 | fn decompress_tar(file_path: &PathBuf, cache_dir: &PathBuf) -> Result<(), PgEmbedError> { 84 | let mut tar = archiver_rs::Tar::open(&file_path.as_path()).map_err(|e| PgEmbedError { 85 | error_type: PgEmbedErrorType::ReadFileError, 86 | source: Some(Box::new(e)), 87 | message: None, 88 | })?; 89 | 90 | tar.extract(cache_dir.as_path()).map_err(|e| PgEmbedError { 91 | error_type: PgEmbedErrorType::UnpackFailure, 92 | source: Some(Box::new(e)), 93 | message: None, 94 | })?; 95 | 96 | Ok(()) 97 | } 98 | 99 | /// 100 | /// Unpack the postgresql executables 101 | /// 102 | /// Returns `Ok(())` on success, otherwise returns an error. 103 | /// 104 | pub async fn unpack_postgres(zip_file_path: &PathBuf, cache_dir: &PathBuf) -> PgResult<()> { 105 | let txz_file_path = unzip_txz(&zip_file_path, &cache_dir)?; 106 | let tar_file_path = decompress_xz(&txz_file_path)?; 107 | tokio::fs::remove_file(txz_file_path) 108 | .map_err(|e| PgEmbedError { 109 | error_type: PgEmbedErrorType::PgCleanUpFailure, 110 | source: Some(Box::new(e)), 111 | message: None, 112 | }) 113 | .await?; 114 | let _ = decompress_tar(&tar_file_path, &cache_dir); 115 | tokio::fs::remove_file(tar_file_path) 116 | .map_err(|e| PgEmbedError { 117 | error_type: PgEmbedErrorType::PgCleanUpFailure, 118 | source: Some(Box::new(e)), 119 | message: None, 120 | }) 121 | .await?; 122 | Ok(()) 123 | } 124 | -------------------------------------------------------------------------------- /src/postgres.rs: -------------------------------------------------------------------------------- 1 | //! 2 | //! Postgresql server 3 | //! 4 | //! Start, stop, initialize the postgresql server. 5 | //! Create database clusters and databases. 6 | //! 
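//! Beyond the crate-level example, a common pattern is to keep the cluster between runs: set
//! `persistent: true`, point `database_dir` at a fixed path, and only create the application
//! database when it does not exist yet. A hedged sketch (assumes the `rt_tokio_migrate` feature,
//! which provides `database_exists`/`create_database`, and a database name chosen for illustration):
//!
//! ```rust,ignore
//! use pg_embed::postgres::PgEmbed;
//! use pg_embed::pg_errors::PgEmbedError;
//!
//! async fn ensure_database(pg: &mut PgEmbed) -> Result<(), PgEmbedError> {
//!     // "app" is an illustrative database name.
//!     if !pg.database_exists("app").await? {
//!         pg.create_database("app").await?;
//!     }
//!     Ok(())
//! }
//! ```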
7 | use std::io::BufRead;
8 | use std::path::PathBuf;
9 | use std::process::Stdio;
10 | use std::sync::Arc;
11 | use std::time::Duration;
12 |
13 | use futures::TryFutureExt;
14 | use log::{error, info};
15 | use tokio::sync::Mutex;
16 |
17 | #[cfg(feature = "rt_tokio_migrate")]
18 | use sqlx::migrate::{MigrateDatabase, Migrator};
19 | #[cfg(feature = "rt_tokio_migrate")]
20 | use sqlx::postgres::PgPoolOptions;
21 | #[cfg(feature = "rt_tokio_migrate")]
22 | use sqlx::Postgres;
23 |
24 | use crate::command_executor::AsyncCommand;
25 | use crate::pg_access::PgAccess;
26 | use crate::pg_commands::PgCommand;
27 | use crate::pg_enums::{PgAuthMethod, PgServerStatus};
28 | use crate::pg_errors::{PgEmbedError, PgEmbedErrorType};
29 | use crate::pg_fetch;
30 | use crate::pg_types::PgResult;
31 |
32 | ///
33 | /// Database settings
34 | ///
35 | pub struct PgSettings {
36 |     /// postgresql database directory
37 |     pub database_dir: PathBuf,
38 |     /// postgresql port
39 |     pub port: u16,
40 |     /// postgresql user name
41 |     pub user: String,
42 |     /// postgresql password
43 |     pub password: String,
44 |     /// authentication
45 |     pub auth_method: PgAuthMethod,
46 |     /// persist database
47 |     pub persistent: bool,
48 |     /// duration to wait before terminating process execution
49 |     /// pg_ctl start/stop and initdb timeout
50 |     pub timeout: Option<Duration>,
51 |     /// migrations folder
52 |     /// sql script files to execute on migrate
53 |     pub migration_dir: Option<PathBuf>,
54 | }
55 |
56 | ///
57 | /// Embedded postgresql database
58 | ///
59 | /// If the PgEmbed instance is dropped / goes out of scope and postgresql is still
60 | /// running, the postgresql process will be killed and, depending on the [PgSettings::persistent] setting,
61 | /// files and directories will be cleaned up.
62 | ///
63 | pub struct PgEmbed {
64 |     /// Postgresql settings
65 |     pub pg_settings: PgSettings,
66 |     /// Download settings
67 |     pub fetch_settings: pg_fetch::PgFetchSettings,
68 |     /// Database uri `postgres://{username}:{password}@localhost:{port}`
69 |     pub db_uri: String,
70 |     /// Postgres server status
71 |     pub server_status: Arc<Mutex<PgServerStatus>>,
72 |     pub shutting_down: bool,
73 |     /// Postgres files access
74 |     pub pg_access: PgAccess,
75 | }
76 |
77 | impl Drop for PgEmbed {
78 |     fn drop(&mut self) {
79 |         if !self.shutting_down {
80 |             let _ = self.stop_db_sync();
81 |         }
82 |         if !&self.pg_settings.persistent {
83 |             let _ = &self.pg_access.clean();
84 |         }
85 |     }
86 | }
87 |
88 | impl PgEmbed {
89 |     ///
90 |     /// Create a new PgEmbed instance
91 |     ///
92 |     pub async fn new(
93 |         pg_settings: PgSettings,
94 |         fetch_settings: pg_fetch::PgFetchSettings,
95 |     ) -> PgResult<Self> {
96 |         let password: &str = &pg_settings.password;
97 |         let db_uri = format!(
98 |             "postgres://{}:{}@localhost:{}",
99 |             &pg_settings.user,
100 |             &password,
101 |             pg_settings.port.to_string()
102 |         );
103 |         let pg_access = PgAccess::new(&fetch_settings, &pg_settings.database_dir).await?;
104 |         Ok(PgEmbed {
105 |             pg_settings,
106 |             fetch_settings,
107 |             db_uri,
108 |             server_status: Arc::new(Mutex::new(PgServerStatus::Uninitialized)),
109 |             shutting_down: false,
110 |             pg_access,
111 |         })
112 |     }
113 |
114 |     ///
115 |     /// Setup postgresql for execution
116 |     ///
117 |     /// Download, unpack, create password file and database
118 |     ///
119 |     pub async fn setup(&mut self) -> PgResult<()> {
120 |         self.pg_access.maybe_acquire_postgres().await?;
121 |         self.pg_access
122 |             .create_password_file(self.pg_settings.password.as_bytes())
123 |             .await?;
124 |         if self.pg_access.db_files_exist().await? {
125 |             let mut server_status = self.server_status.lock().await;
126 |             *server_status = PgServerStatus::Initialized;
127 |         } else {
128 |             let _r = &self.init_db().await?;
129 |         }
130 |         Ok(())
131 |     }
132 |
133 |     ///
134 |     /// Initialize postgresql database
135 |     ///
136 |     /// Returns `Ok(())` on success, otherwise returns an error.
137 |     ///
138 |     pub async fn init_db(&mut self) -> PgResult<()> {
139 |         {
140 |             let mut server_status = self.server_status.lock().await;
141 |             *server_status = PgServerStatus::Initializing;
142 |         }
143 |
144 |         let mut executor = PgCommand::init_db_executor(
145 |             &self.pg_access.init_db_exe,
146 |             &self.pg_access.database_dir,
147 |             &self.pg_access.pw_file_path,
148 |             &self.pg_settings.user,
149 |             &self.pg_settings.auth_method,
150 |         )?;
151 |         let exit_status = executor.execute(self.pg_settings.timeout).await?;
152 |         let mut server_status = self.server_status.lock().await;
153 |         *server_status = exit_status;
154 |         Ok(())
155 |     }
156 |
157 |     ///
158 |     /// Start postgresql database
159 |     ///
160 |     /// Returns `Ok(())` on success, otherwise returns an error.
161 |     ///
162 |     pub async fn start_db(&mut self) -> PgResult<()> {
163 |         {
164 |             let mut server_status = self.server_status.lock().await;
165 |             *server_status = PgServerStatus::Starting;
166 |         }
167 |         self.shutting_down = false;
168 |         let mut executor = PgCommand::start_db_executor(
169 |             &self.pg_access.pg_ctl_exe,
170 |             &self.pg_access.database_dir,
171 |             &self.pg_settings.port,
172 |         )?;
173 |         let exit_status = executor.execute(self.pg_settings.timeout).await?;
174 |         let mut server_status = self.server_status.lock().await;
175 |         *server_status = exit_status;
176 |         Ok(())
177 |     }
178 |
179 |     ///
180 |     /// Stop postgresql database
181 |     ///
182 |     /// Returns `Ok(())` on success, otherwise returns an error.
183 |     ///
184 |     pub async fn stop_db(&mut self) -> PgResult<()> {
185 |         {
186 |             let mut server_status = self.server_status.lock().await;
187 |             *server_status = PgServerStatus::Stopping;
188 |         }
189 |         self.shutting_down = true;
190 |         let mut executor =
191 |             PgCommand::stop_db_executor(&self.pg_access.pg_ctl_exe, &self.pg_access.database_dir)?;
192 |         let exit_status = executor.execute(self.pg_settings.timeout).await?;
193 |         let mut server_status = self.server_status.lock().await;
194 |         *server_status = exit_status;
195 |         Ok(())
196 |     }
197 |
198 |     ///
199 |     /// Stop the postgresql database synchronously
200 |     ///
201 |     /// Returns `Ok(())` on success, otherwise returns an error.
202 |     ///
203 |     pub fn stop_db_sync(&mut self) -> PgResult<()> {
204 |         self.shutting_down = true;
205 |         let mut stop_db_command = self
206 |             .pg_access
207 |             .stop_db_command_sync(&self.pg_settings.database_dir);
208 |         let process = stop_db_command
209 |             .get_mut()
210 |             .stdout(Stdio::piped())
211 |             .stderr(Stdio::piped())
212 |             .spawn()
213 |             .map_err(|e| PgEmbedError {
214 |                 error_type: PgEmbedErrorType::PgError,
215 |                 source: Some(Box::new(e)),
216 |                 message: None,
217 |             })?;
218 |
219 |         self.handle_process_io_sync(process)
220 |     }
221 |
222 |     ///
223 |     /// Handle process logging synchronously
224 |     ///
225 |     pub fn handle_process_io_sync(&self, mut process: std::process::Child) -> PgResult<()> {
226 |         let reader_out = std::io::BufReader::new(process.stdout.take().unwrap()).lines();
227 |         let reader_err = std::io::BufReader::new(process.stderr.take().unwrap()).lines();
228 |         reader_out.for_each(|line| info!("{}", line.unwrap()));
229 |         reader_err.for_each(|line| error!("{}", line.unwrap()));
230 |         Ok(())
231 |     }
232 |
233 |     ///
234 |     /// Create a database
235 |     ///
236 |     #[cfg(any(feature = "rt_tokio_migrate"))]
237 |     pub async fn create_database(&self, db_name: &str) -> PgResult<()> {
238 |         Postgres::create_database(&self.full_db_uri(db_name))
239 |             .map_err(|e| PgEmbedError {
240 |                 error_type: PgEmbedErrorType::PgTaskJoinError,
241 |                 source: Some(Box::new(e)),
242 |                 message: None,
243 |             })
244 |             .await?;
245 |         Ok(())
246 |     }
247 |
248 |     ///
249 |     /// Drop a database
250 |     ///
251 |     #[cfg(any(feature = "rt_tokio_migrate"))]
252 |     pub async fn drop_database(&self, db_name: &str) -> PgResult<()> {
253 |         Postgres::drop_database(&self.full_db_uri(db_name))
254 |             .map_err(|e| PgEmbedError {
255 |                 error_type: PgEmbedErrorType::PgTaskJoinError,
256 |                 source: Some(Box::new(e)),
257 |                 message: None,
258 |             })
259 |             .await?;
260 |         Ok(())
261 |     }
262 |
263 |     ///
264 |     /// Check database existence
265 |     ///
266 |     #[cfg(any(feature = "rt_tokio_migrate"))]
267 |     pub async fn database_exists(&self, db_name: &str) -> PgResult<bool> {
268 |         let result = Postgres::database_exists(&self.full_db_uri(db_name))
269 |             .map_err(|e| PgEmbedError {
270 |                 error_type: PgEmbedErrorType::PgTaskJoinError,
271 |                 source: Some(Box::new(e)),
272 |                 message: None,
273 |             })
274 |             .await?;
275 |         Ok(result)
276 |     }
277 |
278 |     ///
279 |     /// The full database uri
280 |     ///
281 |     /// (*postgres://{username}:{password}@localhost:{port}/{db_name}*)
282 |     ///
283 |     pub fn full_db_uri(&self, db_name: &str) -> String {
284 |         format!("{}/{}", &self.db_uri, db_name)
285 |     }
286 |
287 |     ///
288 |     /// Run migrations
289 |     ///
290 |     #[cfg(any(feature = "rt_tokio_migrate"))]
291 |     pub async fn migrate(&self, db_name: &str) -> PgResult<()> {
292 |         if let Some(migration_dir) = &self.pg_settings.migration_dir {
293 |             let m = Migrator::new(migration_dir.as_path())
294 |                 .map_err(|e| PgEmbedError {
295 |                     error_type: PgEmbedErrorType::MigrationError,
296 |                     source: Some(Box::new(e)),
297 |                     message: None,
298 |                 })
299 |                 .await?;
300 |             let pool = PgPoolOptions::new()
301 |                 .connect(&self.full_db_uri(db_name))
302 |                 .map_err(|e| PgEmbedError {
303 |                     error_type: PgEmbedErrorType::SqlQueryError,
304 |                     source: Some(Box::new(e)),
305 |                     message: None,
306 |                 })
307 |                 .await?;
308 |             m.run(&pool)
309 |                 .map_err(|e| PgEmbedError {
310 |                     error_type: PgEmbedErrorType::MigrationError,
311 |                     source: Some(Box::new(e)),
312 |                     message: None,
313 |                 })
314 |                 .await?;
315 |         }
316 |         Ok(())
317 |     }
318 | }
319 |
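The database-management and migration helpers above are only compiled with the `rt_tokio_migrate` feature. A condensed sketch of how they combine (the database name and the existence check are illustrative; the same calls are exercised in `tests/migration_tokio.rs` below):

```rust
use pg_embed::pg_errors::PgEmbedError;
use pg_embed::postgres::PgEmbed;

// Assumes `pg` has already been set up and started (see `setup()` / `start_db()` above)
// and that the crate is built with the `rt_tokio_migrate` feature.
async fn provision(pg: &PgEmbed) -> Result<(), PgEmbedError> {
    let db_name = "app_db";
    if !pg.database_exists(db_name).await? {
        pg.create_database(db_name).await?;
    }
    // Applies the sql scripts from `PgSettings::migration_dir`, if one was configured.
    pg.migrate(db_name).await?;
    // postgres://{username}:{password}@localhost:{port}/app_db
    let _connection_uri = pg.full_db_uri(db_name);
    Ok(())
}
```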
--------------------------------------------------------------------------------
/tests/common.rs:
--------------------------------------------------------------------------------
1 | use std::path::PathBuf;
2 | use std::time::Duration;
3 |
4 | use env_logger::Env;
5 |
6 | use pg_embed::pg_enums::PgAuthMethod;
7 | use pg_embed::pg_errors::PgEmbedError;
8 | use pg_embed::pg_fetch::{PgFetchSettings, PG_V17};
9 | use pg_embed::postgres::{PgEmbed, PgSettings};
10 |
11 | pub async fn setup(
12 |     port: u16,
13 |     database_dir: PathBuf,
14 |     persistent: bool,
15 |     migration_dir: Option<PathBuf>,
16 | ) -> Result<PgEmbed, PgEmbedError> {
17 |     let _ = env_logger::Builder::from_env(Env::default().default_filter_or("info"))
18 |         .is_test(true)
19 |         .try_init();
20 |     let pg_settings = PgSettings {
21 |         database_dir,
22 |         port,
23 |         user: "postgres".to_string(),
24 |         password: "password".to_string(),
25 |         auth_method: PgAuthMethod::MD5,
26 |         persistent,
27 |         timeout: Some(Duration::from_secs(10)),
28 |         migration_dir,
29 |     };
30 |     let fetch_settings = PgFetchSettings {
31 |         version: PG_V17,
32 |         ..Default::default()
33 |     };
34 |     let mut pg = PgEmbed::new(pg_settings, fetch_settings).await?;
35 |     pg.setup().await?;
36 |     Ok(pg)
37 | }
38 |
--------------------------------------------------------------------------------
/tests/migration_tokio.rs:
--------------------------------------------------------------------------------
1 | use std::path::PathBuf;
2 |
3 | use serial_test::serial;
4 |
5 | use pg_embed::pg_errors::{PgEmbedError, PgEmbedErrorType};
6 | use sqlx::{Connection, PgConnection};
7 |
8 | #[path = "common.rs"]
9 | mod common;
10 |
11 | #[tokio::test]
12 | #[serial]
13 | async fn db_create_database() -> Result<(), PgEmbedError> {
14 |     let mut pg = common::setup(5432, PathBuf::from("data_test/db"), false, None).await?;
15 |     pg.start_db().await?;
16 |     let db_name = "test";
17 |
18 |     pg.create_database(&db_name).await?;
19 |     assert!(pg.database_exists(&db_name).await?);
20 |     Ok(())
21 | }
22 |
23 | #[tokio::test]
24 | #[serial]
25 | async fn db_drop_database() -> Result<(), PgEmbedError> {
26 |     let mut pg = common::setup(5432, PathBuf::from("data_test/db"), false, None).await?;
27 |     pg.start_db().await?;
28 |     let db_name = "test";
29 |
30 |     pg.create_database(&db_name).await?;
31 |     assert_eq!(true, pg.database_exists(&db_name).await?);
32 |
33 |     pg.drop_database(&db_name).await?;
34 |     assert_eq!(false, pg.database_exists(&db_name).await?);
35 |     Ok(())
36 | }
37 |
38 | #[tokio::test]
39 | #[serial]
40 | async fn db_migration() -> Result<(), PgEmbedError> {
41 |     let mut pg = common::setup(
42 |         5432,
43 |         PathBuf::from("data_test/db"),
44 |         false,
45 |         Some(PathBuf::from("migration_test")),
46 |     )
47 |     .await?;
48 |     pg.start_db().await?;
49 |     let db_name = "test";
50 |     pg.create_database(&db_name).await?;
51 |
52 |     pg.migrate(&db_name).await?;
53 |
54 |     let db_uri = pg.full_db_uri(&db_name);
55 |
56 |     let mut conn = PgConnection::connect(&db_uri)
57 |         .await
58 |         .map_err(|_| PgEmbedError {
59 |             error_type: PgEmbedErrorType::SqlQueryError,
60 |             source: None,
61 |             message: None,
62 |         })?;
63 |
64 |     let _ = sqlx::query("INSERT INTO testing (description) VALUES ('Hello')")
65 |         .execute(&mut conn)
66 |         .await
67 |         .map_err(|_| PgEmbedError {
68 |             error_type: PgEmbedErrorType::SqlQueryError,
69 |             source: None,
70 |             message: None,
71 |         })?;
72 |
73 |     let rows = sqlx::query("SELECT * FROM testing")
74 |         .fetch_all(&mut conn)
75 |         .await
76 |         .map_err(|_| PgEmbedError {
77 |             error_type: PgEmbedErrorType::SqlQueryError,
78 |             source: None,
79 |             message: None,
80 |         })?;
81 |
82 |     assert_eq!(1, rows.len());
83 |
84 |     Ok(())
85 | }
86 |
--------------------------------------------------------------------------------
/tests/postgres_tokio.rs:
--------------------------------------------------------------------------------
1 | use std::path::PathBuf;
2 |
3 | use futures::stream::StreamExt;
4 | use serial_test::serial;
5 | use tokio::sync::Mutex;
6 |
7 | use env_logger::Env;
8 | use pg_embed::pg_access::PgAccess;
9 | use pg_embed::pg_enums::{PgAuthMethod, PgServerStatus};
10 | use pg_embed::pg_errors::PgEmbedError;
11 | use pg_embed::pg_fetch::{PgFetchSettings, PG_V17};
12 | use pg_embed::postgres::{PgEmbed, PgSettings};
13 | use std::time::Duration;
14 |
15 | #[path = "common.rs"]
16 | mod common;
17 |
18 | #[tokio::test]
19 | #[serial]
20 | async fn postgres_server_start_stop() -> Result<(), PgEmbedError> {
21 |     let mut pg = common::setup(5432, PathBuf::from("data_test/db"), false, None).await?;
22 |     {
23 |         let server_status = *pg.server_status.lock().await;
24 |         assert_eq!(server_status, PgServerStatus::Initialized);
25 |     }
26 |
27 |     pg.start_db().await?;
28 |     {
29 |         let server_status = *pg.server_status.lock().await;
30 |         assert_eq!(server_status, PgServerStatus::Started);
31 |     }
32 |
33 |     pg.stop_db().await?;
34 |     {
35 |         let server_status = *pg.server_status.lock().await;
36 |         assert_eq!(server_status, PgServerStatus::Stopped);
37 |     }
38 |
39 |     Ok(())
40 | }
41 |
42 | #[tokio::test]
43 | #[serial]
44 | async fn postgres_server_drop() -> Result<(), PgEmbedError> {
45 |     let db_path = PathBuf::from("data_test/db");
46 |     {
47 |         let mut pg = common::setup(5432, db_path.clone(), false, None).await?;
48 |         pg.start_db().await?;
49 |         let file_exists = PgAccess::pg_version_file_exists(&db_path).await?;
50 |         assert_eq!(true, file_exists);
51 |     }
52 |     let file_exists = PgAccess::pg_version_file_exists(&db_path).await?;
53 |     assert_eq!(false, file_exists);
54 |     Ok(())
55 | }
56 |
57 | #[tokio::test]
58 | #[serial]
59 | async fn postgres_server_multiple_concurrent() -> Result<(), PgEmbedError> {
60 |     PgAccess::purge().await?;
61 |
62 |     let tasks = vec![
63 |         common::setup(5432, PathBuf::from("data_test/db1"), false, None),
64 |         common::setup(5434, PathBuf::from("data_test/db3"), false, None),
65 |     ];
66 |
67 |     let wrap_with_mutex =
68 |         |val: Result<PgEmbed, PgEmbedError>| val.map(|pg| Mutex::new(pg)).unwrap();
69 |
70 |     let pgs: Vec<Mutex<PgEmbed>> = futures::future::join_all(tasks)
71 |         .await
72 |         .into_iter()
73 |         .map(wrap_with_mutex)
74 |         .collect();
75 |
76 |     futures::stream::iter(&pgs)
77 |         .for_each_concurrent(None, |pg| async move {
78 |             let mut pg = pg.lock().await;
79 |             let _ = pg.start_db().await;
80 |             {
81 |                 let server_status = *pg.server_status.lock().await;
82 |                 assert_eq!(server_status, PgServerStatus::Started);
83 |             }
84 |         })
85 |         .await;
86 |
87 |     futures::stream::iter(&pgs)
88 |         .for_each_concurrent(None, |pg| async move {
89 |             let mut pg = pg.lock().await;
90 |             let _ = pg.stop_db().await;
91 |             {
92 |                 let server_status = *pg.server_status.lock().await;
93 |                 assert_eq!(server_status, PgServerStatus::Stopped);
94 |             }
95 |         })
96 |         .await;
97 |
98 |     Ok(())
99 | }
100 |
101 | #[tokio::test]
102 | #[serial]
103 | async fn postgres_server_persistent_true() -> Result<(), PgEmbedError> {
104 |     let db_path = PathBuf::from("data_test/db");
105 |     let mut database_dir = PathBuf::new();
106 |     let mut pw_file_path = PathBuf::new();
107 |     {
108 |         let pg = common::setup(5432, db_path.clone(), true, None).await?;
109 |         database_dir.clone_from(&pg.pg_access.database_dir);
110 |         pw_file_path.clone_from(&pg.pg_access.pw_file_path);
111 |         let file_exists = PgAccess::pg_version_file_exists(&db_path).await?;
112 |         assert_eq!(true, file_exists);
113 |     }
114 |     let file_exists = PgAccess::pg_version_file_exists(&db_path).await?;
115 |     assert_eq!(true, file_exists);
116 |
117 |     PgAccess::clean_up(database_dir, pw_file_path).await?;
118 |
119 |     let file_exists = PgAccess::pg_version_file_exists(&db_path).await?;
120 |     assert_eq!(false, file_exists);
121 |
122 |     Ok(())
123 | }
124 |
125 | #[tokio::test]
126 | #[serial]
127 | async fn postgres_server_persistent_false() -> Result<(), PgEmbedError> {
128 |     let db_path = PathBuf::from("data_test/db");
129 |     {
130 |         let _pg = common::setup(5432, db_path.clone(), false, None).await?;
131 |         let file_exists = PgAccess::pg_version_file_exists(&db_path).await?;
132 |         assert_eq!(true, file_exists);
133 |     }
134 |     let file_exists = PgAccess::pg_version_file_exists(&db_path).await?;
135 |     assert_eq!(false, file_exists);
136 |
137 |     Ok(())
138 | }
139 |
140 | #[tokio::test]
141 | #[serial]
142 | async fn postgres_server_timeout() -> Result<(), PgEmbedError> {
143 |     let database_dir = PathBuf::from("data_test/db");
144 |     let _ = env_logger::Builder::from_env(Env::default().default_filter_or("info"))
145 |         .is_test(true)
146 |         .try_init();
147 |     let pg_settings = PgSettings {
148 |         database_dir,
149 |         port: 5432,
150 |         user: "postgres".to_string(),
151 |         password: "password".to_string(),
152 |         auth_method: PgAuthMethod::MD5,
153 |         persistent: false,
154 |         timeout: Some(Duration::from_secs(10)),
155 |         migration_dir: None,
156 |     };
157 |     let fetch_settings = PgFetchSettings {
158 |         version: PG_V17,
159 |         ..Default::default()
160 |     };
161 |     let mut pg = PgEmbed::new(pg_settings, fetch_settings).await?;
162 |     let _ = pg.setup().await;
163 |     pg.pg_settings.timeout = Some(Duration::from_millis(10));
164 |     let res = pg.start_db().await.err().map(|e| e.message).flatten();
165 |     assert_eq!(Some("timed out".to_string()), res);
166 |
167 |     Ok(())
168 | }
169 |
--------------------------------------------------------------------------------