├── src ├── handlers │ ├── sqlite │ │ ├── mod.rs │ │ ├── tables_names.rs │ │ └── tables.rs │ ├── mysql │ │ ├── mod.rs │ │ ├── mysql_keywords.rs │ │ └── mysql_queries_builders.rs │ ├── mod.rs │ ├── syntax_skip_handlers.rs │ ├── html_handlers.rs │ ├── comments_headers.rs │ ├── import_handlers.rs │ ├── reports_handlers.rs │ ├── dump_handlers.rs │ ├── scan_handlers.rs │ └── diagram_handlers.rs ├── cloud │ ├── mod.rs │ ├── login.rs │ ├── push.rs │ ├── pull.rs │ └── api.rs ├── utils │ ├── mod.rs │ ├── open.rs │ ├── date.rs │ ├── file.rs │ └── generate.rs ├── cmd │ ├── mod.rs │ ├── entropy.rs │ ├── connection.rs │ ├── encrypt.rs │ └── runner.rs ├── constants │ ├── api │ │ ├── mod.rs │ │ ├── api_names.rs │ │ ├── api_token.rs │ │ └── api_endpoints.rs │ ├── mod.rs │ ├── protocols.rs │ ├── folders.rs │ ├── regexp.rs │ ├── urls.rs │ └── global.rs ├── helpers │ ├── mod.rs │ ├── converter.rs │ ├── env.rs │ ├── write_env.rs │ ├── configs.rs │ └── history.rs ├── core │ ├── mod.rs │ ├── truncate.rs │ ├── transfer.rs │ ├── dump_data.rs │ ├── import.rs │ ├── export.rs │ └── import_data.rs ├── plugins │ ├── mod.rs │ ├── diagram.rs │ ├── pastebin.rs │ ├── checksum.rs │ ├── schema.rs │ ├── scan_xss.rs │ ├── reports_pdf.rs │ ├── history_logs.rs │ └── reports_xss.rs ├── ui │ ├── mod.rs │ ├── schema_alerts.rs │ ├── reconnect_alerts.rs │ ├── scan_alerts.rs │ ├── report_xss_alerts.rs │ ├── checksum_alerts.rs │ ├── share_alerts.rs │ ├── report_alerts.rs │ ├── ui_base.rs │ ├── errors_alerts.rs │ └── success_alerts.rs ├── main.rs ├── service.rs ├── init.rs ├── dump_sync.rs ├── addons.rs ├── dumper.rs └── args_cli.rs ├── docs ├── init.md ├── skip-syntax.md ├── importdata.md ├── dumpdata.md ├── history.md ├── pull.md ├── truncate.md ├── visual.md ├── transfer.md ├── schema.md ├── install.md ├── checksum.md ├── share.md ├── import.md ├── connection.md ├── scan.md ├── settings.md ├── export.md └── writing-patterns.md ├── dumpsync.yml ├── .env-example ├── dumpsync.json ├── .github ├── FUNDING.yml └── workflows │ └── rust.yml ├── .gitignore ├── LICENSE ├── Cargo.toml ├── README.md └── patterns.txt /src/handlers/sqlite/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod tables; 2 | pub mod tables_names; -------------------------------------------------------------------------------- /src/cloud/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod api; 2 | pub mod pull; 3 | pub mod push; 4 | pub mod login; -------------------------------------------------------------------------------- /src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod open; 2 | pub mod date; 3 | pub mod file; 4 | pub mod generate; -------------------------------------------------------------------------------- /src/cmd/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod runner; 2 | pub mod entropy; 3 | pub mod encrypt; 4 | pub mod connection; -------------------------------------------------------------------------------- /src/constants/api/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod api_token; 2 | pub mod api_names; 3 | pub mod api_endpoints; 4 | -------------------------------------------------------------------------------- /src/helpers/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod env; 2 | pub mod history; 3 | pub mod configs; 4 | pub mod 
write_env; 5 | pub mod converter; -------------------------------------------------------------------------------- /src/handlers/mysql/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod mysql_keywords; 2 | pub mod mysql_export_handlers; 3 | pub mod mysql_queries_builders; -------------------------------------------------------------------------------- /src/constants/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod api; 2 | 3 | pub mod urls; 4 | pub mod global; 5 | pub mod regexp; 6 | pub mod folders; 7 | pub mod protocols; -------------------------------------------------------------------------------- /src/core/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod dump; 2 | pub mod export; 3 | pub mod import; 4 | pub mod transfer; 5 | pub mod truncate; 6 | pub mod dump_data; 7 | pub mod import_data; -------------------------------------------------------------------------------- /src/plugins/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod schema; 2 | pub mod diagram; 3 | pub mod pastebin; 4 | pub mod scan_xss; 5 | pub mod checksum; 6 | pub mod reports_xss; 7 | pub mod reports_pdf; 8 | pub mod history_logs; -------------------------------------------------------------------------------- /docs/init.md: -------------------------------------------------------------------------------- 1 | # Command init 2 | 3 | The `init` command will create a `dumpsync.yml` file in the current directory with default settings. If the file already exists, it will be overwritten. 4 | 5 | ```bash 6 | dumpsync init 7 | ``` -------------------------------------------------------------------------------- /src/constants/api/api_names.rs: -------------------------------------------------------------------------------- 1 | pub enum ApiNames { 2 | Env, 3 | } 4 | 5 | impl ApiNames { 6 | 7 | pub fn as_str(&self) -> &'static str { 8 | match self { 9 | ApiNames::Env => "DS_API_KEY", 10 | } 11 | } 12 | 13 | } -------------------------------------------------------------------------------- /dumpsync.yml: -------------------------------------------------------------------------------- 1 | exports: 2 | dump_data: true 3 | lock_tables: false 4 | compress_data: false 5 | insert_ignore_into: false 6 | drop_table_if_exists: true 7 | database_if_not_exists: true 8 | 9 | connection: 10 | max_retries: 3 11 | retry_connection_interval: 5 -------------------------------------------------------------------------------- /src/handlers/sqlite/tables_names.rs: -------------------------------------------------------------------------------- 1 | pub enum TablesNames { 2 | Backups, 3 | } 4 | 5 | impl TablesNames { 6 | 7 | pub fn as_str(&self) -> &'static str { 8 | match self { 9 | TablesNames::Backups => "backups", 10 | } 11 | } 12 | 13 | } -------------------------------------------------------------------------------- /src/handlers/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod mysql; 2 | pub mod sqlite; 3 | 4 | pub mod syntax_skip_handlers; 5 | pub mod dump_handlers; 6 | pub mod scan_handlers; 7 | pub mod html_handlers; 8 | pub mod import_handlers; 9 | pub mod reports_handlers; 10 | pub mod diagram_handlers; 11 | pub mod comments_headers; -------------------------------------------------------------------------------- /src/ui/mod.rs: 
-------------------------------------------------------------------------------- 1 | pub mod ui_base; 2 | 3 | pub mod scan_alerts; 4 | pub mod share_alerts; 5 | pub mod schema_alerts; 6 | pub mod checksum_alerts; 7 | pub mod report_xss_alerts; 8 | 9 | pub mod reconnect_alerts; 10 | pub mod report_alerts; 11 | pub mod errors_alerts; 12 | pub mod success_alerts; -------------------------------------------------------------------------------- /src/constants/protocols.rs: -------------------------------------------------------------------------------- 1 | pub enum Protocols { 2 | Http, 3 | Https, 4 | } 5 | 6 | impl Protocols { 7 | 8 | pub fn as_str(&self) -> &'static str { 9 | match self { 10 | Protocols::Http => "http://", 11 | Protocols::Https => "https://", 12 | } 13 | } 14 | 15 | } -------------------------------------------------------------------------------- /src/ui/schema_alerts.rs: -------------------------------------------------------------------------------- 1 | extern crate colored; 2 | 3 | use colored::*; 4 | 5 | pub struct SchemaAlerts; 6 | 7 | impl SchemaAlerts { 8 | 9 | pub fn success(file: &str) { 10 | println!( 11 | "Schema successfully saved at {}", 12 | file.blue() 13 | ); 14 | } 15 | 16 | } 17 | -------------------------------------------------------------------------------- /.env-example: -------------------------------------------------------------------------------- 1 | DS_DB_NAME="" 2 | DS_DB_PORT="" 3 | DS_DB_DSN="" 4 | DS_DB_USER="" 5 | DS_DB_PASSWORD="" 6 | DS_DB_HOST="" 7 | 8 | DS_DUMP_INTERVAL="" 9 | DS_DUMP_PATH="" 10 | 11 | DS_TRANSFER_HOST="" 12 | DS_TRANSFER_PORT="" 13 | DS_TRANSFER_USER="" 14 | DS_TRANSFER_PASSWORD="" 15 | DS_TRANSFER_DB_NAME="" 16 | 17 | PASTEBIN_API_KEY="" -------------------------------------------------------------------------------- /src/constants/api/api_token.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | helpers::env::Env, 3 | constants::api::api_names::ApiNames, 4 | }; 5 | 6 | pub struct APIToken; 7 | 8 | impl APIToken { 9 | 10 | pub fn value(&self) -> String { 11 | let api_token = Env.system(ApiNames::Env.as_str()); 12 | format!("Bearer {}", api_token) 13 | } 14 | 15 | } 16 | -------------------------------------------------------------------------------- /src/handlers/syntax_skip_handlers.rs: -------------------------------------------------------------------------------- 1 | pub enum SyntaxSkip { 2 | SkipLine, 3 | SkipTables, 4 | } 5 | 6 | impl SyntaxSkip { 7 | 8 | pub fn as_str(&self) -> &'static str { 9 | match self { 10 | SyntaxSkip::SkipLine => "-- skip line", 11 | SyntaxSkip::SkipTables => "-- skip tables", 12 | } 13 | } 14 | 15 | } -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | mod ui; 2 | mod cmd; 3 | mod core; 4 | mod utils; 5 | mod cloud; 6 | mod plugins; 7 | mod helpers; 8 | mod handlers; 9 | mod constants; 10 | 11 | mod init; 12 | mod addons; 13 | mod dumper; 14 | mod service; 15 | mod args_cli; 16 | mod dump_sync; 17 | 18 | use anyhow::Result; 19 | use crate::dump_sync::DumpSync; 20 | 21 | #[tokio::main] 22 | async fn main() -> Result<()> { 23 | let _ = DumpSync.init().await; 24 | Ok(()) 25 | } 26 | -------------------------------------------------------------------------------- /src/utils/open.rs: -------------------------------------------------------------------------------- 1 | use open; 2 | 3 | use 
crate::ui::errors_alerts::ErrorsAlerts; 4 | 5 | pub struct Open { 6 | item: String 7 | } 8 | 9 | impl Open { 10 | 11 | pub fn new(item: &str) -> Self { 12 | Self { 13 | item: item.to_owned() 14 | } 15 | } 16 | 17 | pub fn link(&self) { 18 | if open::that(&self.item).is_err() { 19 | ErrorsAlerts::open_link(); 20 | } 21 | } 22 | 23 | } -------------------------------------------------------------------------------- /src/constants/folders.rs: -------------------------------------------------------------------------------- 1 | extern crate chrono; 2 | 3 | use std::path::PathBuf; 4 | use once_cell::sync::Lazy; 5 | use dirs_next::config_dir; 6 | 7 | use crate::constants::global::*; 8 | 9 | pub struct Folders; 10 | 11 | impl Folders { 12 | 13 | pub const APP_FOLDER: Lazy<PathBuf> = Lazy::new(|| { 14 | let mut path = config_dir().expect("No config directory"); 15 | path.push(Global::app(GlobalNames::AppName)); 16 | path 17 | }); 18 | 19 | } 20 | -------------------------------------------------------------------------------- /src/constants/api/api_endpoints.rs: -------------------------------------------------------------------------------- 1 | use crate::constants::urls::*; 2 | 3 | pub struct APIEndpoints; 4 | 5 | impl APIEndpoints { 6 | 7 | pub fn backups(&self, endpoint: &str) -> String { 8 | let mut api_url = String::from(Urls::as_str(UrlsNames::DumpsyncApi)); 9 | api_url.push_str("backups/"); 10 | api_url.push_str(endpoint); 11 | api_url 12 | } 13 | 14 | pub fn login(&self) -> &str { 15 | Urls::as_str(UrlsNames::DumpsyncApiKey) 16 | } 17 | 18 | } 19 | -------------------------------------------------------------------------------- dumpsync.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.1.38", 3 | "description": "DumpSync is a lightweight tool designed for efficiently dumping and restoring MySQL databases.", 4 | "license": "MIT", 5 | "homepage": "https://dumpsync.com", 6 | 7 | "architecture": { 8 | "64bit": { 9 | "url": "https://github.com/YeDawa/DumpSync/releases/download/0.1.38/dumpsync-0.1.38.zip", 10 | "bin": "dumpsync.exe", 11 | "hash": "sha256:3a4e3267a62177fc29376962743e11c794ea16cb6ec9ff6c2a2f980d6b0b8f8d" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- docs/skip-syntax.md: -------------------------------------------------------------------------------- 1 | # Skip Syntax 2 | 3 | This document outlines the syntax for skipping specific tables or lines during database restore operations using pattern matching. 4 | 5 | ### Skip a specific table 6 | This command will skip the specified tables (`table1` and `table2`) during the restore operation. 7 | 8 | ``` 9 | -- skip tables "table1, table2" 10 | ``` 11 | 12 | ### Skip line matching a pattern 13 | This command will skip the line that matches the specified pattern during the restore operation. 14 | 15 | ``` 16 | -- skip line 17 | ``` 18 | -------------------------------------------------------------------------------- docs/importdata.md: -------------------------------------------------------------------------------- 1 | # DumpSync Command: Import Data 2 | 3 | To import data in JSON format, you can use the following command: 4 | 5 | ```bash 6 | dumpsync import-data -f <file_path> 7 | ``` 8 | 9 | For connecting to a server, read the [Connecting to a Server](connection.md) guide. 10 | 11 | ### Command Breakdown 12 | 13 | - **dumpsync**: This is the main command to invoke the DumpSync tool. 
14 | - **import-data**: This subcommand initiates the import process to load JSON data into the specified database. 15 | 16 | ### Options 17 | 18 | - **-f**: Indicates the path to the JSON file to import. 19 | -------------------------------------------------------------------------------- docs/dumpdata.md: -------------------------------------------------------------------------------- 1 | # DumpSync Command: Dump Data 2 | 3 | To create a dump in JSON format, you can use the following command: 4 | 5 | ```bash 6 | dumpsync dump-data 7 | ``` 8 | 9 | For connecting to a server, read the [Connecting to a Server](connection.md) guide. 10 | 11 | ### Command Breakdown 12 | 13 | - **dumpsync**: This is the main command to invoke the DumpSync tool. 14 | - **dump-data**: This subcommand initiates the dump data process to create a dump of the specified database. 15 | 16 | ### Options 17 | 18 | - **-f**: (Optional) Indicates the file path where the dump will be saved. 19 | -------------------------------------------------------------------------------- /src/handlers/html_handlers.rs: -------------------------------------------------------------------------------- 1 | pub struct HTMLHandlers; 2 | 3 | impl HTMLHandlers { 4 | 5 | pub fn html_escape(&self, input: &str) -> String { 6 | input 7 | .replace('&', "&amp;") 8 | .replace('<', "&lt;") 9 | .replace('>', "&gt;") 10 | .replace('"', "&quot;") 11 | .replace('\'', "&#39;") 12 | } 13 | 14 | pub fn escape_for_sql(&self, input: &str) -> String { 15 | input 16 | .replace('\\', "\\\\") 17 | .replace('\'', "''") 18 | .replace('\"', "\\\"") 19 | } 20 | 21 | } 22 | -------------------------------------------------------------------------------- /src/ui/reconnect_alerts.rs: -------------------------------------------------------------------------------- 1 | extern crate colored; 2 | 3 | use colored::*; 4 | 5 | use crate::utils::date::Date; 6 | 7 | pub struct ReconnectAlerts; 8 | 9 | impl ReconnectAlerts { 10 | 11 | pub fn reconnect(attempt: u64, max_retries: u64) { 12 | let current_datetime = Date::date_time(); 13 | 14 | println!( 15 | "\r{} Reconnection attempt in 5 seconds... 
(Attempt {}/{})", 16 | current_datetime.green().bold(), 17 | attempt.to_string().blue(), 18 | max_retries.to_string().yellow() 19 | ); 20 | } 21 | 22 | } 23 | -------------------------------------------------------------------------------- /src/utils/date.rs: -------------------------------------------------------------------------------- 1 | extern crate chrono; 2 | 3 | use chrono::{ 4 | Utc, 5 | Local, 6 | }; 7 | 8 | pub struct Date; 9 | 10 | impl Date { 11 | 12 | pub fn date_time() -> String { 13 | let local_time = Local::now(); 14 | 15 | let date_formatted = local_time.format("%Y-%m-%d").to_string(); 16 | let hour_formatted = local_time.format("%H:%M:%S").to_string(); 17 | 18 | format!("{} {}", date_formatted, hour_formatted) 19 | } 20 | 21 | pub fn timestamp() -> String { 22 | Utc::now().format("%Y-%m-%d %H:%M:%S").to_string() 23 | } 24 | 25 | } 26 | -------------------------------------------------------------------------------- /src/ui/scan_alerts.rs: -------------------------------------------------------------------------------- 1 | extern crate colored; 2 | 3 | use colored::*; 4 | 5 | pub struct ScanAlerts; 6 | 7 | impl ScanAlerts { 8 | 9 | pub fn detected(table: &str, row_index: usize, column: &str, value: &str) { 10 | println!( 11 | "Possible XSS detected in table '{}', row {}, column '{}': {}", 12 | table.blue(), row_index.to_string().green(), column.cyan(), value.yellow() 13 | ); 14 | } 15 | 16 | pub fn not_detected(table: &str) { 17 | println!( 18 | "No XSS detected in table '{}'", 19 | table.blue() 20 | ); 21 | } 22 | 23 | } 24 | -------------------------------------------------------------------------------- /src/helpers/converter.rs: -------------------------------------------------------------------------------- 1 | use std:: { 2 | error::Error, 3 | fs::read_to_string, 4 | }; 5 | 6 | use serde_json::{ 7 | Value, 8 | to_string_pretty, 9 | }; 10 | 11 | use serde_yaml::from_str; 12 | 13 | pub struct Converter { 14 | yaml: String, 15 | } 16 | 17 | impl Converter { 18 | 19 | pub fn new(yaml: String) -> Self { 20 | Self { 21 | yaml 22 | } 23 | } 24 | 25 | pub fn yaml_to_json(&self) -> Result<String, Box<dyn Error>> { 26 | let yaml_content = read_to_string(&self.yaml)?; 27 | let data: Value = from_str(&yaml_content)?; 28 | Ok(to_string_pretty(&data)?) 
29 | } 30 | 31 | } 32 | -------------------------------------------------------------------------------- /src/ui/report_xss_alerts.rs: -------------------------------------------------------------------------------- 1 | extern crate colored; 2 | 3 | use colored::*; 4 | 5 | use crate::constants::global::Global; 6 | 7 | pub struct ReportXSSAlerts; 8 | 9 | impl ReportXSSAlerts { 10 | 11 | pub fn generated(output_path: &str) { 12 | println!("Report generated and saved in: {}", output_path.green()); 13 | } 14 | 15 | pub fn invalid_format() { 16 | let formats = Global::formats_supported().join(", "); 17 | let message = format!( 18 | "Invalid file format, only {} are supported.", 19 | formats.to_uppercase() 20 | ); 21 | 22 | println!( 23 | "{}", message.red().bold(), 24 | ); 25 | } 26 | 27 | } 28 | -------------------------------------------------------------------------------- /src/constants/regexp.rs: -------------------------------------------------------------------------------- 1 | pub struct RegExp; 2 | 3 | impl RegExp { 4 | 5 | pub const USE_CASE: &'static str = r"(?i)(USE\s+`?)(\w+)(`?)"; 6 | pub const COLS_IN_CONSTRAINT_RE: &'static str = r"\(([^)]+)\)"; 7 | pub const CREATE_TABLE: &'static str = r"(?i)CREATE TABLE\s+`?(\w+)`?"; 8 | pub const CREATE_TABLE_ERD: &'static str = r"(?i)CREATE TABLE\s+`?(\w+)`?\s*\("; 9 | pub const CREATE_TABLE_COLUMNS: &'static str = r"(?i)^`?(\w+)`?\s+([^\s]+)(.*)$"; 10 | pub const CREATE_TABLE_INSERTS: &'static str = r"(?i)\b(?:CREATE\s+TABLE|INSERT\s+INTO)\s+`?(\w+)`?"; 11 | pub const CREATE_DATABASE_CASES: &'static str = r"(?i)CREATE DATABASE\s+(`?)(\w+)(`?)\s*(IF NOT EXISTS)?;"; 12 | 13 | } 14 | -------------------------------------------------------------------------------- docs/history.md: -------------------------------------------------------------------------------- 1 | # DumpSync Command: History 2 | 3 | The dumpsync history command is used to view the history of actions performed by the DumpSync tool. 4 | 5 | ```bash 6 | dumpsync history backups <filter_criteria> 7 | ``` 8 | 9 | ### Command Breakdown 10 | 11 | - **dumpsync**: This is the main command to invoke the DumpSync tool. 12 | - **history**: This subcommand is used to view the history of actions performed by the DumpSync tool. 13 | - **backups**: This specifies that we want to view the history of backup actions. 14 | - **filter_criteria**: This is an optional parameter to filter the history results. 15 | 16 | ### Example 17 | 18 | View the history of backup actions: 19 | 20 | ```bash 21 | dumpsync history backups 22 | ``` -------------------------------------------------------------------------------- docs/pull.md: -------------------------------------------------------------------------------- 1 | # DumpSync Command: Pull 2 | 3 | The `pull` command allows you to download SQL files directly from a URL and import them into your database in one operation, without saving the file locally. This is particularly useful for quickly importing database dumps hosted on the internet or company servers. 4 | 5 | ## Usage 6 | 7 | ```bash 8 | dumpsync pull <url> 9 | ``` 10 | 11 | ### Parameters 12 | 13 | - `<url>`: The URL of the SQL file to download and import 14 | 15 | ## Examples 16 | 17 | ### Basic Usage 18 | 19 | ```bash 20 | dumpsync pull https://example.com/database_dump.sql 21 | ``` 22 | 23 | This command will download the SQL file from the specified URL and directly import it into your configured database. 
24 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: Kremilly 4 | patreon: Kremilly 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 13 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] -------------------------------------------------------------------------------- /src/ui/checksum_alerts.rs: -------------------------------------------------------------------------------- 1 | extern crate colored; 2 | 3 | use colored::*; 4 | 5 | use crate::utils::date::Date; 6 | 7 | pub struct ChecksumAlerts; 8 | 9 | impl ChecksumAlerts { 10 | 11 | pub fn file(file: &str) { 12 | println!("{}\n", file.blue()); 13 | } 14 | 15 | pub fn checksum(file: &str) { 16 | let current_datetime = Date::date_time(); 17 | 18 | println!( 19 | "\r{} The checksum was successfully generated and saved in: {}", 20 | current_datetime.green().bold(), 21 | file.blue() 22 | ); 23 | } 24 | 25 | pub fn printable(algo: &str, hash: &str) { 26 | println!( 27 | "{}: {}", 28 | algo.cyan(), hash.yellow() 29 | ); 30 | } 31 | 32 | } 33 | -------------------------------------------------------------------------------- /src/handlers/sqlite/tables.rs: -------------------------------------------------------------------------------- 1 | use crate::handlers::sqlite::tables_names::TablesNames; 2 | 3 | pub enum Table { 4 | FileName, 5 | } 6 | 7 | pub struct Tables; 8 | 9 | impl Tables { 10 | 11 | pub fn as_str(&self, table: Table) -> &'static str { 12 | match table { 13 | Table::FileName => "history.db", 14 | } 15 | } 16 | 17 | pub fn history(&self) -> String { 18 | format!("CREATE TABLE IF NOT EXISTS {} ( 19 | id INTEGER PRIMARY KEY AUTOINCREMENT, 20 | slug TEXT NOT NULL, 21 | db TEXT NOT NULL, 22 | host TEXT NOT NULL, 23 | filename TEXT NOT NULL, 24 | compress BOOLEAN NOT NULL, 25 | encrypt BOOLEAN NOT NULL, 26 | size INTEGER NOT NULL, 27 | created_at TEXT NOT NULL 28 | )", TablesNames::Backups.as_str()) 29 | } 30 | 31 | } -------------------------------------------------------------------------------- /src/ui/share_alerts.rs: -------------------------------------------------------------------------------- 1 | extern crate colored; 2 | 3 | use colored::*; 4 | 5 | use crate::constants::urls::*; 6 | 7 | pub struct ShareAlerts; 8 | 9 | impl ShareAlerts { 10 | 11 | pub fn success(link: &str) { 12 | println!( 13 | "Success! Link: {}", 14 | link.blue() 15 | ); 16 | } 17 | 18 | pub fn error(message: &str) { 19 | println!( 20 | "An error occurred: {}", message.red().bold(), 21 | ); 22 | } 23 | 24 | pub fn api_key_missing() { 25 | let api_link = Urls::as_str(UrlsNames::PastebinApiUri); 26 | let message = "Please provide a valid API key. 
Click this link to get one"; 27 | 28 | Self::error("API key is missing or empty"); 29 | 30 | println!( 31 | "{}: {}", 32 | message.yellow(), 33 | api_link.blue() 34 | ); 35 | } 36 | 37 | } 38 | -------------------------------------------------------------------------------- /src/cmd/entropy.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs::File, 3 | collections::HashMap, 4 | 5 | io::{ 6 | Read, 7 | Result, 8 | }, 9 | }; 10 | 11 | pub struct Entropy<'a> { 12 | file_path: &'a str, 13 | } 14 | 15 | impl<'a> Entropy<'a> { 16 | 17 | pub fn new(file_path: &'a str) -> Self { 18 | Self { 19 | file_path 20 | } 21 | } 22 | 23 | pub fn calculate(&self) -> Result<f64> { 24 | let mut file = File::open(self.file_path)?; 25 | let mut buffer = Vec::new(); 26 | file.read_to_end(&mut buffer)?; 27 | 28 | let mut freq = HashMap::new(); 29 | for &byte in &buffer { 30 | *freq.entry(byte).or_insert(0) += 1; 31 | } 32 | 33 | let len = buffer.len() as f64; 34 | Ok(freq.values() 35 | .map(|&count| { 36 | let prob = count as f64 / len; 37 | -prob * prob.log2() 38 | }) 39 | .sum() 40 | ) 41 | } 42 | 43 | } 44 | -------------------------------------------------------------------------------- /src/helpers/env.rs: -------------------------------------------------------------------------------- 1 | use dotenvy::dotenv; 2 | 3 | use std::{ 4 | env, 5 | sync::Once, 6 | }; 7 | 8 | use crate::constants::folders::Folders; 9 | 10 | pub struct Env; 11 | 12 | impl Env { 13 | 14 | pub fn new() { 15 | dotenv().ok(); 16 | } 17 | 18 | pub fn get_var(var: &str) -> String { 19 | env::var(var).expect( 20 | &format!("{} is not defined in the .env", var) 21 | ) 22 | } 23 | 24 | pub fn system(&self, key: &str) -> String { 25 | let load_env: Once = Once::new(); 26 | 27 | load_env.call_once(|| { 28 | dotenvy::from_path( 29 | &Folders::APP_FOLDER.join(".env") 30 | ).ok(); 31 | }); 32 | 33 | env::var(key).expect(&format!("{} not set", key)) 34 | } 35 | 36 | pub fn get_var_u64(var: &str) -> u64 { 37 | env::var(var).expect( 38 | &format!("{} is not defined in the .env", var) 39 | ).parse().expect( 40 | &format!("{} is not a valid number", var) 41 | ) 42 | } 43 | 44 | } -------------------------------------------------------------------------------- .gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | debug/ 4 | target/ 5 | 6 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 7 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 8 | Cargo.lock 9 | 10 | # These are backup files generated by rustfmt 11 | **/*.rs.bk 12 | 13 | # MSVC Windows builds of rustc generate these, which store debugging information 14 | *.pdb 15 | 16 | # RustRover 17 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 18 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 19 | # and can be added to the global gitignore or merged into this file. For a more nuclear 20 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
21 | .idea/ 22 | 23 | # Added by cargo 24 | 25 | /target 26 | 27 | *.sql 28 | 29 | .env 30 | 31 | # Mkdocs 32 | /site 33 | 34 | # Obsolete settings file 35 | dumpsync1.yml 36 | 37 | # Reports files 38 | *.csv 39 | *.xml 40 | *.pdf 41 | *.html 42 | *.xlsx 43 | .vercel 44 | -------------------------------------------------------------------------------- LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Kremilly 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/cloud/login.rs: -------------------------------------------------------------------------------- 1 | use rpassword::prompt_password; 2 | 3 | use crate::{ 4 | utils::open::Open, 5 | helpers::write_env::WriteEnv, 6 | 7 | ui::{ 8 | ui_base::UI, 9 | success_alerts::SuccessAlerts, 10 | }, 11 | 12 | constants::api::{ 13 | api_names::ApiNames, 14 | api_endpoints::APIEndpoints, 15 | }, 16 | }; 17 | 18 | pub struct Login; 19 | 20 | impl Login { 21 | 22 | pub fn new() -> Self { 23 | Self 24 | } 25 | 26 | pub fn print(&self) { 27 | let url = APIEndpoints.login(); 28 | let message = format!("Opening URL {} to retrieve the API Key", url); 29 | 30 | UI::label(&message, "normal"); 31 | Open::new(url).link(); 32 | } 33 | 34 | pub fn save_var(&self) { 35 | let api_key = prompt_password("Enter the API key [input is hidden]: ") 36 | .expect("Error reading the API key"); 37 | 38 | let mut writer = WriteEnv::new(); 39 | writer.add(ApiNames::Env.as_str().to_owned(), api_key); 40 | 41 | writer.save().expect("Error writing the env file"); 42 | SuccessAlerts::api_key(); 43 | } 44 | 45 | } -------------------------------------------------------------------------------- .github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust CI and Publish 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | - name: Set up Rust 19 | uses: actions-rs/toolchain@v1 20 | with: 21 | toolchain: stable 22 | override: true 23 | - name: Build 24 | run: cargo build --verbose 25 | - name: Run tests 26 | run: cargo test --verbose 27 | 28 | publish: 29 | if: ${{ github.event_name == 'push' && github.event.head_commit.modified && 
contains(github.event.head_commit.modified, 'Cargo.toml') }} 30 | runs-on: ubuntu-latest 31 | 32 | steps: 33 | - uses: actions/checkout@v4 34 | - name: Set up Rust 35 | uses: actions-rs/toolchain@v1 36 | with: 37 | toolchain: stable 38 | override: true 39 | - name: Cargo Publish 40 | uses: ryohidaka/action-cargo-publish@v0.1.0 41 | with: 42 | token: ${{ secrets.CARGO_REGISTRY_TOKEN }} 43 | -------------------------------------------------------------------------------- docs/truncate.md: -------------------------------------------------------------------------------- 1 | # DumpSync Command: Truncate 2 | 3 | To truncate a table in a database, you can use the following command: 4 | 5 | ```bash 6 | dumpsync truncate --table table1 7 | ``` 8 | 9 | ### Command Breakdown 10 | 11 | - **dumpsync**: This is the main command to invoke the DumpSync tool. 12 | - **truncate**: This subcommand initiates the truncation process to remove all data from the specified table. 13 | 14 | ### Options 15 | 16 | - **-t table1**: Specifies the name of the table that you want to truncate. Replace `table1` with the actual name of the table you want to truncate. 17 | - **-p**: Optional flag to specify the path of the backup dump file. If not specified, the default path will be used. 18 | - **--encrypt**: Optional flag to encrypt the dump file. If specified, the dump file will be encrypted. 19 | 20 | ### Example 21 | 22 | To truncate a table named `table1`, you would run: 23 | 24 | ```bash 25 | dumpsync truncate --table table1 26 | ``` 27 | 28 | ### Notes 29 | 30 | - Truncating a table will remove all data from the specified table. 31 | - Before a table is truncated, a backup of the table will be generated. 32 | - Be cautious when using this command, especially if truncating tables in a production environment. 33 | -------------------------------------------------------------------------------- docs/visual.md: -------------------------------------------------------------------------------- 1 | # DumpSync Command: Visual 2 | 3 | The `visual` command in DumpSync is used to view the table structure of a database. This makes it easy to inspect columns, types, and keys at a glance. 4 | 5 | ```bash 6 | dumpsync visual --table table1 7 | ``` 8 | 9 | ### Command Breakdown 10 | 11 | - **dumpsync**: This is the main command to invoke the DumpSync tool. 12 | - **visual**: This subcommand initiates the visual process to view the table structure of a database. 13 | 14 | ### Flags 15 | 16 | - **--table table1**: Specifies the table name for which you want to view the structure. 17 | 18 | ### Example 19 | 20 | To view the table structure of a table named `users`, you would run: 21 | 22 | ```bash 23 | dumpsync visual --table users 24 | ``` 25 | 26 | ```bash 27 | TABLE: users 28 | +----------+--------------+-----+ 29 | | Column | Type | Key | 30 | +----------+--------------+-----+ 31 | | id | int | PK | 32 | | username | varchar(255) | | 33 | | email | varchar(255) | | 34 | | profile | text | | 35 | +----------+--------------+-----+ 36 | ``` 37 | 38 | ### Description 39 | 40 | - The `visual` command allows you to visualize the table structure of a database. 
41 | -------------------------------------------------------------------------------- /docs/transfer.md: -------------------------------------------------------------------------------- 1 | # DumpSync Command: Transfer 2 | 3 | To transfer a database from one server to another, you can use the following command: 4 | 5 | ```bash 6 | dumpsync transfer -f /path/to/dump.sql 7 | ``` 8 | 9 | ### Command Breakdown 10 | 11 | - **dumpsync**: This is the main command to invoke the DumpSync tool. 12 | - **transfer**: This subcommand initiates the transfer process to restore the database from the specified dump file. 13 | 14 | ### Options 15 | 16 | - **-f /path/to/dump.sql**: Specifies the path to the dump file that you want to restore. Replace `/path/to/dump.sql` with the actual file path to your SQL dump file. 17 | 18 | ### Example 19 | 20 | To transfer a database from a dump file located at `/backups/example_dump.sql`, you would run: 21 | 22 | ```bash 23 | dumpsync transfer -f /backups/example_dump.sql 24 | ``` 25 | 26 | ### Notes 27 | 28 | - Ensure that the dump file exists and that you have the necessary permissions to read it. 29 | - The transfer process will overwrite existing data in the database, so be cautious when using this command, especially if restoring to a production environment. 30 | - It’s recommended to back up current data before performing an import to avoid accidental data loss. 31 | -------------------------------------------------------------------------------- /src/handlers/comments_headers.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | io::Write, 3 | error::Error, 4 | }; 5 | 6 | use crate::utils::date::Date; 7 | 8 | pub struct CommentsHeaders; 9 | 10 | impl CommentsHeaders { 11 | 12 | pub fn core(&self, dbname: &str, writer: &mut dyn Write) -> Result<(), Box> { 13 | writeln!(writer, "-- Exporting using {} v.{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION"))?; 14 | writeln!(writer, "-- Database backup: {}", dbname)?; 15 | writeln!(writer, "-- Export date and time: {}", Date::timestamp())?; 16 | writeln!(writer, "-- ---------------------------------------------------\n")?; 17 | 18 | Ok(()) 19 | } 20 | 21 | pub fn truncate(&self, dbname: &str, table: &str, writer: &mut dyn Write) -> Result<(), Box> { 22 | writeln!(writer, "-- Exporting using {} v.{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION"))?; 23 | writeln!(writer, "-- Database: {}", dbname)?; 24 | writeln!(writer, "-- Truncate table: {}", table)?; 25 | writeln!(writer, "-- Export date and time: {}", Date::timestamp())?; 26 | writeln!(writer, "-- ---------------------------------------------------\n")?; 27 | 28 | Ok(()) 29 | } 30 | 31 | } -------------------------------------------------------------------------------- /src/ui/report_alerts.rs: -------------------------------------------------------------------------------- 1 | extern crate colored; 2 | use std::collections::HashSet; 3 | 4 | use colored::*; 5 | 6 | use crate::ui::ui_base::UI; 7 | 8 | pub struct ReportAlerts; 9 | 10 | impl ReportAlerts { 11 | 12 | pub fn report(dump_file_path: &str, dump_count: usize, last_dump: &str, size: &str, interval: usize) { 13 | UI::section_header("Final Report", "info"); 14 | 15 | println!("Directory: {}", dump_file_path.bold().blue()); 16 | println!("Interval: {} seconds", interval.to_string().bold().blue()); 17 | println!("Total number of dumps: {}", dump_count.to_string().bold().blue()); 18 | println!("Last dump: {} ({})", last_dump.bold().cyan(), 
size.bold().yellow()); 19 | } 20 | 21 | pub fn tables(tables: &HashSet<String>) { 22 | println!("\nTables dumped:"); 23 | 24 | for table in tables { 25 | println!(" - {}", table.bold().blue()); 26 | } 27 | } 28 | 29 | pub fn no_tables() { 30 | let message = "No tables found in the dump."; 31 | println!("{}", message.bold().red()); 32 | } 33 | 34 | pub fn success_pdf(file: &str) { 35 | let message = "Report generated successfully. Saved as ".to_string(); 36 | println!("\n{}{}", message.bold().green(), file.bold().cyan()); 37 | } 38 | 39 | } 40 | -------------------------------------------------------------------------------- docs/schema.md: -------------------------------------------------------------------------------- 1 | # DumpSync Command: Schema 2 | 3 | The dumpsync schema command is used to generate a schema file for the database. The schema file contains the structure of the database, including tables, columns, and their data types. 4 | 5 | ```bash 6 | dumpsync schema -f <file_path> 7 | ``` 8 | 9 | ### Command Breakdown 10 | 11 | - **dumpsync**: This is the main command to invoke the DumpSync tool. 12 | - **schema**: This subcommand is used to generate a schema file for the database. 13 | 14 | ### Parameters 15 | 16 | - **-f <file_path>**: Specifies the path to save the schema file. 17 | 18 | ### Example 19 | 20 | Generate a schema file named `schema.json`: 21 | 22 | ```bash 23 | dumpsync schema -f schema.json 24 | ``` 25 | 26 | ### Schema File 27 | 28 | The schema file generated by the dumpsync schema command contains the following information: 29 | 30 | - **Tables**: List of tables in the database. 31 | - **Columns**: List of columns in each table along with their data types. 32 | - **Primary Keys**: Primary key columns for each table. 33 | - **Foreign Keys**: Foreign key columns for each table along with their references. 34 | 35 | ### Schema Format 36 | 37 | The schema file can be generated only in JSON format. The JSON format provides a structured representation of the database schema, making it easier to read and understand. -------------------------------------------------------------------------------- docs/install.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | ## Installing Rust on Different Operating Systems 4 | 5 | #### 1. Linux and macOS 6 | 7 | To install Rust on **Linux** and **macOS**, use the `rustup` script: 8 | 9 | 1. Open your terminal. 10 | 2. Run the following command: 11 | 12 | ```bash 13 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh 14 | ``` 15 | 16 | 3. Follow the on-screen instructions to complete the installation. 17 | 4. After the installation, restart the terminal and verify it by running: 18 | 19 | ```bash 20 | rustc --version 21 | ``` 22 | 23 | --- 24 | 25 | #### 2. Windows 26 | 27 | To install Rust on **Windows**, follow these steps: 28 | 29 | 1. Download the `rustup-init.exe` installer from the official website: [https://rust-lang.org/tools/install](https://rust-lang.org/tools/install). 30 | 2. Run the installer and follow the setup instructions. 31 | 3. After installation, open **Command Prompt** or **PowerShell** and verify Rust with: 32 | 33 | ```powershell 34 | rustc --version 35 | ``` 36 | 37 | **Note:** Rust will be added to your PATH automatically during installation. 
38 | 39 | --- 40 | 41 | ## Installing DumpSync in your Operating System 42 | 43 | Once Rust is installed, use this command to install **DumpSync**: 44 | 45 | ```bash 46 | cargo install dumpsync 47 | ``` 48 | 49 | This will install **DumpSync** globally, allowing you to run it from any directory. -------------------------------------------------------------------------------- /src/constants/urls.rs: -------------------------------------------------------------------------------- 1 | pub enum UrlsNames { 2 | AppConfigs, 3 | XssDetectRegex, 4 | PastebinApiUri, 5 | CdnBootstrap, 6 | DumpsyncApi, 7 | DumpsyncApiKey, 8 | } 9 | 10 | pub struct Urls; 11 | 12 | impl Urls { 13 | 14 | const APP_CONFIGS: &'static str = "https://raw.githubusercontent.com/YeDawa/DumpSync/refs/heads/main/dumpsync.yml"; 15 | const XSS_DETECT_REGEX: &'static str = "https://raw.githubusercontent.com/YeDawa/DumpSync/refs/heads/main/patterns.txt"; 16 | 17 | const PASTEBIN_API_URI: &'static str = "https://pastebin.com/api/api_post.php"; 18 | const DUMPSYNC_API: &'static str = "https://service.dumpsync.com/"; 19 | const DUMPSYNC_API_KEY: &'static str = "https://dumpsync.com/dashboard/settings/api-key"; 20 | 21 | const CDN_BOOTSTRAP: &'static str = "https://cdn.jsdelivr.net/npm/bootstrap@5.3.3/dist/css/bootstrap.min.css"; 22 | 23 | pub fn as_str(urls_name: UrlsNames) -> &'static str { 24 | match urls_name { 25 | UrlsNames::AppConfigs => Urls::APP_CONFIGS, 26 | UrlsNames::XssDetectRegex => Urls::XSS_DETECT_REGEX, 27 | UrlsNames::PastebinApiUri => Urls::PASTEBIN_API_URI, 28 | UrlsNames::DumpsyncApi => Urls::DUMPSYNC_API, 29 | UrlsNames::DumpsyncApiKey => Urls::DUMPSYNC_API_KEY, 30 | UrlsNames::CdnBootstrap => Urls::CDN_BOOTSTRAP, 31 | } 32 | } 33 | 34 | } 35 | -------------------------------------------------------------------------------- /src/cmd/connection.rs: -------------------------------------------------------------------------------- 1 | use mysql::{ 2 | Pool, 3 | Opts, 4 | OptsBuilder 5 | }; 6 | 7 | use std::error::Error; 8 | 9 | use crate::ui::errors_alerts::ErrorsAlerts; 10 | 11 | pub struct Connection { 12 | pub host: String, 13 | pub port: u16, 14 | pub user: String, 15 | pub password: String, 16 | pub dbname: Option<String>, 17 | } 18 | 19 | impl Connection { 20 | 21 | pub fn create_mysql_pool(&self) -> Result<Pool, Box<dyn Error>> { 22 | let mut opts_builder = OptsBuilder::new() 23 | .ip_or_hostname(Some(&self.host)) 24 | .tcp_port(self.port) 25 | .user(Some(&self.user)); 26 | 27 | if !self.password.is_empty() { 28 | opts_builder = opts_builder.pass( 29 | Some(&self.password) 30 | ); 31 | } 32 | 33 | if let Some(ref dbname) = self.dbname { 34 | if !dbname.is_empty() { 35 | opts_builder = opts_builder.db_name(Some(dbname)); 36 | } else { 37 | opts_builder = opts_builder.db_name::<String>(None); 38 | } 39 | } else { 40 | opts_builder = opts_builder.db_name::<String>(None); 41 | } 42 | 43 | Pool::new(Opts::from(opts_builder)).map_err(|e| { 44 | ErrorsAlerts::dump(&e.to_string()); 45 | Box::new(e) as Box<dyn Error> 46 | }) 47 | } 48 | 49 | } 50 | -------------------------------------------------------------------------------- /src/cloud/push.rs: -------------------------------------------------------------------------------- 1 | extern crate reqwest; 2 | 3 | use std::error::Error; 4 | 5 | use crate::{ 6 | cloud::api::API, 7 | utils::open::Open, 8 | cmd::entropy::Entropy, 9 | 10 | ui::{ 11 | errors_alerts::ErrorsAlerts, 12 | success_alerts::SuccessAlerts, 13 | } 14 | }; 15 | 16 | pub struct Push { 17 | path: String, 18 | dbname: String, 19 | interval: u64, 20 | } 21 | 22 | 
impl Push { 23 | 24 | pub fn new( 25 | path: &str, 26 | dbname: &str, 27 | interval: u64, 28 | ) -> Self { 29 | Self { 30 | path: path.to_string(), 31 | dbname: dbname.to_string(), 32 | interval, 33 | } 34 | } 35 | 36 | pub async fn push(&self) -> Result<(), Box<dyn Error>> { 37 | let encrypted = if Entropy::new(&self.path).calculate()? > 7.5 { 38 | true 39 | } else { 40 | false 41 | }; 42 | 43 | match API::new( 44 | Some(&self.path), 45 | None, 46 | Some(&self.dbname), 47 | Some(encrypted), 48 | Some(self.interval), 49 | ).post().await { 50 | Ok(data) => { 51 | SuccessAlerts::push(&data.message); 52 | Open::new(&data.url).link(); 53 | } 54 | 55 | Err(_) => { 56 | ErrorsAlerts::push(); 57 | } 58 | } 59 | 60 | Ok(()) 61 | } 62 | 63 | } 64 | -------------------------------------------------------------------------------- /src/utils/file.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs, 3 | io::Error, 4 | path::Path 5 | }; 6 | 7 | pub struct FileUtils; 8 | 9 | impl FileUtils { 10 | 11 | pub fn create_path(path: &str) { 12 | if let Some(parent_dir) = Path::new(path).parent() { 13 | let _ = fs::create_dir_all(parent_dir); 14 | } 15 | } 16 | 17 | pub fn extension(file_path: &str) -> String { 18 | let extension = Path::new(file_path) 19 | .extension() 20 | .unwrap_or_default() 21 | .to_str() 22 | .unwrap_or_default(); 23 | 24 | extension.to_lowercase() 25 | } 26 | 27 | pub fn content(file_path: &str) -> String { 28 | fs::read_to_string(file_path) 29 | .unwrap_or_default() 30 | } 31 | 32 | pub fn exists(file_path: &str) -> bool { 33 | fs::metadata(file_path).is_ok() 34 | } 35 | 36 | pub fn size(size: u64) -> String { 37 | let sizes = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]; 38 | let mut size_f = size as f64; 39 | let mut index = 0; 40 | 41 | while size_f >= 1024.0 && index < sizes.len() - 1 { 42 | size_f /= 1024.0; 43 | index += 1; 44 | } 45 | 46 | format!("{:.2} {}", size_f, sizes[index]) 47 | } 48 | 49 | pub fn file_size(file_path: &str) -> Result<u64, Error> { 50 | let metadata = fs::metadata(file_path)?; 51 | Ok(metadata.len()) 52 | } 53 | 54 | } 55 | -------------------------------------------------------------------------------- /src/handlers/import_handlers.rs: -------------------------------------------------------------------------------- 1 | use regex::Regex; 2 | 3 | use crate::{ 4 | constants::regexp::RegExp, 5 | handlers::mysql::mysql_queries_builders::MySqlQueriesBuilders, 6 | }; 7 | 8 | pub struct ImportHandlers { 9 | dbname: String, 10 | dump_content: String, 11 | } 12 | 13 | impl ImportHandlers { 14 | 15 | pub fn new(dbname: &str, dump_content: &str) -> Self { 16 | Self { 17 | dbname: dbname.to_string(), 18 | dump_content: dump_content.to_string(), 19 | } 20 | } 21 | 22 | pub fn check_db_name(&self) -> String { 23 | let use_db_regex = Regex::new(RegExp::USE_CASE).unwrap(); 24 | let db_regex = Regex::new(RegExp::CREATE_DATABASE_CASES).unwrap(); 25 | 26 | let content = db_regex.replace_all(&self.dump_content, |caps: &regex::Captures| { 27 | let db_name = if &caps[2] != self.dbname { 28 | self.dbname.clone() 29 | } else { 30 | caps[2].to_string() 31 | }; 32 | 33 | MySqlQueriesBuilders.create_database_not_exists(&db_name) 34 | }); 35 | 36 | let dump_content = use_db_regex.replace_all(&content, |caps: &regex::Captures| { 37 | if &caps[2] != &self.dbname { 38 | format!("{}{}{};", &caps[1], &self.dbname, &caps[3]) 39 | } else { 40 | caps[0].to_string() 41 | } 42 | }); 43 | 44 | dump_content.to_string() 45 | } 46 | 47 | }
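A minimal usage sketch for `ImportHandlers::check_db_name` (hypothetical: the inline dump string and the target name `new_db` are illustrative, not taken from the repository):

```rust
use crate::handlers::import_handlers::ImportHandlers;

fn rewrite_dump_target() {
    // A dump written against a different database name.
    let dump = "CREATE DATABASE `old_db` IF NOT EXISTS;\nUSE `old_db`;";

    // check_db_name rewrites the CREATE DATABASE / USE statements so the
    // dump is imported into `new_db` instead of the name stored in the file.
    let rewritten = ImportHandlers::new("new_db", dump).check_db_name();
    println!("{}", rewritten);
}
```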
-------------------------------------------------------------------------------- docs/checksum.md: -------------------------------------------------------------------------------- 1 | # DumpSync Command: Checksum 2 | 3 | The `checksum` command in DumpSync is used to verify the integrity of dump files by calculating and comparing checksums. This feature ensures that the dump files have not been tampered with or corrupted during transfer or storage. 4 | 5 | ```bash 6 | dumpsync checksum -f <file_path> -o <output_path> 7 | ``` 8 | 9 | ### Command Breakdown 10 | 11 | - **dumpsync**: This is the main command to invoke the DumpSync tool. 12 | - **checksum**: This subcommand initiates the checksum verification process to ensure the integrity of the dump file. 13 | 14 | ### Parameters 15 | 16 | - **-f <file_path>**: Specifies the path to the dump file for which the checksum needs to be calculated and verified. 17 | - **-o <output_path>** (Optional): Specifies the path to save the checksum output. 18 | 19 | ### Example 20 | 21 | Calculate and verify the checksum of a dump file located at `path/to/dump.sql`: 22 | 23 | ```bash 24 | dumpsync checksum -f path/to/dump.sql -o path/to/output.txt 25 | ``` 26 | 27 | ### Description 28 | 29 | - The `checksum` command calculates the checksum of the specified dump file and compares it with the original checksum stored in the file. 30 | - If the checksums match, the dump file is considered intact and has not been altered. 31 | - This process provides an additional layer of security and ensures the reliability of dump files for data restoration. 32 | - The output file will contain the calculated checksum and the comparison result. -------------------------------------------------------------------------------- Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dumpsync" 3 | version = "0.1.38" 4 | edition = "2021" 5 | license = "MIT" 6 | authors = ["kremilly"] 7 | categories = ["command-line-utilities", "database"] 8 | keywords = ["mysql", "dump", "restore", "backup", "database"] 9 | description = "DumpSync is a lightweight tool designed for efficiently dumping and restoring MySQL databases. Whether you need to create backups, restore databases, or transfer data between servers." 
10 | repository = "https://github.com/YeDawa/DumpSync" 11 | documentation = "https://docs.dumpsync.com/" 12 | homepage = "https://dumpsync.com" 13 | 14 | [dependencies] 15 | aes-gcm = "0.10.3" 16 | anyhow = "1.0.100" 17 | bytes = "1.10.1" 18 | chrono = "0.4.42" 19 | clap = { version = "4.5.48", features = ["cargo", "derive"] } 20 | clap-cargo = "0.18.1" 21 | colored = "3.0.0" 22 | crc32fast = "1.4.2" 23 | csv = "1.4.0" 24 | ctrlc = "3.5.0" 25 | dirs-next = "2.0.0" 26 | dotenvy = "0.15.7" 27 | figlet-rs = "0.1.5" 28 | flate2 = "1.1.4" 29 | md-5 = "0.10.6" 30 | mysql = "26.0.1" 31 | once_cell = "1.21.3" 32 | open = "5.3.2" 33 | printpdf = "0.7.0" 34 | rand = "0.9.1" 35 | rayon = "1.11.0" 36 | regex = "1.12.2" 37 | reqwest = { version = "0.12.24", features = ["blocking", "json", "multipart"] } 38 | rpassword = "7.4.0" 39 | rusqlite = { version = "0.37.0", features = ["bundled"] } 40 | serde = { version = "1.0.219", features = ["derive"] } 41 | serde_json = "1.0.140" 42 | serde_yaml = "0.9.34" 43 | sha1 = "0.10.6" 44 | sha2 = "0.10.9" 45 | tokio = { version = "1.45.0", features = ["full"] } 46 | 47 | [profile.release] 48 | lto = true 49 | -------------------------------------------------------------------------------- docs/share.md: -------------------------------------------------------------------------------- 1 | # DumpSync Command: Share 2 | 3 | The dumpsync share command is used to share the dump or scan result file generated by the DumpSync tool on PasteBin. 4 | 5 | ```bash 6 | dumpsync share -f <file_path> --privacy <privacy_level> 7 | ``` 8 | 9 | ### Command Breakdown 10 | 11 | - **dumpsync**: This is the main command to invoke the DumpSync tool. 12 | - **share**: This subcommand is used to share a dump or scan result file. 13 | 14 | ### Parameters 15 | 16 | - **-f <file_path>**: Specifies the path of the file to share. 17 | - **--privacy <privacy_level>** (Optional|Default: 'unlisted'): Sets the privacy level of the shared file. Options are public, unlisted, or private. 18 | 19 | ### Example 20 | 21 | Share the file located at `path/to/file.txt`: 22 | 23 | ```bash 24 | dumpsync share -f path/to/file.txt --privacy public 25 | ``` 26 | 27 | ### Privacy Levels 28 | 29 | - **Public**: The shared file is visible to everyone. 30 | - **Unlisted**: The shared file is not visible in the public list but can be accessed by anyone with the link. 31 | - **Private**: The shared file is only visible to the user who created it. 32 | 33 | ### Get the API Key 34 | 35 | To share files, you need to get an API key from PasteBin. You can get the API key by creating an account on PasteBin and generating an API key from the account settings. Click [here](https://pastebin.com/doc_api) to get the API key. 
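Once generated, the key must be available at runtime; a minimal sketch, assuming the `PASTEBIN_API_KEY` variable listed in `.env-example` is the one DumpSync reads (the value below is a placeholder):

```bash
# .env — placeholder value, replace with your own PasteBin API key
PASTEBIN_API_KEY="your_api_key_here"
```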
36 | 37 | ### Shareable Files 38 | 39 | Formats supported for the share file are: 40 | 41 | - SQL 42 | - TXT 43 | - CSV 44 | - JSON 45 | - HTML 46 | -------------------------------------------------------------------------------- /src/service.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | ui::ui_base::UI, 3 | helpers::env::Env, 4 | init::DumpSyncInit, 5 | 6 | cloud::{ 7 | push::Push, 8 | pull::Pull, 9 | login::Login, 10 | }, 11 | }; 12 | 13 | pub struct DumpSyncService; 14 | 15 | impl DumpSyncService { 16 | 17 | pub fn login(&self) { 18 | Env::new(); 19 | UI::header(); 20 | UI::section_header("Login to DumpSync", "info"); 21 | 22 | let login = Login::new(); 23 | login.print(); 24 | login.save_var(); 25 | } 26 | 27 | pub async fn pull(&self, backup: &str) { 28 | Env::new(); 29 | UI::header(); 30 | 31 | let (dbname, host, user, password, port) = DumpSyncInit.load_db_config(); 32 | UI::section_header("Importing dump to server", "info"); 33 | 34 | Pull::new( 35 | &host, 36 | port, 37 | &user, 38 | &password, 39 | &dbname, 40 | &backup, 41 | ).pull().await.expect("Failed to download SQL file"); 42 | } 43 | 44 | pub async fn push(&self, path: &str) { 45 | Env::new(); 46 | UI::header(); 47 | UI::section_header("Pushing dump to server", "info"); 48 | 49 | let interval = Env::get_var_u64("DS_DUMP_INTERVAL"); 50 | let (dbname, _, _, _, _) = DumpSyncInit.load_db_config(); 51 | 52 | Push::new( 53 | &path, 54 | &dbname, 55 | interval, 56 | ).push().await.expect("Failed to upload SQL file"); 57 | } 58 | 59 | } 60 | -------------------------------------------------------------------------------- /src/init.rs: -------------------------------------------------------------------------------- 1 | use reqwest; 2 | 3 | use std::{ 4 | env, 5 | error::Error, 6 | }; 7 | 8 | use tokio::{ 9 | fs::File, 10 | io::AsyncWriteExt, 11 | }; 12 | 13 | use crate::{ 14 | ui::success_alerts::SuccessAlerts, 15 | 16 | constants::{ 17 | urls::*, 18 | global::Global, 19 | }, 20 | }; 21 | 22 | pub struct DumpSyncInit; 23 | 24 | impl DumpSyncInit { 25 | 26 | pub async fn initialize(&self) -> Result<(), Box> { 27 | let response = reqwest::get(Urls::as_str(UrlsNames::AppConfigs)).await?; 28 | let content = response.bytes().await?; 29 | 30 | let mut file = File::create(Global::app_config()).await?; 31 | file.write_all(&content).await?; 32 | 33 | SuccessAlerts::settings(); 34 | Ok(()) 35 | } 36 | 37 | pub fn load_db_config(&self) -> (String, String, String, String, u16) { 38 | let dbname = env::var("DB_NAME").or_else(|_| env::var("DS_DB_NAME")).unwrap_or_default(); 39 | let host = env::var("DB_HOST").or_else(|_| env::var("DS_DB_HOST")).unwrap_or_default(); 40 | let user = env::var("DB_USER").or_else(|_| env::var("DS_DB_USER")).unwrap_or_default(); 41 | let password = env::var("DB_PASSWORD").or_else(|_| env::var("DS_DB_PASSWORD")).unwrap_or_default(); 42 | 43 | let port = env::var("DB_PORT") 44 | .or_else(|_| env::var("DS_DB_PORT")) 45 | .unwrap_or_default() 46 | .parse::() 47 | .expect("Invalid port"); 48 | 49 | (dbname, host, user, password, port) 50 | } 51 | 52 | } 53 | -------------------------------------------------------------------------------- /src/helpers/write_env.rs: -------------------------------------------------------------------------------- 1 | extern crate open; 2 | 3 | use std::{ 4 | path::PathBuf, 5 | io::Error as IoError, 6 | 7 | fs::{ 8 | File, 9 | write, 10 | read_to_string 11 | }, 12 | }; 13 | 14 | use crate::constants::folders::Folders; 15 | 16 | pub 
struct WriteEnv { 17 | entries: Vec<(String, String)>, 18 | } 19 | 20 | impl WriteEnv { 21 | 22 | pub fn new() -> Self { 23 | Self { 24 | entries: Vec::new() 25 | } 26 | } 27 | 28 | pub fn add(&mut self, key: String, val: String) { 29 | self.entries.push((key, val)); 30 | } 31 | 32 | pub fn save(&self) -> Result<(), IoError> { 33 | let app_folder = &*Folders::APP_FOLDER; 34 | let env_path: PathBuf = app_folder.join(".env"); 35 | 36 | if !env_path.exists() { 37 | File::create(&env_path)?; 38 | } 39 | 40 | let mut contents = read_to_string(&env_path).unwrap_or_default(); 41 | let mut lines: Vec<String> = contents 42 | .lines() 43 | .map(|line| line.to_string()) 44 | .collect(); 45 | 46 | for (key, value) in &self.entries { 47 | let mut found = false; 48 | 49 | for line in &mut lines { 50 | if line.starts_with(&format!("{}=", key)) { 51 | *line = format!("{}=\"{}\"", key, value); 52 | found = true; 53 | break; 54 | } 55 | } 56 | 57 | if !found { 58 | lines.push(format!("{}=\"{}\"", key, value)); 59 | } 60 | } 61 | 62 | contents = lines.join("\n") + "\n"; 63 | write(&env_path, contents)?; 64 | Ok(()) 65 | } 66 | 67 | } 68 | -------------------------------------------------------------------------------- /src/ui/ui_base.rs: -------------------------------------------------------------------------------- 1 | extern crate colored; 2 | extern crate figlet_rs; 3 | 4 | use colored::*; 5 | use figlet_rs::FIGfont; 6 | 7 | use crate::constants::global::*; 8 | 9 | pub struct UI; 10 | 11 | impl UI { 12 | 13 | fn colorize(text: &str, level: &str) -> String { 14 | let message = match level { 15 | "normal" => text.bold().to_string(), 16 | "info" => text.bold().blue().to_string(), 17 | "warning" => text.bold().yellow().to_string(), 18 | "error" => text.bold().red().to_string(), 19 | "success" => text.bold().green().to_string(), 20 | _ => text.bold().to_string(), 21 | }; 22 | 23 | message 24 | } 25 | 26 | pub fn header() { 27 | let name = Global::app(GlobalNames::AppName); 28 | let standard_font = FIGfont::standard().unwrap(); 29 | 30 | if let Some(title) = standard_font.convert(&name) { 31 | println!("{}", &title.to_string().bold().cyan()); 32 | 33 | println!( 34 | "Version: {} | Author: {} | License: {} | Home: {}", 35 | 36 | Global::app(GlobalNames::AppVersion).bold().green(), 37 | Global::app(GlobalNames::AppAuthor).bold().cyan(), 38 | Global::app(GlobalNames::AppLicense).bold().blue(), 39 | Global::app(GlobalNames::AppHome).bold().yellow() 40 | ); 41 | } 42 | } 43 | 44 | pub fn section_header(text: &str, level: &str) { 45 | let message = Self::colorize(&text.to_uppercase(), level); 46 | println!("\n{}\n", message); 47 | } 48 | 49 | pub fn label(text: &str, level: &str) { 50 | let message = Self::colorize(&text, level); 51 | println!("{}", message); 52 | } 53 | 54 | } 55 | -------------------------------------------------------------------------------- /docs/import.md: -------------------------------------------------------------------------------- 1 | # DumpSync Command: Import 2 | 3 | To restore a database dump, you can use the following command: 4 | 5 | ```bash 6 | dumpsync import -f /path/to/dump.sql 7 | ``` 8 | 9 | For connecting to a server, read the [Connecting to a Server](connection.md) guide. 10 | 11 | ### Command Breakdown 12 | 13 | - **dumpsync**: This is the main command to invoke the DumpSync tool. 14 | - **import**: This subcommand initiates the import process to restore the database from the specified dump file.
15 | 16 | ### Options 17 | 18 | - **-f /path/to/dump.sql**: Specifies the path to the dump file that you want to restore. Replace `/path/to/dump.sql` with the actual file path to your SQL dump file. 19 | - **--ignore-drop-table**: (Optional) Use this flag if you want to ignore any `DROP TABLE` statements in the dump file during the import process. This can be useful if you want to preserve existing tables in the database. 20 | 21 | ### Example 22 | 23 | To restore a database from a dump file located at `/backups/example_dump.sql`, you would run: 24 | 25 | ```bash 26 | dumpsync import -f /backups/example_dump.sql 27 | ``` 28 | 29 | ### Encrypted Dumps 30 | 31 | ```bash 32 | dumpsync import -f /backups/example_dump.sql.aes 33 | ``` 34 | 35 | DumpSync detects encrypted dump files and automatically decrypts them during the import process. If your dump file is encrypted, you can still use the `import` command as shown above. 36 | 37 | ### Notes 38 | 39 | - Ensure that the dump file exists and that you have the necessary permissions to read it. 40 | - The import process will overwrite existing data in the database, so be cautious when using this command, especially if restoring to a production environment. 41 | - It’s recommended to back up current data before performing an import to avoid accidental data loss. 42 | -------------------------------------------------------------------------------- /src/dump_sync.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use std::error::Error; 3 | 4 | use crate::{ 5 | args_cli::*, 6 | init::DumpSyncInit, 7 | addons::DumpSyncAddons, 8 | dumper::DumpSyncDumper, 9 | service::DumpSyncService, 10 | helpers::history::History, 11 | }; 12 | 13 | pub struct DumpSync; 14 | 15 | impl DumpSync { 16 | 17 | pub async fn init(&self) -> Result<(), Box<dyn Error>> { 18 | History::new().init_db()?; 19 | 20 | match Cli::parse().command { 21 | Commands::Init => DumpSyncInit.initialize().await?, 22 | 23 | Commands::Export(options) => DumpSyncDumper.export(options), 24 | Commands::DumpData(options) => DumpSyncDumper.export_dumpdata(options), 25 | Commands::Import(options) => DumpSyncDumper.import(options), 26 | Commands::ImportData(options) => DumpSyncDumper.import_json(options), 27 | Commands::Transfer(options) => DumpSyncDumper.transfer(options), 28 | Commands::Truncate(options) => DumpSyncDumper.truncate(options), 29 | 30 | Commands::Schema(options) => DumpSyncAddons.schema(options)?, 31 | Commands::Checksum(options) => DumpSyncAddons.checksum(options), 32 | Commands::Visual(options) => DumpSyncAddons.visual(options).await, 33 | Commands::Share(options) => DumpSyncAddons.share(options).await?, 34 | Commands::Scan(options) => DumpSyncAddons.scan_xss(options).await?, 35 | Commands::History { history_type, filter } => DumpSyncAddons.history(&history_type, filter), 36 | 37 | Commands::Pull { file } => DumpSyncService.pull(&file).await, 38 | Commands::Push { file } => DumpSyncService.push(&file).await, 39 | Commands::Login => DumpSyncService.login(), 40 | } 41 | 42 | Ok(()) 43 | } 44 | 45 | } 46 | -------------------------------------------------------------------------------- /src/utils/generate.rs: -------------------------------------------------------------------------------- 1 | use std::time::{ 2 | SystemTime, 3 | UNIX_EPOCH 4 | }; 5 | 6 | pub struct Generate; 7 | 8 | impl Generate { 9 | 10 | fn get_random_bytes(&self) -> [u8; 16] { 11 | let mut bytes = [0u8; 16]; 12 | 13 | let seed = SystemTime::now() 14 |
.duration_since(UNIX_EPOCH) 15 | .unwrap() 16 | .as_nanos() as u64; 17 | 18 | let mut state = seed; 19 | for i in 0..16 { 20 | state ^= state >> 12; 21 | state ^= state << 25; 22 | state ^= state >> 27; 23 | bytes[i] = (state.wrapping_mul(2685821657736338717) >> 59) as u8; 24 | } 25 | 26 | bytes 27 | } 28 | 29 | pub fn uuid_v4(&self) -> String { 30 | let mut uuid = self.get_random_bytes(); 31 | 32 | uuid[6] = (uuid[6] & 0x0F) | 0x40; 33 | uuid[8] = (uuid[8] & 0x3F) | 0x80; 34 | 35 | format!( 36 | "{:08x}-{:04x}-{:04x}-{:04x}-{:012x}", 37 | u32::from_be_bytes([uuid[0], uuid[1], uuid[2], uuid[3]]), 38 | u16::from_be_bytes([uuid[4], uuid[5]]), 39 | u16::from_be_bytes([uuid[6], uuid[7]]), 40 | u16::from_be_bytes([uuid[8], uuid[9]]), 41 | u64::from_be_bytes([ 42 | uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15], 0, 0 43 | ]) >> 16 44 | ) 45 | } 46 | 47 | pub fn random_string(&self, size: usize) -> String { 48 | let charset = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; 49 | let mut result = String::new(); 50 | 51 | let current_time = SystemTime::now().duration_since(UNIX_EPOCH) 52 | .unwrap() 53 | .as_nanos() as usize; 54 | 55 | let mut rng = current_time; 56 | 57 | for _ in 0..size { 58 | let idx = rng % charset.len(); 59 | result.push(charset[idx] as char); 60 | rng = rng >> 1; 61 | } 62 | 63 | result 64 | } 65 | 66 | } 67 | -------------------------------------------------------------------------------- /src/plugins/diagram.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | 3 | use mysql::{ 4 | *, 5 | prelude::*, 6 | }; 7 | 8 | use crate::{ 9 | cmd::connection::Connection, 10 | 11 | handlers::{ 12 | diagram_handlers::DiagramHandlers, 13 | mysql::mysql_queries_builders::MySqlQueriesBuilders, 14 | } 15 | }; 16 | 17 | pub struct Diagram { 18 | host: String, 19 | port: u16, 20 | user: String, 21 | password: String, 22 | dbname: String, 23 | table: String, 24 | } 25 | 26 | impl Diagram { 27 | 28 | pub fn new( 29 | host: &str, 30 | port: u16, 31 | user: &str, 32 | password: &str, 33 | dbname: &str, 34 | table: &str, 35 | ) -> Self { 36 | Self { 37 | host: host.to_string(), 38 | port, 39 | user: user.to_string(), 40 | password: password.to_string(), 41 | dbname: dbname.to_string(), 42 | table: table.to_string(), 43 | } 44 | } 45 | 46 | pub async fn diagram(&self) -> Result<(), Box<dyn Error>> { 47 | let pool = Connection { 48 | host: self.host.clone(), 49 | port: self.port, 50 | user: self.user.clone(), 51 | password: self.password.clone(), 52 | dbname: Some(self.dbname.clone()), 53 | }.create_mysql_pool()?; 54 | 55 | let mut conn = pool.get_conn()?; 56 | let sql = MySqlQueriesBuilders.show_create_table(&self.table); 57 | let row: Option<(String, String)> = conn.query_first(sql)?; 58 | 59 | let table_sql: String = if let Some((_, create_table)) = row { 60 | create_table 61 | } else { 62 | return Err("No result found for the given table".into()); 63 | }; 64 | 65 | let table = DiagramHandlers.parse_show_create_table(&table_sql)?; 66 | let diagram = DiagramHandlers.generate_ascii_diagram_with_key(&table); 67 | println!("{}", diagram); 68 | 69 | Ok(()) 70 | } 71 | 72 | } 73 | -------------------------------------------------------------------------------- /src/ui/errors_alerts.rs: -------------------------------------------------------------------------------- 1 | extern crate colored; 2 | 3 | use colored::*; 4 | 5 | use crate::utils::date::Date; 6 | 7 | pub struct ErrorsAlerts; 8 | 9 | impl ErrorsAlerts { 10 | 11 |
pub fn dump(e: &str) { 12 | let current_datetime = Date::date_time(); 13 | 14 | println!( 15 | "{} Failed to dump the database: {}", 16 | current_datetime.red().bold(), 17 | e.red() 18 | ); 19 | } 20 | 21 | pub fn import(database: &str, command: &str, error: &str) { 22 | let current_datetime = Date::date_time(); 23 | 24 | println!( 25 | "{} Failed to execute the command on '{}': '{}'. Error: '{}'", 26 | current_datetime.red().bold(), 27 | database.cyan(), 28 | command.yellow(), 29 | error.red() 30 | ); 31 | } 32 | 33 | pub fn attempt(error: &str) { 34 | let current_datetime = Date::date_time(); 35 | 36 | println!( 37 | "{} Error during backup execution: '{}'", 38 | current_datetime.red().bold(), 39 | error.red() 40 | ); 41 | } 42 | 43 | pub fn max_attempts() { 44 | let current_datetime = Date::date_time(); 45 | 46 | println!( 47 | "{} Maximum number of reconnection attempts reached. Shutting down.", 48 | current_datetime.red().bold(), 49 | ); 50 | } 51 | 52 | pub fn checksum(error: &str) { 53 | let current_datetime = Date::date_time(); 54 | 55 | println!( 56 | "{} Error generating checksum: {}", 57 | current_datetime.red().bold(), 58 | error.red() 59 | ); 60 | } 61 | 62 | pub fn push() { 63 | let current_datetime = Date::date_time(); 64 | 65 | println!( 66 | "{} Failed to upload dump to the cloud.", 67 | current_datetime.red().bold(), 68 | ); 69 | } 70 | 71 | pub fn open_link() { 72 | let current_datetime = Date::date_time(); 73 | 74 | println!( 75 | "{} Failed to open the link in the browser.", 76 | current_datetime.red().bold(), 77 | ); 78 | } 79 | 80 | } 81 | -------------------------------------------------------------------------------- /src/helpers/configs.rs: -------------------------------------------------------------------------------- 1 | use reqwest::blocking; 2 | use serde_yaml::Value; 3 | 4 | use std::{ 5 | error::Error, 6 | io::BufReader, 7 | 8 | fs::{ 9 | File, 10 | metadata, 11 | read_to_string, 12 | }, 13 | }; 14 | 15 | use crate::constants::{ 16 | urls::*, 17 | global::Global, 18 | }; 19 | 20 | pub struct Configs; 21 | 22 | impl Configs { 23 | 24 | fn default_config(&self) -> Result<Value, Box<dyn Error>> { 25 | let response = blocking::get(Urls::as_str(UrlsNames::AppConfigs))?.text()?; 26 | let config: Value = serde_yaml::from_str(&response)?; 27 | Ok(config) 28 | } 29 | 30 | pub fn load(&self) -> Value { 31 | let file_path = Global::app_config(); 32 | 33 | match metadata(&file_path) { 34 | Ok(_) => { 35 | let file = File::open(file_path).expect("Failed to open local config file"); 36 | let reader = BufReader::new(file); 37 | 38 | serde_yaml::from_reader(reader).unwrap_or_else(|_| { 39 | self.default_config().expect("Error loading default config") 40 | }) 41 | } 42 | 43 | Err(_) => { 44 | self.default_config().expect("Error loading default config") 45 | } 46 | } 47 | } 48 | 49 | pub fn generic(&self, section: &str, option: &str) -> Value { 50 | let configs = self.load(); 51 | 52 | configs 53 | .get(section) 54 | .and_then(|conn| conn.get(option)) 55 | .cloned() 56 | .unwrap_or(serde_yaml::Value::Null) 57 | } 58 | 59 | pub fn boolean(&self, section: &str, option: &str, default: bool) -> bool { 60 | let configs = self.load(); 61 | 62 | configs 63 | .get(section) 64 | .and_then(|exports| exports.get(option)) 65 | .and_then(|val| val.as_bool()) 66 | .unwrap_or(default) 67 | } 68 | 69 | pub fn list(&self, section: &str, option: &str) -> Option<Vec<Value>> { 70 | let configs = self.load(); 71 | 72 | configs 73 | .get(section) 74 | .and_then(|exports| exports.get(option)) 75 | .and_then(|ignore_tables|
ignore_tables.as_sequence()) 76 | .cloned() 77 | } 78 | 79 | pub fn read_yaml_as_text(&self) -> String { 80 | read_to_string(Global::app_config()).expect("Error reading the YAML file") 81 | } 82 | 83 | } 84 | -------------------------------------------------------------------------------- /src/helpers/history.rs: -------------------------------------------------------------------------------- 1 | use rusqlite::{ 2 | params, 3 | Result, 4 | Connection, 5 | }; 6 | 7 | use crate::{ 8 | constants::folders::Folders, 9 | 10 | handlers::sqlite::{ 11 | tables::*, 12 | tables_names::TablesNames, 13 | }, 14 | }; 15 | 16 | pub struct History { 17 | db_path: String, 18 | } 19 | 20 | impl History { 21 | 22 | pub fn new() -> Self { 23 | History { 24 | db_path: Folders::APP_FOLDER.join( 25 | Tables.as_str(Table::FileName) 26 | ).to_string_lossy().to_string(), 27 | } 28 | } 29 | 30 | pub fn init_db(&self) -> Result<()> { 31 | let conn = Connection::open(&self.db_path)?; 32 | conn.execute( 33 | &Tables.history(), [], 34 | )?; 35 | 36 | Ok(()) 37 | } 38 | 39 | pub fn insert_backup(&self, slug: &str, filename: &str, db: &str, host: &str, created_at: &str, size: i64, encrypt: bool, compress: bool) -> Result<()> { 40 | let conn = Connection::open(&self.db_path)?; 41 | conn.execute( 42 | &format!("INSERT INTO {} (slug, filename, db, host, created_at, size, encrypt, compress) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", TablesNames::Backups.as_str()), 43 | params![slug, filename, db, host, created_at, size, encrypt, compress], 44 | )?; 45 | 46 | Ok(()) 47 | } 48 | 49 | pub fn list_backups_with_filters(&self, filter: Option<&str>) -> Result<Vec<(i64, String, String, String, String, String, i64, bool, bool)>> { 50 | let conn = Connection::open(&self.db_path)?; 51 | let mut stmt = conn.prepare(&format!("SELECT id, slug, db, filename, host, created_at, size, encrypt, compress FROM {} WHERE id LIKE ?1 OR slug LIKE ?1 OR filename LIKE ?1 OR created_at LIKE ?1 OR db LIKE ?1 OR host LIKE ?1", TablesNames::Backups.as_str()))?; 52 | 53 | let backups = stmt 54 | .query_map(params![format!("%{}%", filter.unwrap_or(""))], |row| { 55 | Ok(( 56 | row.get(0)?, 57 | row.get(1)?, 58 | row.get(2)?, 59 | row.get(3)?, 60 | row.get(4)?, 61 | row.get(5)?, 62 | row.get(6)?, 63 | row.get(7)?, 64 | row.get(8)?, 65 | )) 66 | })?
67 | .collect::<Result<Vec<_>, _>>()?; 68 | 69 | Ok(backups) 70 | } 71 | 72 | } -------------------------------------------------------------------------------- /src/constants/global.rs: -------------------------------------------------------------------------------- 1 | pub enum GlobalNames { 2 | AppIcon, 3 | AppName, 4 | AppHome, 5 | AppAuthor, 6 | AppVersion, 7 | AppLicense, 8 | } 9 | 10 | pub struct Global; 11 | 12 | impl Global { 13 | 14 | const APP_NAME: &'static str = env!("CARGO_PKG_NAME"); 15 | const APP_AUTHOR: &'static str = env!("CARGO_PKG_AUTHORS"); 16 | const APP_VERSION: &'static str = env!("CARGO_PKG_VERSION"); 17 | const APP_LICENSE: &'static str = env!("CARGO_PKG_LICENSE"); 18 | const APP_HOMEPAGE: &'static str = env!("CARGO_PKG_HOMEPAGE"); 19 | 20 | const APP_ICON: &'static str = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEYAAABGCAYAAABxLuKEAAAACXBIWXMAAC4jAAAuIwF4pT92AAADSklEQVR4nO2aP07jQBSHf95si0QQQgihIBJRUAfJgtocAY4ARwhHIFfIEcgRNjXVuqFBCGEkhBBCKC5ow5stHENY/Gb8Z5zYu++Tpkk8+jlfZuyZZztKKQjf+bHoE6gqIoZBxDCIGAYRwyBiGEQMg4hhEDEMIoZBxDCIGAYRwyBiGEQMg4hhSCPGA6AstTGAXwB6Gc+zCeB82jdv7gWAk7SBjqmCp5TypidkGx/AoeM4oSG/O81vWsodOo5zbDrIKOb9/b0sMQDgNxqNPU12G8Bv2JMS0280Gme6A4xTSSlVZutOJhN2eCulekqpZgm5vclk0tb97p8mMURkOqQoXU22V2KuB2DAfVkFMew/R0Taf7Ug2ulZBTGVzDaKSfncaQBgmPB5E9FtMhcpsgMAp7ZzAXsjJlhaWholffH29pb1nLJkl5ILzGEqFem/qL6AvalUSv+CfbXrFEQLTJa6j5jmeDxurqysfFs9Ly8v93MHo/5iugDGr6+vQ0QjYLi6uhrkDpyh7mJijqbt/OXlJQAwitva2pp2L8ZRdTEhsu+T2oh20ScA8Pz8/CFpfX1de12ZpepiRohGQhG8acPT01MA4GxjYyNpzfUF4yaSiFK1Iv01fftEFKY9hxStTUQXj4+P57UWs7m56RPRIREFFuWAiHoPDw/aDaq1skOR/jparZbfarU6SqlTpdRQKRVaKj1oxVT6GjPL1tbWANMywf39fXzd8KApWxjQ9quNmFm2t7fjOw3u7u7a+BR0BEvVvlqKmaXT6QT4LDid3t7exoKyFty/UHsxf7Ozs+MD8G9ubv5dMdfX16Yi/HB3dzexPFn67nrBCzxTzTexFlM0F6i+mIX0BezVY7yrq6ukzwvdIVJkH5WRC9gbMR/7EZukLDvkXcdokacEDFUQw5YCiCiA5rlTWbnAHGq+KWDvLEqpAaK3HOaaCyx+xAxc19XdcgeIVrG2ryPaXMBi2SFH67uum/Sw7APXdUMiOiYif565QLqpFMIw7DIQIprbo4ODg1Rlxv39/QDA3uXlZTxy8oyezLnG92P+V+QdPAYRwyBiGEQMg4hhEDEMIoZBxDCIGAYRwyBiGEQMg4hhEDEMIoZBxDCIGAYRw/AHZ4ENamm/WDkAAAAASUVORK5CYII="; 21 | 22 | pub fn app_config() -> String { 23 | format!("{}.yml", Self::APP_NAME) 24 | } 25 | 26 | pub fn formats_supported() -> Vec<&'static str> { 27 | vec!["sql", "txt", "xml", "csv", "json", "html"] 28 | } 29 | 30 | pub fn app(name: GlobalNames) -> &'static str { 31 | match name { 32 | GlobalNames::AppName => Self::APP_NAME, 33 | GlobalNames::AppIcon => Self::APP_ICON, 34 | GlobalNames::AppHome => Self::APP_HOMEPAGE, 35 | GlobalNames::AppAuthor => Self::APP_AUTHOR, 36 | GlobalNames::AppVersion => Self::APP_VERSION, 37 | GlobalNames::AppLicense => Self::APP_LICENSE, 38 | } 39 | } 40 | 41 | } 42 | -------------------------------------------------------------------------------- /docs/connection.md: -------------------------------------------------------------------------------- 1 | # Database Connection 2 | 3 | To connect to the database, you need to configure the environment variables in your `.env` file.
You should include the following settings: 4 | 5 | ```dotenv 6 | DB_HOST="YOUR_HOST" # Database host 7 | DB_PORT="YOUR_PORT" # Database port 8 | DB_USER="YOUR_USERNAME" # Database username 9 | DB_PASSWORD="YOUR_PASSWORD" # Database password 10 | DB_NAME="YOUR_DATABASE" # Database name 11 | 12 | DS_DUMP_INTERVAL="3600" # Interval for the dump process in seconds 13 | DS_DUMP_PATH="/path/to/" # Path where dumps will be saved 14 | 15 | DS_TRANSFER_HOST="YOUR_TRANSFER_HOST" # Transfer database host 16 | DS_TRANSFER_PORT="YOUR_TRANSFER_PORT" # Transfer database port 17 | DS_TRANSFER_USER="YOUR_TRANSFER_USERNAME" # Transfer database username 18 | DS_TRANSFER_PASSWORD="YOUR_TRANSFER_PASSWORD" # Transfer database password 19 | DS_TRANSFER_DB_NAME="YOUR_TRANSFER_DATABASE" # Transfer database name 20 | 21 | # OR, alternatively 22 | 23 | DS_DB_HOST="YOUR_HOST" 24 | DS_DB_PORT="YOUR_PORT" 25 | DS_DB_USER="YOUR_USERNAME" 26 | DS_DB_PASSWORD="YOUR_PASSWORD" 27 | DS_DB_NAME="YOUR_DATABASE" 28 | 29 | DS_DUMP_INTERVAL="3600" # Interval for the dump process in seconds 30 | DS_DUMP_PATH="/path/to/" # Path where dumps will be saved 31 | 32 | PASTEBIN_API_KEY="" # Your Pastebin API key 33 | ``` 34 | 35 | ### Configuration Details 36 | 37 | - **DB_HOST** / **DS_DB_HOST**: The hostname or IP address of your database server. 38 | - **DB_PORT** / **DS_DB_PORT**: The port number on which your database is listening. 39 | - **DB_USER** / **DS_DB_USER**: Your database username. 40 | - **DB_PASSWORD** / **DS_DB_PASSWORD**: The password associated with the database user. 41 | - **DB_NAME** / **DS_DB_NAME**: The name of the database you want to connect to. 42 | - **DS_DUMP_INTERVAL**: The interval (in seconds) for the dump process; defaults to 3600 seconds (1 hour). 43 | - **DS_DUMP_PATH**: The directory path where the database dumps will be saved. 44 | - **DS_TRANSFER_HOST**: The hostname or IP address of the transfer database server. 45 | - **DS_TRANSFER_PORT**: The port number on which the transfer database is listening. 46 | - **DS_TRANSFER_USER**: Your transfer database username. 47 | - **DS_TRANSFER_PASSWORD**: The password associated with the transfer database user. 48 | - **DS_TRANSFER_DB_NAME**: The name of the transfer database you want to connect to. 49 | - **PASTEBIN_API_KEY**: Your Pastebin API key. 50 | 51 | You can choose to use either the `DB_` prefixed variables or the `DS_` prefixed variables for your configuration. Make sure to adjust the values accordingly to fit your environment. 
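As a concrete illustration (all values below are placeholders, not real credentials), a minimal `.env` for a local MySQL instance could look like:

```dotenv
DB_HOST="127.0.0.1"
DB_PORT="3306"
DB_USER="root"
DB_PASSWORD="secret"
DB_NAME="app_db"

DS_DUMP_INTERVAL="3600"
DS_DUMP_PATH="./backups/"
```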
52 | -------------------------------------------------------------------------------- /src/cmd/encrypt.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | io::Result, 3 | 4 | fs::{ 5 | read, 6 | write, 7 | remove_file 8 | }, 9 | }; 10 | 11 | use aes_gcm::{ 12 | Key, 13 | Nonce, 14 | Aes256Gcm, 15 | 16 | aead::{ 17 | Aead, 18 | KeyInit 19 | }, 20 | }; 21 | 22 | use sha2::{ 23 | Digest, 24 | Sha256 25 | }; 26 | 27 | use rpassword::prompt_password; 28 | 29 | use crate::{ 30 | utils::file::FileUtils, 31 | ui::success_alerts::SuccessAlerts, 32 | }; 33 | 34 | pub struct Encrypt<'a> { 35 | file_path: &'a str, 36 | } 37 | 38 | impl<'a> Encrypt<'a> { 39 | 40 | pub fn new(file_path: &'a str) -> Self { 41 | Self { 42 | file_path 43 | } 44 | } 45 | 46 | pub fn encrypt(&self) -> Result<()> { 47 | let user_key = prompt_password("Enter the key (password): ") 48 | .expect("Error reading the password"); 49 | 50 | let key_hash = Sha256::digest(user_key.as_bytes()); 51 | let key = Key::<Aes256Gcm>::from_slice(&key_hash); 52 | 53 | let cipher = Aes256Gcm::new(key); 54 | let data = read(&self.file_path)?; 55 | 56 | let nonce_bytes = rand::random::<[u8; 12]>(); 57 | let nonce = Nonce::from_slice(&nonce_bytes); 58 | 59 | let encrypted_data = cipher 60 | .encrypt(nonce, data.as_ref()) 61 | .expect("Encryption error"); 62 | 63 | let encrypted_file_path = format!("{}.aes", &self.file_path); 64 | FileUtils::create_path(&encrypted_file_path); 65 | 66 | let mut output = vec![]; 67 | output.extend_from_slice(nonce); 68 | output.extend_from_slice(&encrypted_data); 69 | write(&encrypted_file_path, output)?; 70 | 71 | remove_file(&self.file_path)?; 72 | SuccessAlerts::dump(&encrypted_file_path); 73 | 74 | Ok(()) 75 | } 76 | 77 | pub fn decrypt_and_read(&self) -> Result<Vec<u8>> { 78 | let user_key = prompt_password("Enter the password: ") 79 | .expect("Error reading the password"); 80 | 81 | let key_hash = Sha256::digest(user_key.as_bytes()); 82 | let key = Key::<Aes256Gcm>::from_slice(&key_hash); 83 | 84 | let data = read(&self.file_path)?; 85 | let (nonce_bytes, encrypted_data) = data.split_at(12); 86 | let nonce = Nonce::from_slice(nonce_bytes); 87 | let cipher = Aes256Gcm::new(key); 88 | 89 | let decrypted_data = cipher 90 | .decrypt(nonce, encrypted_data) 91 | .expect("Decryption error"); 92 | 93 | Ok(decrypted_data) 94 | } 95 | 96 | } 97 | -------------------------------------------------------------------------------- /src/core/truncate.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | 3 | use mysql::{ 4 | *, 5 | prelude::* 6 | }; 7 | 8 | use crate::{ 9 | core::export::Export, 10 | cmd::connection::Connection, 11 | ui::success_alerts::SuccessAlerts, 12 | 13 | handlers::{ 14 | dump_handlers::DumpHandlers, 15 | mysql::mysql_queries_builders::MySqlQueriesBuilders, 16 | }, 17 | }; 18 | 19 | pub struct Truncate { 20 | pub host: String, 21 | pub port: u16, 22 | pub user: String, 23 | pub password: String, 24 | pub dbname: String, 25 | pub backup_path: String, 26 | pub table: String, 27 | pub encrypt: Option<bool>, 28 | } 29 | 30 | impl Truncate { 31 | 32 | pub fn new( 33 | host: &str, 34 | port: u16, 35 | user: &str, 36 | password: &str, 37 | dbname: &str, 38 | backup_path: &str, 39 | table: &str, 40 | encrypt: Option<bool>, 41 | ) -> Self { 42 | Self { 43 | host: host.to_string(), 44 | port, 45 | user: user.to_string(), 46 | password: password.to_string(), 47 | dbname: dbname.to_string(), 48 | backup_path: backup_path.to_string(), 49 | table:
table.to_string(), 50 | encrypt, 51 | } 52 | } 53 | 54 | pub fn dump_table(&self) -> Result<(), &'static str> { 55 | let dump_file_path = DumpHandlers.generate_dump_file_truncate_path(&self.dbname, &self.table, &self.backup_path); 56 | let password = if self.password.is_empty() { "" } else { &self.password }; 57 | 58 | Export::new( 59 | &self.host, 60 | self.port, 61 | &self.user, 62 | password, 63 | &self.dbname, 64 | &dump_file_path, 65 | Some(self.encrypt.unwrap_or(false)), 66 | Some(self.table.clone()) 67 | ).dump_table().map_err(|_| "Failed to generate dump file")?; 68 | 69 | Ok(()) 70 | } 71 | 72 | pub fn table(&self) -> Result<(), Box<dyn Error>> { 73 | let pool = Connection { 74 | host: self.host.clone(), 75 | port: self.port, 76 | user: self.user.clone(), 77 | password: self.password.clone(), 78 | dbname: Some(self.dbname.clone()), 79 | }.create_mysql_pool()?; 80 | 81 | let _ = &self.dump_table()?; 82 | 83 | let mut conn = pool.get_conn()?; 84 | let query = MySqlQueriesBuilders.truncate_table(&self.table); 85 | conn.query_drop(query)?; 86 | 87 | SuccessAlerts::truncate(&self.table); 88 | Ok(()) 89 | } 90 | 91 | } 92 | -------------------------------------------------------------------------------- /docs/scan.md: -------------------------------------------------------------------------------- 1 | # DumpSync Command: Scan 2 | 3 | The dumpsync scan command is used to scan a database table for Cross-Site Scripting (XSS) vulnerabilities. It allows you to provide a payload file containing scripts that will be tested on the input fields of the specified table. 4 | 5 | ```bash 6 | dumpsync scan -t <tables> -p <payload> 7 | ``` 8 | 9 | ### Command Breakdown 10 | 11 | - **dumpsync**: This is the main command to invoke the DumpSync tool. 12 | - **scan**: This subcommand initiates the scanning process to check for XSS vulnerabilities in the specified table. 13 | 14 | ### Parameters 15 | 16 | - **-t `<tables>`**: Specifies the name of the table, or multiple tables (comma-separated), to be scanned. The command will check the fields in the table for any inputs that could be vulnerable to XSS attacks. 17 | - **-p `<payload>`** (Optional): Specifies the path or URL to the payload file containing XSS scripts that will be tested on the input fields of the table. 18 | - **-l `<limit>`** (Optional): Specifies the maximum number of rows to scan. 19 | - **-o `<offset>`** (Optional): Specifies the offset from where to start scanning the rows. 20 | - **-f `<file>`** (Optional): Specifies the path to save the scan results in TXT, CSV, XML, JSON, or HTML format. 21 | 22 | ### Example 23 | 24 | Scan a table called `example_table` using a payload file located at `path/to/payload.txt`: 25 | 26 | ```bash 27 | dumpsync scan -t example_table -p path/to/payload.txt 28 | ``` 29 | 30 | Scan multiple tables (`example_table1`, `example_table2`) using a payload file located at `path/to/payload.txt`: 31 | 32 | ```bash 33 | dumpsync scan -t "example_table1, example_table2" -p path/to/payload.txt 34 | ``` 35 | 36 | ### Description 37 | 38 | - The command will access the input fields of the specified table and attempt to inject each payload from the provided file. 39 | - This process helps identify vulnerable points where XSS attacks could be performed, providing a way to improve the security of your application. 40 | 41 | ### Notes 42 | 43 | - Ensure that the payload file exists (or the payload URL is reachable) and that you have the necessary permissions to read it. 44 | - The scan runs against live table data, so be cautious when using this command, especially against a production environment. 45 | - It’s recommended to back up current data before running a scan to avoid accidental data loss. 46 | 47 | ## Report output file 48 | 49 | It's possible to save the scan results to a file using the `-f` or `--file` option. The report will be saved in TXT, CSV, XML, JSON, or HTML format.
50 | 51 | ```bash 52 | dumpsync scan -t example_table -f path/to/report.csv 53 | ``` 54 | 55 | Formats supported for the report file are: 56 | 57 | - TXT 58 | - CSV 59 | - XML 60 | - JSON 61 | - HTML 62 | -------------------------------------------------------------------------------- /src/plugins/pastebin.rs: -------------------------------------------------------------------------------- 1 | use reqwest::Client; 2 | 3 | use std::{ 4 | error::Error, 5 | collections::HashMap, 6 | }; 7 | 8 | use crate::{ 9 | utils::file::FileUtils, 10 | ui::share_alerts::ShareAlerts, 11 | 12 | constants::{ 13 | urls::*, 14 | global::*, 15 | protocols::*, 16 | }, 17 | }; 18 | 19 | pub struct Pastebin { 20 | file: String, 21 | api_key: String, 22 | privacy: String, 23 | } 24 | 25 | impl Pastebin { 26 | 27 | pub fn new(file: &str, api_key: &str, privacy: &str) -> Self { 28 | Self { 29 | file: file.to_string(), 30 | api_key: api_key.to_string(), 31 | privacy: privacy.to_string(), 32 | } 33 | } 34 | 35 | fn privacy(&self) -> String { 36 | match self.privacy.as_str() { 37 | "public" => "0", 38 | "unlisted" => "1", 39 | "private" => "2", 40 | _ => "1", 41 | }.to_string() 42 | } 43 | 44 | pub async fn share(&self) -> Result<(), Box<dyn Error>> { 45 | let ext = FileUtils::extension(&self.file); 46 | 47 | if !FileUtils::exists(&self.file) { 48 | ShareAlerts::error("File does not exist"); 49 | return Ok(()); 50 | } 51 | 52 | if self.api_key.trim().is_empty() { 53 | ShareAlerts::api_key_missing(); 54 | return Ok(()); 55 | } 56 | 57 | if Global::formats_supported().iter().any(|&e| e == ext) { 58 | let privacy = self.privacy(); 59 | let api_option = "paste".to_string(); 60 | let name = format!("{}: {}", Global::app(GlobalNames::AppName), &self.file); 61 | let content = FileUtils::content(&self.file); 62 | 63 | let mut params = HashMap::new(); 64 | params.insert("api_dev_key", &self.api_key); 65 | params.insert("api_option", &api_option); 66 | params.insert("api_paste_code", &content); 67 | params.insert("api_paste_private", &privacy); 68 | params.insert("api_paste_name", &name); 69 | params.insert("api_paste_format", &ext); 70 | 71 | let response = Client::new() 72 | .post(Urls::as_str(UrlsNames::PastebinApiUri)) 73 | .form(&params) 74 | .send() 75 | .await?; 76 | 77 | let response_text = response.text().await?; 78 | if response_text.starts_with(Protocols::Http.as_str()) { 79 | ShareAlerts::success(&response_text); 80 | } else { 81 | ShareAlerts::error(&response_text); 82 | } 83 | } else { 84 | ShareAlerts::error("Invalid file extension"); 85 | } 86 | 87 | Ok(()) 88 | } 89 | 90 | } -------------------------------------------------------------------------------- /src/ui/success_alerts.rs: -------------------------------------------------------------------------------- 1 | extern crate colored; 2 | 3 | use colored::*; 4 | 5 | use crate::utils::date::Date; 6 | 7 | pub struct SuccessAlerts; 8 | 9 | impl SuccessAlerts { 10 | 11 | pub fn dump(file: &str) { 12 | let current_datetime = Date::date_time(); 13 | 14 | println!( 15 | "\r{} Dump successfully completed and saved at {}", 16 | current_datetime.green().bold(), 17 | file.blue() 18 | ); 19 | } 20 | 21 | pub fn table(table: &str) { 22 | let current_datetime = Date::date_time(); 23 | 24 | println!( 25 | "{} Table '{}' successfully imported.", 26 | current_datetime.green().bold(), 27 | table.blue() 28 | ); 29 | } 30 | 31 | pub fn table_ignored(table: &str) { 32 | let current_datetime = Date::date_time(); 33 | 34 | println!( 35 | "{} Table '{}' was ignored.", 36 |
current_datetime.green().bold(), 37 | table.yellow() 38 | ); 39 | } 40 | 41 | pub fn truncate(table: &str) { 42 | let current_datetime = Date::date_time(); 43 | 44 | println!( 45 | "{} Table '{}' successfully truncated.", 46 | current_datetime.green().bold(), 47 | table.blue() 48 | ); 49 | } 50 | 51 | pub fn import(database: &str) { 52 | let current_datetime = Date::date_time(); 53 | 54 | println!("{}", "-".repeat(16)); 55 | println!( 56 | "{} Dump successfully imported into the database `{}`", 57 | current_datetime.green().bold(), 58 | database.blue() 59 | ); 60 | } 61 | 62 | pub fn terminate() { 63 | let current_datetime = Date::date_time(); 64 | 65 | println!( 66 | "\n{} {}", 67 | current_datetime.green().bold(), 68 | "Process terminated by user. Exiting gracefully...".red().bold(), 69 | ); 70 | } 71 | 72 | pub fn settings() { 73 | let current_datetime = Date::date_time(); 74 | 75 | println!( 76 | "\r{} The settings file was successfully created", 77 | current_datetime.green().bold(), 78 | ); 79 | } 80 | 81 | pub fn push(message: &str) { 82 | let current_datetime = Date::date_time(); 83 | 84 | println!( 85 | "\r{} {}", 86 | current_datetime.green().bold(), 87 | message.blue() 88 | ); 89 | } 90 | 91 | pub fn api_key() { 92 | let current_datetime = Date::date_time(); 93 | 94 | println!( 95 | "\r{} {}", 96 | current_datetime.green().bold(), 97 | "API Key successfully saved".blue() 98 | ); 99 | } 100 | 101 | } 102 | -------------------------------------------------------------------------------- /src/handlers/reports_handlers.rs: -------------------------------------------------------------------------------- 1 | use regex::Regex; 2 | 3 | use std::{ 4 | fs, 5 | time::SystemTime, 6 | collections::HashSet, 7 | }; 8 | 9 | use crate::{ 10 | constants::regexp::RegExp, 11 | ui::report_alerts::ReportAlerts, 12 | plugins::reports_pdf::ReportsPdfs, 13 | 14 | utils::{ 15 | file::FileUtils, 16 | generate::Generate, 17 | } 18 | }; 19 | 20 | pub struct ReportsHandlers { 21 | path: String, 22 | interval: usize, 23 | counter: usize, 24 | pdf: Option<bool>, 25 | } 26 | 27 | impl ReportsHandlers { 28 | 29 | pub fn new(path: &str, interval: &u64, counter: usize, pdf: Option<bool>) -> Self { 30 | Self { 31 | path: path.to_string(), 32 | interval: *interval as usize, 33 | counter, 34 | pdf 35 | } 36 | } 37 | 38 | pub fn extract_table_names(&self, sql_file_path: &str) -> Option<HashSet<String>> { 39 | let sql_content = fs::read_to_string(sql_file_path).ok()?; 40 | let re = Regex::new(RegExp::CREATE_TABLE_INSERTS).ok()?; 41 | 42 | let tables: HashSet<String> = re.captures_iter(&sql_content) 43 | .filter_map(|cap| cap.get(1)) 44 | .map(|m| m.as_str().to_string()) 45 | .collect(); 46 | 47 | if tables.is_empty() { 48 | None 49 | } else { 50 | Some(tables) 51 | } 52 | } 53 | 54 | pub fn get_most_recent_sql_file(&self, dump_file_path: &str) -> Option<(String, String)> { 55 | fs::read_dir(&dump_file_path) 56 | .ok()?
57 | .filter_map(|entry| entry.ok()) 58 | .filter(|entry| entry.path().extension().map(|ext| ext == "sql").unwrap_or(false)) 59 | .max_by_key(|entry| entry.metadata().ok().and_then(|meta| meta.modified().ok()).unwrap_or(SystemTime::UNIX_EPOCH)) 60 | .and_then(|entry| { 61 | let path = entry.path(); 62 | let file_size = entry.metadata().ok()?.len(); 63 | Some((path.display().to_string(), FileUtils::size(file_size))) 64 | }) 65 | } 66 | 67 | pub fn report(&self) { 68 | if let Some((last_dump, size)) = self.get_most_recent_sql_file(&self.path) { 69 | ReportAlerts::report(&self.path, self.counter, &last_dump, &size, self.interval); 70 | 71 | if let Some(tables) = &self.extract_table_names(&last_dump) { 72 | ReportAlerts::tables(tables); 73 | } else { 74 | ReportAlerts::no_tables(); 75 | } 76 | 77 | if self.pdf.unwrap_or(false) { 78 | let file = Generate.uuid_v4() + ".pdf"; 79 | 80 | let _ = ReportsPdfs::new( 81 | &file, &self.path, self.interval, self.counter 82 | ).dump(); 83 | } 84 | } 85 | } 86 | 87 | } 88 | -------------------------------------------------------------------------------- /docs/settings.md: -------------------------------------------------------------------------------- 1 | # Settings File for DumpSync 2 | 3 | The configuration file `dumpsync.yml` allows you to customize the options for the dump process. 4 | 5 | It should be saved as `dumpsync.yml` in the same directory where your project is running. 6 | 7 | ## File Structure 8 | 9 | Example structure: 10 | 11 | ```yaml 12 | exports: 13 | dump_data: true 14 | lock_tables: true 15 | compress_data: true 16 | insert_ignore_into: false 17 | drop_table_if_exists: true 18 | database_if_not_exists: true 19 | 20 | connection: 21 | max_retries: 3 22 | retry_connection_interval: 5 23 | ``` 24 | 25 | ### Properties of `exports` 26 | 27 | - **dump_data** (`boolean`): Determines whether all data in your database should be exported. If set to `true`, the content of all tables will be included in the dump. If `false`, only the structures of the tables will be exported. 28 | - **lock_tables** (`boolean`): Indicates whether tables should be locked during the dump process. If set to `true`, the tables will be locked to prevent any changes while the dump is being created, ensuring data consistency. If `false`, tables will remain unlocked, which may lead to potential inconsistencies if data is modified during the dump. 29 | - **compress_data** (`boolean`): Specifies whether the dump should be compressed. If set to `true`, the dump will be compressed using the `gzip` algorithm, reducing the file size. If `false`, the dump will be saved as a plain SQL file. 30 | - **drop_table_if_exists** (`boolean`): Specifies whether existing tables should be dropped before being recreated during the import process. Setting this to `true` will include a `DROP TABLE IF EXISTS` statement before the `CREATE TABLE` statement, preventing table duplication conflicts. 31 | - **insert_ignore_into** (`boolean`): Specifies whether data should be inserted using the `INSERT IGNORE INTO` statement during the import process. Setting it to `true` ensures that duplicate records are ignored, preventing errors from duplicate entries in the table. 32 | - **database_if_not_exists** (`boolean`): Indicates whether the database should be created only if it does not exist. If set to `true`, a `CREATE DATABASE IF NOT EXISTS` statement will be included in the dump, avoiding errors if the database is already present.
33 | - **ignore_tables** (`array` of `strings`): A list of tables to be ignored during the dump. Tables listed here will not have their structure or data exported. Example: 34 | 35 | ```yaml 36 | ignore_tables: 37 | - table 38 | ``` 39 | 40 | ### Properties of `connection` 41 | 42 | - **max_retries** (`integer`): Defines the maximum number of retry attempts to establish a connection to the database. If a connection attempt fails, the application will retry up to this number of times. 43 | - **retry_connection_interval** (`integer`): Specifies the interval (in seconds) to wait between each retry attempt when attempting to connect to the database. This allows for gradual retries rather than immediate retries. 44 | -------------------------------------------------------------------------------- /src/plugins/checksum.rs: -------------------------------------------------------------------------------- 1 | use crc32fast::Hasher as Crc32Hasher; 2 | 3 | use md5::{ 4 | Md5, 5 | Digest as Md5Digest 6 | }; 7 | 8 | use sha1::Sha1; 9 | use sha2::Sha256; 10 | 11 | use std::{ 12 | fs::{ 13 | File, 14 | OpenOptions, 15 | }, 16 | 17 | io::{ 18 | Read, 19 | Write, 20 | Result, 21 | }, 22 | }; 23 | 24 | use crate::ui::checksum_alerts::ChecksumAlerts; 25 | 26 | pub struct Checksum { 27 | file_path: String, 28 | output_path: Option<String>, 29 | } 30 | 31 | impl Checksum { 32 | 33 | pub fn new(file_path: &str, output_path: Option<&str>) -> Self { 34 | Self { 35 | file_path: file_path.to_string(), 36 | output_path: output_path.map(|s| s.to_string()), 37 | } 38 | } 39 | 40 | fn read_file(&self) -> Result<Vec<u8>> { 41 | let mut file = File::open(&self.file_path)?; 42 | let mut buffer = Vec::new(); 43 | file.read_to_end(&mut buffer)?; 44 | 45 | Ok(buffer) 46 | } 47 | 48 | pub fn calculate_hashes(&self) -> Result<(u32, String, String, String)> { 49 | let buffer = self.read_file()?; 50 | 51 | let mut crc32_hasher = Crc32Hasher::new(); 52 | crc32_hasher.update(&buffer); 53 | let crc32 = crc32_hasher.finalize(); 54 | 55 | let mut md5_hasher = Md5::new(); 56 | md5_hasher.update(&buffer); 57 | let md5 = format!("{:x}", md5_hasher.finalize()); 58 | 59 | let mut sha1_hasher = Sha1::new(); 60 | sha1_hasher.update(&buffer); 61 | let sha1 = format!("{:x}", sha1_hasher.finalize()); 62 | 63 | let mut sha256_hasher = Sha256::new(); 64 | sha256_hasher.update(&buffer); 65 | let sha256 = format!("{:x}", sha256_hasher.finalize()); 66 | 67 | Ok((crc32, md5, sha1, sha256)) 68 | } 69 | 70 | pub fn generated(&self) -> Result<()> { 71 | let (crc32, md5, sha1, sha256) = &self.calculate_hashes()?; 72 | let _ = &self.printable()?; 73 | 74 | if let Some(output_path) = &self.output_path { 75 | let mut output_file = OpenOptions::new() 76 | .write(true) 77 | .create(true) 78 | .truncate(true) 79 | .open(output_path)?; 80 | 81 | writeln!(output_file, "CRC32: {:08x}", crc32)?; 82 | writeln!(output_file, "MD5: {}", md5)?; 83 | writeln!(output_file, "SHA1: {}", sha1)?; 84 | writeln!(output_file, "SHA256: {}", sha256)?; 85 | 86 | ChecksumAlerts::checksum(output_path); 87 | } 88 | 89 | Ok(()) 90 | } 91 | 92 | pub fn printable(&self) -> Result<()> { 93 | let (crc32, md5, sha1, sha256) = &self.calculate_hashes()?; 94 | 95 | ChecksumAlerts::file(&self.file_path); 96 | ChecksumAlerts::printable("crc32", &format!("{:08x}", crc32)); 97 | ChecksumAlerts::printable("md5", &md5); 98 | ChecksumAlerts::printable("sha1", &sha1); 99 | ChecksumAlerts::printable("sha256", &sha256); 100 | 101 | Ok(()) 102 | } 103 | 104 | }
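// Usage sketch (not part of the original file): either entry point can be
// called on its own; `generated` already invokes `printable` internally
// before optionally writing the hashes to a sidecar file:
//
//     Checksum::new("backups/dump.sql", Some("backups/dump.sql.checksums"))
//         .generated()?;
//
// The paths above are hypothetical examples; pass `None` to only print.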
-------------------------------------------------------------------------------- /docs/export.md: -------------------------------------------------------------------------------- 1 | # DumpSync Command: Export 2 | 3 | To create a database dump, you can use the following command: 4 | 5 | ```bash 6 | dumpsync export 7 | ``` 8 | 9 | For connecting to a server, read the [Connecting to a Server](connection.md) guide. 10 | 11 | ### Command Breakdown 12 | 13 | - **dumpsync**: This is the main command to invoke the DumpSync tool. 14 | - **export**: This subcommand initiates the export process to create a dump of the specified database. 15 | 16 | ### Options 17 | 18 | - **-i**: (Optional) Sets the interval (in seconds) for the dump process. In this example, the interval is set to 3600 seconds (1 hour). You can adjust this value based on your requirements. 19 | - **-f**: (Optional) Indicates the file path where the dump will be saved. Replace `/path/to/` with the desired directory path on your system. 20 | - **--encrypt**: (Optional) Encrypts the dump file using AES-256 encryption. This option requires a password to encrypt and decrypt the dump file. 21 | - **--once**: (Optional) Exports the database dump only once without creating a recurring schedule. 22 | - **--retain**: (Optional) The maximum number of backups to retain for the dump. If the number of dumps exceeds this limit, the scheduler will be terminated. 23 | - **--pdf**: (Optional) Generates a PDF report of the dump process with your settings and tables dumped. 24 | 25 | ### Example 26 | 27 | ```bash 28 | dumpsync export 29 | ``` 30 | 31 | ### Exporting only once 32 | 33 | To export a database dump only once without creating a recurring schedule, you can use the `--once` option: 34 | 35 | ```bash 36 | dumpsync export --once 37 | ``` 38 | 39 | This command will create a dump of the specified database and then exit without creating a recurring schedule. 40 | 41 | ### Setting the Maximum Number of Backups 42 | 43 | To set the maximum number of backups to retain for the dump, you can use the `--retain` option: 44 | 45 | ```bash 46 | dumpsync export --retain 5 47 | ``` 48 | 49 | This command will create a dump of the specified database and retain a maximum of 5 backups. If the number of backups exceeds this limit, the scheduler will be terminated. 50 | 51 | ### Encrypt Dumps 52 | 53 | To create an encrypted dump file, you can add the `--encrypt` option to the command: 54 | 55 | ```bash 56 | dumpsync export --encrypt 57 | ``` 58 | 59 | The encryption process uses AES-256 encryption and will prompt you to enter a password for the encryption and decryption of the dump file. 60 | 61 | ### Generate a PDF Report 62 | 63 | To generate a PDF report of the dump process, you can use the `--pdf` option: 64 | 65 | ```bash 66 | dumpsync export --pdf 67 | ``` 68 | 69 | This command will create a PDF report of the dump process with your settings and tables dumped. 70 | 71 | ### Notes 72 | 73 | - The export process will create a dump file of the specified database at the specified interval. 74 | - If you choose to encrypt the dump file, you will need to provide a password during the encryption process. 75 | - Ensure that the specified path for the dump exists and that you have the necessary permissions to write to that directory. 76 | - Adjust the interval according to your backup strategy to ensure that you have up-to-date dumps without overwhelming your database resources.
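Putting several of the options above together, a single scheduled, encrypted run with a retention cap and a PDF report might look like this (the paths and values are illustrative placeholders):

```bash
dumpsync export -i 3600 -f /backups/ --retain 5 --encrypt --pdf
```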
77 | -------------------------------------------------------------------------------- /src/handlers/dump_handlers.rs: -------------------------------------------------------------------------------- 1 | use chrono::Local; 2 | 3 | use std::{ 4 | fs, 5 | thread, 6 | process, 7 | path::Path, 8 | time::Duration, 9 | }; 10 | 11 | use crate::{ 12 | helpers::configs::Configs, 13 | utils::generate::Generate, 14 | 15 | ui::{ 16 | errors_alerts::ErrorsAlerts, 17 | reconnect_alerts::ReconnectAlerts, 18 | }, 19 | }; 20 | 21 | pub struct DumpHandlers; 22 | 23 | impl DumpHandlers { 24 | 25 | pub fn generate_dump_file_path(&self, dbname: &str, dump_file_path: &str) -> String { 26 | let sanitized = dbname.replace(|c: char| !c.is_alphanumeric(), "_"); 27 | let folder = Path::new(dump_file_path).join(&sanitized); 28 | fs::create_dir_all(&folder).expect("Failed to create dump folder"); 29 | 30 | let filename = format!( 31 | "{}_{}.sql", 32 | Local::now().format("%Y_%m_%d_%H%M%S"), 33 | Generate.random_string(6) 34 | ); 35 | 36 | Path::new(&folder).join(&filename).to_str().unwrap().to_string() 37 | } 38 | 39 | pub fn generate_dump_json_file_path(&self, dbname: &str, dump_file_path: &str) -> String { 40 | let sanitized = dbname.replace(|c: char| !c.is_alphanumeric(), "_"); 41 | let folder = Path::new(dump_file_path).join(&sanitized); 42 | fs::create_dir_all(&folder).expect("Failed to create dump folder"); 43 | 44 | let filename = format!( 45 | "{}_{}.json", 46 | Local::now().format("%Y_%m_%d_%H%M%S"), 47 | Generate.random_string(6) 48 | ); 49 | 50 | Path::new(&folder).join(&filename).to_str().unwrap().to_string() 51 | } 52 | 53 | pub fn generate_dump_file_truncate_path(&self, dbname: &str, table: &str, dump_file_path: &str) -> String { 54 | let sanitized = dbname.replace(|c: char| !c.is_alphanumeric(), "_"); 55 | let folder = Path::new(dump_file_path).join(&sanitized); 56 | fs::create_dir_all(&folder).expect("Failed to create dump folder"); 57 | 58 | let filename = format!( 59 | "{}_{}_{}.sql", 60 | table.replace(|c: char| !c.is_alphanumeric(), "_"), 61 | Local::now().format("%Y_%m_%d_%H%M%S"), 62 | Generate.random_string(6) 63 | ); 64 | 65 | Path::new(&folder).join(&filename).to_str().unwrap().to_string() 66 | } 67 | 68 | pub fn setup_retry_config(&self) -> (usize, u64, u64) { 69 | let max_retries = Configs.generic("connection", "max_retries").as_u64().unwrap_or(3); 70 | let retry_interval = Configs.generic("connection", "retry_connection_interval") 71 | .as_u64() 72 | .unwrap_or(60); 73 | 74 | (0, max_retries, retry_interval) 75 | } 76 | 77 | pub fn handle_retry(&self, attempt: &mut usize, error: &'static str, max_retries: u64, retry_interval: u64) { 78 | ErrorsAlerts::attempt(error); 79 | 80 | *attempt += 1; 81 | if *attempt >= max_retries as usize { 82 | ErrorsAlerts::max_attempts(); 83 | process::exit(1); 84 | } else { 85 | ReconnectAlerts::reconnect(*attempt as u64, max_retries); 86 | thread::sleep(Duration::from_secs(retry_interval)); 87 | } 88 | } 89 | 90 | } 91 | -------------------------------------------------------------------------------- /src/cloud/pull.rs: -------------------------------------------------------------------------------- 1 | extern crate reqwest; 2 | 3 | use std::{ 4 | error::Error, 5 | 6 | io::{ 7 | ErrorKind, 8 | Error as ErrorIo, 9 | }, 10 | }; 11 | 12 | use crate::{ 13 | cloud::api::API, 14 | core::import::Import, 15 | constants::protocols::Protocols, 16 | ui::errors_alerts::ErrorsAlerts, 17 | }; 18 | 19 | pub struct Pull { 20 | host: String, 21 | port: u16, 22 | user: String, 
23 | password: String, 24 | dbname: String, 25 | backup: String, 26 | } 27 | 28 | impl Pull { 29 | 30 | pub fn new( 31 | host: &str, 32 | port: u16, 33 | user: &str, 34 | password: &str, 35 | dbname: &str, 36 | backup: &str, 37 | ) -> Self { 38 | Self { 39 | host: host.to_string(), 40 | port, 41 | user: user.to_string(), 42 | password: password.to_string(), 43 | dbname: dbname.to_string(), 44 | backup: backup.to_string(), 45 | } 46 | } 47 | 48 | async fn pull_url(&self, url: &str) -> Result<String, Box<dyn Error>> { 49 | let response = reqwest::get(url).await?; 50 | 51 | if !response.status().is_success() { 52 | let status_code = response.status(); 53 | let error_message = format!("Failed to download SQL data: HTTP {}", status_code); 54 | ErrorsAlerts::dump(&error_message); 55 | 56 | return Err(Box::new(ErrorIo::new( 57 | ErrorKind::Other, 58 | error_message, 59 | ))); 60 | } 61 | 62 | let sql_content = response.text().await?; 63 | 64 | Import::new( 65 | &self.host, 66 | self.port, 67 | &self.user, 68 | &self.password, 69 | &self.dbname, 70 | None, 71 | None, 72 | None, 73 | Some(&sql_content), 74 | ).dump_directly().await?; 75 | 76 | Ok(sql_content) 77 | } 78 | 79 | async fn pull_dumpsync(&self, backup: &str) -> Result<(), Box<dyn Error>> { 80 | match API::new( 81 | None, 82 | Some(backup), 83 | None, 84 | None, 85 | None, 86 | ).get().await { 87 | Ok(sql_content) => { 88 | Import::new( 89 | &self.host, 90 | self.port, 91 | &self.user, 92 | &self.password, 93 | &self.dbname, 94 | None, 95 | None, 96 | None, 97 | Some(&sql_content), 98 | ).dump_directly().await?; 99 | } 100 | 101 | Err(_) => { 102 | ErrorsAlerts::dump("Failed to pull dump from Cloud."); 103 | } 104 | } 105 | 106 | Ok(()) 107 | } 108 | 109 | pub async fn pull(&self) -> Result<(), Box<dyn Error>> { 110 | match self.backup.as_str() { 111 | http if http.starts_with(Protocols::Http.as_str()) || http.starts_with(Protocols::Https.as_str()) => { 112 | self.pull_url(http).await?; 113 | } 114 | _ => { 115 | self.pull_dumpsync(&self.backup).await?; 116 | } 117 | }; 118 | 119 | Ok(()) 120 | } 121 | 122 | } 123 | -------------------------------------------------------------------------------- /src/handlers/scan_handlers.rs: -------------------------------------------------------------------------------- 1 | use reqwest; 2 | use regex::Regex; 3 | 4 | use std::{ 5 | fs::File, 6 | path::Path, 7 | error::Error, 8 | 9 | io::{ 10 | self, 11 | BufRead 12 | }, 13 | }; 14 | 15 | use crate::constants::urls::*; 16 | 17 | pub struct ScanHandlers; 18 | 19 | impl ScanHandlers { 20 | 21 | pub fn load_patterns_from_file(&self, path: &str) -> Result<Vec<Regex>, Box<dyn Error>> { 22 | let path = Path::new(path); 23 | let file = File::open(path)?; 24 | let reader = io::BufReader::new(file); 25 | 26 | let mut patterns = Vec::new(); 27 | for line in reader.lines() { 28 | let line = line?; 29 | let trimmed = line.trim(); 30 | 31 | if trimmed.is_empty() || trimmed.starts_with("//") { 32 | continue; 33 | } 34 | 35 | let pattern = line.split("//").next().unwrap_or("").trim(); 36 | 37 | if !pattern.is_empty() { 38 | match Regex::new(pattern) { 39 | Ok(regex) => patterns.push(regex), 40 | Err(e) => eprintln!("Invalid regex '{}': {}", pattern, e), 41 | } 42 | } 43 | } 44 | 45 | Ok(patterns) 46 | } 47 | 48 | pub async fn load_patterns_from_url(&self, url: &str) -> Result<Vec<Regex>, Box<dyn Error>> { 49 | let response = reqwest::get(url).await?; 50 | 51 | if !response.status().is_success() { 52 | return Err( 53 | format!("Error accessing URL: {}", url).into() 54 | ); 55 | } 56 | 57 | let body = response.text().await?; 58 | let mut patterns =
Vec::new(); 59 | for line in body.lines() { 60 | let trimmed = line.trim(); 61 | 62 | if trimmed.is_empty() || trimmed.starts_with("//") { 63 | continue; 64 | } 65 | 66 | let pattern = trimmed.split("//").next().unwrap().trim(); 67 | 68 | if !pattern.is_empty() { 69 | match Regex::new(pattern) { 70 | Ok(regex) => patterns.push(regex), 71 | Err(e) => eprintln!("Invalid regex '{}': {}", pattern, e), 72 | } 73 | } 74 | } 75 | 76 | Ok(patterns) 77 | } 78 | 79 | pub fn is_potential_xss(&self, value: &str, patterns: &[Regex]) -> bool { 80 | for pattern in patterns { 81 | if pattern.is_match(value) { 82 | return true; 83 | } 84 | } 85 | 86 | false 87 | } 88 | 89 | pub async fn read_patterns(&self, payload: Option<String>) -> Result<Vec<Regex>, Box<dyn Error>> { 90 | let patterns = match payload { 91 | Some(value) => { 92 | if value.starts_with("http://") || value.starts_with("https://") { 93 | ScanHandlers.load_patterns_from_url(&value).await? 94 | } else if Path::new(&value).exists() { 95 | ScanHandlers.load_patterns_from_file(&value)? 96 | } else { 97 | return Err("Invalid payload source, not a valid file or URL.".into()); 98 | } 99 | } 100 | 101 | None => { 102 | ScanHandlers.load_patterns_from_url(Urls::as_str(UrlsNames::XssDetectRegex)).await? 103 | } 104 | }; 105 | 106 | Ok(patterns) 107 | } 108 | 109 | } 110 | -------------------------------------------------------------------------------- /src/plugins/schema.rs: -------------------------------------------------------------------------------- 1 | use serde::Serialize; 2 | use serde_json::to_string_pretty; 3 | 4 | use std::{ 5 | fs, 6 | error::Error, 7 | }; 8 | 9 | use mysql::{ 10 | *, 11 | prelude::*, 12 | }; 13 | 14 | use crate::{ 15 | cmd::connection::Connection, 16 | ui::schema_alerts::SchemaAlerts, 17 | handlers::mysql::mysql_queries_builders::MySqlQueriesBuilders, 18 | }; 19 | 20 | #[derive(Serialize, Debug)] 21 | struct Column { 22 | name: String, 23 | data_type: String, 24 | is_nullable: bool, 25 | is_primary_key: bool, 26 | } 27 | 28 | #[derive(Serialize, Debug)] 29 | struct ForeignKey { 30 | column: String, 31 | referenced_table: String, 32 | referenced_column: String, 33 | } 34 | 35 | #[derive(Serialize, Debug)] 36 | struct Table { 37 | name: String, 38 | columns: Vec<Column>, 39 | foreign_keys: Vec<ForeignKey>, 40 | } 41 | 42 | pub struct Schema { 43 | host: String, 44 | port: u16, 45 | user: String, 46 | password: String, 47 | dbname: String, 48 | 49 | file: String, 50 | } 51 | 52 | impl Schema { 53 | 54 | pub fn new( 55 | host: &str, 56 | port: u16, 57 | user: &str, 58 | password: &str, 59 | dbname: &str, 60 | 61 | file: &str, 62 | ) -> Self { 63 | Self { 64 | host: host.to_string(), 65 | port, 66 | user: user.to_string(), 67 | password: password.to_string(), 68 | dbname: dbname.to_string(), 69 | 70 | file: file.to_string(), 71 | } 72 | } 73 | 74 | pub fn create(&self) -> Result<(), Box<dyn Error>> { 75 | let pool = Connection { 76 | host: self.host.clone(), 77 | port: self.port, 78 | user: self.user.clone(), 79 | password: self.password.clone(), 80 | dbname: Some(self.dbname.clone()), 81 | }.create_mysql_pool()?; 82 | 83 | let mut conn = pool.get_conn()?; 84 | let tables: Vec<String> = conn.query(MySqlQueriesBuilders.get_table_names())?; 85 | 86 | let mut schema = Vec::new(); 87 | 88 | for table in tables { 89 | let columns: Vec<(String, String, String, String)> = conn.query(MySqlQueriesBuilders.table_info(&table))?; 90 | 91 | let column_data: Vec<Column> = columns 92 | .iter() 93 | .map(|col| Column { 94 | name: col.0.clone(), 95 | data_type: col.1.clone(), 96 | is_nullable: col.2 == "YES", 97 |
is_primary_key: col.3 == "PRI", 98 | }) 99 | .collect(); 100 | 101 | let foreign_keys: Vec<(String, String, String)> = conn.query(MySqlQueriesBuilders.foreign_key_info(&table))?; 102 | 103 | let foreign_key_data: Vec<ForeignKey> = foreign_keys 104 | .iter() 105 | .map(|fk| ForeignKey { 106 | column: fk.0.clone(), 107 | referenced_table: fk.1.clone(), 108 | referenced_column: fk.2.clone(), 109 | }) 110 | .collect(); 111 | 112 | schema.push(Table { 113 | name: table, 114 | columns: column_data, 115 | foreign_keys: foreign_key_data, 116 | }); 117 | } 118 | 119 | let json_schema = to_string_pretty(&schema)?; 120 | fs::write(&self.file, json_schema)?; 121 | 122 | SchemaAlerts::success(&self.file); 123 | Ok(()) 124 | } 125 | 126 | } -------------------------------------------------------------------------------- /src/handlers/mysql/mysql_keywords.rs: -------------------------------------------------------------------------------- 1 | pub enum MySQLKeywords { 2 | Use, 3 | Null, 4 | Limit, 5 | Offset, 6 | Values, 7 | Unique, 8 | Insert, 9 | IfExists, 10 | TableInfo, 11 | DropTable, 12 | References, 13 | ShowTables, 14 | SelectFrom, 15 | AlterTable, 16 | ForeignKey, 17 | LockTables, 18 | InsertInto, 19 | ColumnName, 20 | CreateTable, 21 | ShowColumns, 22 | IfNotExists, 23 | InsertIgnore, 24 | UnlockTables, 25 | ShowKeysFrom, 26 | GetTableNames, 27 | TruncateTable, 28 | AddConstraint, 29 | GetAlterTable, 30 | CreateDatabase, 31 | ConstraintName, 32 | ForeignKeyInfo, 33 | ShowCreateTable, 34 | WherePrimaryKey, 35 | ReferencedTableName, 36 | ReferencedColumnName, 37 | AndReferencedIsNotNull, 38 | 39 | Comments, 40 | FinalComments, 41 | } 42 | 43 | impl MySQLKeywords { 44 | 45 | pub fn as_str(&self) -> &'static str { 46 | match self { 47 | MySQLKeywords::Use => "USE", 48 | MySQLKeywords::Null => "NULL", 49 | MySQLKeywords::Limit => "LIMIT", 50 | MySQLKeywords::Insert => "INSERT", 51 | MySQLKeywords::Unique => "UNIQUE", 52 | MySQLKeywords::Values => "VALUES", 53 | MySQLKeywords::Offset => "OFFSET", 54 | MySQLKeywords::IfExists => "IF EXISTS", 55 | MySQLKeywords::DropTable => "DROP TABLE", 56 | MySQLKeywords::References => "REFERENCES", 57 | MySQLKeywords::ShowTables => "SHOW TABLES", 58 | MySQLKeywords::LockTables => "LOCK TABLES", 59 | MySQLKeywords::InsertInto => "INSERT INTO", 60 | MySQLKeywords::AlterTable => "ALTER TABLE", 61 | MySQLKeywords::ColumnName => "COLUMN_NAME", 62 | MySQLKeywords::ForeignKey => "FOREIGN KEY", 63 | MySQLKeywords::CreateTable => "CREATE TABLE", 64 | MySQLKeywords::SelectFrom => "SELECT * FROM", 65 | MySQLKeywords::IfNotExists => "IF NOT EXISTS", 66 | MySQLKeywords::UnlockTables => "UNLOCK TABLES", 67 | MySQLKeywords::ShowKeysFrom => "SHOW KEYS FROM", 68 | MySQLKeywords::AddConstraint => "ADD CONSTRAINT", 69 | MySQLKeywords::TruncateTable => "TRUNCATE TABLE", 70 | MySQLKeywords::ShowColumns => "SHOW COLUMNS FROM", 71 | MySQLKeywords::ConstraintName => "CONSTRAINT_NAME", 72 | MySQLKeywords::CreateDatabase => "CREATE DATABASE", 73 | MySQLKeywords::InsertIgnore => "INSERT IGNORE INTO", 74 | MySQLKeywords::ShowCreateTable => "SHOW CREATE TABLE", 75 | MySQLKeywords::WherePrimaryKey => "WHERE Key_name='PRIMARY'", 76 | MySQLKeywords::ReferencedTableName => "REFERENCED_TABLE_NAME", 77 | MySQLKeywords::ReferencedColumnName => "REFERENCED_COLUMN_NAME", 78 | MySQLKeywords::AndReferencedIsNotNull => "AND REFERENCED_TABLE_NAME IS NOT NULL", 79 | MySQLKeywords::GetTableNames => "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = DATABASE()", 80 | MySQLKeywords::TableInfo => "SELECT
80 | MySQLKeywords::TableInfo => "SELECT COLUMN_NAME, DATA_TYPE, IS_NULLABLE, COLUMN_KEY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME", 81 | MySQLKeywords::ForeignKeyInfo => "SELECT COLUMN_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME", 82 | MySQLKeywords::GetAlterTable => "SELECT CONSTRAINT_NAME, COLUMN_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME FROM information_schema.KEY_COLUMN_USAGE WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME", 83 | 84 | MySQLKeywords::Comments => "--", 85 | MySQLKeywords::FinalComments => "---------------------------------------------------", 86 | } 87 | } 88 | 89 | } -------------------------------------------------------------------------------- /src/core/transfer.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs::File, 3 | error::Error, 4 | 5 | io::{ 6 | Read, 7 | BufReader, 8 | }, 9 | 10 | path::{ 11 | Path, 12 | PathBuf 13 | }, 14 | }; 15 | 16 | use regex::Regex; 17 | use flate2::read::GzDecoder; 18 | 19 | use mysql::{ 20 | *, 21 | prelude::* 22 | }; 23 | 24 | use crate::{ 25 | constants::regexp::RegExp, 26 | cmd::connection::Connection, 27 | handlers::import_handlers::ImportHandlers, 28 | 29 | ui::{ 30 | errors_alerts::ErrorsAlerts, 31 | success_alerts::SuccessAlerts, 32 | }, 33 | }; 34 | 35 | pub struct Transfer { 36 | host: String, 37 | port: u16, 38 | user: String, 39 | password: String, 40 | dbname: String, 41 | path: String, 42 | dump_file_path: String, 43 | } 44 | 45 | impl Transfer { 46 | 47 | pub fn new( 48 | host: &str, 49 | port: u16, 50 | user: &str, 51 | password: &str, 52 | dbname: &str, 53 | dump_file_path: &str, 54 | path: &str, 55 | ) -> Self { 56 | Self { 57 | host: host.to_string(), 58 | port, 59 | user: user.to_string(), 60 | password: password.to_string(), 61 | dbname: dbname.to_string(), 62 | path: path.to_string(), 63 | dump_file_path: dump_file_path.to_string(), 64 | } 65 | } 66 | 67 | fn complete_path(&self) -> Result<PathBuf, Box<dyn Error>> { 68 | let path = Path::new(&self.dump_file_path); 69 | 70 | if path.is_absolute() { 71 | Ok(path.to_path_buf()) 72 | } else { 73 | // Resolve a relative dump file against the configured base path (mirrors core/import.rs). 74 | Ok(Path::new(&self.path).join(&self.dump_file_path)) 75 | } 76 | } 77 | 78 | pub fn dump(&self) -> Result<(), Box<dyn Error>> { 79 | let pool = Connection { 80 | host: self.host.clone(), 81 | port: self.port, 82 | user: self.user.clone(), 83 | password: self.password.clone(), 84 | dbname: Some(self.dbname.clone()), 85 | }.create_mysql_pool()?; 86 | 87 | let mut conn = pool.get_conn()?; 88 | let is_compressed = self.dump_file_path.ends_with(".sql.gz"); 89 | 90 | let file = self.complete_path()?; 91 | 92 | let dump_content = if is_compressed { 93 | let file = File::open(&file)?; 94 | 95 | let mut decoder = GzDecoder::new(BufReader::new(file)); 96 | let mut content = String::new(); 97 | 98 | decoder.read_to_string(&mut content)?; 99 | content 100 | } else { 101 | let mut file = File::open(&file)?; 102 | let mut content = String::new(); 103 | 104 | file.read_to_string(&mut content)?; 105 | content 106 | }; 107 | 108 | let dump_content = ImportHandlers::new(&self.dbname, &dump_content).check_db_name(); 109 | 110 | let create_table_regex = Regex::new(RegExp::CREATE_TABLE).unwrap(); 111 | 112 | for statement in dump_content.split(';') { 113 | let trimmed = statement.trim(); 114 | 115 | if !trimmed.is_empty() { 116 | match conn.query_drop(trimmed) { 117 | Ok(_) => { 118 | if let Some(captures) =
create_table_regex.captures(trimmed) { 119 | if let Some(table_name) = captures.get(1) { 120 | SuccessAlerts::table(table_name.as_str()); 121 | } 122 | } 123 | } 124 | Err(e) => ErrorsAlerts::import(&self.dbname, trimmed, &e.to_string()), 125 | } 126 | } 127 | } 128 | 129 | SuccessAlerts::import(&self.dbname); 130 | Ok(()) 131 | } 132 | 133 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
12 | DumpSync is a lightweight tool designed for efficiently dumping and restoring MySQL databases. Whether you need to create backups, restore databases, or transfer data between servers, DumpSync provides a simple and automated solution.
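A typical first session looks like the sketch below (a minimal example; the subcommand and flag names follow `src/args_cli.rs`, while the folder and file names are placeholders):

```bash
# initialize a new DumpSync project in the current directory
dumpsync init

# take a single dump into ./backups, then exit
dumpsync export --once --folder ./backups

# restore a previously created dump file
dumpsync import --file ./backups/dump.sql
```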
17 | 18 | ## Features 19 | 20 | Some of the key features of DumpSync include: 21 | 22 | - **Automated Backups**: Schedule regular database dumps at configurable intervals. 23 | - **Flexible Configuration**: Easily configure settings using environment variables or command-line arguments. 24 | - **Simple Restoration**: Quickly restore your database from previously created dump files. 25 | - **Cross-Server Transfers**: Seamlessly transfer databases between different MySQL servers. 26 | - **XSS Scanner**: Scan for XSS vulnerabilities in tables to enhance security. 27 | - **XSS Report**: Generate a detailed report of XSS vulnerabilities found in the database. 28 | - **Share**: Share your dump or scan results with others. 29 | - **Schema**: Generate a schema file for the database. 30 | - **Encryption**: Encrypt your dump files for added security. 31 | - **Compression**: Compress your dump files to save disk space. 32 | - **Checksum**: Verify the integrity of your dump files using checksums. 33 | - **PDF Report**: Generate PDF reports for your dump files and settings. 34 | - **Truncate**: Truncate tables to remove all of their data. 35 | - **Visual**: Visualize the table structure of a database. 36 | - **Pull**: Pull the latest changes from a remote database to your local environment. 37 | - **History**: View the history of actions performed by the DumpSync tool. 38 | - **Pattern Matching**: Use patterns to exclude specific tables or lines during restore operations. 39 | - **Lock Tables**: Lock tables during dump operations to ensure data consistency. 40 | - **Dump Data**: Dump data from specific tables or the entire database. 41 | - **Import Data**: Import data from a JSON dump file into the database. 42 | 43 | ## Installation 44 | 45 | To install DumpSync, use the following command: 46 | 47 | > Make sure you have Rust installed on your system. If not, you can install it from [here](https://docs.dumpsync.com/install). 48 | 49 | ```bash 50 | cargo install dumpsync 51 | ``` 52 | 53 | To install using Scoop, run: 54 | 55 | ```bash 56 | scoop bucket add dumpsync https://github.com/YeDawa/DumpSync.git 57 | 58 | scoop install dumpsync 59 | ``` 60 | 61 | ## Documentation 62 | 63 | For more help and documentation, see: 64 | 65 | - [Overview](https://docs.dumpsync.com) 66 | - [Install](https://docs.dumpsync.com/install) 67 | - [Init](https://docs.dumpsync.com/init) 68 | - [Connection](https://docs.dumpsync.com/connection) 69 | - [Settings](https://docs.dumpsync.com/settings) 70 | - [Export](https://docs.dumpsync.com/export) 71 | - [Dump Data](https://docs.dumpsync.com/dumpdata) 72 | - [Import](https://docs.dumpsync.com/import) 73 | - [Import Data](https://docs.dumpsync.com/importdata) 74 | - [Transfer](https://docs.dumpsync.com/transfer) 75 | - [Truncate](https://docs.dumpsync.com/truncate) 76 | - [Pull](https://docs.dumpsync.com/pull) 77 | - [Scan XSS](https://docs.dumpsync.com/scan) 78 | - [Share](https://docs.dumpsync.com/share) 79 | - [Schema](https://docs.dumpsync.com/schema) 80 | - [Checksum](https://docs.dumpsync.com/checksum) 81 | - [Visual](https://docs.dumpsync.com/visual) 82 | - [History](https://docs.dumpsync.com/history) 83 | - [Skip Syntax](https://docs.dumpsync.com/skip-syntax) 84 | - [Writing patterns](https://docs.dumpsync.com/writing-patterns) 85 | 86 | ## License 87 | 88 | This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
89 | -------------------------------------------------------------------------------- /src/addons.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | env, 3 | error::Error, 4 | }; 5 | 6 | use crate::{ 7 | args_cli::*, 8 | init::DumpSyncInit, 9 | 10 | helpers::env::Env, 11 | 12 | ui::{ 13 | ui_base::UI, 14 | errors_alerts::ErrorsAlerts, 15 | }, 16 | 17 | plugins::{ 18 | schema::Schema, 19 | diagram::Diagram, 20 | scan_xss::ScanXSS, 21 | pastebin::Pastebin, 22 | checksum::Checksum, 23 | history_logs::HistoryLogs, 24 | }, 25 | }; 26 | 27 | pub struct DumpSyncAddons; 28 | 29 | impl DumpSyncAddons { 30 | 31 | pub async fn scan_xss(&self, options: ScanOptions) -> Result<(), Box> { 32 | Env::new(); 33 | UI::header(); 34 | 35 | let table = options.table; 36 | let payload = options.payload; 37 | 38 | let file = options.file; 39 | let offset = options.offset.unwrap_or(0); 40 | let limit = options.limit.unwrap_or(99999999999); 41 | let (dbname, host, user, password, port) = DumpSyncInit.load_db_config(); 42 | 43 | let header = format!("Scanning table(s): '{}'", table); 44 | UI::section_header(&header, "info"); 45 | 46 | ScanXSS::new( 47 | &host, port, &user, &password, &dbname, &table, payload.as_deref(), Some(offset), Some(limit), file.as_deref(), 48 | ).scan().await.expect("Failed to scan tables for XSS"); 49 | 50 | Ok(()) 51 | } 52 | 53 | pub fn schema(&self, options: SchemaOptions) -> Result<(), Box> { 54 | Env::new(); 55 | UI::header(); 56 | 57 | let file = options.file; 58 | let (dbname, host, user, password, port) = DumpSyncInit.load_db_config(); 59 | 60 | let header = "Generating schema file".to_string(); 61 | UI::section_header(&header, "info"); 62 | 63 | Schema::new( 64 | &host, port, &user, &password, &dbname, &file, 65 | ).create()?; 66 | 67 | Ok(()) 68 | } 69 | 70 | pub async fn share(&self, options: ShareOptions) -> Result<(), Box> { 71 | Env::new(); 72 | UI::header(); 73 | 74 | let file = options.file; 75 | let privacy = options.privacy.unwrap_or("unlisted".to_string()); 76 | let api_key = env::var("PASTEBIN_API_KEY").unwrap_or_default(); 77 | 78 | let header = format!("Sharing file: '{}'", file); 79 | UI::section_header(&header, "info"); 80 | 81 | Pastebin::new(&file, &api_key, &privacy).share().await?; 82 | Ok(()) 83 | } 84 | 85 | pub fn checksum(&self, options: ChecksumOptions) { 86 | Env::new(); 87 | UI::header(); 88 | 89 | let file = options.file; 90 | let output = options.output; 91 | 92 | UI::section_header("Generating checksum", "info"); 93 | 94 | if let Err(e) = Checksum::new( 95 | &file, 96 | output.as_deref(), 97 | ).generated() { 98 | ErrorsAlerts::checksum(e.to_string().as_str()); 99 | } 100 | } 101 | 102 | pub async fn visual(&self, options: VisualOptions) { 103 | Env::new(); 104 | UI::header(); 105 | 106 | let table = options.table; 107 | let (dbname, host, user, password, port) = DumpSyncInit.load_db_config(); 108 | 109 | let header = format!("Generating ER diagram for table: '{}'", table); 110 | UI::section_header(&header, "info"); 111 | 112 | let _ = Diagram::new( 113 | &host, port, &user, &password, &dbname, &table, 114 | ).diagram().await; 115 | } 116 | 117 | pub fn history(&self, history_type: &str, filter: Option) { 118 | Env::new(); 119 | UI::header(); 120 | 121 | match history_type { 122 | "backups" => { 123 | UI::section_header("Backup History", "info"); 124 | HistoryLogs::new().backups(filter); 125 | }, 126 | _ => UI::label("Unknown history type", "error"), 127 | } 128 | } 129 | 130 | } 131 | 
-------------------------------------------------------------------------------- /src/plugins/scan_xss.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | 3 | use mysql::{ 4 | *, 5 | prelude::*, 6 | }; 7 | 8 | use crate::{ 9 | cmd::connection::Connection, 10 | plugins::reports_xss::ReportsXSS, 11 | 12 | ui::{ 13 | ui_base::UI, 14 | scan_alerts::ScanAlerts, 15 | }, 16 | 17 | handlers::{ 18 | scan_handlers::ScanHandlers, 19 | mysql::mysql_queries_builders::MySqlQueriesBuilders, 20 | } 21 | }; 22 | 23 | pub struct ScanXSS { 24 | host: String, 25 | port: u16, 26 | user: String, 27 | password: String, 28 | dbname: String, 29 | 30 | table: String, 31 | payload: Option, 32 | offset: Option, 33 | limit: Option, 34 | file: Option, 35 | } 36 | 37 | impl ScanXSS { 38 | 39 | pub fn new( 40 | host: &str, 41 | port: u16, 42 | user: &str, 43 | password: &str, 44 | dbname: &str, 45 | 46 | table: &str, 47 | payload: Option<&str>, 48 | offset: Option, 49 | limit: Option, 50 | file: Option<&str>, 51 | ) -> Self { 52 | Self { 53 | host: host.to_string(), 54 | port, 55 | user: user.to_string(), 56 | password: password.to_string(), 57 | dbname: dbname.to_string(), 58 | 59 | table: table.to_string(), 60 | payload: payload.map(|s| s.to_string()), 61 | offset, 62 | limit, 63 | file: file.map(|s| s.to_string()), 64 | } 65 | } 66 | 67 | pub async fn scan(&self) -> Result<(), Box> { 68 | let pool = Connection { 69 | host: self.host.clone(), 70 | port: self.port, 71 | user: self.user.clone(), 72 | password: self.password.clone(), 73 | dbname: Some(self.dbname.clone()), 74 | }.create_mysql_pool()?; 75 | 76 | let mut conn = pool.get_conn()?; 77 | let patterns = ScanHandlers.read_patterns(self.payload.clone()).await?; 78 | let mut detections = Vec::new(); 79 | 80 | let tables: Vec<&str> = self.table.split(',') 81 | .map(|t| t.trim()) 82 | .filter(|t| !t.is_empty()) 83 | .collect(); 84 | 85 | for table in &tables { 86 | let mut xss_count = 0; 87 | 88 | if tables.len() > 1 { 89 | let text = format!("Table: '{}'", table); 90 | UI::label(&text, "info"); 91 | } 92 | 93 | let query = MySqlQueriesBuilders.select(table, self.offset.map(|o| o as usize), self.limit.map(|l| l as usize)); 94 | let rows: Vec = conn.query(query)?; 95 | 96 | for (row_index, row) in rows.iter().enumerate() { 97 | for (col_index, column) in row.columns_ref().iter().enumerate() { 98 | let value: Option = row.get(col_index); 99 | 100 | if let Some(value_str) = value.as_ref() { 101 | if ScanHandlers.is_potential_xss(value_str, &patterns) { 102 | let row_index = row_index + 1; 103 | let column = column.name_str(); 104 | ScanAlerts::detected(table, row_index, &column, &value_str); 105 | 106 | detections.push(( 107 | table.to_string(), 108 | row_index, 109 | column.to_string(), 110 | value_str.to_string(), 111 | )); 112 | 113 | xss_count += 1; 114 | } 115 | } 116 | } 117 | } 118 | 119 | if xss_count == 0 { 120 | ScanAlerts::not_detected(table); 121 | } 122 | 123 | if tables.len() > 1 { 124 | print!("\n"); 125 | } 126 | } 127 | 128 | let file_path = self.file.as_deref(); 129 | ReportsXSS.autodetect(detections, file_path)?; 130 | Ok(()) 131 | } 132 | 133 | } 134 | -------------------------------------------------------------------------------- /src/dumper.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | 3 | use crate::{ 4 | args_cli::*, 5 | ui::ui_base::UI, 6 | helpers::env::Env, 7 | init::DumpSyncInit, 8 | 9 | core::{ 10 | dump::Dump, 11 | 
truncate::Truncate, 12 | dump_data::DumpData, 13 | }, 14 | }; 15 | 16 | pub struct DumpSyncDumper; 17 | 18 | impl DumpSyncDumper { 19 | 20 | pub fn import(&self, options: ImportOptions) { 21 | Env::new(); 22 | UI::header(); 23 | 24 | let ignore_drop_table = options.ignore_drop_table; 25 | let backup_path = options.file.unwrap_or_else(|| Env::get_var("DS_DUMP_PATH")); 26 | let (dbname, host, user, password, port) = DumpSyncInit.load_db_config(); 27 | 28 | UI::section_header("Importing dump to server", "info"); 29 | Dump::new( 30 | &host, port, &user, &password, &dbname, &backup_path, None, &backup_path, None, Some(ignore_drop_table), None, None, None, 31 | ).import(); 32 | } 33 | 34 | pub fn import_json(&self, options: ImportDataOptions) { 35 | Env::new(); 36 | UI::header(); 37 | 38 | let backup_path = options.file; 39 | let (dbname, host, user, password, port) = DumpSyncInit.load_db_config(); 40 | 41 | UI::section_header("Importing JSON dump to server", "info"); 42 | Dump::new( 43 | &host, port, &user, &password, &dbname, &backup_path, None, &backup_path, None, None, None, None, None, 44 | ).import_json(); 45 | } 46 | 47 | pub fn export(&self, options: ExportOptions) { 48 | Env::new(); 49 | UI::header(); 50 | 51 | let interval = options.interval.unwrap_or_else(|| { 52 | Env::get_var_u64("DS_DUMP_INTERVAL") 53 | }); 54 | 55 | let pdf = options.pdf; 56 | let once = options.once; 57 | let encrypt = options.encrypt; 58 | let retain = options.retain; 59 | let backup_path = options.folder.unwrap_or_else(|| Env::get_var("DS_DUMP_PATH")); 60 | let (dbname, host, user, password, port) = DumpSyncInit.load_db_config(); 61 | 62 | UI::label("Press CTRL+C to exit the tool", "normal"); 63 | UI::section_header("Dumping the database", "info"); 64 | 65 | Dump::new( 66 | &host, port, &user, &password, &dbname, &backup_path, Some(interval), &backup_path, Some(encrypt), None, Some(once), retain, Some(pdf), 67 | ).export(); 68 | } 69 | 70 | pub fn export_dumpdata(&self, options: DumpDataOptions) { 71 | Env::new(); 72 | UI::header(); 73 | 74 | let dump_file_path = options.file.unwrap_or_else(|| Env::get_var("DS_DUMP_PATH")); 75 | let (dbname, host, user, password, port) = DumpSyncInit.load_db_config(); 76 | UI::section_header("Dumping the database", "info"); 77 | 78 | let _ = DumpData::new( 79 | &host, port, &user, &password, &dbname, &dump_file_path, 80 | ).export(); 81 | } 82 | 83 | pub fn truncate(&self, options: TruncateOptions) { 84 | Env::new(); 85 | UI::header(); 86 | 87 | let table = options.table; 88 | let encrypt = options.encrypt; 89 | let backup_path = options.folder.unwrap_or_else(|| Env::get_var("DS_DUMP_PATH")); 90 | let (dbname, host, user, password, port) = DumpSyncInit.load_db_config(); 91 | 92 | UI::label("Press CTRL+C to exit the tool", "normal"); 93 | UI::section_header("Truncate table", "info"); 94 | 95 | let _ = Truncate::new( 96 | &host, port, &user, &password, &dbname, &backup_path, &table, Some(encrypt), 97 | ).table(); 98 | } 99 | 100 | pub fn transfer(&self, options: TransferOptions) { 101 | Env::new(); 102 | UI::header(); 103 | 104 | let backup_path = options.file.unwrap(); 105 | let (_, host, user, password, port) = DumpSyncInit.load_db_config(); 106 | let dbname = env::var("DS_TRANSFER_DB_NAME").or_else(|_| env::var("DS_TRANSFER_DB_NAME")).unwrap_or_default(); 107 | 108 | UI::section_header("Importing dump to server", "info"); 109 | Dump::new( 110 | &host, port, &user, &password, &dbname, &backup_path, None, &backup_path, None, None, None, None, None, 111 | ).transfer(); 112 | } 
113 | 114 | } 115 | -------------------------------------------------------------------------------- /src/plugins/reports_pdf.rs: -------------------------------------------------------------------------------- 1 | use printpdf::*; 2 | 3 | use std::{ 4 | fs::File, 5 | io::BufWriter, 6 | }; 7 | 8 | use crate::{ 9 | helpers::configs::Configs, 10 | ui::report_alerts::ReportAlerts, 11 | handlers::reports_handlers::ReportsHandlers, 12 | }; 13 | 14 | pub struct ReportsPdfs { 15 | file: String, 16 | path: String, 17 | interval: usize, 18 | counter: usize, 19 | } 20 | 21 | impl ReportsPdfs { 22 | 23 | pub fn new(file: &str, path: &str, interval: usize, counter: usize) -> Self { 24 | Self { 25 | file: file.to_string(), 26 | path: path.to_string(), 27 | interval, 28 | counter, 29 | } 30 | } 31 | 32 | fn add_text( 33 | &self, 34 | doc: &mut PdfDocumentReference, 35 | current_page: &mut PdfPageIndex, 36 | current_layer: &mut PdfLayerIndex, 37 | text: &str, 38 | y: &mut Mm, 39 | margin_left: &Mm, 40 | font: &IndirectFontRef, 41 | line_height: &Mm, 42 | size: f32, 43 | ) { 44 | if *y < Mm(20.0) { 45 | let (new_page, new_layer) = doc.add_page(Mm(210.0), Mm(297.0), "New page"); 46 | *current_page = new_page; 47 | *current_layer = new_layer; 48 | *y = Mm(280.0); 49 | } 50 | 51 | let layer = doc.get_page(*current_page).get_layer(*current_layer); 52 | layer.use_text(text, size, *margin_left, *y, font); 53 | 54 | *y -= *line_height; 55 | } 56 | 57 | pub fn dump(&self) { 58 | let interval_u64 = self.interval as u64; 59 | let reports = ReportsHandlers::new(&self.path, &interval_u64, self.counter, None); 60 | 61 | let (mut doc, mut current_page, mut current_layer) = 62 | PdfDocument::new("Report dumps", Mm(210.0), Mm(297.0), "Layer 1"); 63 | 64 | let font = doc.add_builtin_font(BuiltinFont::Helvetica).unwrap(); 65 | let font_header = doc.add_builtin_font(BuiltinFont::CourierBold).unwrap(); 66 | 67 | let mg_top = Mm(280.0); 68 | let mg_left = Mm(10.0); 69 | let line_height = Mm(8.0); 70 | let mut y_position = mg_top; 71 | 72 | self.add_text(&mut doc, &mut current_page, &mut current_layer, "Final report", &mut y_position, &mg_left, &font_header, &line_height, 12.0); 73 | self.add_text(&mut doc, &mut current_page, &mut current_layer, &format!("Path: {}", &self.path), &mut y_position, &mg_left, &font, &line_height, 8.0); 74 | self.add_text(&mut doc, &mut current_page, &mut current_layer, &format!("Interval: {} seconds", &self.interval), &mut y_position, &mg_left, &font, &line_height, 8.0); 75 | self.add_text(&mut doc, &mut current_page, &mut current_layer, &format!("Total number of dumps: {}", &self.counter), &mut y_position, &mg_left, &font, &line_height, 8.0); 76 | 77 | if let Some((last_dump, size)) = reports.get_most_recent_sql_file(&self.path) { 78 | self.add_text(&mut doc, &mut current_page, &mut current_layer, &format!("Last dump: {} ({})", last_dump, size), &mut y_position, &mg_left, &font, &line_height, 8.0); 79 | 80 | if let Some(tables) = reports.extract_table_names(&last_dump) { 81 | self.add_text(&mut doc, &mut current_page, &mut current_layer, "Tables dumped:", &mut y_position, &mg_left, &font_header, &line_height, 12.0); 82 | 83 | for table in tables { 84 | self.add_text(&mut doc, &mut current_page, &mut current_layer, &format!("- {}", table), &mut y_position, &mg_left, &font, &line_height, 8.0); 85 | } 86 | } else { 87 | self.add_text(&mut doc, &mut current_page, &mut current_layer, "No tables found in the dump.", &mut y_position, &mg_left, &font, &line_height, 8.0); 88 | } 89 | } 90 | 91 | 
self.add_text(&mut doc, &mut current_page, &mut current_layer, "Settings:", &mut y_position, &mg_left, &font_header, &line_height, 12.0); 92 | 93 | for line in Configs.read_yaml_as_text().lines() { 94 | if line.is_empty() { continue } 95 | self.add_text(&mut doc, &mut current_page, &mut current_layer, line, &mut y_position, &mg_left, &font, &line_height, 8.0); 96 | } 97 | 98 | let mut pdf = BufWriter::new(File::create(&self.file).expect("Error creating PDF")); 99 | doc.save(&mut pdf).expect("Error saving PDF"); 100 | 101 | ReportAlerts::success_pdf(&self.file); 102 | } 103 | 104 | } 105 | -------------------------------------------------------------------------------- /src/cloud/api.rs: -------------------------------------------------------------------------------- 1 | use serde::Deserialize; 2 | use serde_json::from_str; 3 | 4 | use std::{ 5 | io::Read, 6 | fs::File, 7 | error::Error, 8 | }; 9 | 10 | use reqwest::{ 11 | Body, 12 | Client, 13 | header::AUTHORIZATION, 14 | 15 | multipart::{ 16 | Form, 17 | Part, 18 | }, 19 | }; 20 | 21 | use crate::{ 22 | helpers::converter::Converter, 23 | 24 | constants::{ 25 | global::Global, 26 | 27 | api::{ 28 | api_token::APIToken, 29 | api_endpoints::APIEndpoints, 30 | }, 31 | }, 32 | }; 33 | 34 | #[allow(dead_code)] 35 | #[derive(Debug, Deserialize)] 36 | pub struct Response { 37 | pub url: String, 38 | pub encrypted: bool, 39 | pub private: bool, 40 | pub size: u64, 41 | pub db_name: String, 42 | pub created_at: String, 43 | } 44 | 45 | #[allow(dead_code)] 46 | #[derive(Debug, Deserialize, Default)] 47 | pub struct ResponseUpload { 48 | pub success: bool, 49 | pub message: String, 50 | pub url: String, 51 | } 52 | 53 | pub struct API { 54 | path: Option, 55 | encrypted: Option, 56 | backup: Option, 57 | dbname: Option, 58 | interval: Option, 59 | } 60 | 61 | #[allow(dead_code)] 62 | #[derive(Debug, Deserialize)] 63 | pub struct APIUpload { 64 | url: String, 65 | message: String, 66 | } 67 | 68 | impl API { 69 | 70 | pub fn new( 71 | path: Option<&str>, 72 | backup: Option<&str>, 73 | dbname: Option<&str>, 74 | encrypted: Option, 75 | interval: Option, 76 | ) -> Self { 77 | Self { 78 | path: path.map(|s| s.to_string()), 79 | dbname: dbname.map(|s| s.to_string()), 80 | backup: backup.map(|s| s.to_string()), 81 | 82 | interval, 83 | encrypted, 84 | } 85 | } 86 | 87 | pub async fn get(&self) -> Result> { 88 | let endpoint = format!("{}/raw", self.backup.as_deref().unwrap_or("")); 89 | let api_url = APIEndpoints.backups(&endpoint); 90 | 91 | let client = reqwest::Client::new(); 92 | let request = client 93 | .get(api_url) 94 | .header(AUTHORIZATION, APIToken.value()); 95 | 96 | let response = request 97 | .send() 98 | .await? 99 | .error_for_status()? 
100 | .text() 101 | .await?; 102 | 103 | Ok(response) 104 | } 105 | 106 | pub async fn post(&self) -> Result<ResponseUpload, Box<dyn Error>> { 107 | let api_url = APIEndpoints.backups("create"); 108 | let db_name = self.dbname.clone().unwrap_or_default(); 109 | let path = self.path.as_ref().ok_or("No path provided")?; 110 | 111 | let client = Client::new(); 112 | let mut file = File::open(path)?; 113 | let mut buffer = Vec::new(); 114 | file.read_to_end(&mut buffer)?; 115 | 116 | let settings_json = Converter::new( 117 | Global::app_config() 118 | ).yaml_to_json()?; 119 | 120 | let file_name = std::path::Path::new(path) 121 | .file_name() 122 | .and_then(|name| name.to_str()) 123 | .ok_or("Invalid file name")?; 124 | 125 | let file_part = Part::stream(Body::from(buffer)) 126 | .file_name(file_name.to_string()); 127 | 128 | let interval_str = self.interval.map_or("0".to_string(), |v| v.to_string()); 129 | let encrypted_str = self.encrypted.map_or("false".to_string(), |v| v.to_string()); 130 | 131 | let form = Form::new() 132 | .text("db_name", db_name) 133 | .text("path", path.clone()) 134 | .text("settings", settings_json) 135 | .text("interval", interval_str) 136 | .text("encrypted", encrypted_str) 137 | .text("privacy", "private".to_string()) 138 | .part("file", file_part); 139 | 140 | let response = client 141 | .post(api_url) 142 | .header(AUTHORIZATION, APIToken.value()) 143 | .multipart(form) 144 | .send() 145 | .await?; 146 | 147 | let response_raw = response.text().await?; 148 | 149 | let parsed = match from_str::<ResponseUpload>(&response_raw) { 150 | Ok(json) => json, 151 | Err(_) => { 152 | ResponseUpload { 153 | message: response_raw.clone(), 154 | ..Default::default() 155 | } 156 | } 157 | }; 158 | 159 | Ok(parsed) 160 | } 161 | 162 | } -------------------------------------------------------------------------------- /patterns.txt: -------------------------------------------------------------------------------- 1 | (?i).*? // Detects .*? // Identifies tags containing embedded `, etc. -------------------------------------------------------------------------------- /docs/writing-patterns.md: -------------------------------------------------------------------------------- 74 | - `[^>]*?`: Matches any characters inside the tag, non-greedily, including attributes. 75 | - `(href|src)`: Matches the `href` or `src` attributes, which can be used to execute malicious JavaScript. 76 | - `\s*=\s*`: Matches the equal sign (`=`) with optional whitespace on either side. 77 | - `['\"]?javascript:`: Detects the presence of `javascript:` in the attribute value, which is a common scheme for XSS. 78 | - `[^'\">]+`: Matches any characters after `javascript:`, up until a `'`, `"`, or `>`. 79 | 80 | ### Purpose: 81 | This regex detects tags with `href` or `src` attributes that use `javascript:` as a URL scheme, often found in links or images used to execute scripts. 82 | 83 | ### 5. Detecting Inline Event Handlers 84 | 85 | **Regex Pattern**: 86 | ```regex 87 | (?i)<[a-z]+[^>]*?(on[a-z]+)\s*=\s*['\"]?[^'\">]+['\"]?[^>]*> 88 | ``` 89 | 90 | ### Explanation: 91 | - `(?i)`: Case-insensitive flag to match HTML tag names. 92 | - `<[a-z]+`: Matches any HTML tag. 93 | - `[^>]*?`: Matches any characters inside the tag, non-greedily. 94 | - `(on[a-z]+)`: Matches any inline event handler, such as `onclick`, `onload`, `onmouseover`, etc. 95 | - `\s*=\s*`: Matches the equal sign with optional whitespace. 96 | - `['\"]?`: Matches an optional single or double quote. 97 | - `[^'\">]+`: Matches any characters after the `=` until a `'`, `"`, or `>`, indicating the event handler's JavaScript code.
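Before adding a pattern like this one to a pattern file, it can be sanity-checked in isolation. The sketch below uses the `regex` crate (already a dependency of this project; see `src/core/transfer.rs`); the sample HTML strings are invented for illustration:

```rust
use regex::Regex;

fn main() {
    // Pattern 5 above: any tag carrying an inline on* event handler.
    let pattern = Regex::new(
        r#"(?i)<[a-z]+[^>]*?(on[a-z]+)\s*=\s*['"]?[^'">]+['"]?[^>]*>"#,
    ).unwrap();

    let safe = "<p>hello</p>";
    let suspicious = r#"<img src="x" onerror="alert(1)">"#;

    assert!(!pattern.is_match(safe));
    assert!(pattern.is_match(suspicious));

    // Capture group 1 names the offending handler attribute.
    println!("{}", &pattern.captures(suspicious).unwrap()[1]); // prints "onerror"
}
```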
98 | 99 | ### Purpose: 100 | This regex identifies HTML tags with inline event handlers, which are often used to execute JavaScript code when triggered, making them a potential target for XSS attacks. 101 | 102 | ### Comments in pattern file 103 | 104 | - A comment line begins with `//`. 105 | - Anything written after the `//` is treated as a comment and can be any text you like. 106 | - Comments are ignored by the interpreter and do not affect the execution of the code. -------------------------------------------------------------------------------- /src/plugins/history_logs.rs: -------------------------------------------------------------------------------- 1 | use std::cmp; 2 | 3 | use crate::{ 4 | ui::ui_base::UI, 5 | 6 | helpers::{ 7 | env::Env, 8 | history::History, 9 | }, 10 | }; 11 | 12 | pub struct HistoryLogs; 13 | 14 | impl HistoryLogs { 15 | 16 | pub fn new() -> Self { 17 | HistoryLogs 18 | } 19 | 20 | pub fn backups(&self, filter: Option) { 21 | Env::new(); 22 | UI::header(); 23 | 24 | UI::section_header("Backup History", "info"); 25 | let items = History::new().list_backups_with_filters( 26 | Some(filter.as_deref().unwrap_or("")) 27 | ); 28 | 29 | match items { 30 | Ok(backups) => { 31 | if backups.is_empty() { 32 | UI::label("No backups found", "warning"); 33 | } else { 34 | let mut max_id = 2; 35 | let mut max_slug = 4; 36 | let mut max_db = 8; 37 | let mut max_filename = 8; 38 | let mut max_host = 4; 39 | let mut max_date = 10; 40 | let mut max_size = 4; 41 | let mut max_encrypt = 7; 42 | let mut max_compress = 8; 43 | 44 | for (id, slug, db, filename, host, created_at, size, encrypt, compress) in &backups { 45 | max_id = cmp::max(max_id, id.to_string().len()); 46 | max_slug = cmp::max(max_slug, slug.len()); 47 | max_db = cmp::max(max_db, db.len()); 48 | max_filename = cmp::max(max_filename, filename.len()); 49 | max_host = cmp::max(max_host, host.len()); 50 | 51 | let replaced = created_at.replace("T", " "); 52 | let date = replaced.split('.').next().unwrap_or(&created_at); 53 | 54 | max_date = cmp::max(max_date, date.len()); 55 | max_size = cmp::max(max_size, size.to_string().len()); 56 | max_encrypt = cmp::max(max_encrypt, encrypt.to_string().len()); 57 | max_compress = cmp::max(max_compress, compress.to_string().len()); 58 | } 59 | 60 | let print_row = |id: &str, slug: &str, db: &str, filename: &str, host: &str, date: &str, size: &str, encrypt: &str, compress: &str| { 61 | println!( 62 | "| {:sizew$} | {: UI::label(&format!("Error fetching history: {}", e), "error"), 117 | } 118 | } 119 | 120 | } 121 | -------------------------------------------------------------------------------- /src/args_cli.rs: -------------------------------------------------------------------------------- 1 | use clap_cargo::style; 2 | 3 | use clap::{ 4 | Parser, 5 | Subcommand, 6 | ColorChoice, 7 | builder::styling::Styles, 8 | }; 9 | 10 | pub const CLAP_STYLING: Styles = Styles::styled() 11 | .header(style::HEADER) 12 | .usage(style::USAGE) 13 | .literal(style::LITERAL) 14 | .placeholder(style::PLACEHOLDER) 15 | .error(style::ERROR) 16 | .valid(style::VALID) 17 | .invalid(style::INVALID); 18 | 19 | #[derive(Parser)] 20 | #[command(styles = CLAP_STYLING)] 21 | #[command(author, version, about, long_about = None, color = ColorChoice::Auto)] 22 | #[command(propagate_version = true)] 23 | pub struct Cli { 24 | #[command(subcommand)] 25 | pub command: Commands, 26 | } 27 | 28 | #[derive(Subcommand)] 29 | pub enum Commands { 30 | /// Make the database dump 31 | Export(ExportOptions), 32 | 33 | /// 
Make the database dump in JSON format 34 | DumpData(DumpDataOptions), 35 | 36 | /// Import the database dump in JSON format 37 | ImportData(ImportDataOptions), 38 | 39 | /// Import the database dump 40 | Import(ImportOptions), 41 | 42 | /// Transfer the dump to other server 43 | Transfer(TransferOptions), 44 | 45 | /// Initialize the new dump sync project 46 | Init, 47 | 48 | /// Show a visual ER diagram of the table 49 | Visual(VisualOptions), 50 | 51 | /// Safely truncate one or more tables 52 | Truncate(TruncateOptions), 53 | 54 | /// Scan tables for potential XSS payloads 55 | Scan(ScanOptions), 56 | 57 | /// Share the dump or scan results 58 | Share(ShareOptions), 59 | 60 | /// Generate a schema from the database 61 | Schema(SchemaOptions), 62 | 63 | /// Generate checksum for the file 64 | Checksum(ChecksumOptions), 65 | 66 | /// Pull a dump from DumpSync Cloud 67 | Pull { 68 | /// Dump file to pull from the cloud 69 | file: String, 70 | }, 71 | 72 | /// Push the dump to DumpSync Cloud 73 | Push { 74 | /// Dump file to push to the cloud 75 | file: String, 76 | }, 77 | 78 | /// History of backups 79 | History { 80 | /// Type of history 81 | history_type: String, 82 | 83 | /// Filter by different fields 84 | filter: Option<String>, 85 | }, 86 | 87 | /// Login to DumpSync Cloud 88 | Login, 89 | } 90 | 91 | #[derive(Parser)] 92 | pub struct ExportOptions { 93 | #[arg(short, long)] 94 | /// Interval between dumps (in seconds) 95 | pub interval: Option<u64>, 96 | 97 | #[arg(short, long)] 98 | /// Backup path 99 | pub folder: Option<String>, 100 | 101 | #[arg(short, long)] 102 | /// Encrypt the dump file 103 | pub encrypt: bool, 104 | 105 | #[arg(long)] 106 | /// Export the dump once then exit 107 | pub once: bool, 108 | 109 | #[arg(short, long)] 110 | /// Maximum number of backups to retain for the dump 111 | pub retain: Option<u64>, 112 | 113 | #[arg(long)] 114 | /// Generate a PDF report 115 | pub pdf: bool, 116 | } 117 | 118 | #[derive(Parser)] 119 | pub struct DumpDataOptions { 120 | #[arg(short, long)] 121 | /// Backup file path 122 | pub file: Option<String>, 123 | } 124 | 125 | #[derive(Parser)] 126 | pub struct ImportDataOptions { 127 | #[arg(short, long)] 128 | /// Backup file path 129 | pub file: String, 130 | } 131 | 132 | #[derive(Parser)] 133 | pub struct ImportOptions { 134 | #[arg(short, long)] 135 | /// Dump file path 136 | pub file: Option<String>, 137 | 138 | #[arg(long)] 139 | /// Ignore DROP TABLE statements in the dump 140 | pub ignore_drop_table: bool, 141 | } 142 | 143 | #[derive(Parser)] 144 | pub struct TransferOptions { 145 | #[arg(short, long)] 146 | /// Dump file path 147 | pub file: Option<String>, 148 | } 149 | 150 | #[derive(Parser)] 151 | pub struct VisualOptions { 152 | #[arg(short, long)] 153 | /// Table name to show the ER diagram for 154 | pub table: String, 155 | } 156 | 157 | #[derive(Parser)] 158 | pub struct TruncateOptions { 159 | #[arg(short, long)] 160 | /// Table name to truncate 161 | pub table: String, 162 | 163 | #[arg(short, long)] 164 | /// Backup path 165 | pub folder: Option<String>, 166 | 167 | #[arg(short, long)] 168 | /// Encrypt the backup file 169 | pub encrypt: bool, 170 | } 171 | 172 | #[derive(Parser)] 173 | pub struct ScanOptions { 174 | #[arg(short, long)] 175 | /// Table name(s) to scan (comma-separated) 176 | pub table: String, 177 | 178 | #[arg(short, long)] 179 | /// Payload file path 180 | pub payload: Option<String>, 181 | 182 | #[arg(short, long)] 183 | /// Offset for scan 184 | pub offset: Option<u64>, 185 | 186 | #[arg(short, long)] 187 | /// Limit for scan 188 | pub limit: Option<u64>, 189 | 190 | #[arg(short, long)] 191 | /// File path for output 192 | pub
file: Option<String>, 193 | } 194 | 195 | #[derive(Parser)] 196 | pub struct ShareOptions { 197 | #[arg(long)] 198 | /// Privacy level for share 199 | pub privacy: Option<String>, 200 | 201 | #[arg(short, long)] 202 | /// File path for share 203 | pub file: String, 204 | } 205 | 206 | #[derive(Parser)] 207 | pub struct SchemaOptions { 208 | #[arg(short, long)] 209 | /// Output file path 210 | pub file: String, 211 | } 212 | 213 | #[derive(Parser)] 214 | pub struct ChecksumOptions { 215 | #[arg(short, long)] 216 | /// Input file path to calculate checksum 217 | pub file: String, 218 | 219 | #[arg(short, long)] 220 | /// Output file path to save checksum 221 | pub output: Option<String>, 222 | } -------------------------------------------------------------------------------- /src/core/dump_data.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | error::Error, 3 | fs::File, 4 | io::{Write, BufWriter}, 5 | }; 6 | 7 | use mysql::*; 8 | use mysql::prelude::*; 9 | use serde_json::{json, Value}; 10 | 11 | use crate::{ 12 | utils::file::FileUtils, 13 | helpers::configs::Configs, 14 | cmd::connection::Connection, 15 | ui::success_alerts::SuccessAlerts, 16 | 17 | handlers::{ 18 | dump_handlers::DumpHandlers, 19 | mysql::mysql_queries_builders::MySqlQueriesBuilders, 20 | }, 21 | }; 22 | 23 | pub struct DumpData { 24 | pub host: String, 25 | pub port: u16, 26 | pub user: String, 27 | pub password: String, 28 | pub dbname: String, 29 | pub dump_file_path: String, 30 | } 31 | 32 | impl DumpData { 33 | 34 | pub fn new( 35 | host: &str, 36 | port: u16, 37 | user: &str, 38 | password: &str, 39 | dbname: &str, 40 | dump_file_path: &str, 41 | ) -> Self { 42 | Self { 43 | host: host.to_string(), 44 | port, 45 | user: user.to_string(), 46 | password: password.to_string(), 47 | dbname: dbname.to_string(), 48 | dump_file_path: dump_file_path.to_string(), 49 | } 50 | } 51 | 52 | pub fn export(&self) -> Result<(), Box<dyn Error>> { 53 | let dump_file_path = DumpHandlers.generate_dump_json_file_path( 54 | &self.dbname, 55 | &self.dump_file_path 56 | ); 57 | 58 | FileUtils::create_path(&dump_file_path); 59 | let file = File::create(&dump_file_path)?; 60 | let mut writer = BufWriter::new(file); 61 | 62 | let pool = Connection { 63 | host: self.host.clone(), 64 | port: self.port, 65 | user: self.user.clone(), 66 | password: self.password.clone(), 67 | dbname: Some(self.dbname.clone()), 68 | }.create_mysql_pool()?; 69 | 70 | let mut conn = pool.get_conn()?; 71 | 72 | writer.write_all(b"[\n")?; 73 | 74 | let mut is_first = true; 75 | self.dump_all_tables(&mut conn, &mut writer, &mut is_first)?; 76 | 77 | writer.write_all(b"\n]")?; 78 | writer.flush()?; 79 | 80 | SuccessAlerts::dump(&dump_file_path); 81 | Ok(()) 82 | } 83 | 84 | fn dump_all_tables(&self, conn: &mut PooledConn, writer: &mut BufWriter<File>, is_first: &mut bool) -> Result<(), Box<dyn Error>> { 85 | let rows: Vec<Row> = conn.query(MySqlQueriesBuilders.show_tables())?; 86 | let ignore_tables = Configs.list("exports", "ignore_tables").unwrap_or_default(); 87 | 88 | for row in rows { 89 | let table: String = row.get(0).unwrap(); 90 | if ignore_tables.contains(&serde_yaml::Value::String(table.clone())) { 91 | continue; 92 | } 93 | 94 | self.dump_rows(conn, writer, &table, is_first)?; 95 | } 96 | 97 | Ok(()) 98 | } 99 | 100 | fn dump_rows(&self, conn: &mut PooledConn, writer: &mut BufWriter<File>, table: &str, is_first: &mut bool) -> Result<(), Box<dyn Error>> { 101 | let pk = self.get_primary_key(conn, table)?; 102 | let rows: Vec<Row> = conn.exec(MySqlQueriesBuilders.select(table, None,
None), ())?; 103 | 104 | for row in rows { 105 | let obj = self.row_to_django_obj(&self.dbname, table, &pk, &row)?; 106 | let js = serde_json::to_string(&obj)?; 107 | 108 | if !*is_first { 109 | writer.write_all(b",\n")?; 110 | } 111 | 112 | *is_first = false; 113 | writer.write_all(js.as_bytes())?; 114 | } 115 | 116 | Ok(()) 117 | } 118 | 119 | fn get_primary_key(&self, conn: &mut PooledConn, table: &str) -> Result> { 120 | let sql = MySqlQueriesBuilders.get_primary_key(table); 121 | let rows: Vec = conn.query(sql)?; 122 | 123 | let col = rows.first() 124 | .and_then(|r| r.get::("Column_name")) 125 | .ok_or("Primary key not found")?; 126 | 127 | Ok(col) 128 | } 129 | 130 | fn row_to_django_obj(&self, app: &str, table: &str, pk_column: &str, row: &Row) -> Result> { 131 | let mut fields = serde_json::Map::new(); 132 | let mut pk_value = Value::Null; 133 | 134 | let columns = row.columns_ref(); 135 | 136 | for (i, col) in columns.iter().enumerate() { 137 | let name = col.name_str().to_string(); 138 | let raw: Option = row.get(i); 139 | 140 | let val = match raw.unwrap_or(mysql::Value::NULL) { 141 | mysql::Value::NULL => Value::Null, 142 | mysql::Value::Bytes(b) => Value::String(String::from_utf8_lossy(&b).to_string()), 143 | mysql::Value::Int(x) => json!(x), 144 | mysql::Value::UInt(x) => json!(x), 145 | mysql::Value::Float(x) => json!(x), 146 | mysql::Value::Double(x) => json!(x), 147 | mysql::Value::Date(y, m, d, hh, mm, ss, _) => 148 | json!(format!("{y:04}-{m:02}-{d:02}T{hh:02}:{mm:02}:{ss:02}Z")), 149 | other => json!(format!("{:?}", other)), 150 | }; 151 | 152 | if name == pk_column { 153 | pk_value = val; 154 | } else { 155 | fields.insert(name, val); 156 | } 157 | } 158 | 159 | Ok(json!({ 160 | "model": format!("{app}.{table}"), 161 | "pk": pk_value, 162 | "fields": fields 163 | })) 164 | } 165 | 166 | } 167 | -------------------------------------------------------------------------------- /src/core/import.rs: -------------------------------------------------------------------------------- 1 | use mysql::*; 2 | use flate2::read::GzDecoder; 3 | 4 | use std::{ 5 | fs::File, 6 | error::Error, 7 | 8 | io::{ 9 | Read, 10 | BufReader, 11 | }, 12 | 13 | path::{ 14 | Path, 15 | PathBuf 16 | }, 17 | }; 18 | 19 | use crate::{ 20 | ui::success_alerts::SuccessAlerts, 21 | handlers::import_handlers::ImportHandlers, 22 | 23 | cmd::{ 24 | runner::Runner, 25 | encrypt::Encrypt, 26 | entropy::Entropy, 27 | connection::Connection, 28 | }, 29 | }; 30 | 31 | pub struct Import { 32 | host: String, 33 | port: u16, 34 | user: String, 35 | password: String, 36 | dbname: String, 37 | path: Option, 38 | ignore_drop_table: Option, 39 | dump_file_path: Option, 40 | sql_content: Option 41 | } 42 | 43 | impl Import { 44 | 45 | pub fn new( 46 | host: &str, 47 | port: u16, 48 | user: &str, 49 | password: &str, 50 | dbname: &str, 51 | ignore_drop_table: Option, 52 | dump_file_path: Option<&str>, 53 | path: Option<&str>, 54 | sql_content: Option<&str>, 55 | ) -> Self { 56 | Self { 57 | host: host.to_string(), 58 | port, 59 | ignore_drop_table, 60 | user: user.to_string(), 61 | password: password.to_string(), 62 | dbname: dbname.to_string(), 63 | path: path.map(|s| s.to_string()), 64 | dump_file_path: dump_file_path.map(|s| s.to_string()), 65 | sql_content: sql_content.map(|s| s.to_string()), 66 | } 67 | } 68 | 69 | fn complete_path(&self) -> Result> { 70 | let dump_file_path = self.dump_file_path.as_ref().ok_or("dump_file_path is None")?; 71 | let path = Path::new(dump_file_path); 72 | 73 | if path.is_absolute() { 74 
| Ok(path.to_path_buf()) 75 | } else { 76 | let base_path = self.path.as_ref().ok_or("path is None")?; 77 | Ok(Path::new(base_path).join(dump_file_path)) 78 | } 79 | } 80 | 81 | pub fn dump_encrypted(&self) -> Result<(), Box> { 82 | let pool = Connection { 83 | host: self.host.clone(), 84 | port: self.port, 85 | user: self.user.clone(), 86 | password: self.password.clone(), 87 | dbname: Some(self.dbname.clone()), 88 | }.create_mysql_pool()?; 89 | 90 | let mut conn = pool.get_conn()?; 91 | 92 | let dump_file_path = self.dump_file_path.as_ref().ok_or("dump_file_path is None")?; 93 | let decrypt = Encrypt::new(dump_file_path); 94 | let dump_content = String::from_utf8(decrypt.decrypt_and_read()?)?; 95 | 96 | let dump_content = ImportHandlers::new(&self.dbname, &dump_content).check_db_name(); 97 | let _ = Runner.import(&mut conn, &dump_content, &self.dbname, self.ignore_drop_table); 98 | 99 | SuccessAlerts::import(&self.dbname); 100 | Ok(()) 101 | } 102 | 103 | pub fn dump_plain(&self) -> Result<(), Box> { 104 | let pool = Connection { 105 | host: self.host.clone(), 106 | port: self.port, 107 | user: self.user.clone(), 108 | password: self.password.clone(), 109 | dbname: Some(self.dbname.clone()), 110 | }.create_mysql_pool()?; 111 | 112 | let mut conn = pool.get_conn()?; 113 | let is_compressed = self.dump_file_path.as_ref().map_or(false, |s| s.ends_with(".sql.gz")); 114 | 115 | let file = self.complete_path()?; 116 | 117 | let dump_content = if is_compressed { 118 | let file = File::open(file)?; 119 | let mut decoder = GzDecoder::new(BufReader::new(file)); 120 | let mut content = String::new(); 121 | decoder.read_to_string(&mut content)?; 122 | content 123 | } else { 124 | let dump_file_path = self.dump_file_path.as_ref().ok_or("dump_file_path is None")?; 125 | let mut file = File::open(dump_file_path)?; 126 | let mut content = String::new(); 127 | file.read_to_string(&mut content)?; 128 | content 129 | }; 130 | 131 | let dump_content = ImportHandlers::new(&self.dbname, &dump_content).check_db_name(); 132 | let _ = Runner.import(&mut conn, &dump_content, &self.dbname, self.ignore_drop_table); 133 | 134 | SuccessAlerts::import(&self.dbname); 135 | Ok(()) 136 | } 137 | 138 | pub async fn dump_directly(&self) -> Result<(), Box> { 139 | let pool = Connection { 140 | host: self.host.clone(), 141 | port: self.port, 142 | user: self.user.clone(), 143 | password: self.password.clone(), 144 | dbname: Some(self.dbname.clone()), 145 | }.create_mysql_pool()?; 146 | 147 | let mut conn = pool.get_conn()?; 148 | let sql_content = self.sql_content.as_deref().ok_or("sql_content is None")?; 149 | let dump_content = ImportHandlers::new(&self.dbname, sql_content).check_db_name(); 150 | let _ = Runner.import(&mut conn, &dump_content, &self.dbname, self.ignore_drop_table); 151 | 152 | Ok(()) 153 | } 154 | 155 | pub fn dump(&self) -> Result<(), Box> { 156 | let dump_file_path = self.dump_file_path.as_ref().ok_or("dump_file_path is None")?; 157 | 158 | if Entropy::new(dump_file_path.as_str()).calculate()? 
> 7.5 { 159 | let _ = self.dump_encrypted(); 160 | } else { 161 | let _ = self.dump_plain(); 162 | } 163 | 164 | Ok(()) 165 | } 166 | 167 | } 168 | -------------------------------------------------------------------------------- /src/core/export.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs::File, 3 | error::Error, 4 | }; 5 | 6 | use chrono::Utc; 7 | 8 | use mysql::{ 9 | *, 10 | prelude::* 11 | }; 12 | 13 | use crate::{ 14 | ui::success_alerts::SuccessAlerts, 15 | 16 | cmd::{ 17 | encrypt::Encrypt, 18 | connection::Connection, 19 | }, 20 | 21 | utils::{ 22 | file::FileUtils, 23 | generate::Generate, 24 | }, 25 | 26 | helpers::{ 27 | configs::Configs, 28 | history::History, 29 | }, 30 | 31 | handlers::{ 32 | comments_headers::CommentsHeaders, 33 | 34 | mysql::{ 35 | mysql_export_handlers::ExportHandlers, 36 | mysql_queries_builders::MySqlQueriesBuilders, 37 | }, 38 | }, 39 | }; 40 | 41 | pub struct Export { 42 | pub host: String, 43 | pub port: u16, 44 | pub user: String, 45 | pub password: String, 46 | pub dbname: String, 47 | pub dump_file_path: String, 48 | pub encrypt: Option, 49 | pub table: Option, 50 | } 51 | 52 | impl Export { 53 | 54 | pub fn new( 55 | host: &str, 56 | port: u16, 57 | user: &str, 58 | password: &str, 59 | dbname: &str, 60 | dump_file_path: &str, 61 | encrypt: Option, 62 | table: Option 63 | ) -> Self { 64 | Self { 65 | host: host.to_string(), 66 | port, 67 | user: user.to_string(), 68 | password: password.to_string(), 69 | dbname: dbname.to_string(), 70 | dump_file_path: dump_file_path.to_string(), 71 | encrypt, 72 | table, 73 | } 74 | } 75 | 76 | pub fn dump(&self) -> Result<(), Box> { 77 | let compress_data = Configs.boolean("exports", "compress_data", false); 78 | 79 | let dump_file_path = if compress_data { 80 | format!("{}.gz", self.dump_file_path) 81 | } else { 82 | self.dump_file_path.clone() 83 | }; 84 | 85 | let export_handlers = ExportHandlers::new( 86 | File::create(dump_file_path.clone())?, 87 | &self.dbname 88 | ); 89 | 90 | let pool = Connection { 91 | host: self.host.clone(), 92 | port: self.port, 93 | user: self.user.clone(), 94 | password: self.password.clone(), 95 | dbname: Some(self.dbname.clone()), 96 | }.create_mysql_pool()?; 97 | 98 | FileUtils::create_path(&dump_file_path.clone()); 99 | 100 | let mut conn = pool.get_conn()?; 101 | let mut writer = export_handlers.create_writer()?; 102 | 103 | CommentsHeaders.core(&self.dbname, writer.as_write())?; 104 | export_handlers.write_create_new_database(writer.as_write())?; 105 | 106 | let tables: Vec = conn.query(MySqlQueriesBuilders.show_tables())?; 107 | let ignore_tables = Configs.list("exports", "ignore_tables").unwrap_or_default(); 108 | 109 | for table in tables { 110 | if ignore_tables.contains(&serde_yaml::Value::String(table.clone())) { 111 | writeln!(writer.as_write(), "-- Table `{}` is ignored.", table)?; 112 | continue; 113 | } 114 | 115 | export_handlers.write_structure_for_table(&table, &mut conn, writer.as_write())?; 116 | export_handlers.write_inserts_for_table(&table, &mut conn, writer.as_write())?; 117 | writeln!(writer.as_write(), "-- End of table `{}`\n", table)?; 118 | } 119 | 120 | if self.encrypt.unwrap_or(false) { 121 | let _ = Encrypt::new(&dump_file_path).encrypt(); 122 | } else { 123 | SuccessAlerts::dump(&dump_file_path); 124 | } 125 | 126 | let file_size = FileUtils::file_size(&dump_file_path)? 
as i64; 127 | History::new().insert_backup( 128 | &Generate.random_string(16), 129 | &dump_file_path, 130 | &self.dbname, 131 | &self.host, 132 | &Utc::now().to_rfc3339(), 133 | file_size, 134 | self.encrypt.unwrap_or(false), 135 | compress_data, 136 | )?; 137 | 138 | Ok(()) 139 | } 140 | 141 | pub fn dump_table(&self) -> Result<(), Box> { 142 | let compress_data = Configs.boolean("exports", "compress_data", false); 143 | 144 | let dump_file_path = if compress_data { 145 | format!("{}.gz", self.dump_file_path) 146 | } else { 147 | self.dump_file_path.clone() 148 | }; 149 | 150 | let export_handlers = ExportHandlers::new( 151 | File::create(dump_file_path.clone())?, 152 | &self.dbname 153 | ); 154 | 155 | let pool = Connection { 156 | host: self.host.clone(), 157 | port: self.port, 158 | user: self.user.clone(), 159 | password: self.password.clone(), 160 | dbname: Some(self.dbname.clone()), 161 | }.create_mysql_pool()?; 162 | 163 | FileUtils::create_path(&dump_file_path.clone()); 164 | 165 | let mut conn = pool.get_conn()?; 166 | let mut writer = export_handlers.create_writer()?; 167 | let table = self.table.as_deref().unwrap_or(""); 168 | 169 | CommentsHeaders.truncate(&self.dbname, &table, writer.as_write())?; 170 | export_handlers.write_inserts_for_table(&table, &mut conn, writer.as_write())?; 171 | writeln!(writer.as_write(), "-- End of table `{}`", table)?; 172 | 173 | if self.encrypt.unwrap_or(false) { 174 | let _ = Encrypt::new(&dump_file_path).encrypt(); 175 | } else { 176 | SuccessAlerts::dump(&dump_file_path); 177 | } 178 | 179 | Ok(()) 180 | } 181 | 182 | } -------------------------------------------------------------------------------- /src/cmd/runner.rs: -------------------------------------------------------------------------------- 1 | use mysql::{ 2 | *, 3 | prelude::* 4 | }; 5 | 6 | use crate::{ 7 | ui::{ 8 | errors_alerts::ErrorsAlerts, 9 | success_alerts::SuccessAlerts, 10 | }, 11 | 12 | handlers::{ 13 | syntax_skip_handlers::SyntaxSkip, 14 | mysql::mysql_keywords::MySQLKeywords, 15 | }, 16 | }; 17 | 18 | pub struct Runner; 19 | 20 | impl Runner { 21 | 22 | pub fn import(&self, conn: &mut PooledConn, dump_content: &str, dbname: &str, ignore_drop_table: Option) { 23 | let mut buffer = String::new(); 24 | let mut tables_ignored = Vec::new(); 25 | 26 | for line in dump_content.lines() { 27 | let trimmed_line = line.trim(); 28 | 29 | if trimmed_line.contains(SyntaxSkip::SkipTables.as_str()) { 30 | let table_names = if let Some(start) = trimmed_line.find('"') { 31 | if let Some(end) = trimmed_line[start + 1..].find('"') { 32 | &trimmed_line[start + 1..start + 1 + end] 33 | } else { 34 | continue; 35 | } 36 | } else { 37 | continue; 38 | }; 39 | 40 | if table_names == "unknown" || table_names.is_empty() { 41 | continue; 42 | } 43 | 44 | for table_name in table_names.split(',') { 45 | tables_ignored.push(table_name.trim().to_string()); 46 | } 47 | 48 | continue; 49 | } 50 | 51 | if trimmed_line.is_empty() || trimmed_line.starts_with(MySQLKeywords::Comments.as_str()) { 52 | continue; 53 | } 54 | 55 | if trimmed_line.contains(SyntaxSkip::SkipLine.as_str()) { 56 | continue; 57 | } 58 | 59 | if ignore_drop_table.unwrap_or(false) { 60 | if trimmed_line.starts_with(MySQLKeywords::DropTable.as_str()) { 61 | continue; 62 | } 63 | 64 | if trimmed_line.starts_with(MySQLKeywords::CreateTable.as_str()) { 65 | let create_table_line = trimmed_line.replace( 66 | MySQLKeywords::CreateTable.as_str(), 67 | &format!( 68 | "{} {}", 69 | MySQLKeywords::CreateTable.as_str(), 70 | 
MySQLKeywords::IfNotExists.as_str() 71 | ), 72 | ); 73 | 74 | buffer.push_str(&create_table_line); 75 | continue; 76 | } 77 | } 78 | 79 | buffer.push_str(trimmed_line); 80 | buffer.push(' '); 81 | 82 | if trimmed_line.ends_with(");") || trimmed_line.ends_with(";") { 83 | let sql = buffer.trim(); 84 | 85 | if !sql.is_empty() { 86 | let (should_skip, table_name) = if sql.to_uppercase().contains(MySQLKeywords::Insert.as_str()) || sql.to_uppercase().contains(MySQLKeywords::CreateTable.as_str()) { 87 | let found_table = tables_ignored.iter().find(|table| { 88 | sql.contains(&format!("`{}`", table)) 89 | }); 90 | 91 | if let Some(table) = found_table { 92 | (true, table.clone()) 93 | } else { 94 | (false, String::new()) 95 | } 96 | } else { 97 | (false, String::new()) 98 | }; 99 | 100 | if should_skip { 101 | SuccessAlerts::table_ignored(&table_name); 102 | buffer.clear(); 103 | continue; 104 | } 105 | 106 | match conn.query_drop(sql) { 107 | Ok(_) => { 108 | if sql.to_uppercase().contains(MySQLKeywords::CreateTable.as_str()) { 109 | let actual_table_name = if let Some(table_start) = sql.to_uppercase().find(MySQLKeywords::CreateTable.as_str()) { 110 | let trimmed = &sql[table_start + 12..].trim(); 111 | 112 | let table_part = if trimmed.to_uppercase().starts_with(MySQLKeywords::IfNotExists.as_str()) { 113 | trimmed[13..].trim() 114 | } else { 115 | trimmed 116 | }; 117 | 118 | if let Some(backtick_start) = table_part.find('`') { 119 | if let Some(backtick_end) = table_part[backtick_start + 1..].find('`') { 120 | &table_part[backtick_start + 1..backtick_start + 1 + backtick_end] 121 | } else { 122 | table_part.split_whitespace().next().unwrap_or("unknown") 123 | } 124 | } else { 125 | table_part.split_whitespace().next().unwrap_or("unknown") 126 | } 127 | } else { 128 | "unknown" 129 | }; 130 | 131 | SuccessAlerts::table(actual_table_name); 132 | } 133 | } 134 | Err(e) => ErrorsAlerts::import(dbname, sql, &e.to_string()), 135 | } 136 | } 137 | 138 | buffer.clear(); 139 | } 140 | } 141 | } 142 | 143 | } -------------------------------------------------------------------------------- /src/plugins/reports_xss.rs: -------------------------------------------------------------------------------- 1 | use csv::Writer; 2 | use serde::Serialize; 3 | use serde_json::to_writer_pretty; 4 | 5 | use std::{ 6 | fs::File, 7 | io::Write, 8 | error::Error, 9 | }; 10 | 11 | use crate::{ 12 | utils::file::FileUtils, 13 | handlers::html_handlers::HTMLHandlers, 14 | ui::report_xss_alerts::ReportXSSAlerts, 15 | 16 | constants::{ 17 | urls::*, 18 | global::*, 19 | } 20 | }; 21 | 22 | #[derive(Serialize)] 23 | struct Detection { 24 | table: String, 25 | row_index: usize, 26 | column: String, 27 | value: String, 28 | } 29 | 30 | pub struct ReportsXSS; 31 | 32 | impl ReportsXSS { 33 | 34 | pub fn xml(&self, detections: Vec<(String, usize, String, String)>, output_path: &str) -> Result<(), Box<dyn Error>> { 35 | let mut file = File::create(output_path)?; 36 | 37 | file.write_all(b"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")?; 38 | file.write_all(b"<detections>\n")?; 39 | 40 | for (table, row_index, column, value) in detections { 41 | file.write_all(b" <detection>\n")?; 42 | 43 | file.write_all(format!(" <table>{}</table>\n", &table).as_bytes())?;
{}
\n", &table).as_bytes())?; 44 | file.write_all(format!(" {}\n", row_index).as_bytes())?; 45 | file.write_all(format!(" {}\n", &column).as_bytes())?; 46 | file.write_all(format!(" {}\n", &value).as_bytes())?; 47 | 48 | file.write_all(b" \n")?; 49 | } 50 | 51 | file.write_all(b"\n")?; 52 | 53 | ReportXSSAlerts::generated(output_path); 54 | Ok(()) 55 | } 56 | 57 | pub fn txt(&self, detections: Vec<(String, usize, String, String)>, output_path: &str) -> Result<(), Box> { 58 | let mut file = File::create(output_path)?; 59 | 60 | writeln!(file, "XSS Detection Report")?; 61 | writeln!(file, "====================")?; 62 | 63 | for (table, row_index, column, value) in detections { 64 | writeln!(file, "Table : {}", table)?; 65 | writeln!(file, "Row : {}", row_index)?; 66 | writeln!(file, "Column : {}", column)?; 67 | writeln!(file, "Value : {}", value)?; 68 | writeln!(file, "---------------------")?; 69 | } 70 | 71 | ReportXSSAlerts::generated(output_path); 72 | Ok(()) 73 | } 74 | 75 | pub fn csv(&self, detections: Vec<(String, usize, String, String)>, output_path: &str) -> Result<(), Box> { 76 | let mut writer = Writer::from_path(output_path)?; 77 | writer.write_record(&["Table", "Row Index", "Column", "Value"])?; 78 | 79 | for (table, row_index, column, value) in detections { 80 | writer.write_record(&[ 81 | table, 82 | row_index.to_string(), 83 | column, 84 | value, 85 | ])?; 86 | } 87 | 88 | writer.flush()?; 89 | 90 | ReportXSSAlerts::generated(output_path); 91 | Ok(()) 92 | } 93 | 94 | pub fn json(&self, detections: Vec<(String, usize, String, String)>, output_path: &str) -> Result<(), Box> { 95 | let detections: Vec = detections 96 | .into_iter() 97 | .map(|(table, row_index, column, value)| Detection { 98 | table, 99 | row_index, 100 | column, 101 | value, 102 | }) 103 | .collect(); 104 | 105 | let file = File::create(output_path)?; 106 | to_writer_pretty(file, &detections)?; 107 | 108 | ReportXSSAlerts::generated(output_path); 109 | Ok(()) 110 | } 111 | 112 | pub fn html(&self, detections: Vec<(String, usize, String, String)>, output_path: &str) -> Result<(), Box> { 113 | let mut file = File::create(output_path)?; 114 | file.write_all(format!("{}: XSS Reports", Global::app(GlobalNames::AppName)).as_bytes())?; 115 | file.write_all(format!("", Urls::as_str(UrlsNames::CdnBootstrap)).as_bytes())?; 116 | 117 | file.write_all(format!( 118 | "", 119 | Global::app(GlobalNames::AppIcon), 120 | ).as_bytes())?; 121 | 122 | file.write_all(b"
")?; 123 | file.write_all(b"")?; 124 | 125 | for (table, row_index, column, value) in detections { 126 | let encoded_table = HTMLHandlers.html_escape(&table); 127 | let encoded_column = HTMLHandlers.html_escape(&column); 128 | let encoded_value = HTMLHandlers.html_escape(&value); 129 | 130 | file.write_all(format!( 131 | "", 132 | encoded_table, row_index, encoded_column, encoded_value 133 | ).as_bytes())?; 134 | } 135 | 136 | file.write_all(b"
112 |     /// Renders the detections as an HTML table. Every user-controlled cell is
113 |     /// passed through HTMLHandlers.html_escape before interpolation, so a payload
114 |     /// found in the database cannot execute inside the report itself.
115 |     pub fn html(&self, detections: Vec<(String, usize, String, String)>, output_path: &str) -> Result<(), Box<dyn Error>> {
116 |         let mut file = File::create(output_path)?;
117 |         file.write_all(format!("<!DOCTYPE html><html><head><title>{}: XSS Reports</title>", Global::app(GlobalNames::AppName)).as_bytes())?;
118 |         file.write_all(format!("<link rel=\"stylesheet\" href=\"{}\">", Urls::as_str(UrlsNames::CdnBootstrap)).as_bytes())?;
119 |
120 |         file.write_all(format!(
121 |             "</head><body class=\"p-4\"><img src=\"{}\" alt=\"logo\">",
122 |             Global::app(GlobalNames::AppIcon),
123 |         ).as_bytes())?;
124 |
125 |         file.write_all(b"<table class=\"table table-striped\">")?;
126 |         file.write_all(b"<thead><tr><th>Table</th><th>Row Index</th><th>Column</th><th>Value</th></tr></thead><tbody>")?;
127 |
128 |         for (table, row_index, column, value) in detections {
129 |             let encoded_table = HTMLHandlers.html_escape(&table);
130 |             let encoded_column = HTMLHandlers.html_escape(&column);
131 |             let encoded_value = HTMLHandlers.html_escape(&value);
132 |
133 |             file.write_all(format!(
134 |                 "<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td></tr>",
135 |                 encoded_table, row_index, encoded_column, encoded_value
136 |             ).as_bytes())?;
137 |         }
138 |
139 |         file.write_all(b"</tbody></table></body></html>")?;
140 |
141 |         ReportXSSAlerts::generated(output_path);
142 |         Ok(())
143 |     }
144 |
145 |     pub fn autodetect(&self, detections: Vec<(String, usize, String, String)>, file_path: Option<&str>) -> Result<(), Box<dyn Error>> {
146 |         if let Some(file_path) = file_path {
147 |             let extension = FileUtils::extension(file_path);
148 |
149 |             let result = match extension.as_str() {
150 |                 "txt" => self.txt(detections, file_path),
151 |                 "csv" => self.csv(detections, file_path),
152 |                 "xml" => self.xml(detections, file_path),
153 |                 "json" => self.json(detections, file_path),
154 |                 "html" => self.html(detections, file_path),
155 |                 _ => Ok(ReportXSSAlerts::invalid_format()),
156 |             };
157 |
158 |             result?;
159 |         }
160 |
161 |         Ok(())
162 |     }
163 |
164 | }
165 |
-------------------------------------------------------------------------------- /src/handlers/mysql/mysql_queries_builders.rs: --------------------------------------------------------------------------------
1 | use crate::handlers::mysql::mysql_keywords::MySQLKeywords;
2 |
3 | pub struct MySqlQueriesBuilders;
4 |
5 | impl MySqlQueriesBuilders {
6 |
7 |     pub fn use_db(&self, dbname: &str) -> String {
8 |         format!("{} `{}`;", MySQLKeywords::Use.as_str(), dbname)
9 |     }
10 |
11 |     pub fn show_tables(&self) -> String {
12 |         MySQLKeywords::ShowTables.as_str().to_string()
13 |     }
14 |
15 |     pub fn lock_tables(&self, table: &str) -> String {
16 |         format!("{} `{}`;", MySQLKeywords::LockTables.as_str(), table)
17 |     }
18 |
19 |     pub fn unlock_tables(&self, table: &str) -> String {
20 |         format!("{} `{}`;", MySQLKeywords::UnlockTables.as_str(), table)
21 |     }
22 |
23 |     pub fn truncate_table(&self, table: &str) -> String {
24 |         format!("{} `{}`;", MySQLKeywords::TruncateTable.as_str(), table)
25 |     }
26 |
27 |     pub fn show_create_table(&self, table: &str) -> String {
28 |         format!("{} `{}`;", MySQLKeywords::ShowCreateTable.as_str(), table)
29 |     }
30 |
31 |     pub fn drop_table(&self, table: &str) -> String {
32 |         format!("{} {} `{}`;", MySQLKeywords::DropTable.as_str(), MySQLKeywords::IfExists.as_str(), table)
33 |     }
34 |
35 |     pub fn create_database_not_exists(&self, dbname: &str) -> String {
36 |         format!("{} {} `{}`;", MySQLKeywords::CreateDatabase.as_str(), MySQLKeywords::IfNotExists.as_str(), dbname)
37 |     }
38 |
39 |     pub fn get_table_names(&self) -> String {
40 |         format!("{};", MySQLKeywords::GetTableNames.as_str())
41 |     }
42 |
43 |     pub fn show_columns(&self, table: &str) -> String {
44 |         format!("{} `{}`;", MySQLKeywords::ShowColumns.as_str(), table)
45 |     }
46 |
47 |     pub fn table_info(&self, table: &str) -> String {
48 |         format!("{} = '{}'", MySQLKeywords::TableInfo.as_str(), table)
49 |     }
50 |
51 |     pub fn foreign_key_info(&self, table: &str) -> String {
52 |         format!("{} = '{}' {};", MySQLKeywords::ForeignKeyInfo.as_str(), table, MySQLKeywords::AndReferencedIsNotNull.as_str())
53 |     }
54 |
55 |     pub fn get_alter_table(&self, table: &str) -> String {
56 |         format!("{} = '{}' {}", MySQLKeywords::GetAlterTable.as_str(), table, MySQLKeywords::AndReferencedIsNotNull.as_str())
57 |     }
58 |
59 |     pub fn create_database(&self, dbname: &str) -> Result<(String, String), String> {
60 |         let create_db = format!(
61 |             "{} {} `{}`;\n", MySQLKeywords::CreateDatabase.as_str(), MySQLKeywords::IfNotExists.as_str(), dbname
62 |         );
63 |
64 |         let use_db = format!("{} `{}`;", MySQLKeywords::Use.as_str(), dbname);
65 |         Ok((create_db, use_db))
66 |     }
67 |
68 |     pub fn select(&self, table: &str, offset: Option<usize>, limit: Option<usize>) -> String {
69 |         let mut query = format!("{} `{}`", MySQLKeywords::SelectFrom.as_str(), table);
70 |
71 |         if let Some(l) = limit {
72 |             query.push_str(&format!(" {} {}", MySQLKeywords::Limit.as_str(), l));
73 |         }
74 |
75 |         if let Some(o) = offset {
76 |             query.push_str(&format!(" {} {}", MySQLKeywords::Offset.as_str(), o));
77 |         }
78 |
79 |         query
80 |     }
81 |
82 |     pub fn get_foreign_keys(&self, table: &str, constraint_name: &str, column_name: &str, ref_table: &str, ref_column: &str) -> String {
83 |         format!(
84 |             "{} `{}` {} `{}` {} (`{}`) {} `{}` (`{}`);",
85 |             MySQLKeywords::AlterTable.as_str(),
86 |             table,
87 |             MySQLKeywords::AddConstraint.as_str(),
88 |             constraint_name,
89 |             MySQLKeywords::ForeignKey.as_str(),
90 |             column_name,
91 |             MySQLKeywords::References.as_str(),
92 |             ref_table,
93 |             ref_column
94 |         )
95 |     }
96 |
97 |     pub fn get_unique_keys(&self, table: &str, constraint_name: &str, column_name: &str) -> String {
98 |         format!(
99 |             "{} `{}` {} `{}` {} (`{}`);",
100 |             MySQLKeywords::AlterTable.as_str(),
101 |             table,
102 |             MySQLKeywords::AddConstraint.as_str(),
103 |             constraint_name,
104 |             MySQLKeywords::Unique.as_str(),
105 |             column_name
106 |         )
107 |     }
108 |
109 |     pub fn get_primary_key(&self, table: &str) -> String {
110 |         format!("{} `{}` {}", MySQLKeywords::ShowKeysFrom.as_str(), table, MySQLKeywords::WherePrimaryKey.as_str())
111 |     }
112 |
113 |     pub fn insert_into_start(&self, table: &str, columns: &[String], values: &[String], insert_ignore: bool) -> String {
114 |         let prefix = if insert_ignore {
115 |             MySQLKeywords::InsertIgnore.as_str()
116 |         } else {
117 |             MySQLKeywords::InsertInto.as_str()
118 |         };
119 |
120 |         let sanitized_values: Vec<String> = values.iter()
121 |             .map(|v| v.trim_start_matches('(')
122 |                 .trim_end_matches(')')
123 |                 .to_string())
124 |             .collect();
125 |
126 |         let values_sql = sanitized_values
127 |             .iter()
128 |             .map(|v| format!("({})", v))
129 |             .collect::<Vec<String>>()
130 |             .join(", ");
131 |
132 |         format!(
133 |             "{} `{}` ({}) {} {};",
134 |             prefix,
135 |             table,
136 |             columns.join(", "),
137 |             MySQLKeywords::Values.as_str(),
138 |             values_sql
139 |         )
140 |     }
141 |
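142 |     // Note: unlike insert_into_start above, which wraps each element of `values`
143 |     // in its own parentheses to form a multi-row insert, the JSON variant below
144 |     // joins all values into a single row tuple per record.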
145 |     pub fn insert_into_start_json(&self, table: &str, columns: &[String], values: &[String], insert_ignore: bool) -> String {
146 |         let prefix = if insert_ignore {
147 |             MySQLKeywords::InsertIgnore.as_str()
148 |         } else {
149 |             MySQLKeywords::InsertInto.as_str()
150 |         };
151 |
152 |         let sanitized_values: Vec<String> = values.iter()
153 |             .map(|v| v.trim_start_matches('(')
154 |                 .trim_end_matches(')')
155 |                 .to_string())
156 |             .collect();
157 |
158 |         let values_sql = format!("({})", sanitized_values.join(", "));
159 |
160 |         format!(
161 |             "{} `{}` ({}) {} {};",
162 |             prefix,
163 |             table,
164 |             columns.join(", "),
165 |             MySQLKeywords::Values.as_str(),
166 |             values_sql
167 |         )
168 |     }
169 |
170 | }
-------------------------------------------------------------------------------- /src/handlers/diagram_handlers.rs: --------------------------------------------------------------------------------
1 | use regex::Regex;
2 | use std::error::Error;
3 |
4 | use crate::constants::regexp::RegExp;
5 |
6 | #[derive(Debug)]
7 | pub struct ColumnDefinition {
8 |     pub name: String,
9 |     pub col_type: String,
10 |     pub key: Option<String>,
11 | }
12 |
13 | #[derive(Debug)]
14 | pub struct TableDefinition {
15 |     pub name: String,
16 |     pub columns: Vec<ColumnDefinition>,
17 | }
18 |
19 | pub struct DiagramHandlers;
20 |
21 | impl DiagramHandlers {
22 |
23 |     pub fn generate_ascii_diagram_with_key(&self, table: &TableDefinition) -> String {
24 |         let header_col = "Column";
25 |         let header_type = "Type";
26 |         let header_key = "Key";
27 |
28 |         let col1_width = table.columns
29 |             .iter()
30 |             .map(|col| col.name.len())
31 |             .max()
32 |             .unwrap_or(0)
33 |             .max(header_col.len());
34 |
35 |         let col2_width = table.columns
36 |             .iter()
37 |             .map(|col| col.col_type.len())
38 |             .max()
39 |             .unwrap_or(0)
40 |             .max(header_type.len());
41 |
42 |         let col3_width = table.columns
43 |             .iter()
44 |             .map(|col| col.key.as_ref().map(|s| s.len()).unwrap_or(0))
45 |             .max()
46 |             .unwrap_or(0)
47 |             .max(header_key.len());
48 |
49 |         let border_line = format!(
50 |             "+-{:-<c1$}-+-{:-<c2$}-+-{:-<c3$}-+",
51 |             "", "", "",
52 |             c1 = col1_width,
53 |             c2 = col2_width,
54 |             c3 = col3_width,
55 |         );
56 |
57 |         let header_line = format!(
58 |             "| {:<c1$} | {:<c2$} | {:<c3$} |",
59 |             header_col, header_type, header_key,
60 |             c1 = col1_width,
61 |             c2 = col2_width,
62 |             c3 = col3_width,
63 |         );
64 |
65 |         // Assemble the grid: table name, bordered header row, then one
66 |         // data row per column with its key marker (PK/FK/UNI/KEY), if any.
67 |         let mut diagram = String::new();
68 |         diagram.push_str(&format!("Table: {}\n", table.name));
69 |
70 |         diagram.push_str(&border_line);
71 |         diagram.push('\n');
72 |         diagram.push_str(&header_line);
73 |         diagram.push('\n');
74 |         diagram.push_str(&border_line);
75 |         diagram.push('\n');
76 |
77 |         for col in &table.columns {
78 |             let key = col.key.as_deref().unwrap_or("");
79 |
80 |             diagram.push_str(&format!(
81 |                 "| {:<c1$} | {:<c2$} | {:<c3$} |\n",
82 |                 col.name, col.col_type, key,
83 |                 c1 = col1_width,
84 |                 c2 = col2_width,
85 |                 c3 = col3_width,
86 |             ));
87 |         }
88 |
89 |         diagram.push_str(&border_line);
90 |         diagram.push('\n');
91 |
92 |         diagram
93 |     }
94 |
95 |     /// Parses a CREATE TABLE statement into a TableDefinition, tagging columns
96 |     /// referenced by PRIMARY/FOREIGN/UNIQUE KEY constraints.
97 |     pub fn parse_create_table(&self, sql: &str) -> Result<TableDefinition, Box<dyn Error>> {
98 |         let table_name_re = Regex::new(RegExp::CREATE_TABLE_ERD)?;
99 |         let table_name_caps = table_name_re
100 |             .captures(sql)
101 |             .ok_or("Table name not found")?;
102 |
103 |         let table_name = table_name_caps.get(1).unwrap().as_str().to_string();
104 |
105 |         let start = sql.find('(').ok_or("Opening parenthesis not found")?;
106 |         let end = sql.rfind(')').ok_or("Closing parenthesis not found")?;
107 |         let columns_str = &sql[start + 1..end];
108 |
109 |         let column_lines: Vec<&str> = columns_str
110 |             .lines()
111 |             .map(|s| s.trim().trim_end_matches(','))
112 |             .filter(|s| !s.is_empty())
113 |             .collect();
114 |
115 |         let mut columns = Vec::new();
116 |         let mut constraints = Vec::new();
117 |         let column_re = Regex::new(RegExp::CREATE_TABLE_COLUMNS)?;
118 |
119 |         for line in &column_lines {
120 |             let line_upper = line.to_uppercase();
121 |
122 |             if line_upper.starts_with("PRIMARY KEY") || line_upper.starts_with("FOREIGN KEY") || line_upper.starts_with("KEY") || line_upper.starts_with("UNIQUE KEY") || line_upper.starts_with("CONSTRAINT") {
123 |                 constraints.push(*line);
124 |                 continue;
125 |             }
126 |
127 |             if let Some(caps) = column_re.captures(line) {
128 |                 let col_name = caps.get(1).unwrap().as_str().to_string();
129 |                 let col_type = caps.get(2).unwrap().as_str().to_string();
130 |
131 |                 columns.push(ColumnDefinition {
132 |                     name: col_name,
133 |                     col_type,
134 |                     key: None,
135 |                 });
136 |             }
137 |         }
138 |
139 |         let cols_in_constraint_re = Regex::new(RegExp::COLS_IN_CONSTRAINT_RE)?;
140 |         for cons_line in constraints {
141 |             let cons_line_upper = cons_line.to_uppercase();
142 |
143 |             if let Some(caps) = cols_in_constraint_re.captures(cons_line) {
144 |                 let cols_str = caps.get(1).unwrap().as_str();
145 |
146 |                 let col_names: Vec<&str> = cols_str
147 |                     .split(',')
148 |                     .map(|s| s.trim().trim_matches('`'))
149 |                     .collect();
150 |
151 |                 for col in col_names {
152 |                     for column in columns.iter_mut() {
153 |                         if column.name == col {
154 |                             if cons_line_upper.starts_with("PRIMARY KEY") {
155 |                                 column.key = Some("PK".to_string());
156 |                             } else if cons_line_upper.contains("FOREIGN KEY") || cons_line_upper.contains("REFERENCES") {
157 |                                 column.key = Some("FK".to_string());
158 |                             } else if cons_line_upper.starts_with("KEY") && column.key.is_none() {
159 |                                 column.key = Some("KEY".to_string());
160 |                             } else if cons_line_upper.starts_with("UNIQUE KEY") {
161 |                                 column.key = Some("UNI".to_string());
162 |                             }
163 |                         }
164 |                     }
165 |                 }
166 |             }
167 |         }
168 |
169 |         Ok(TableDefinition {
170 |             name: table_name,
171 |             columns,
172 |         })
173 |     }
174 |
175 | }
176 |
-------------------------------------------------------------------------------- /src/core/import_data.rs: --------------------------------------------------------------------------------
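1 | //! Replays a JSON fixture of entries shaped like {"model": "app.table", "pk": ..., "fields": {...}}:
2 | //! the referenced tables are truncated first, then rows are inserted in parallel, chunked transactions.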
3 | use std::{
4 |     fs::File,
5 |     sync::Arc,
6 |     error::Error,
7 |     io::BufReader,
8 | };
9 |
10 | use chrono::{
11 |     DateTime,
12 |     NaiveDateTime
13 | };
14 |
15 | use mysql::{
16 |     *,
17 |     prelude::*,
18 | };
19 |
20 | use rayon::prelude::*;
21 | use serde_json::Value;
22 |
23 | use crate::{
24 |     cmd::connection::Connection,
25 |     ui::success_alerts::SuccessAlerts,
26 |
27 |     handlers::mysql::{
28 |         mysql_keywords::MySQLKeywords,
29 |         mysql_queries_builders::MySqlQueriesBuilders,
30 |     },
31 | };
32 |
33 | pub struct ImportDumpData {
34 |     pub host: String,
35 |     pub port: u16,
36 |     pub user: String,
37 |     pub password: String,
38 |     pub dbname: String,
39 |     pub json_path: String,
40 |     pub chunk_size: usize,
41 | }
42 |
43 | impl ImportDumpData {
44 |
45 |     pub fn new(host: &str, port: u16, user: &str, password: &str, dbname: &str, json_path: &str) -> Self {
46 |         Self {
47 |             host: host.to_string(),
48 |             port,
49 |             user: user.to_string(),
50 |             password: password.to_string(),
51 |             dbname: dbname.to_string(),
52 |             json_path: json_path.to_string(),
53 |
54 |             chunk_size: 800,
55 |         }
56 |     }
57 |
58 |     fn extract_tables(&self, array: &[Value]) -> Vec<String> {
59 |         use std::collections::HashSet;
60 |         let mut set = HashSet::new();
61 |
62 |         for entry in array {
63 |             if let Some(model) = entry.get("model").and_then(|v| v.as_str()) {
64 |                 if let Some(table) = model.split('.').nth(1) {
65 |                     set.insert(table.to_string());
66 |                 }
67 |             }
68 |         }
69 |
70 |         set.into_iter().collect()
71 |     }
72 |
73 |     fn truncate_all_tables(&self, tables: &[String], pool: &Arc<Pool>) -> Result<(), Box<dyn Error>> {
74 |         let mut conn = pool.get_conn()?;
75 |         conn.query_drop(MySqlQueriesBuilders.use_db(&self.dbname))?;
76 |
77 |         for table in tables {
78 |             let sql = MySqlQueriesBuilders.truncate_table(table);
79 |             conn.query_drop(sql)?;
80 |         }
81 |
82 |         Ok(())
83 |     }
84 |
85 |     fn fix_datetime_format(&self, s: &str) -> String {
86 |         if let Ok(dt) = DateTime::parse_from_rfc3339(s) {
87 |             return dt.naive_utc().format("%Y-%m-%d %H:%M:%S").to_string();
88 |         }
89 |
90 |         if let Ok(dt) = NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S") {
91 |             return dt.format("%Y-%m-%d %H:%M:%S").to_string();
92 |         }
93 |
94 |         s.to_string()
95 |     }
96 |
97 |     fn json_to_mysql(&self, v: &Value) -> String {
98 |         match v {
99 |             Value::Null => MySQLKeywords::Null.as_str().to_string(),
100 |             Value::String(s) => {
101 |                 let fixed = self.fix_datetime_format(s);
102 |                 let escaped = fixed.replace('\\', "\\\\")
103 |                     .replace('\'', "\\'")
104 |                     .replace('"', "\\\"")
105 |                     .replace('\n', "\\n")
106 |                     .replace('\r', "\\r")
107 |                     .replace('\0', "\\0");
108 |
109 |                 format!("'{}'", escaped)
110 |             },
111 |             Value::Number(n) => n.to_string(),
112 |             Value::Bool(b) => (*b as i32).to_string(),
113 |             _ => MySQLKeywords::Null.as_str().to_string(),
114 |         }
115 |     }
116 |
117 |     fn process_chunk(&self, index: usize, chunk: &[Value], pool: &Arc<Pool>) -> Result<(), Box<dyn Error>> {
118 |         let mut conn = pool.get_conn()?;
119 |         conn.query_drop(MySqlQueriesBuilders.use_db(&self.dbname))?;
120 |         let mut tx = conn.start_transaction(TxOpts::default())?;
121 |
122 |         for entry in chunk {
123 |             if let Err(err) = self.import_entry(entry, &mut tx) {
124 |                 eprintln!("Chunk {} entry error: {}", index, err);
125 |             }
126 |         }
127 |
128 |         tx.commit()?;
129 |         Ok(())
130 |     }
131 |
132 |     fn import_entry(&self, entry: &Value, conn: &mut Transaction) -> Result<(), Box<dyn Error>> {
133 |         let model = entry
134 |             .get("model")
135 |             .and_then(|v| v.as_str())
136 |             .ok_or("Missing model")?;
137 |
138 |         let table = model
139 |             .split('.')
140 |             .nth(1)
141 |             .ok_or("Invalid model format")?;
142 |
143 |         let pk = entry.get("pk").ok_or("Missing PK")?;
144 |         let fields = entry
145 |             .get("fields")
146 |             .and_then(|v| v.as_object())
147 |             .ok_or("Missing fields")?;
148 |
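149 |         // Emit the primary key column first, then every remaining field (skipping
150 |         // any duplicate "id" key inside `fields`) so columns and values stay aligned.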
151 |         let mut columns = vec!["id".to_string()];
152 |         let mut values = vec![self.json_to_mysql(pk)];
153 |
154 |         for (field, value) in fields {
155 |             if field == "id" {
156 |                 continue;
157 |             }
158 |
159 |             columns.push(format!("`{}`", field));
160 |             values.push(self.json_to_mysql(value));
161 |         }
162 |
163 |         let sql = MySqlQueriesBuilders.insert_into_start_json(table, &columns, &values, false);
164 |         conn.query_drop(sql)?;
165 |         Ok(())
166 |     }
167 |
168 |     pub fn import_parallel(&self) -> Result<(), Box<dyn Error>> {
169 |         let file = File::open(&self.json_path)?;
170 |         let reader = BufReader::new(file);
171 |         let json: Value = serde_json::from_reader(reader)?;
172 |         let array = json.as_array().ok_or("JSON root must be an array")?;
173 |
174 |         let pool = Arc::new(
175 |             Connection {
176 |                 host: self.host.clone(),
177 |                 port: self.port,
178 |                 user: self.user.clone(),
179 |                 password: self.password.clone(),
180 |                 dbname: Some(self.dbname.clone()),
181 |             }
182 |             .create_mysql_pool()?,
183 |         );
184 |
185 |         let tables = self.extract_tables(array);
186 |         self.truncate_all_tables(&tables, &pool)?;
187 |
188 |         array
189 |             .par_chunks(self.chunk_size)
190 |             .enumerate()
191 |             .for_each(|(chunk_index, chunk)| {
192 |                 if let Err(err) = self.process_chunk(chunk_index, chunk, &pool) {
193 |                     eprintln!("Chunk {} failed: {}", chunk_index, err);
194 |                 }
195 |             });
196 |
197 |         SuccessAlerts::import(&self.dbname);
198 |         Ok(())
199 |     }
200 |
201 | }
202 |
--------------------------------------------------------------------------------