"
25 | LABEL org.opencontainers.image.version=${appVersion}
26 | LABEL org.opencontainers.image.source="https://github.com/jkaninda/mysql-bkup"
27 |
28 | RUN apk --update add --no-cache mysql-client mariadb-connector-c tzdata ca-certificates
29 | RUN mkdir -p $WORKDIR $BACKUPDIR $TEMPLATES_DIR $BACKUP_TMP_DIR && \
30 | chmod a+rw $WORKDIR $BACKUPDIR $BACKUP_TMP_DIR
31 | COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
32 | COPY ./templates/* $TEMPLATES_DIR/
33 | RUN chmod +x /usr/local/bin/mysql-bkup && \
34 | ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
35 |
36 | # Create backup script and make it executable
37 | RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
38 | chmod +x /usr/local/bin/backup
39 | # Create restore script and make it executable
40 | RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
41 | chmod +x /usr/local/bin/restore
42 | # Create migrate script and make it executable
43 | RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
44 | chmod +x /usr/local/bin/migrate
45 |
46 | WORKDIR $WORKDIR
47 | ENTRYPOINT ["/usr/local/bin/mysql-bkup"]
48 |
--------------------------------------------------------------------------------
/cmd/restore.go:
--------------------------------------------------------------------------------
1 | /*
2 | MIT License
3 |
4 | Copyright (c) 2023 Jonas Kaninda
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | */
24 |
25 | package cmd
26 |
27 | import (
28 | "github.com/jkaninda/mysql-bkup/pkg"
29 | "github.com/jkaninda/mysql-bkup/utils"
30 | "github.com/spf13/cobra"
31 | )
32 |
33 | var RestoreCmd = &cobra.Command{
34 | Use: "restore",
35 | Short: "Restore database operation",
36 | Example: utils.RestoreExample,
37 | Run: func(cmd *cobra.Command, args []string) {
38 | if len(args) == 0 {
39 | pkg.StartRestore(cmd)
40 | } else {
41 | utils.Fatal(`"restore" accepts no argument %q`, args)
42 |
43 | }
44 |
45 | },
46 | }
47 |
48 | func init() {
49 | // Restore
50 | RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
51 | RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp")
52 | RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
53 |
54 | }
55 |
--------------------------------------------------------------------------------
/docs/how-tos/restore.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Restore database
3 | layout: default
4 | parent: How Tos
5 | nav_order: 5
6 | ---
7 |
8 |
9 | # Restore Database
10 |
11 | To restore a MySQL database, use the `restore` command and specify the backup file to restore with the `--file` flag.
12 |
13 | The system supports the following file formats:
14 |
15 | - `.sql` (uncompressed SQL dump)
16 | - `.sql.gz` (gzip-compressed SQL dump)
17 | - `.sql.gpg` (GPG-encrypted SQL dump)
18 | - `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)
19 |
20 | ---
21 |
22 | ## Configuration Steps
23 |
24 | 1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
25 | 2. **Provide Database Credentials**: Ensure the correct database connection details are provided.
26 |
27 | ---
28 |
29 | ## Example: Restore Configuration
30 |
31 | Below is an example `docker-compose.yml` configuration for restoring a database:
32 |
33 | ```yaml
34 | services:
35 | mysql-bkup:
36 | # In production, lock your image tag to a specific release version
37 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
38 | # for available releases.
39 | image: jkaninda/mysql-bkup
40 | container_name: mysql-bkup
41 | command: restore -d database -f store_20231219_022941.sql.gz
42 | volumes:
43 | - ./backup:/backup # Mount the directory containing the backup file
44 | environment:
45 | - DB_PORT=3306
46 | - DB_HOST=mysql
47 | - DB_NAME=database
48 | - DB_USERNAME=username
49 | - DB_PASSWORD=password
50 | # Ensure the mysql-bkup container is connected to the same network as your database
51 | networks:
52 | - web
53 |
54 | networks:
55 | web:
56 | ```
57 |
58 | ---
59 |
60 | ## Key Notes
61 |
62 | - **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
63 | - **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption.
64 | - **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.
65 |
--------------------------------------------------------------------------------
/cmd/root.go:
--------------------------------------------------------------------------------
1 | /*
2 | MIT License
3 |
4 | Copyright (c) 2023 Jonas Kaninda
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | */
24 |
25 | package cmd
26 |
27 | import (
28 | "github.com/jkaninda/mysql-bkup/utils"
29 | "github.com/spf13/cobra"
30 | "os"
31 | )
32 |
// rootCmd represents the base command when called without any subcommands.
// The version, backup, restore and migrate subcommands are attached in init;
// appVersion is defined elsewhere in this package.
var rootCmd = &cobra.Command{
	Use:     "mysql-bkup [Command]",
	Short:   "MySQL Backup tool, backup database to S3 or Object Storage",
	Long:    `MySQL Database backup and restoration tool. Backup database to AWS S3 storage or any S3 Alternatives for Object Storage.`,
	Example: utils.MainExample,
	Version: appVersion,
}
41 |
42 | // Execute adds all child commands to the root command and sets flags appropriately.
43 | // This is called by main.main(). It only needs to happen once to the rootCmd.
44 | func Execute() {
45 | err := rootCmd.Execute()
46 | if err != nil {
47 | os.Exit(1)
48 | }
49 | }
50 |
51 | func init() {
52 | rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name")
53 | rootCmd.AddCommand(VersionCmd)
54 | rootCmd.AddCommand(BackupCmd)
55 | rootCmd.AddCommand(RestoreCmd)
56 | rootCmd.AddCommand(MigrateCmd)
57 |
58 | }
59 |
--------------------------------------------------------------------------------
/templates/email-error.tmpl:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | 🔴 Urgent: Database Backup Failure
7 |
45 |
46 |
47 | 🔴 Urgent: Database Backup Failure Notification
48 | Hi,
49 | An error occurred during the database backup process. Please review the details below and take the necessary actions:
50 |
51 |
52 |
Failure Details:
53 |
54 | - Database Name: {{.DatabaseName}}
55 | - Date: {{.EndTime}}
56 | - Backup Reference: {{.BackupReference}}
57 | - Error Message: {{.Error}}
58 |
59 |
60 |
61 | We recommend investigating the issue as soon as possible to prevent potential data loss or service disruptions.
62 |
63 | For more information, visit the mysql-bkup documentation.
64 |
65 |
68 |
69 |
70 |
--------------------------------------------------------------------------------
/templates/email.tmpl:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | ✅ Database Backup Successful – {{.Database}}
7 |
45 |
46 |
47 | ✅ Database Backup Successful
48 | Hi,
49 | The backup process for the {{.Database}} database was successfully completed. Please find the details below:
50 |
51 |
52 |
Backup Details:
53 |
54 | - Database Name: {{.Database}}
55 | - Backup Duration: {{.Duration}}
56 | - Backup Storage: {{.Storage}}
57 | - Backup Location: {{.BackupLocation}}
58 | - Backup Size: {{.BackupSize}}
59 | - Backup Reference: {{.BackupReference}}
60 |
61 |
62 |
63 | You can access the backup at the specified location if needed. Thank you for using mysql-bkup.
64 |
65 |
68 |
69 |
70 |
--------------------------------------------------------------------------------
/docs/how-tos/backup-all.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Backup all databases in the server
3 | layout: default
4 | parent: How Tos
5 | nav_order: 12
6 | ---
7 |
8 | # Backup All Databases
9 |
10 | MySQL-Bkup supports backing up all databases on the server using the `--all-databases` (`-a`) flag. By default, this creates separate backup files for each database. If you prefer a single backup file, you can use the `--all-in-one` (`-A`) flag.
11 |
12 | Backing up all databases is useful for creating a snapshot of the entire database server, whether for disaster recovery or migration purposes.
13 | ## Backup Modes
14 |
15 | ### Separate Backup Files (Default)
16 |
17 | Using `--all-databases` without `--all-in-one` creates individual backup files for each database.
18 |
19 | - Creates separate backup files for each database.
20 | - Provides more flexibility in restoring individual databases or tables.
21 | - Can be more manageable in cases where different databases have different retention policies.
22 | - Might take slightly longer due to multiple file operations.
23 | - It is the default behavior when using the `--all-databases` flag.
24 | - It does not back up system databases (`information_schema`, `performance_schema`, `mysql`, `sys`, `innodb`,...).
25 |
26 | **Command:**
27 |
28 | ```bash
29 | docker run --rm --network your_network_name \
30 | -v $PWD/backup:/backup/ \
31 | -e "DB_HOST=dbhost" \
32 | -e "DB_PORT=3306" \
33 | -e "DB_USERNAME=username" \
34 | -e "DB_PASSWORD=password" \
35 | jkaninda/mysql-bkup backup --all-databases
36 | ```
37 | ### Single Backup File
38 |
39 | Using `--all-in-one` (`-A`) creates a single backup file containing all databases.
40 |
41 | - Creates a single backup file containing all databases.
42 | - Easier to manage if you need to restore everything at once.
43 | - Faster to back up and restore in bulk.
44 | - Can be problematic if you only need to restore a specific database or table.
45 | - It is recommended to use this option for disaster recovery purposes.
46 | - It backs up system databases as well.
47 |
48 | ```bash
49 | docker run --rm --network your_network_name \
50 | -v $PWD/backup:/backup/ \
51 | -e "DB_HOST=dbhost" \
52 | -e "DB_PORT=3306" \
53 | -e "DB_USERNAME=username" \
54 | -e "DB_PASSWORD=password" \
55 | jkaninda/mysql-bkup backup --all-in-one
56 | ```
57 |
58 | ### When to Use Which?
59 |
60 | - Use `--all-in-one` if you want a quick, simple backup for disaster recovery where you'll restore everything at once.
61 | - Use `--all-databases` if you need granularity in restoring specific databases or tables without affecting others.
62 |
--------------------------------------------------------------------------------
/docs/how-tos/backup-to-ftp.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Backup to FTP remote server
3 | layout: default
4 | parent: How Tos
5 | nav_order: 4
6 | ---
7 |
8 | # Backup to FTP Remote Server
9 |
10 | To store your backups on an FTP remote server, you can configure the backup process to use the `--storage ftp` option.
11 |
12 | This section explains how to set up and configure FTP-based backups.
13 |
14 | ---
15 |
16 | ## Configuration Steps
17 |
18 | 1. **Specify the Storage Type**
19 | Add the `--storage ftp` flag to your backup command.
20 |
21 | 2. **Set the Remote Path**
22 | Define the full remote path where backups will be stored using the `--path` flag or the `REMOTE_PATH` environment variable.
23 | Example: `--path /home/jkaninda/backups`.
24 |
25 | 3. **Required Environment Variables**
26 | The following environment variables are mandatory for FTP-based backups:
27 |
28 | - `FTP_HOST`: The hostname or IP address of the FTP server.
29 | - `FTP_PORT`: The FTP port (default is `21`).
30 | - `FTP_USER`: The username for FTP authentication.
31 | - `FTP_PASSWORD`: The password for FTP authentication.
32 | - `REMOTE_PATH`: The directory on the FTP server where backups will be stored.
33 |
34 | ---
35 |
36 | ## Example Configuration
37 |
38 | Below is an example `docker-compose.yml` configuration for backing up to an FTP remote server:
39 |
40 | ```yaml
41 | services:
42 | mysql-bkup:
43 | # In production, lock your image tag to a specific release version
44 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
45 | # for available releases.
46 | image: jkaninda/mysql-bkup
47 | container_name: mysql-bkup
48 | command: backup --storage ftp -d database
49 | environment:
50 | - DB_PORT=3306
51 | - DB_HOST=mysql
52 | - DB_NAME=database
53 | - DB_USERNAME=username
54 | - DB_PASSWORD=password
55 | ## FTP Configuration
56 | - FTP_HOST="hostname"
57 | - FTP_PORT=21
58 | - FTP_USER=user
59 | - FTP_PASSWORD=password
60 | - REMOTE_PATH=/home/jkaninda/backups
61 |
62 | # Ensure the mysql-bkup container is connected to the same network as your database
63 | networks:
64 | - web
65 |
66 | networks:
67 | web:
68 | ```
69 |
70 | ---
71 |
72 | ## Key Notes
73 |
74 | - **Security**: FTP transmits data, including passwords, in plaintext. For better security, consider using SFTP (SSH File Transfer Protocol) or FTPS (FTP Secure) if supported by your server.
75 | - **Remote Path**: Ensure the `REMOTE_PATH` directory exists on the FTP server and is writable by the specified `FTP_USER`.
--------------------------------------------------------------------------------
/pkg/migrate.go:
--------------------------------------------------------------------------------
1 | /*
2 | MIT License
3 |
4 | Copyright (c) 2023 Jonas Kaninda
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | */
24 |
25 | package pkg
26 |
27 | import (
28 | "fmt"
29 | "github.com/jkaninda/mysql-bkup/utils"
30 | "github.com/spf13/cobra"
31 | "time"
32 | )
33 |
// StartMigration copies a database between two MySQL servers: it first backs
// up the source database to a timestamped SQL file, then restores that file
// into the target database.
//
// Source connection settings come from the standard DB_* configuration read
// by initDbConfig(cmd); target settings come from the TARGET_DB_* variables
// read by initTargetDbConfig. Both results are kept in the package-level
// dbConf / targetDbConf variables. A backup failure terminates the process
// via utils.Fatal.
//
// NOTE(review): the intermediate dump file (<dbname>_<timestamp>.sql) is not
// removed in this function — confirm whether RestoreDatabase or a caller
// cleans it up.
func StartMigration(cmd *cobra.Command) {
	intro()
	utils.Info("Starting database migration...")
	// Get DB config
	dbConf = initDbConfig(cmd)
	targetDbConf = initTargetDbConfig()

	// Defining the target database variables
	newDbConfig := dbConfig{}
	newDbConfig.dbHost = targetDbConf.targetDbHost
	newDbConfig.dbPort = targetDbConf.targetDbPort
	newDbConfig.dbName = targetDbConf.targetDbName
	newDbConfig.dbUserName = targetDbConf.targetDbUserName
	newDbConfig.dbPassword = targetDbConf.targetDbPassword

	// Generate file name (e.g. mydb_20240101_120000.sql)
	backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
	conf := &RestoreConfig{}
	conf.file = backupFileName
	// Backup source Database
	err := BackupDatabase(dbConf, backupFileName, true, false, false)
	if err != nil {
		utils.Fatal("Error backing up database: %s", err)
	}
	// Restore source database into target database
	utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
	RestoreDatabase(&newDbConfig, conf)
	utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
	utils.Info("Database migration completed.")
}
64 |
--------------------------------------------------------------------------------
/cmd/backup.go:
--------------------------------------------------------------------------------
1 | /*
2 | MIT License
3 |
4 | Copyright (c) 2023 Jonas Kaninda
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | */
24 |
25 | package cmd
26 |
27 | import (
28 | "github.com/jkaninda/mysql-bkup/pkg"
29 | "github.com/jkaninda/mysql-bkup/utils"
30 | "github.com/spf13/cobra"
31 | )
32 |
33 | var BackupCmd = &cobra.Command{
34 | Use: "backup ",
35 | Short: "Backup database operation",
36 | Example: utils.BackupExample,
37 | Run: func(cmd *cobra.Command, args []string) {
38 | if len(args) == 0 {
39 | pkg.StartBackup(cmd)
40 | } else {
41 | utils.Fatal(`"backup" accepts no argument %q`, args)
42 | }
43 | },
44 | }
45 |
46 | func init() {
47 | // Backup
48 | BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp, azure")
49 | BackupCmd.PersistentFlags().StringP("path", "P", "", "Storage path without file name. e.g: /custom_path or ssh remote path `/home/foo/backup`")
50 | BackupCmd.PersistentFlags().StringP("cron-expression", "e", "", "Backup cron expression (e.g., `0 0 * * *` or `@daily`)")
51 | BackupCmd.PersistentFlags().StringP("config", "c", "", "Configuration file for multi database backup. (e.g: `/backup/config.yaml`)")
52 | BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
53 | BackupCmd.PersistentFlags().BoolP("all-databases", "a", false, "Backup all databases")
54 | BackupCmd.PersistentFlags().BoolP("all-in-one", "A", false, "Backup all databases in a single file")
55 | BackupCmd.PersistentFlags().StringP("custom-name", "", "", "Custom backup name")
56 |
57 | }
58 |
--------------------------------------------------------------------------------
/docs/how-tos/azure-blob.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Azure Blob storage
3 | layout: default
4 | parent: How Tos
5 | nav_order: 5
6 | ---
7 |
8 | # Backup to Azure Blob Storage
9 |
10 | To store your backups on Azure Blob Storage, you can configure the backup process to use the `--storage azure` option.
11 |
12 | This section explains how to set up and configure Azure Blob-based backups.
13 |
14 | ---
15 |
16 | ## Configuration Steps
17 |
18 | 1. **Specify the Storage Type**
19 | Add the `--storage azure` flag to your backup command.
20 |
21 | 2. **Set the Blob Path**
22 | Optionally, specify a custom folder within your Azure Blob container where backups will be stored using the `--path` flag.
23 | Example: `--path my-custom-path`.
24 |
25 | 3. **Required Environment Variables**
26 | The following environment variables are mandatory for Azure Blob-based backups:
27 |
28 | - `AZURE_STORAGE_CONTAINER_NAME`: The name of the Azure Blob container where backups will be stored.
29 | - `AZURE_STORAGE_ACCOUNT_NAME`: The name of your Azure Storage account.
30 | - `AZURE_STORAGE_ACCOUNT_KEY`: The access key for your Azure Storage account.
31 |
32 | ---
33 |
34 | ## Example Configuration
35 |
36 | Below is an example `docker-compose.yml` configuration for backing up to Azure Blob Storage:
37 |
38 | ```yaml
39 | services:
40 | mysql-bkup:
41 | # In production, lock your image tag to a specific release version
42 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
43 | # for available releases.
44 | image: jkaninda/mysql-bkup
45 | container_name: mysql-bkup
46 | command: backup --storage azure -d database --path my-custom-path
47 | environment:
48 | - DB_PORT=3306
49 | - DB_HOST=mysql
50 | - DB_NAME=database
51 | - DB_USERNAME=username
52 | - DB_PASSWORD=password
53 | ## Azure Blob Configuration
54 | - AZURE_STORAGE_CONTAINER_NAME=backup-container
55 | - AZURE_STORAGE_ACCOUNT_NAME=account-name
56 | - AZURE_STORAGE_ACCOUNT_KEY=your-storage-account-key
57 |
58 | # Ensure the mysql-bkup container is connected to the same network as your database
59 | networks:
60 | - web
61 |
62 | networks:
63 | web:
64 | ```
65 |
66 | ---
67 |
68 | ## Key Notes
69 |
70 | - **Custom Path**: Use the `--path` flag to specify a folder within your Azure Blob container for organizing backups.
71 | - **Security**: Ensure your `AZURE_STORAGE_ACCOUNT_KEY` is kept secure and not exposed in public repositories.
72 | - **Compatibility**: This configuration works with Azure Blob Storage and other compatible storage solutions.
73 |
--------------------------------------------------------------------------------
/pkg/var.go:
--------------------------------------------------------------------------------
1 | /*
2 | MIT License
3 |
4 | Copyright (c) 2023 Jonas Kaninda
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | */
24 |
25 | package pkg
26 |
27 | import (
28 | "path/filepath"
29 | "time"
30 | )
31 |
// tmpPath is the scratch directory for in-flight backup/restore files.
const tmpPath = "/tmp/backup"

// gpgHome is the GnuPG home directory used when encryption is enabled.
const gpgHome = "/config/gnupg"

// gpgExtension is the file extension used for GPG-encrypted backups.
const gpgExtension = "gpg"

// timeFormat is the human-readable timestamp layout used in messages.
const timeFormat = "2006-01-02 at 15:04:05"

// Package-level runtime state and defaults shared by backup/restore/migrate.
var (
	// storage selects the storage backend; defaults to local filesystem.
	storage = "local"
	// file is the backup file name currently being processed.
	file = ""

	// storagePath is the default local backup directory.
	storagePath = "/backup"
	// workingDir is the container's config/working directory.
	workingDir = "/config"
	disableCompression = false
	encryption         = false
	usingKey           = false
	// backupSize holds the size of the generated backup; populated elsewhere
	// in the package.
	backupSize int64 = 0
	// startTime marks process start; presumably used to compute the backup
	// duration — confirm against the reporting code.
	startTime        = time.Now()
	backupRescueMode = false
	// mysqlClientConfig is a my.cnf file written under tmpPath (presumably
	// passed to the MySQL client tools — confirm).
	mysqlClientConfig = filepath.Join(tmpPath, "my.cnf")
)

// dbHVars Required environment variables for database
var dbHVars = []string{
	"DB_HOST",
	"DB_PASSWORD",
	"DB_USERNAME",
}

// tdbRVars lists environment variables required for the migration target database.
var tdbRVars = []string{
	"TARGET_DB_HOST",
	"TARGET_DB_NAME",
	"TARGET_DB_USERNAME",
	"TARGET_DB_PASSWORD",
}

// dbConf and targetDbConf hold the active source/target database configuration.
var dbConf *dbConfig
var targetDbConf *targetDbConfig

// ftpVars lists environment variables required for FTP storage.
// NOTE(review): this requires FTP_HOST_NAME, but docs/how-tos/backup-to-ftp.md
// documents FTP_HOST — confirm which variable name the code actually reads.
var ftpVars = []string{
	"FTP_HOST_NAME",
	"FTP_USER",
	"FTP_PASSWORD",
	"FTP_PORT",
}

// azureVars lists environment variables required for Azure Blob storage.
var azureVars = []string{
	"AZURE_STORAGE_CONTAINER_NAME",
	"AZURE_STORAGE_ACCOUNT_NAME",
	"AZURE_STORAGE_ACCOUNT_KEY",
}

// AwsVars Required environment variables for AWS S3 storage
var awsVars = []string{
	"AWS_S3_ENDPOINT",
	"AWS_S3_BUCKET_NAME",
	"AWS_ACCESS_KEY",
	"AWS_SECRET_KEY",
	"AWS_REGION",
}
88 |
--------------------------------------------------------------------------------
/docs/_config.yml:
--------------------------------------------------------------------------------
1 | # Welcome to Jekyll!
2 | #
3 | # This config file is meant for settings that affect your whole blog, values
4 | # which you are expected to set up once and rarely edit after that. If you find
5 | # yourself editing this file very often, consider using Jekyll's data files
6 | # feature for the data you need to update frequently.
7 | #
8 | # For technical reasons, this file is *NOT* reloaded automatically when you use
9 | # 'bundle exec jekyll serve'. If you change this file, please restart the server process.
10 |
11 | # Site settings
12 | # These are used to personalize your new site. If you look in the HTML files,
13 | # you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
14 | # You can create any custom variable you would like, and they will be accessible
15 | # in the templates via {{ site.myvariable }}.
16 | title: MySQL Backup Docker container image
17 | email: hi@jonaskaninda.com
18 | description: >- # this means to ignore newlines until "baseurl:"
19 | MySQL Backup is a Docker container image that can be used to back up and restore MySQL databases.
20 | It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
21 |
22 | baseurl: "" # the subpath of your site, e.g. /blog
23 | url: "" # the base hostname & protocol for your site, e.g. http://example.com
24 | twitter_username: jonaskaninda
25 | github_username: jkaninda
26 |
27 | callouts_level: quiet
28 | callouts:
29 | highlight:
30 | color: yellow
31 | important:
32 | title: Important
33 | color: blue
34 | new:
35 | title: New
36 | color: green
37 | note:
38 | title: Note
39 | color: purple
40 | warning:
41 | title: Warning
42 | color: red
43 | # Build settings
44 | markdown: kramdown
45 | theme: just-the-docs
46 | plugins:
47 | - jekyll-feed
48 | aux_links:
49 | 'GitHub Repository':
50 | - https://github.com/jkaninda/mysql-bkup
51 |
52 | nav_external_links:
53 | - title: GitHub Repository
54 | url: https://github.com/jkaninda/mysql-bkup
55 |
56 | footer_content: >-
57 | Copyright © 2024 Jonas Kaninda.
58 | Distributed under the MIT License.
59 | Something missing, unclear or not working? Open an issue.
60 |
61 | # Exclude from processing.
62 | # The following items will not be processed, by default. Create a custom list
63 | # to override the default setting.
64 | # exclude:
65 | # - Gemfile
66 | # - Gemfile.lock
67 | # - node_modules
68 | # - vendor/bundle/
69 | # - vendor/cache/
70 | # - vendor/gems/
71 | # - vendor/ruby/
72 |
--------------------------------------------------------------------------------
/utils/config.go:
--------------------------------------------------------------------------------
1 | /*
2 | MIT License
3 |
4 | Copyright (c) 2023 Jonas Kaninda
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | */
24 |
25 | package utils
26 |
27 | import "os"
28 |
// MailConfig holds SMTP settings for email notifications, loaded from the
// MAIL_* environment variables by loadMailConfig.
type MailConfig struct {
	MailHost     string
	MailPort     int
	MailUserName string
	MailPassword string
	MailTo       string
	MailFrom     string
	// SkipTls controls TLS skipping; see the note in loadMailConfig about
	// how this flag is derived from MAIL_SKIP_TLS.
	SkipTls bool
}

// NotificationData carries the fields rendered into the success notification
// template (templates/email.tmpl uses .Database, .Duration, .Storage,
// .BackupLocation, .BackupSize and .BackupReference).
type NotificationData struct {
	File            string
	BackupSize      string
	Database        string
	Duration        string
	Storage         string
	BackupLocation  string
	BackupReference string
}

// ErrorMessage carries the fields rendered into the failure notification
// template (templates/email-error.tmpl uses .DatabaseName, .EndTime,
// .BackupReference and .Error).
type ErrorMessage struct {
	Database        string
	EndTime         string
	Error           string
	BackupReference string
	DatabaseName    string
}
54 |
// loadMailConfig gets mail environment variables and returns MailConfig
func loadMailConfig() *MailConfig {
	return &MailConfig{
		MailHost:     os.Getenv("MAIL_HOST"),
		MailPort:     GetIntEnv("MAIL_PORT"),
		MailUserName: os.Getenv("MAIL_USERNAME"),
		MailPassword: os.Getenv("MAIL_PASSWORD"),
		MailTo:       os.Getenv("MAIL_TO"),
		MailFrom:     os.Getenv("MAIL_FROM"),
		// NOTE(review): SkipTls is true only when MAIL_SKIP_TLS is exactly
		// "false", which reads inverted relative to the variable name
		// (MAIL_SKIP_TLS=true yields SkipTls=false, and unset yields false).
		// Confirm the intended semantics before changing this comparison.
		SkipTls: os.Getenv("MAIL_SKIP_TLS") == "false",
	}

}
68 |
// TimeFormat returns the timestamp layout used in notifications: the value of
// the TIME_FORMAT environment variable, or "2006-01-02 at 15:04:05" when unset.
func TimeFormat() string {
	if format := os.Getenv("TIME_FORMAT"); format != "" {
		return format
	}
	return "2006-01-02 at 15:04:05"
}
78 |
// backupReference returns the operator-supplied backup reference taken from
// the BACKUP_REFERENCE environment variable (empty string when unset).
func backupReference() string {
	ref := os.Getenv("BACKUP_REFERENCE")
	return ref
}
82 |
// templatePath is the in-container directory holding the notification
// templates (populated at image build time).
const templatePath = "/config/templates"

// DatabaseName is the name of the database currently being processed.
// Exported so other packages can set it before notifications are sent.
var DatabaseName = ""

// vars lists the environment variables required for Telegram notifications.
var vars = []string{
	"TG_TOKEN",
	"TG_CHAT_ID",
}

// mailVars lists the environment variables required for e-mail notifications.
var mailVars = []string{
	"MAIL_HOST",
	"MAIL_PORT",
	"MAIL_FROM",
	"MAIL_TO",
}
96 |
--------------------------------------------------------------------------------
/docs/how-tos/restore-from-s3.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Restore database from AWS S3
3 | layout: default
4 | parent: How Tos
5 | nav_order: 6
6 | ---
7 |
8 | # Restore Database from S3 Storage
9 |
10 | To restore a MySQL database from a backup stored in S3, use the `restore` command and specify the backup file with the `--file` flag. The system supports the following file formats:
11 |
12 | - `.sql` (uncompressed SQL dump)
13 | - `.sql.gz` (gzip-compressed SQL dump)
14 | - `.sql.gpg` (GPG-encrypted SQL dump)
15 | - `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)
16 |
17 | ---
18 |
19 | ## Configuration Steps
20 |
21 | 1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
22 | 2. **Set the Storage Type**: Add the `--storage s3` flag to indicate that the backup is stored in S3.
23 | 3. **Provide S3 Configuration**: Include the necessary AWS S3 credentials and configuration.
24 | 4. **Provide Database Credentials**: Ensure the correct database connection details are provided.
25 |
26 | ---
27 |
28 | ## Example: Restore from S3 Configuration
29 |
30 | Below is an example `docker-compose.yml` configuration for restoring a database from S3 storage:
31 |
32 | ```yaml
33 | services:
34 | mysql-bkup:
35 | # In production, lock your image tag to a specific release version
36 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
37 | # for available releases.
38 | image: jkaninda/mysql-bkup
39 | container_name: mysql-bkup
40 | command: restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
41 | volumes:
42 | - ./backup:/backup # Mount the directory for local operations (if needed)
43 | environment:
44 | - DB_PORT=3306
45 | - DB_HOST=mysql
46 | - DB_NAME=database
47 | - DB_USERNAME=username
48 | - DB_PASSWORD=password
49 | ## AWS S3 Configuration
50 | - AWS_S3_ENDPOINT=https://s3.amazonaws.com
51 | - AWS_S3_BUCKET_NAME=backup
52 | - AWS_REGION=us-west-2
53 | - AWS_ACCESS_KEY=xxxx
54 | - AWS_SECRET_KEY=xxxxx
55 | ## Optional: Disable SSL for S3 alternatives like Minio
56 | - AWS_DISABLE_SSL=false
57 | ## Optional: Enable path-style access for S3 alternatives like Minio
58 | - AWS_FORCE_PATH_STYLE=false
    # Ensure the mysql-bkup container is connected to the same network as your database
60 | networks:
61 | - web
62 |
63 | networks:
64 | web:
65 | ```
66 |
67 | ---
68 |
69 | ## Key Notes
70 |
71 | - **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
72 | - **S3 Path**: Use the `--path` flag to specify the folder within the S3 bucket where the backup file is located.
73 | - **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption.
74 | - **S3 Alternatives**: For S3-compatible storage like Minio, set `AWS_DISABLE_SSL` and `AWS_FORCE_PATH_STYLE` as needed.
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.
--------------------------------------------------------------------------------
/docs/how-tos/restore-from-ssh.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Restore database from SSH
3 | layout: default
4 | parent: How Tos
5 | nav_order: 7
6 | ---
7 |
8 | # Restore Database from SSH Remote Server
9 |
10 | To restore a MySQL database from a backup stored on an SSH remote server, use the `restore` command and specify the backup file with the `--file` flag. The system supports the following file formats:
11 |
12 | - `.sql` (uncompressed SQL dump)
13 | - `.sql.gz` (gzip-compressed SQL dump)
14 | - `.sql.gpg` (GPG-encrypted SQL dump)
15 | - `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)
16 |
17 | ---
18 |
19 | ## Configuration Steps
20 |
21 | 1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
22 | 2. **Set the Storage Type**: Add the `--storage ssh` flag to indicate that the backup is stored on an SSH remote server.
23 | 3. **Provide SSH Configuration**: Include the necessary SSH credentials and configuration.
24 | 4. **Provide Database Credentials**: Ensure the correct database connection details are provided.
25 |
26 | ---
27 |
28 | ## Example: Restore from SSH Remote Server Configuration
29 |
30 | Below is an example `docker-compose.yml` configuration for restoring a database from an SSH remote server:
31 |
32 | ```yaml
33 | services:
34 | mysql-bkup:
35 | # In production, lock your image tag to a specific release version
36 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
37 | # for available releases.
38 | image: jkaninda/mysql-bkup
39 | container_name: mysql-bkup
40 | command: restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
41 | volumes:
42 | - ./backup:/backup # Mount the directory for local operations (if needed)
43 | - ./id_ed25519:/tmp/id_ed25519 # Mount the SSH private key file
44 | environment:
45 | - DB_PORT=3306
46 | - DB_HOST=mysql
47 | - DB_NAME=database
48 | - DB_USERNAME=username
49 | - DB_PASSWORD=password
50 | ## SSH Configuration
51 | - SSH_HOST_NAME=hostname
52 | - SSH_PORT=22
53 | - SSH_USER=user
54 | - SSH_REMOTE_PATH=/home/jkaninda/backups
55 | - SSH_IDENTIFY_FILE=/tmp/id_ed25519
56 | ## Optional: Use password instead of private key (not recommended)
57 | #- SSH_PASSWORD=password
58 | # Ensure the mysql-bkup container is connected to the same network as your database
59 | networks:
60 | - web
61 |
62 | networks:
63 | web:
64 | ```
65 |
66 | ---
67 |
68 | ## Key Notes
69 |
70 | - **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
71 | - **SSH Path**: Use the `--path` flag to specify the folder on the SSH remote server where the backup file is located.
72 | - **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption.
73 | - **SSH Authentication**: Use a private key (`SSH_IDENTIFY_FILE`) for SSH authentication instead of a password for better security.
74 | - **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.
--------------------------------------------------------------------------------
/docs/Gemfile.lock:
--------------------------------------------------------------------------------
1 | GEM
2 | remote: https://rubygems.org/
3 | specs:
4 | addressable (2.8.7)
5 | public_suffix (>= 2.0.2, < 7.0)
6 | colorator (1.1.0)
7 | concurrent-ruby (1.3.3)
8 | csv (3.3.0)
9 | em-websocket (0.5.3)
10 | eventmachine (>= 0.12.9)
11 | http_parser.rb (~> 0)
12 | eventmachine (1.2.7)
13 | ffi (1.17.0)
14 | ffi (1.17.0-aarch64-linux-gnu)
15 | ffi (1.17.0-aarch64-linux-musl)
16 | ffi (1.17.0-arm-linux-gnu)
17 | ffi (1.17.0-arm-linux-musl)
18 | ffi (1.17.0-arm64-darwin)
19 | ffi (1.17.0-x86-linux-gnu)
20 | ffi (1.17.0-x86-linux-musl)
21 | ffi (1.17.0-x86_64-darwin)
22 | ffi (1.17.0-x86_64-linux-gnu)
23 | ffi (1.17.0-x86_64-linux-musl)
24 | forwardable-extended (2.6.0)
25 | http_parser.rb (0.8.0)
26 | i18n (1.14.5)
27 | concurrent-ruby (~> 1.0)
28 | jekyll (3.10.0)
29 | addressable (~> 2.4)
30 | colorator (~> 1.0)
31 | csv (~> 3.0)
32 | em-websocket (~> 0.5)
33 | i18n (>= 0.7, < 2)
34 | jekyll-sass-converter (~> 1.0)
35 | jekyll-watch (~> 2.0)
36 | kramdown (>= 1.17, < 3)
37 | liquid (~> 4.0)
38 | mercenary (~> 0.3.3)
39 | pathutil (~> 0.9)
40 | rouge (>= 1.7, < 4)
41 | safe_yaml (~> 1.0)
42 | webrick (>= 1.0)
43 | jekyll-feed (0.17.0)
44 | jekyll (>= 3.7, < 5.0)
45 | jekyll-include-cache (0.2.1)
46 | jekyll (>= 3.7, < 5.0)
47 | jekyll-sass-converter (1.5.2)
48 | sass (~> 3.4)
49 | jekyll-seo-tag (2.8.0)
50 | jekyll (>= 3.8, < 5.0)
51 | jekyll-watch (2.2.1)
52 | listen (~> 3.0)
53 | just-the-docs (0.8.2)
54 | jekyll (>= 3.8.5)
55 | jekyll-include-cache
56 | jekyll-seo-tag (>= 2.0)
57 | rake (>= 12.3.1)
58 | kramdown (2.4.0)
59 | rexml
60 | kramdown-parser-gfm (1.1.0)
61 | kramdown (~> 2.0)
62 | liquid (4.0.4)
63 | listen (3.9.0)
64 | rb-fsevent (~> 0.10, >= 0.10.3)
65 | rb-inotify (~> 0.9, >= 0.9.10)
66 | mercenary (0.3.6)
67 | minima (2.5.1)
68 | jekyll (>= 3.5, < 5.0)
69 | jekyll-feed (~> 0.9)
70 | jekyll-seo-tag (~> 2.1)
71 | pathutil (0.16.2)
72 | forwardable-extended (~> 2.6)
73 | public_suffix (6.0.1)
74 | rake (13.2.1)
75 | rb-fsevent (0.11.2)
76 | rb-inotify (0.11.1)
77 | ffi (~> 1.0)
78 | rexml (3.3.2)
79 | strscan
80 | rouge (3.30.0)
81 | safe_yaml (1.0.5)
82 | sass (3.7.4)
83 | sass-listen (~> 4.0.0)
84 | sass-listen (4.0.0)
85 | rb-fsevent (~> 0.9, >= 0.9.4)
86 | rb-inotify (~> 0.9, >= 0.9.7)
87 | strscan (3.1.0)
88 | wdm (0.1.1)
89 | webrick (1.8.1)
90 |
91 | PLATFORMS
92 | aarch64-linux-gnu
93 | aarch64-linux-musl
94 | arm-linux-gnu
95 | arm-linux-musl
96 | arm64-darwin
97 | ruby
98 | x86-linux-gnu
99 | x86-linux-musl
100 | x86_64-darwin
101 | x86_64-linux-gnu
102 | x86_64-linux-musl
103 |
104 | DEPENDENCIES
105 | http_parser.rb (~> 0.6.0)
106 | jekyll (~> 3.10.0)
107 | jekyll-feed (~> 0.6)
108 | just-the-docs
109 | kramdown-parser-gfm
110 | minima (~> 2.0)
111 | tzinfo (>= 1, < 3)
112 | tzinfo-data
113 | wdm (~> 0.1.0)
114 |
115 | BUNDLED WITH
116 | 2.5.16
117 |
--------------------------------------------------------------------------------
/utils/logger.go:
--------------------------------------------------------------------------------
1 | /*
2 | MIT License
3 |
4 | Copyright (c) 2023 Jonas Kaninda
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | */
24 |
25 | package utils
26 |
27 | import (
28 | "fmt"
29 | "log"
30 | "os"
31 | "runtime"
32 | "strings"
33 | )
34 |
35 | // Info returns info log
36 | func Info(msg string, args ...interface{}) {
37 | log.SetOutput(getStd("/dev/stdout"))
38 | logWithCaller("INFO", msg, args...)
39 |
40 | }
41 |
42 | // Warn returns warning log
43 | func Warn(msg string, args ...interface{}) {
44 | log.SetOutput(getStd("/dev/stdout"))
45 | logWithCaller("WARN", msg, args...)
46 |
47 | }
48 |
49 | // Error logs error messages
50 | func Error(msg string, args ...interface{}) {
51 | log.SetOutput(getStd("/dev/stderr"))
52 | logWithCaller("ERROR", msg, args...)
53 | }
54 |
55 | func Fatal(msg string, args ...interface{}) {
56 | log.SetOutput(os.Stdout)
57 | // Format message if there are additional arguments
58 | formattedMessage := msg
59 | if len(args) > 0 {
60 | formattedMessage = fmt.Sprintf(msg, args...)
61 | }
62 | logWithCaller("ERROR", msg, args...)
63 | NotifyError(formattedMessage)
64 | os.Exit(1)
65 | }
66 |
// logWithCaller formats msg with args (fmt.Sprintf semantics) and writes it
// through the standard logger prefixed with level. When level matches
// traceLog, the caller's file and line number (two frames up) are appended.
func logWithCaller(level, msg string, args ...interface{}) {
	// Format message if there are additional arguments
	formattedMessage := msg
	if len(args) > 0 {
		formattedMessage = fmt.Sprintf(msg, args...)
	}

	// Get the caller's file and line number (skip 2 frames)
	_, file, line, ok := runtime.Caller(2)
	if !ok {
		file = "unknown"
		line = 0
	}
	// Log message with caller information if GOMA_LOG_LEVEL is trace
	// NOTE(review): despite the comment above, no GOMA_LOG_LEVEL variable is
	// read here — the comparison is against the per-message level string
	// (INFO/WARN/ERROR as passed by Info/Warn/Error/Fatal), so the "off" and
	// trace branches only trigger if a caller passes those strings as the
	// level. Confirm whether a configured-log-level lookup was intended.
	if strings.ToLower(level) != "off" {
		if strings.ToLower(level) == traceLog {
			log.Printf("%s: %s (File: %s, Line: %d)\n", level, formattedMessage, file, line)
		} else {
			log.Printf("%s: %s\n", level, formattedMessage)
		}
	}
}
90 |
// getStd maps a device path ("/dev/stdout", "/dev/stderr", "/dev/stdin")
// to the corresponding standard stream. Any other value falls back to stdout.
func getStd(out string) *os.File {
	if out == "/dev/stderr" {
		return os.Stderr
	}
	if out == "/dev/stdin" {
		return os.Stdin
	}
	// "/dev/stdout" and every unrecognised path resolve to stdout.
	return os.Stdout
}
104 |
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Overview
3 | layout: home
4 | nav_order: 1
5 | ---
6 |
7 | # About mysql-bkup
8 | {:.no_toc}
9 |
10 | **MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**.
11 | It supports a variety of storage options and ensures data security through GPG encryption.
12 |
13 | **MYSQL-BKUP** is designed for seamless deployment on **Docker** and **Kubernetes**, simplifying MySQL backup, restoration, and migration across environments.
It is a lightweight, multi-architecture solution compatible with **Docker**, **Docker Swarm**, **Kubernetes**, and other container orchestration platforms.

---
16 |
17 | ## Key Features
18 |
19 | ### Storage Options
20 | - **Local storage**
21 | - **AWS S3** or any S3-compatible object storage
22 | - **FTP**
23 | - **SFTP**
24 | - **SSH-compatible storage**
25 | - **Azure Blob storage**
26 |
27 | ### Data Security
28 | - Backups can be encrypted using **GPG** to ensure data confidentiality.
29 |
30 | ### Deployment Flexibility
- Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
32 | - Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
33 | - Supports recurring backups of MySQL databases:
34 | - On Docker for automated backup schedules.
35 | - As a **Job** or **CronJob** on Kubernetes.
36 |
37 | ### Notifications
38 | - Receive real-time updates on backup success or failure via:
39 | - **Telegram**
40 | - **Email**
41 |
42 | ---
43 |
44 | ## 💡Use Cases
45 |
46 | - **Scheduled Backups**: Automate recurring backups using Docker or Kubernetes.
47 | - **Disaster Recovery:** Quickly restore backups to a clean MySQL instance.
48 | - **Database Migration**: Seamlessly move data across environments using the built-in `migrate` feature.
49 | - **Secure Archiving:** Keep backups encrypted and safely stored in the cloud or remote servers.
50 |
51 |
52 | ## ✅ Verified Platforms:
53 | MYSQL-BKUP has been tested and runs successfully on:
54 |
55 | - Docker
56 | - Docker Swarm
57 | - Kubernetes
58 | - OpenShift
59 |
60 | ---
61 |
62 | ## Get Involved
63 |
64 | We welcome contributions! Feel free to give us a ⭐, submit PRs, or open issues on our [GitHub repository](https://github.com/jkaninda/mysql-bkup).
65 |
66 | {: .fs-6 .fw-300 }
67 |
68 | ---
69 |
70 | {: .note }
71 | Code and documentation for the `v1` version are available on [this branch][v1-branch].
72 |
73 | [v1-branch]: https://github.com/jkaninda/mysql-bkup
74 |
75 | ---
76 |
77 | ## Available Image Registries
78 |
79 | The Docker image is published to both **Docker Hub** and the **GitHub Container Registry**. You can use either of the following:
80 |
81 | ```bash
82 | docker pull jkaninda/mysql-bkup
83 | docker pull ghcr.io/jkaninda/mysql-bkup
84 | ```
85 |
86 | While the documentation references Docker Hub, all examples work seamlessly with `ghcr.io`.
87 |
88 | ---
89 |
90 | ## References
91 |
92 | We created this image as a simpler and more lightweight alternative to existing solutions. Here’s why:
93 |
94 | - **Lightweight:** Written in Go, the image is optimized for performance and minimal resource usage.
95 | - **Multi-Architecture Support:** Supports `arm64` and `arm/v7` architectures.
96 | - **Docker Swarm Support:** Fully compatible with Docker in Swarm mode.
97 | - **Kubernetes Support:** Designed to work seamlessly with Kubernetes.
98 |
--------------------------------------------------------------------------------
/docs/how-tos/migrate.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Migrate database
3 | layout: default
4 | parent: How Tos
5 | nav_order: 10
6 | ---
7 |
8 | # Migrate Database
9 |
10 | To migrate a MySQL database from a source to a target database, you can use the `migrate` command. This feature simplifies the process by combining the backup and restore operations into a single step.
11 |
12 | {: .note }
13 | The `migrate` command eliminates the need for separate backup and restore operations. It directly transfers data from the source database to the target database.
14 |
15 | {: .warning }
16 | The `migrate` operation is **irreversible**. Always back up your target database before performing this action.
17 |
18 | ---
19 |
20 | ## Configuration Steps
21 |
22 | 1. **Source Database**: Provide connection details for the source database.
23 | 2. **Target Database**: Provide connection details for the target database.
24 | 3. **Run the Migration**: Use the `migrate` command to initiate the migration.
25 |
26 | ---
27 |
28 | ## Example: Docker Compose Configuration
29 |
30 | Below is an example `docker-compose.yml` configuration for migrating a database:
31 |
32 | ```yaml
33 | services:
34 | mysql-bkup:
35 | # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
37 | # for available releases.
38 | image: jkaninda/mysql-bkup
39 | container_name: mysql-bkup
40 | command: migrate
41 | volumes:
42 | - ./backup:/backup
43 | environment:
44 | ## Source Database
45 | - DB_PORT=3306
46 | - DB_HOST=mysql
47 | - DB_NAME=database
48 | - DB_USERNAME=username
49 | - DB_PASSWORD=password
50 |
51 | ## Target Database
      - TARGET_DB_HOST=target-mysql
53 | - TARGET_DB_PORT=3306
54 | - TARGET_DB_NAME=dbname
55 | - TARGET_DB_USERNAME=username
56 | - TARGET_DB_PASSWORD=password
57 |
58 | # Ensure the mysql-bkup container is connected to the same network as your database
59 | networks:
60 | - web
61 |
62 | networks:
63 | web:
64 | ```
65 |
66 | ---
67 |
68 | ## Migrate Database Using Docker CLI
69 |
70 | You can also run the migration directly using the Docker CLI. Below is an example:
71 |
72 | ### Environment Variables
73 |
74 | Save your source and target database connection details in an environment file (e.g., `your-env`):
75 |
76 | ```bash
77 | ## Source Database
DB_HOST=mysql
79 | DB_PORT=3306
80 | DB_NAME=dbname
81 | DB_USERNAME=username
82 | DB_PASSWORD=password
83 |
84 | ## Target Database
TARGET_DB_HOST=target-mysql
86 | TARGET_DB_PORT=3306
87 | TARGET_DB_NAME=dbname
88 | TARGET_DB_USERNAME=username
89 | TARGET_DB_PASSWORD=password
90 | ```
91 |
92 | ### Run the Migration
93 |
94 | ```bash
95 | docker run --rm --network your_network_name \
96 | --env-file your-env \
97 | -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup migrate
99 | ```
100 |
101 | ---
102 |
103 | ## Key Notes
104 |
105 | - **Irreversible Operation**: The `migrate` command directly transfers data from the source to the target database. Ensure you have a backup of the target database before proceeding.
106 | - **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your source and target databases.
107 |
--------------------------------------------------------------------------------
/docs/how-tos/backup.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Backup
3 | layout: default
4 | parent: How Tos
5 | nav_order: 1
6 | ---
7 |
8 | # Backup Database
9 |
10 | To back up your database, use the `backup` command.
11 |
12 | This section explains how to configure and run backups, including recurring backups, using Docker or Kubernetes.
13 |
14 | ---
15 |
16 | ## Default Configuration
17 |
18 | - **Storage**: By default, backups are stored locally in the `/backup` directory.
19 | - **Compression**: Backups are compressed using `gzip` by default. Use the `--disable-compression` flag to disable compression.
20 | - **Security**: It is recommended to create a dedicated user with read-only access for backup tasks.
21 |
22 | {: .note }
23 | The backup process supports recurring backups on Docker or Docker Swarm. On Kubernetes, it can be deployed as a CronJob.
24 |
25 | ---
26 |
27 | ## Example: Basic Backup Configuration
28 |
29 | Below is an example `docker-compose.yml` configuration for backing up a database:
30 |
31 | ```yaml
32 | services:
33 | mysql-bkup:
34 | # In production, lock your image tag to a specific release version
35 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
36 | # for available releases.
37 | image: jkaninda/mysql-bkup
38 | container_name: mysql-bkup
39 | command: backup -d database
40 | volumes:
41 | - ./backup:/backup
42 | environment:
43 | - DB_PORT=3306
44 | - DB_HOST=mysql
45 | - DB_NAME=database
46 | - DB_USERNAME=username
47 | - DB_PASSWORD=password
48 |
49 | # Ensure the mysql-bkup container is connected to the same network as your database
50 | networks:
51 | - web
52 |
53 | networks:
54 | web:
55 | ```
56 |
57 | ---
58 |
59 | ## Backup Using Docker CLI
60 |
61 | You can also run backups directly using the Docker CLI:
62 |
63 | ```bash
64 | docker run --rm --network your_network_name \
65 | -v $PWD/backup:/backup/ \
66 | -e "DB_HOST=dbhost" \
67 | -e "DB_USERNAME=username" \
68 | -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup -d database_name
70 | ```
71 |
72 | ---
73 |
74 | ## Recurring Backups
75 |
76 | To schedule recurring backups, use the `--cron-expression (-e)` flag or the `BACKUP_CRON_EXPRESSION` environment variable. This allows you to define a cron schedule for automated backups.
77 |
78 | ### Example: Recurring Backup Configuration
79 |
80 | ```yaml
81 | services:
82 | mysql-bkup:
83 | # In production, lock your image tag to a specific release version
84 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
85 | # for available releases.
86 | image: jkaninda/mysql-bkup
87 | container_name: mysql-bkup
88 | command: backup -d database --cron-expression @midnight
89 | volumes:
90 | - ./backup:/backup
91 | environment:
92 | - DB_PORT=3306
93 | - DB_HOST=mysql
94 | - DB_NAME=database
95 | - DB_USERNAME=username
96 | - DB_PASSWORD=password
97 | ## Optional: Define a cron schedule for recurring backups
98 | - BACKUP_CRON_EXPRESSION=@midnight
99 | ## Optional: Delete old backups after a specified number of days
100 | #- BACKUP_RETENTION_DAYS=7
101 |
102 | # Ensure the mysql-bkup container is connected to the same network as your database
103 | networks:
104 | - web
105 |
106 | networks:
107 | web:
108 | ```
109 |
110 | ---
111 |
112 | ## Key Notes
113 |
114 | - **Cron Expression**: Use the `--cron-expression (-e)` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example:
115 | - `@midnight`: Runs the backup daily at midnight.
116 | - `0 1 * * *`: Runs the backup daily at 1:00 AM.
117 | - **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.
118 |
--------------------------------------------------------------------------------
/docs/how-tos/encrypt-backup.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Encrypt backups using GPG
3 | layout: default
4 | parent: How Tos
5 | nav_order: 8
6 | ---
7 | # Encrypt Backup
8 |
9 | The image supports encrypting backups using one of two methods: **GPG with a passphrase** or **GPG with a public key**. When a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY` environment variable is set, the backup archive will be encrypted and saved as a `.sql.gpg` or `.sql.gz.gpg` file.
10 |
11 | {: .warning }
12 | To restore an encrypted backup, you must provide the same GPG passphrase or private key used during the backup process.
13 |
14 | ---
15 |
16 | ## Key Features
17 |
18 | - **Cipher Algorithm**: `aes256`
19 | - **Automatic Restoration**: Backups encrypted with a GPG passphrase can be restored automatically without manual decryption.
20 | - **Manual Decryption**: Backups encrypted with a GPG public key require manual decryption before restoration.
21 |
22 | ---
23 |
24 | ## Using GPG Passphrase
25 |
26 | To encrypt backups using a GPG passphrase, set the `GPG_PASSPHRASE` environment variable. The backup will be encrypted and can be restored automatically.
27 |
28 | ### Example Configuration
29 |
30 | ```yaml
31 | services:
32 | mysql-bkup:
33 | # In production, lock your image tag to a specific release version
34 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
35 | # for available releases.
36 | image: jkaninda/mysql-bkup
37 | container_name: mysql-bkup
38 | command: backup -d database
39 | volumes:
40 | - ./backup:/backup
41 | environment:
42 | - DB_PORT=3306
43 | - DB_HOST=mysql
44 | - DB_NAME=database
45 | - DB_USERNAME=username
46 | - DB_PASSWORD=password
47 | ## Required to encrypt backup
48 | - GPG_PASSPHRASE=my-secure-passphrase
      # Ensure the mysql-bkup container is connected to the same network as your database
50 | networks:
51 | - web
52 |
53 | networks:
54 | web:
55 | ```
56 |
57 | ---
58 |
59 | ## Using GPG Public Key
60 |
61 | To encrypt backups using a GPG public key, set the `GPG_PUBLIC_KEY` environment variable to the path of your public key file. Backups encrypted with a public key require manual decryption before restoration.
62 |
63 | ### Example Configuration
64 |
65 | ```yaml
66 | services:
67 | mysql-bkup:
68 | # In production, lock your image tag to a specific release version
69 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
70 | # for available releases.
71 | image: jkaninda/mysql-bkup
72 | container_name: mysql-bkup
73 | command: backup -d database
74 | volumes:
75 | - ./backup:/backup
76 | - ./public_key.asc:/config/public_key.asc
77 | environment:
78 | - DB_PORT=3306
79 | - DB_HOST=mysql
80 | - DB_NAME=database
81 | - DB_USERNAME=username
82 | - DB_PASSWORD=password
83 | ## Required to encrypt backup
84 | - GPG_PUBLIC_KEY=/config/public_key.asc
      # Ensure the mysql-bkup container is connected to the same network as your database
86 | networks:
87 | - web
88 |
89 | networks:
90 | web:
91 | ```
92 |
93 | ---
94 |
95 | ## Manual Decryption
96 |
97 | If you encrypted your backup using a GPG public key, you must manually decrypt it before restoration. Use the `gnupg` tool for decryption.
98 |
99 | ### Decrypt Using a Passphrase
100 |
101 | ```bash
102 | gpg --batch --passphrase "my-passphrase" \
103 | --output database_20240730_044201.sql.gz \
104 | --decrypt database_20240730_044201.sql.gz.gpg
105 | ```
106 |
107 | ### Decrypt Using a Private Key
108 |
109 | ```bash
110 | gpg --output database_20240730_044201.sql.gz \
111 | --decrypt database_20240730_044201.sql.gz.gpg
112 | ```
113 |
114 | ---
115 |
116 | ## Key Notes
117 |
118 | - **Automatic Restoration**: Backups encrypted with a GPG passphrase can be restored directly without manual decryption.
119 | - **Manual Decryption**: Backups encrypted with a GPG public key require manual decryption using the corresponding private key.
120 | - **Security**: Always keep your GPG passphrase and private key secure. Use Kubernetes Secrets or other secure methods to manage sensitive data.
121 |
--------------------------------------------------------------------------------
/pkg/azure.go:
--------------------------------------------------------------------------------
1 | /*
2 | MIT License
3 |
4 | Copyright (c) 2023 Jonas Kaninda
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | */
24 |
25 | package pkg
26 |
27 | import (
28 | "fmt"
29 | "github.com/jkaninda/go-storage/pkg/azure"
30 | goutils "github.com/jkaninda/go-utils"
31 | "github.com/jkaninda/mysql-bkup/utils"
32 |
33 | "os"
34 | "path/filepath"
35 | "time"
36 | )
37 |
38 | func azureBackup(db *dbConfig, config *BackupConfig) {
39 | utils.Info("Backup database to Azure Blob Storage")
40 |
41 | // Backup database
42 | err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
43 | if err != nil {
44 | recoverMode(err, "Error backing up database")
45 | return
46 | }
47 | finalFileName := config.backupFileName
48 | if config.encryption {
49 | encryptBackup(config)
50 | finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
51 | }
52 | utils.Info("Uploading backup archive to Azure Blob storage ...")
53 | utils.Info("Backup name is %s", finalFileName)
54 | azureConfig := loadAzureConfig()
55 | azureStorage, err := azure.NewStorage(azure.Config{
56 | ContainerName: azureConfig.containerName,
57 | AccountName: azureConfig.accountName,
58 | AccountKey: azureConfig.accountKey,
59 | RemotePath: config.remotePath,
60 | LocalPath: tmpPath,
61 | })
62 | if err != nil {
63 | utils.Fatal("Error creating Azure storage: %s", err)
64 | }
65 | err = azureStorage.Copy(finalFileName)
66 | if err != nil {
67 | utils.Fatal("Error copying backup file: %s", err)
68 | }
69 | utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
70 | // Get backup info
71 | fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
72 | if err != nil {
73 | utils.Error("Error: %s", err)
74 | }
75 | backupSize = fileInfo.Size()
76 | // Delete backup file from tmp folder
77 | err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
78 | if err != nil {
79 | utils.Error("Error deleting file: %v", err)
80 |
81 | }
82 | if config.prune {
83 | err := azureStorage.Prune(config.backupRetention)
84 | if err != nil {
85 | utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
86 | }
87 |
88 | }
89 |
90 | utils.Info("Backup name is %s", finalFileName)
91 | utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
92 | utils.Info("Uploading backup archive to Azure Blob storage ... done ")
93 |
94 | duration := goutils.FormatDuration(time.Since(startTime), 0)
95 |
96 | // Send notification
97 | utils.NotifySuccess(&utils.NotificationData{
98 | File: finalFileName,
99 | BackupSize: utils.ConvertBytes(uint64(backupSize)),
100 | Database: db.dbName,
101 | Storage: config.storage,
102 | BackupLocation: filepath.Join(config.remotePath, finalFileName),
103 | Duration: duration,
104 | })
105 | // Delete temp
106 | deleteTemp()
107 | utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
108 | }
109 | func azureRestore(db *dbConfig, conf *RestoreConfig) {
110 | utils.Info("Restore database from Azure Blob storage")
111 | azureConfig := loadAzureConfig()
112 | azureStorage, err := azure.NewStorage(azure.Config{
113 | ContainerName: azureConfig.containerName,
114 | AccountName: azureConfig.accountName,
115 | AccountKey: azureConfig.accountKey,
116 | RemotePath: conf.remotePath,
117 | LocalPath: tmpPath,
118 | })
119 | if err != nil {
120 | utils.Fatal("Error creating SSH storage: %s", err)
121 | }
122 |
123 | err = azureStorage.CopyFrom(conf.file)
124 | if err != nil {
125 | utils.Fatal("Error downloading backup file: %s", err)
126 | }
127 | RestoreDatabase(db, conf)
128 | }
129 |
--------------------------------------------------------------------------------
/docs/how-tos/backup-to-s3.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Backup to AWS S3
3 | layout: default
4 | parent: How Tos
5 | nav_order: 2
6 | ---
7 | # Backup to AWS S3
8 |
9 | To store your backups on AWS S3, you can configure the backup process to use the `--storage s3` option. This section explains how to set up and configure S3-based backups.
10 |
11 | ---
12 |
13 | ## Configuration Steps
14 |
15 | 1. **Specify the Storage Type**
16 | Add the `--storage s3` flag to your backup command.
17 |
18 | 2. **Set the S3 Path**
19 | Optionally, specify a custom folder within your S3 bucket where backups will be stored using the `--path` flag.
20 | Example: `--path /my-custom-path`.
21 |
22 | 3. **Required Environment Variables**
23 | The following environment variables are mandatory for S3-based backups:
24 |
25 | - `AWS_S3_ENDPOINT`: The S3 endpoint URL (e.g., `https://s3.amazonaws.com`).
26 | - `AWS_S3_BUCKET_NAME`: The name of the S3 bucket where backups will be stored.
27 | - `AWS_REGION`: The AWS region where the bucket is located (e.g., `us-west-2`).
28 | - `AWS_ACCESS_KEY`: Your AWS access key.
29 | - `AWS_SECRET_KEY`: Your AWS secret key.
30 | - `AWS_DISABLE_SSL`: Set to `"true"` if using an S3 alternative like Minio without SSL (default is `"false"`).
31 | - `AWS_FORCE_PATH_STYLE`: Set to `"true"` if using an S3 alternative like Minio (default is `"false"`).
32 |
33 | ---
34 |
35 | ## Example Configuration
36 |
37 | Below is an example `docker-compose.yml` configuration for backing up to AWS S3:
38 |
39 | ```yaml
40 | services:
41 | mysql-bkup:
42 | # In production, lock your image tag to a specific release version
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup --storage s3 -d database --path /my-custom-path
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
54 | ## AWS Configuration
55 | - AWS_S3_ENDPOINT=https://s3.amazonaws.com
56 | - AWS_S3_BUCKET_NAME=backup
57 | - AWS_REGION=us-west-2
58 | - AWS_ACCESS_KEY=xxxx
59 | - AWS_SECRET_KEY=xxxxx
60 | ## Optional: Disable SSL for S3 alternatives like Minio
61 | - AWS_DISABLE_SSL="false"
62 | ## Optional: Enable path-style access for S3 alternatives like Minio
63 | - AWS_FORCE_PATH_STYLE=false
64 |
65 | # Ensure the mysql-bkup container is connected to the same network as your database
66 | networks:
67 | - web
68 |
69 | networks:
70 | web:
71 | ```
72 |
73 | ---
74 |
75 | ## Recurring Backups to S3
76 |
77 | To schedule recurring backups to S3, use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable. This allows you to define a cron schedule for automated backups.
78 |
79 | ### Example: Recurring Backup Configuration
80 |
81 | ```yaml
82 | services:
83 | mysql-bkup:
84 | # In production, lock your image tag to a specific release version
85 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
86 | # for available releases.
87 | image: jkaninda/mysql-bkup
88 | container_name: mysql-bkup
89 | command: backup --storage s3 -d database --cron-expression "0 1 * * *"
90 | environment:
91 | - DB_PORT=3306
92 | - DB_HOST=mysql
93 | - DB_NAME=database
94 | - DB_USERNAME=username
95 | - DB_PASSWORD=password
96 | ## AWS Configuration
97 | - AWS_S3_ENDPOINT=https://s3.amazonaws.com
98 | - AWS_S3_BUCKET_NAME=backup
99 | - AWS_REGION=us-west-2
100 | - AWS_ACCESS_KEY=xxxx
101 | - AWS_SECRET_KEY=xxxxx
102 | ## Optional: Define a cron schedule for recurring backups
103 | #- BACKUP_CRON_EXPRESSION=0 1 * * *
104 | ## Optional: Delete old backups after a specified number of days
105 | #- BACKUP_RETENTION_DAYS=7
106 | ## Optional: Disable SSL for S3 alternatives like Minio
107 | - AWS_DISABLE_SSL="false"
108 | ## Optional: Enable path-style access for S3 alternatives like Minio
109 | - AWS_FORCE_PATH_STYLE=false
110 |
    # Ensure the mysql-bkup container is connected to the same network as your database
112 | networks:
113 | - web
114 |
115 | networks:
116 | web:
117 | ```
118 |
119 | ---
120 |
121 | ## Key Notes
122 |
123 | - **Cron Expression**: Use the `--cron-expression` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example, `0 1 * * *` runs the backup daily at 1:00 AM.
124 | - **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.
125 | - **S3 Alternatives**: If using an S3 alternative like Minio, set `AWS_DISABLE_SSL="true"` and `AWS_FORCE_PATH_STYLE="true"` as needed.
126 |
127 |
--------------------------------------------------------------------------------
/docs/how-tos/backup-to-ssh.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Backup to SSH or SFTP
3 | layout: default
4 | parent: How Tos
5 | nav_order: 3
6 | ---
7 | # Backup to SFTP or SSH Remote Server
8 |
9 | To store your backups on an `SFTP` or `SSH` remote server instead of the default storage, you can configure the backup process to use the `--storage ssh` or `--storage remote` option.
10 | This section explains how to set up and configure SSH-based backups.
11 |
12 | ---
13 |
14 | ## Configuration Steps
15 |
16 | 1. **Specify the Storage Type**
17 | Add the `--storage ssh` or `--storage remote` flag to your backup command.
18 |
19 | 2. **Set the Remote Path**
20 | Define the full remote path where backups will be stored using the `--path` flag or the `REMOTE_PATH` environment variable.
21 | Example: `--path /home/jkaninda/backups`.
22 |
23 | 3. **Required Environment Variables**
24 | The following environment variables are mandatory for SSH-based backups:
25 |
26 | - `SSH_HOST`: The hostname or IP address of the remote server.
27 | - `SSH_USER`: The username for SSH authentication.
28 | - `REMOTE_PATH`: The directory on the remote server where backups will be stored.
29 | - `SSH_IDENTIFY_FILE`: The path to the private key file for SSH authentication.
30 | - `SSH_PORT`: The SSH port (default is `22`).
31 | - `SSH_PASSWORD`: (Optional) Use this only if you are not using a private key for authentication.
32 |
33 | {: .note }
34 | **Security Recommendation**: Using a private key (`SSH_IDENTIFY_FILE`) is strongly recommended over password-based authentication (`SSH_PASSWORD`) for better security.
35 |
36 | ---
37 |
38 | ## Example Configuration
39 |
40 | Below is an example `docker-compose.yml` configuration for backing up to an SSH remote server:
41 |
42 | ```yaml
43 | services:
44 | mysql-bkup:
45 | # In production, lock your image tag to a specific release version
46 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
47 | # for available releases.
48 | image: jkaninda/mysql-bkup
49 | container_name: mysql-bkup
50 | command: backup --storage remote -d database
51 | volumes:
52 | - ./id_ed25519:/tmp/id_ed25519
53 | environment:
54 | - DB_PORT=3306
55 | - DB_HOST=mysql
56 | - DB_NAME=database
57 | - DB_USERNAME=username
58 | - DB_PASSWORD=password
59 | ## SSH Configuration
60 | - SSH_HOST="hostname"
61 | - SSH_PORT=22
62 | - SSH_USER=user
63 | - REMOTE_PATH=/home/jkaninda/backups
64 | - SSH_IDENTIFY_FILE=/tmp/id_ed25519
65 | ## Optional: Use password instead of private key (not recommended)
66 | #- SSH_PASSWORD=password
67 |
68 | # Ensure the mysql-bkup container is connected to the same network as your database
69 | networks:
70 | - web
71 |
72 | networks:
73 | web:
74 | ```
75 |
76 | ---
77 |
78 | ## Recurring Backups to SSH Remote Server
79 |
80 | To schedule recurring backups, you can use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable.
81 | This allows you to define a cron schedule for automated backups.
82 |
83 | ### Example: Recurring Backup Configuration
84 |
85 | ```yaml
86 | services:
87 | mysql-bkup:
88 | # In production, lock your image tag to a specific release version
89 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
90 | # for available releases.
91 | image: jkaninda/mysql-bkup
92 | container_name: mysql-bkup
93 | command: backup -d database --storage ssh --cron-expression "@daily"
94 | volumes:
95 | - ./id_ed25519:/tmp/id_ed25519
96 | environment:
97 | - DB_PORT=3306
      - DB_HOST=mysql
99 | - DB_NAME=database
100 | - DB_USERNAME=username
101 | - DB_PASSWORD=password
102 | ## SSH Configuration
103 | - SSH_HOST="hostname"
104 | - SSH_PORT=22
105 | - SSH_USER=user
106 | - REMOTE_PATH=/home/jkaninda/backups
107 | - SSH_IDENTIFY_FILE=/tmp/id_ed25519
108 | ## Optional: Delete old backups after a specified number of days
109 | #- BACKUP_RETENTION_DAYS=7
110 | ## Optional: Use password instead of private key (not recommended)
111 | #- SSH_PASSWORD=password
112 |
113 | # Ensure the mysql-bkup container is connected to the same network as your database
114 | networks:
115 | - web
116 |
117 | networks:
118 | web:
119 | ```
120 |
121 | ---
122 |
123 | ## Key Notes
124 |
125 | - **Cron Expression**: Use the `--cron-expression` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example, `0 1 * * *` runs the backup daily at 1:00 AM.
126 | - **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.
127 | - **Security**: Always prefer private key authentication (`SSH_IDENTIFY_FILE`) over password-based authentication (`SSH_PASSWORD`) for enhanced security.
128 |
129 | ---
--------------------------------------------------------------------------------
/pkg/s3.go:
--------------------------------------------------------------------------------
1 | /*
2 | MIT License
3 |
4 | Copyright (c) 2023 Jonas Kaninda
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | */
24 |
25 | package pkg
26 |
27 | import (
28 | "fmt"
29 | "github.com/jkaninda/go-storage/pkg/s3"
30 | goutils "github.com/jkaninda/go-utils"
31 | "github.com/jkaninda/mysql-bkup/utils"
32 |
33 | "os"
34 | "path/filepath"
35 | "time"
36 | )
37 |
38 | func s3Backup(db *dbConfig, config *BackupConfig) {
39 |
40 | utils.Info("Backup database to s3 storage")
41 | // Backup database
42 | err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
43 | if err != nil {
44 | recoverMode(err, "Error backing up database")
45 | return
46 | }
47 | finalFileName := config.backupFileName
48 | if config.encryption {
49 | encryptBackup(config)
50 | finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
51 | }
52 | utils.Info("Uploading backup archive to remote storage S3 ... ")
53 | awsConfig := initAWSConfig()
54 | if config.remotePath == "" {
55 | config.remotePath = awsConfig.remotePath
56 | }
57 | utils.Info("Backup name is %s", finalFileName)
58 | s3Storage, err := s3.NewStorage(s3.Config{
59 | Endpoint: awsConfig.endpoint,
60 | Bucket: awsConfig.bucket,
61 | AccessKey: awsConfig.accessKey,
62 | SecretKey: awsConfig.secretKey,
63 | Region: awsConfig.region,
64 | DisableSsl: awsConfig.disableSsl,
65 | ForcePathStyle: awsConfig.forcePathStyle,
66 | RemotePath: config.remotePath,
67 | LocalPath: tmpPath,
68 | })
69 | if err != nil {
70 | utils.Fatal("Error creating s3 storage: %s", err)
71 | }
72 | err = s3Storage.Copy(finalFileName)
73 | if err != nil {
74 | utils.Fatal("Error copying backup file: %s", err)
75 | }
76 | // Get backup info
77 | fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
78 | if err != nil {
79 | utils.Error("Error: %s", err)
80 | }
81 | backupSize = fileInfo.Size()
82 |
83 | // Delete backup file from tmp folder
84 | err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
85 | if err != nil {
86 | fmt.Println("Error deleting file: ", err)
87 |
88 | }
89 | // Delete old backup
90 | if config.prune {
91 | err := s3Storage.Prune(config.backupRetention)
92 | if err != nil {
93 | utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
94 | }
95 | }
96 | utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
97 | utils.Info("Uploading backup archive to remote storage S3 ... done ")
98 | duration := goutils.FormatDuration(time.Since(startTime), 0)
99 | // Send notification
100 | utils.NotifySuccess(&utils.NotificationData{
101 | File: finalFileName,
102 | BackupSize: utils.ConvertBytes(uint64(backupSize)),
103 | Database: db.dbName,
104 | Storage: config.storage,
105 | BackupLocation: filepath.Join(config.remotePath, finalFileName),
106 | Duration: duration,
107 | })
108 | // Delete temp
109 | deleteTemp()
110 | utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
111 |
112 | }
113 | func s3Restore(db *dbConfig, conf *RestoreConfig) {
114 | utils.Info("Restore database from s3")
115 | awsConfig := initAWSConfig()
116 | if conf.remotePath == "" {
117 | conf.remotePath = awsConfig.remotePath
118 | }
119 | s3Storage, err := s3.NewStorage(s3.Config{
120 | Endpoint: awsConfig.endpoint,
121 | Bucket: awsConfig.bucket,
122 | AccessKey: awsConfig.accessKey,
123 | SecretKey: awsConfig.secretKey,
124 | Region: awsConfig.region,
125 | DisableSsl: awsConfig.disableSsl,
126 | ForcePathStyle: awsConfig.forcePathStyle,
127 | RemotePath: conf.remotePath,
128 | LocalPath: tmpPath,
129 | })
130 | if err != nil {
131 | utils.Fatal("Error creating s3 storage: %s", err)
132 | }
133 | err = s3Storage.CopyFrom(conf.file)
134 | if err != nil {
135 | utils.Fatal("Error download file from S3 storage: %s", err)
136 | }
137 | RestoreDatabase(db, conf)
138 | }
139 |
--------------------------------------------------------------------------------
/docs/how-tos/mutli-backup.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Run multiple database backup schedules in the same container
3 | layout: default
4 | parent: How Tos
5 | nav_order: 11
6 | ---
7 |
8 |
9 | # Multiple Backup Schedules
10 |
11 | This tool supports running multiple database backup schedules within the same container.
12 | You can configure these schedules with different settings using a **configuration file**. This flexibility allows you to manage backups for multiple databases efficiently.
13 |
14 | ---
15 |
16 | ## Configuration File Setup
17 |
18 | The configuration file can be mounted into the container at `/config/config.yaml`, `/config/config.yml`, or specified via the `BACKUP_CONFIG_FILE` environment variable.
19 |
20 | ### Key Features:
21 | - **Global Environment Variables**: Use these for databases that share the same configuration.
22 | - **Database-Specific Overrides**: Override global settings for individual databases by specifying them in the configuration file or using the database name as a prefix or suffix in the variable name (e.g., `DB_HOST_DATABASENAME` or `DATABASENAME_DB_HOST`).
23 | - **Global Cron Expression**: Define a global `cronExpression` in the configuration file to schedule backups for all databases. If omitted, backups will run immediately.
24 | - **Configuration File Path**: Specify the configuration file path using:
25 | - The `BACKUP_CONFIG_FILE` environment variable.
26 | - The `--config` or `-c` flag for the backup command.
27 |
28 | ---
29 |
30 | ## Configuration File Example
31 |
32 | Below is an example configuration file (`config.yaml`) that defines multiple databases and their respective backup settings:
33 |
34 | ```yaml
35 | # Optional: Define a global cron expression for scheduled backups.
36 | # Example: "@every 20m" (runs every 20 minutes). If omitted, backups run immediately.
37 | cronExpression: "" # Optional: Define a global cron expression for scheduled backups.
38 | backupRescueMode: false # Optional: Set to true to enable rescue mode for backups.
39 | databases:
40 | - host: mysql1 # Optional: Overrides DB_HOST or uses DB_HOST_DATABASE1.
    port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_DATABASE1.
42 | name: database1 # Required: Database name.
43 | user: database1 # Optional: Overrides DB_USERNAME or uses DB_USERNAME_DATABASE1.
44 | password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_DATABASE1.
45 | path: /s3-path/database1 # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).
46 |
47 | - host: mysql2 # Optional: Overrides DB_HOST or uses DB_HOST_LLAP.
    port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_LLAP.
49 | name: lldap # Required: Database name.
50 | user: lldap # Optional: Overrides DB_USERNAME or uses DB_USERNAME_LLAP.
51 | password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_LLAP.
52 | path: /s3-path/lldap # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).
53 |
54 | - host: mysql3 # Optional: Overrides DB_HOST or uses DB_HOST_KEYCLOAK.
    port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_KEYCLOAK.
56 | name: keycloak # Required: Database name.
57 | user: keycloak # Optional: Overrides DB_USERNAME or uses DB_USERNAME_KEYCLOAK.
58 | password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_KEYCLOAK.
59 | path: /s3-path/keycloak # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).
60 |
61 | - host: mysql4 # Optional: Overrides DB_HOST or uses DB_HOST_JOPLIN.
    port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_JOPLIN.
63 | name: joplin # Required: Database name.
64 | user: joplin # Optional: Overrides DB_USERNAME or uses DB_USERNAME_JOPLIN.
65 | password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_JOPLIN.
66 | path: /s3-path/joplin # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).
67 | ```
68 |
69 | ---
70 |
71 | ## Docker Compose Configuration
72 |
73 | To use the configuration file in a Docker Compose setup, mount the file and specify its path using the `BACKUP_CONFIG_FILE` environment variable.
74 |
75 | ### Example: Docker Compose File
76 |
77 | ```yaml
78 | services:
79 | mysql-bkup:
80 | # In production, lock your image tag to a specific release version
81 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
82 | # for available releases.
83 | image: jkaninda/mysql-bkup
84 | container_name: mysql-bkup
85 | command: backup #--config /backup/config.yaml # config file
86 | volumes:
87 | - ./backup:/backup # Mount the backup directory
88 | - ./config.yaml:/backup/config.yaml # Mount the configuration file
89 | environment:
90 | ## Specify the path to the configuration file
91 | - BACKUP_CONFIG_FILE=/backup/config.yaml
92 | # Ensure the mysql-bkup container is connected to the same network as your database
93 | networks:
94 | - web
95 |
96 | networks:
97 | web:
98 | ```
99 |
100 | ---
101 |
102 |
103 |
104 |
--------------------------------------------------------------------------------
/pkg/restore.go:
--------------------------------------------------------------------------------
1 | /*
2 | MIT License
3 |
4 | Copyright (c) 2023 Jonas Kaninda
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | */
24 |
25 | package pkg
26 |
27 | import (
28 | "fmt"
29 | "github.com/jkaninda/encryptor"
30 | "github.com/jkaninda/go-storage/pkg/local"
31 | "github.com/jkaninda/mysql-bkup/utils"
32 | "github.com/spf13/cobra"
33 | "os"
34 | "os/exec"
35 | "path/filepath"
36 | )
37 |
38 | func StartRestore(cmd *cobra.Command) {
39 | intro()
40 | dbConf = initDbConfig(cmd)
41 | restoreConf := initRestoreConfig(cmd)
42 |
43 | switch restoreConf.storage {
44 | case "local":
45 | localRestore(dbConf, restoreConf)
46 | case "s3", "S3":
47 | s3Restore(dbConf, restoreConf)
48 | case "ssh", "SSH", "remote":
49 | remoteRestore(dbConf, restoreConf)
50 | case "ftp", "FTP":
51 | ftpRestore(dbConf, restoreConf)
52 | case "azure":
53 | azureRestore(dbConf, restoreConf)
54 | default:
55 | localRestore(dbConf, restoreConf)
56 | }
57 | }
58 | func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
59 | utils.Info("Restore database from local")
60 | basePath := filepath.Dir(restoreConf.file)
61 | fileName := filepath.Base(restoreConf.file)
62 | restoreConf.file = fileName
63 | if basePath == "" || basePath == "." {
64 | basePath = storagePath
65 | }
66 | localStorage := local.NewStorage(local.Config{
67 | RemotePath: basePath,
68 | LocalPath: tmpPath,
69 | })
70 | err := localStorage.CopyFrom(fileName)
71 | if err != nil {
72 | utils.Fatal("Error copying backup file: %s", err)
73 | }
74 | RestoreDatabase(dbConf, restoreConf)
75 |
76 | }
77 |
// RestoreDatabase restores the database from a backup file located in the
// tmp folder. If the file carries a ".gpg" extension it is decrypted first,
// then the database connection is verified and the dump is applied.
// Fatals (process exit) on any unrecoverable error.
func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
	if conf.file == "" {
		utils.Fatal("Error, file required")
	}

	// Backups are expected to have already been copied into the tmp folder.
	filePath := filepath.Join(tmpPath, conf.file)
	rFile, err := os.ReadFile(filePath)
	if err != nil {
		utils.Fatal("Error reading backup file: %v", err)
	}

	extension := filepath.Ext(filePath)
	// Decrypted output keeps the same name minus the trailing extension.
	outputFile := RemoveLastExtension(filePath)

	if extension == ".gpg" {
		decryptBackup(conf, rFile, outputFile)
	}

	// Re-resolve the path: decryptBackup may update conf.file to the
	// decrypted file name (the ".gpg" suffix stripped).
	restorationFile := filepath.Join(tmpPath, conf.file)
	if !utils.FileExists(restorationFile) {
		utils.Fatal("File not found: %s", restorationFile)
	}

	// Fail fast if the database is unreachable before feeding it the dump.
	if err := testDatabaseConnection(db); err != nil {
		utils.Fatal("Error connecting to the database: %v", err)
	}

	utils.Info("Restoring database...")
	restoreDatabaseFile(db, restorationFile)
}
109 |
110 | func decryptBackup(conf *RestoreConfig, rFile []byte, outputFile string) {
111 | if conf.usingKey {
112 | utils.Info("Decrypting backup using private key...")
113 | prKey, err := os.ReadFile(conf.privateKey)
114 | if err != nil {
115 | utils.Fatal("Error reading private key: %v", err)
116 | }
117 | if err := encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase); err != nil {
118 | utils.Fatal("Error decrypting backup: %v", err)
119 | }
120 | } else {
121 | if conf.passphrase == "" {
122 | utils.Fatal("Passphrase or private key required for GPG file.")
123 | }
124 | utils.Info("Decrypting backup using passphrase...")
125 | if err := encryptor.Decrypt(rFile, outputFile, conf.passphrase); err != nil {
126 | utils.Fatal("Error decrypting file: %v", err)
127 | }
128 | conf.file = RemoveLastExtension(conf.file)
129 | }
130 | }
131 |
132 | func restoreDatabaseFile(db *dbConfig, restorationFile string) {
133 | extension := filepath.Ext(restorationFile)
134 | var cmdStr string
135 |
136 | switch extension {
137 | case ".gz":
138 | cmdStr = fmt.Sprintf("zcat %s | mariadb --defaults-file=%s %s", restorationFile, mysqlClientConfig, db.dbName)
139 | case ".sql":
140 | cmdStr = fmt.Sprintf("cat %s | mariadb --defaults-file=%s %s", restorationFile, mysqlClientConfig, db.dbName)
141 | default:
142 | utils.Fatal("Unknown file extension: %s", extension)
143 | }
144 |
145 | cmd := exec.Command("sh", "-c", cmdStr)
146 | output, err := cmd.CombinedOutput()
147 | if err != nil {
148 | utils.Fatal("Error restoring database: %v\nOutput: %s", err, string(output))
149 | }
150 |
151 | utils.Info("Database has been restored successfully.")
152 | deleteTemp()
153 | }
154 |
--------------------------------------------------------------------------------
/utils/notification.go:
--------------------------------------------------------------------------------
1 | /*
2 | MIT License
3 |
4 | Copyright (c) 2023 Jonas Kaninda
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | */
24 |
25 | package utils
26 |
27 | import (
28 | "bytes"
29 | "crypto/tls"
30 | "encoding/json"
31 | "fmt"
32 | "github.com/go-mail/mail"
33 | "html/template"
34 | "io"
35 | "net/http"
36 | "os"
37 | "path/filepath"
38 | "strings"
39 | "time"
40 | )
41 |
42 | func parseTemplate[T any](data T, fileName string) (string, error) {
43 | // Open the file
44 | tmpl, err := template.ParseFiles(filepath.Join(templatePath, fileName))
45 | if err != nil {
46 | return "", err
47 | }
48 |
49 | var buf bytes.Buffer
50 | if err = tmpl.Execute(&buf, data); err != nil {
51 | return "", err
52 | }
53 |
54 | return buf.String(), nil
55 | }
56 |
57 | func SendEmail(subject, body string) error {
58 | Info("Start sending email notification....")
59 | config := loadMailConfig()
60 | emails := strings.Split(config.MailTo, ",")
61 | m := mail.NewMessage()
62 | m.SetHeader("From", config.MailFrom)
63 | m.SetHeader("To", emails...)
64 | m.SetHeader("Subject", subject)
65 | m.SetBody("text/html", body)
66 | d := mail.NewDialer(config.MailHost, config.MailPort, config.MailUserName, config.MailPassword)
67 | d.TLSConfig = &tls.Config{InsecureSkipVerify: config.SkipTls}
68 |
69 | if err := d.DialAndSend(m); err != nil {
70 | Error("Error could not send email : %v", err)
71 | return err
72 | }
73 | Info("Email notification has been sent")
74 | return nil
75 |
76 | }
77 | func sendMessage(msg string) error {
78 |
79 | Info("Sending Telegram notification... ")
80 | chatId := os.Getenv("TG_CHAT_ID")
81 | body, _ := json.Marshal(map[string]string{
82 | "chat_id": chatId,
83 | "text": msg,
84 | })
85 | url := fmt.Sprintf("%s/sendMessage", getTgUrl())
86 | // Create an HTTP post request
87 | request, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
88 | if err != nil {
89 | panic(err)
90 | }
91 | request.Header.Add("Content-Type", "application/json")
92 | client := &http.Client{}
93 | response, err := client.Do(request)
94 | if err != nil {
95 | return err
96 | }
97 | code := response.StatusCode
98 | if code == 200 {
99 | Info("Telegram notification has been sent")
100 | return nil
101 | } else {
102 | body, _ := io.ReadAll(response.Body)
103 | Error("Error could not send message, error: %s", string(body))
104 | return fmt.Errorf("error could not send message %s", string(body))
105 | }
106 |
107 | }
108 | func NotifySuccess(notificationData *NotificationData) {
109 | notificationData.BackupReference = backupReference()
110 | // Email notification
111 | err := CheckEnvVars(mailVars)
112 | if err == nil {
113 | body, err := parseTemplate(*notificationData, "email.tmpl")
114 | if err != nil {
115 | Error("Could not parse email template: %v", err)
116 | }
117 | err = SendEmail(fmt.Sprintf("✅ Database Backup Notification – %s", notificationData.Database), body)
118 | if err != nil {
119 | Error("Could not send email: %v", err)
120 | }
121 | }
122 | // Telegram notification
123 | err = CheckEnvVars(vars)
124 | if err == nil {
125 | message, err := parseTemplate(*notificationData, "telegram.tmpl")
126 | if err != nil {
127 | Error("Could not parse telegram template: %v", err)
128 | }
129 |
130 | err = sendMessage(message)
131 | if err != nil {
132 | Error("Could not send Telegram message: %v", err)
133 | }
134 | }
135 | }
136 | func NotifyError(error string) {
137 |
138 | // Email notification
139 | err := CheckEnvVars(mailVars)
140 | if err == nil {
141 | body, err := parseTemplate(ErrorMessage{
142 | Error: error,
143 | EndTime: time.Now().Format(TimeFormat()),
144 | BackupReference: os.Getenv("BACKUP_REFERENCE"),
145 | DatabaseName: DatabaseName,
146 | }, "email-error.tmpl")
147 | if err != nil {
148 | Error("Could not parse error template: %v", err)
149 | }
150 | err = SendEmail("🔴 Urgent: Database Backup Failure Notification", body)
151 | if err != nil {
152 | Error("Could not send email: %v", err)
153 | }
154 | }
155 | // Telegram notification
156 | err = CheckEnvVars(vars)
157 | if err == nil {
158 | message, err := parseTemplate(ErrorMessage{
159 | Error: error,
160 | EndTime: time.Now().Format(TimeFormat()),
161 | BackupReference: os.Getenv("BACKUP_REFERENCE"),
162 | DatabaseName: DatabaseName,
163 | }, "telegram-error.tmpl")
164 | if err != nil {
165 | Error("Could not parse error template: %v", err)
166 |
167 | }
168 |
169 | err = sendMessage(message)
170 | if err != nil {
171 | Error("Could not send telegram message: %v", err)
172 | }
173 | }
174 | }
175 |
176 | func getTgUrl() string {
177 | return fmt.Sprintf("https://api.telegram.org/bot%s", os.Getenv("TG_TOKEN"))
178 |
179 | }
180 |
--------------------------------------------------------------------------------
/docs/how-tos/receive-notification.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Receive notifications
3 | layout: default
4 | parent: How Tos
5 | nav_order: 13
6 | ---
7 |
8 | # Receive Notifications
9 |
10 | You can configure the system to send email or Telegram notifications when a backup succeeds or fails.
11 |
12 | This section explains how to set up and customize notifications.
13 |
14 | ---
15 |
16 | ## Email Notifications
17 |
18 | To send email notifications, provide SMTP credentials, a sender address, and recipient addresses. Notifications will be sent for both successful and failed backup runs.
19 |
20 | ### Example: Email Notification Configuration
21 |
22 | ```yaml
23 | services:
24 | mysql-bkup:
25 | image: jkaninda/mysql-bkup
26 | container_name: mysql-bkup
27 | command: backup
28 | volumes:
29 | - ./backup:/backup
30 | environment:
31 | - DB_PORT=3306
32 | - DB_HOST=mysql
33 | - DB_NAME=database
34 | - DB_USERNAME=username
35 | - DB_PASSWORD=password
36 | ## SMTP Configuration
37 | - MAIL_HOST=smtp.example.com
38 | - MAIL_PORT=587
39 | - MAIL_USERNAME=your-email@example.com
40 | - MAIL_PASSWORD=your-email-password
41 | - MAIL_FROM=Backup Jobs
42 | ## Multiple recipients separated by a comma
43 | - MAIL_TO=me@example.com,team@example.com,manager@example.com
44 | - MAIL_SKIP_TLS=false
45 | ## Time format for notifications
46 | - TIME_FORMAT=2006-01-02 at 15:04:05
47 | ## Backup reference (e.g., database/cluster name or server name)
48 | - BACKUP_REFERENCE=database/Paris cluster
49 | networks:
50 | - web
51 |
52 | networks:
53 | web:
54 | ```
55 |
56 | ---
57 |
58 | ## Telegram Notifications
59 |
60 | To send Telegram notifications, provide your bot token and chat ID. Notifications will be sent for both successful and failed backup runs.
61 |
62 | ### Example: Telegram Notification Configuration
63 |
64 | ```yaml
65 | services:
66 | mysql-bkup:
67 | image: jkaninda/mysql-bkup
68 | container_name: mysql-bkup
69 | command: backup
70 | volumes:
71 | - ./backup:/backup
72 | environment:
73 | - DB_PORT=3306
74 | - DB_HOST=mysql
75 | - DB_NAME=database
76 | - DB_USERNAME=username
77 | - DB_PASSWORD=password
78 | ## Telegram Configuration
79 | - TG_TOKEN=[BOT ID]:[BOT TOKEN]
80 | - TG_CHAT_ID=your-chat-id
81 | ## Time format for notifications
82 | - TIME_FORMAT=2006-01-02 at 15:04:05
83 | ## Backup reference (e.g., database/cluster name or server name)
84 | - BACKUP_REFERENCE=database/Paris cluster
85 | networks:
86 | - web
87 |
88 | networks:
89 | web:
90 | ```
91 |
92 | ---
93 |
94 | ## Customize Notifications
95 |
96 | You can customize the title and body of notifications using Go templates. Template files must be mounted inside the container at `/config/templates`. The following templates are supported:
97 |
98 | - `email.tmpl`: Template for successful email notifications.
99 | - `telegram.tmpl`: Template for successful Telegram notifications.
100 | - `email-error.tmpl`: Template for failed email notifications.
101 | - `telegram-error.tmpl`: Template for failed Telegram notifications.
102 |
103 | ### Template Data
104 |
105 | The following data is passed to the templates:
106 |
107 | - `Database`: Database name.
108 | - `StartTime`: Backup start time.
109 | - `EndTime`: Backup end time.
110 | - `Storage`: Backup storage type (e.g., local, S3, SSH).
111 | - `BackupLocation`: Backup file location.
112 | - `BackupSize`: Backup file size in bytes.
113 | - `BackupReference`: Backup reference (e.g., database/cluster name or server name).
114 | - `Error`: Error message (only for error templates).
115 |
116 | ---
117 |
118 | ### Example Templates
119 |
120 | #### `email.tmpl` (Successful Backup)
121 |
122 | ```html
123 | Hi,
124 | Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.
125 | Backup Details:
126 |
127 | - Database Name: {{.Database}}
128 | - Backup Start Time: {{.StartTime}}
129 | - Backup End Time: {{.EndTime}}
130 | - Backup Storage: {{.Storage}}
131 | - Backup Location: {{.BackupLocation}}
132 | - Backup Size: {{.BackupSize}} bytes
133 | - Backup Reference: {{.BackupReference}}
134 |
135 | Best regards,
136 | ```
137 |
138 | #### `telegram.tmpl` (Successful Backup)
139 |
140 | ```html
141 | ✅ Database Backup Notification – {{.Database}}
142 | Hi,
143 | Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.
144 |
145 | Backup Details:
146 | - Database Name: {{.Database}}
147 | - Backup Start Time: {{.StartTime}}
148 | - Backup End Time: {{.EndTime}}
149 | - Backup Storage: {{.Storage}}
150 | - Backup Location: {{.BackupLocation}}
151 | - Backup Size: {{.BackupSize}} bytes
152 | - Backup Reference: {{.BackupReference}}
153 | ```
154 |
155 | #### `email-error.tmpl` (Failed Backup)
156 |
157 | ```html
158 |
159 |
160 |
161 |
162 | 🔴 Urgent: Database Backup Failure Notification
163 |
164 |
165 | Hi,
166 | An error occurred during database backup.
167 | Failure Details:
168 |
169 | - Error Message: {{.Error}}
170 | - Date: {{.EndTime}}
171 | - Backup Reference: {{.BackupReference}}
172 |
173 |
174 |
175 | ```
176 |
177 | #### `telegram-error.tmpl` (Failed Backup)
178 |
179 | ```html
180 | 🔴 Urgent: Database Backup Failure Notification
181 |
182 | An error occurred during database backup.
183 | Failure Details:
184 |
185 | Error Message: {{.Error}}
186 | Date: {{.EndTime}}
187 | Backup Reference: {{.BackupReference}}
188 | ```
189 |
190 | ---
191 |
192 | ## Key Notes
193 |
194 | - **SMTP Configuration**: Ensure your SMTP server supports TLS unless `MAIL_SKIP_TLS` is set to `true`.
195 | - **Telegram Configuration**: Obtain your bot token and chat ID from Telegram.
196 | - **Custom Templates**: Mount custom templates to `/config/templates` to override default notifications.
197 | - **Time Format**: Use the `TIME_FORMAT` environment variable to customize the timestamp format in notifications.
--------------------------------------------------------------------------------
/docs/quickstart/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Quickstart
3 | layout: home
4 | nav_order: 2
5 | ---
6 |
7 | # Quickstart
8 |
9 | This guide provides quick examples for running backups using Docker CLI, Docker Compose, and Kubernetes.
10 |
11 | ---
12 |
13 | ### Simple Backup Using Docker CLI
14 |
15 | To perform a one-time backup, bind your local volume to `/backup` in the container and run the `backup` command:
16 |
17 | ```shell
18 | docker run --rm --network your_network_name \
19 | -v $PWD/backup:/backup/ \
20 | -e "DB_HOST=dbhost" \
21 | -e "DB_PORT=3306" \
22 | -e "DB_USERNAME=username" \
23 | -e "DB_PASSWORD=password" \
24 | jkaninda/mysql-bkup backup -d database_name
25 | ```
26 |
27 | Alternatively, use an environment file (`--env-file`) for configuration:
28 |
29 | ```shell
30 | docker run --rm --network your_network_name \
31 | --env-file your-env-file \
32 | -v $PWD/backup:/backup/ \
33 | jkaninda/mysql-bkup backup -d database_name
34 | ```
35 |
36 | ### Backup All Databases
37 |
38 | To back up all databases on the server, use the `--all-databases` or `-a` flag. By default, this creates individual backup files for each database.
39 |
40 | ```shell
41 | docker run --rm --network your_network_name \
42 | -v $PWD/backup:/backup/ \
43 | -e "DB_HOST=dbhost" \
44 | -e "DB_PORT=3306" \
45 | -e "DB_USERNAME=username" \
46 | -e "DB_PASSWORD=password" \
47 | jkaninda/mysql-bkup backup --all-databases --disable-compression
48 | ```
49 |
50 | > **Note:** Use the `--all-in-one` or `-A` flag to combine backups into a single file.
51 |
52 | ---
53 |
54 | ### Simple Restore Using Docker CLI
55 |
56 | To restore a database, bind your local volume to `/backup` and run the `restore` command:
57 |
58 | ```shell
59 | docker run --rm --network your_network_name \
60 | -v $PWD/backup:/backup/ \
61 | -e "DB_HOST=dbhost" \
62 | -e "DB_PORT=3306" \
63 | -e "DB_USERNAME=username" \
64 | -e "DB_PASSWORD=password" \
65 | jkaninda/mysql-bkup restore -d database_name -f backup_file.sql.gz
66 | ```
67 |
68 | ---
69 |
70 | ### Backup with Docker Compose
71 |
72 | Below is an example of a `docker-compose.yml` file for running a one-time backup:
73 |
74 | ```yaml
75 | services:
  mysql-bkup:
77 | # In production, pin your image tag to a specific release version instead of `latest`.
78 | # See available releases: https://github.com/jkaninda/mysql-bkup/releases
79 | image: jkaninda/mysql-bkup
80 | container_name: mysql-bkup
81 | command: backup
82 | volumes:
83 | - ./backup:/backup
84 | environment:
85 | - DB_PORT=3306
86 | - DB_HOST=mysql
87 | - DB_NAME=foo
88 | - DB_USERNAME=bar
89 | - DB_PASSWORD=password
90 | - TZ=Europe/Paris
91 | networks:
92 | - web
93 |
94 | networks:
95 | web:
96 | ```
97 |
98 | ---
99 |
100 | ### Recurring Backups with Docker
101 |
102 | You can schedule recurring backups using the `--cron-expression` or `-e` flag:
103 |
104 | ```shell
105 | docker run --rm --network network_name \
106 | -v $PWD/backup:/backup/ \
107 | -e "DB_HOST=hostname" \
108 | -e "DB_USERNAME=user" \
109 | -e "DB_PASSWORD=password" \
110 | jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m"
111 | ```
112 |
113 | For predefined schedules, refer to the [documentation](https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules).
114 |
115 | ---
116 |
117 | ## Deploy on Kubernetes
118 |
119 | For Kubernetes, you can deploy `mysql-bkup` as a Job or CronJob. Below are examples for both.
120 |
121 | ### Kubernetes Backup Job
122 |
123 | This example defines a one-time backup job:
124 |
125 | ```yaml
126 | apiVersion: batch/v1
127 | kind: Job
128 | metadata:
129 | name: backup-job
130 | spec:
131 | ttlSecondsAfterFinished: 100
132 | template:
133 | spec:
134 | containers:
135 | - name: mysql-bkup
136 | # Pin the image tag to a specific release version in production.
137 | # See available releases: https://github.com/jkaninda/mysql-bkup/releases
138 | image: jkaninda/mysql-bkup
139 | command:
140 | - /bin/sh
141 | - -c
142 | - backup -d dbname
143 | resources:
144 | limits:
145 | memory: "128Mi"
146 | cpu: "500m"
147 | env:
148 | - name: DB_HOST
149 | value: "mysql"
150 | - name: DB_USERNAME
151 | value: "user"
152 | - name: DB_PASSWORD
153 | value: "password"
154 | volumeMounts:
155 | - mountPath: /backup
156 | name: backup
157 | volumes:
158 | - name: backup
159 | hostPath:
160 | path: /home/toto/backup # Directory location on the host
161 | type: Directory # Optional field
162 | restartPolicy: Never
163 | ```
164 |
165 | ### Kubernetes CronJob for Scheduled Backups
166 |
167 | For scheduled backups, use a `CronJob`:
168 |
169 | ```yaml
170 | apiVersion: batch/v1
171 | kind: CronJob
172 | metadata:
  name: mysql-bkup-cronjob
174 | spec:
175 | schedule: "0 2 * * *" # Runs daily at 2 AM
176 | jobTemplate:
177 | spec:
178 | template:
179 | spec:
180 | containers:
          - name: mysql-bkup
182 | image: jkaninda/mysql-bkup
183 | command:
184 | - /bin/sh
185 | - -c
186 | - backup -d dbname
187 | env:
188 | - name: DB_HOST
189 | value: "mysql"
190 | - name: DB_USERNAME
191 | value: "user"
192 | - name: DB_PASSWORD
193 | value: "password"
194 | volumeMounts:
195 | - mountPath: /backup
196 | name: backup
197 | volumes:
198 | - name: backup
199 | hostPath:
200 | path: /home/toto/backup
201 | type: Directory
202 | restartPolicy: OnFailure
203 | ```
204 |
205 | ---
206 |
207 | ## Key Notes
208 |
209 | - **Volume Binding**: Ensure the `/backup` directory is mounted to persist backup files.
210 | - **Environment Variables**: Use environment variables or an `--env-file` to pass database credentials and other configurations.
211 | - **Cron Expressions**: Use standard cron expressions or predefined schedules for recurring backups.
212 | - **Kubernetes Jobs**: Use Kubernetes `Job` or `CronJob` for running backups in a Kubernetes cluster.
--------------------------------------------------------------------------------
/pkg/helper.go:
--------------------------------------------------------------------------------
1 | /*
2 | MIT License
3 |
4 | Copyright (c) 2023 Jonas Kaninda
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | */
24 |
25 | package pkg
26 |
27 | import (
28 | "bytes"
29 | "errors"
30 | "fmt"
31 | goutils "github.com/jkaninda/go-utils"
32 | "github.com/jkaninda/mysql-bkup/utils"
33 | "gopkg.in/yaml.v3"
34 | "os"
35 | "os/exec"
36 | "path/filepath"
37 | "strings"
38 | )
39 |
40 | func intro() {
41 | fmt.Println("Starting MYSQL-BKUP...")
42 | fmt.Printf("Version: %s\n", utils.Version)
43 | fmt.Println("Copyright (c) 2024 Jonas Kaninda")
44 | }
45 |
46 | // copyToTmp copy file to temporary directory
47 | func deleteTemp() {
48 | utils.Info("Deleting %s ...", tmpPath)
49 | err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
50 | if err != nil {
51 | return err
52 | }
53 | // Check if the current item is a file
54 | if !info.IsDir() {
55 | // Delete the file
56 | err = os.Remove(path)
57 | if err != nil {
58 | return err
59 | }
60 | }
61 | return nil
62 | })
63 | if err != nil {
64 | utils.Error("Error deleting files: %v", err)
65 | } else {
66 | utils.Info("Deleting %s ... done", tmpPath)
67 | }
68 | }
69 |
70 | // TestDatabaseConnection tests the database connection
71 | func testDatabaseConnection(db *dbConfig) error {
72 | // Create the mysql client config file
73 | if err := createMysqlClientConfigFile(*db); err != nil {
74 | return errors.New(err.Error())
75 | }
76 | utils.Info("Connecting to %s database ...", db.dbName)
77 | // Set database name for notification error
78 | utils.DatabaseName = db.dbName
79 |
80 | // Prepare the command to test the database connection
81 | cmd := exec.Command("mariadb", fmt.Sprintf("--defaults-file=%s", mysqlClientConfig), db.dbName, "-e", "quit")
82 | // Capture the output
83 | var out bytes.Buffer
84 | cmd.Stdout = &out
85 | cmd.Stderr = &out
86 |
87 | // Run the command
88 | if err := cmd.Run(); err != nil {
89 | return fmt.Errorf("failed to connect to database %s: %v, output: %s", db.dbName, err, out.String())
90 | }
91 |
92 | utils.Info("Successfully connected to %s database", db.dbName)
93 | return nil
94 | }
95 |
96 | // checkPubKeyFile checks gpg public key
97 | func checkPubKeyFile(pubKey string) (string, error) {
98 | // Define possible key file names
99 | keyFiles := []string{filepath.Join(gpgHome, "public_key.asc"), filepath.Join(gpgHome, "public_key.gpg"), pubKey}
100 |
101 | // Loop through key file names and check if they exist
102 | for _, keyFile := range keyFiles {
103 | if _, err := os.Stat(keyFile); err == nil {
104 | // File exists
105 | return keyFile, nil
106 | } else if os.IsNotExist(err) {
107 | // File does not exist, continue to the next one
108 | continue
109 | } else {
110 | // An unexpected error occurred
111 | return "", err
112 | }
113 | }
114 |
115 | // Return an error if neither file exists
116 | return "", fmt.Errorf("no public key file found")
117 | }
118 |
119 | // checkPrKeyFile checks private key
120 | func checkPrKeyFile(prKey string) (string, error) {
121 | // Define possible key file names
122 | keyFiles := []string{filepath.Join(gpgHome, "private_key.asc"), filepath.Join(gpgHome, "private_key.gpg"), prKey}
123 |
124 | // Loop through key file names and check if they exist
125 | for _, keyFile := range keyFiles {
126 | if _, err := os.Stat(keyFile); err == nil {
127 | // File exists
128 | return keyFile, nil
129 | } else if os.IsNotExist(err) {
130 | // File does not exist, continue to the next one
131 | continue
132 | } else {
133 | // An unexpected error occurred
134 | return "", err
135 | }
136 | }
137 |
138 | // Return an error if neither file exists
139 | return "", fmt.Errorf("no public key file found")
140 | }
141 |
142 | // readConf reads config file and returns Config
143 | func readConf(configFile string) (*Config, error) {
144 | if utils.FileExists(configFile) {
145 | buf, err := os.ReadFile(configFile)
146 | if err != nil {
147 | return nil, err
148 | }
149 |
150 | c := &Config{}
151 | err = yaml.Unmarshal(buf, c)
152 | if err != nil {
153 | return nil, fmt.Errorf("in file %q: %w", configFile, err)
154 | }
155 |
156 | return c, err
157 | }
158 | return nil, fmt.Errorf("config file %q not found", configFile)
159 | }
160 |
161 | // checkConfigFile checks config files and returns one config file
162 | func checkConfigFile(filePath string) (string, error) {
163 | // Remove the quotes
164 | filePath = strings.Trim(filePath, `"`)
165 | // Define possible config file names
166 | configFiles := []string{filepath.Join(workingDir, "config.yaml"), filepath.Join(workingDir, "config.yml"), filePath}
167 |
168 | // Loop through config file names and check if they exist
169 | for _, configFile := range configFiles {
170 | if _, err := os.Stat(configFile); err == nil {
171 | // File exists
172 | return configFile, nil
173 | } else if os.IsNotExist(err) {
174 | // File does not exist, continue to the next one
175 | continue
176 | } else {
177 | // An unexpected error occurred
178 | return "", err
179 | }
180 | }
181 |
182 | // Return an error if neither file exists
183 | return "", fmt.Errorf("no config file found")
184 | }
// RemoveLastExtension strips the final ".ext" suffix from filename, if any.
// "a.tar.gz" becomes "a.tar"; a name without a dot is returned unchanged.
func RemoveLastExtension(filename string) string {
	idx := strings.LastIndex(filename, ".")
	if idx == -1 {
		return filename
	}
	return filename[:idx]
}
191 |
192 | // Create mysql client config file
193 | func createMysqlClientConfigFile(db dbConfig) error {
194 | caCertPath := goutils.GetStringEnvWithDefault("DB_SSL_CA", "/etc/ssl/certs/ca-certificates.crt")
195 | sslMode := goutils.GetStringEnvWithDefault("DB_SSL_MODE", "0")
196 | // Create the mysql client config file
197 | mysqlClientConfigFile := filepath.Join(tmpPath, "my.cnf")
198 | mysqlCl := fmt.Sprintf("[client]\nhost=%s\nport=%s\nuser=%s\npassword=%s\nssl-ca=%s\nssl=%s\n", db.dbHost, db.dbPort, db.dbUserName, db.dbPassword, caCertPath, sslMode)
199 | if err := os.WriteFile(mysqlClientConfigFile, []byte(mysqlCl), 0644); err != nil {
200 | return fmt.Errorf("failed to create mysql client config file: %v", err)
201 | }
202 | return nil
203 | }
204 |
--------------------------------------------------------------------------------
/utils/utils.go:
--------------------------------------------------------------------------------
1 | /*
2 | MIT License
3 |
4 | Copyright (c) 2023 Jonas Kaninda
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | */
24 |
25 | package utils
26 |
27 | import (
28 | "fmt"
29 | "github.com/robfig/cron/v3"
30 | "github.com/spf13/cobra"
31 | "io"
32 | "io/fs"
33 | "os"
34 | "strconv"
35 | "time"
36 | )
37 |
38 | var Version = ""
39 |
40 | // FileExists checks if the file does exist
41 | func FileExists(filename string) bool {
42 | info, err := os.Stat(filename)
43 | if os.IsNotExist(err) {
44 | return false
45 | }
46 | return !info.IsDir()
47 | }
48 |
49 | func WriteToFile(filePath, content string) error {
50 | file, err := os.Create(filePath)
51 | if err != nil {
52 | return err
53 | }
54 | defer func(file *os.File) {
55 | err := file.Close()
56 | if err != nil {
57 | return
58 |
59 | }
60 | }(file)
61 |
62 | _, err = file.WriteString(content)
63 | return err
64 | }
65 | func DeleteFile(filePath string) error {
66 | err := os.Remove(filePath)
67 | if err != nil {
68 | return fmt.Errorf("failed to delete file: %v", err)
69 | }
70 | return nil
71 | }
72 | func CopyFile(src, dst string) error {
73 | // Open the source file for reading
74 | sourceFile, err := os.Open(src)
75 | if err != nil {
76 | return fmt.Errorf("failed to open source file: %v", err)
77 | }
78 | defer func(sourceFile *os.File) {
79 | err := sourceFile.Close()
80 | if err != nil {
81 | return
82 | }
83 | }(sourceFile)
84 |
85 | // Create the destination file
86 | destinationFile, err := os.Create(dst)
87 | if err != nil {
88 | return fmt.Errorf("failed to create destination file: %v", err)
89 | }
90 | defer func(destinationFile *os.File) {
91 | err := destinationFile.Close()
92 | if err != nil {
93 | return
94 |
95 | }
96 | }(destinationFile)
97 |
98 | // Copy the content from source to destination
99 | _, err = io.Copy(destinationFile, sourceFile)
100 | if err != nil {
101 | return fmt.Errorf("failed to copy file: %v", err)
102 | }
103 |
104 | // Flush the buffer to ensure all data is written
105 | err = destinationFile.Sync()
106 | if err != nil {
107 | return fmt.Errorf("failed to sync destination file: %v", err)
108 | }
109 |
110 | return nil
111 | }
112 | func ChangePermission(filePath string, mod int) {
113 | if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil {
114 | Fatal("Error changing permissions of %s: %v\n", filePath, err)
115 | }
116 |
117 | }
118 | func IsDirEmpty(name string) (bool, error) {
119 | f, err := os.Open(name)
120 | if err != nil {
121 | return false, err
122 | }
123 | defer func(f *os.File) {
124 | err := f.Close()
125 | if err != nil {
126 | return
127 | }
128 | }(f)
129 |
130 | _, err = f.Readdirnames(1)
131 | if err == nil {
132 | return false, nil
133 | }
134 | return true, nil
135 | }
136 |
137 | func GetEnv(cmd *cobra.Command, flagName, envName string) string {
138 | value, _ := cmd.Flags().GetString(flagName)
139 | if value != "" {
140 | err := os.Setenv(envName, value)
141 | if err != nil {
142 | return value
143 | }
144 | }
145 | return os.Getenv(envName)
146 | }
147 | func FlagGetString(cmd *cobra.Command, flagName string) string {
148 | value, _ := cmd.Flags().GetString(flagName)
149 | if value != "" {
150 | return value
151 |
152 | }
153 | return ""
154 | }
155 | func FlagGetBool(cmd *cobra.Command, flagName string) bool {
156 | value, _ := cmd.Flags().GetBool(flagName)
157 | return value
158 | }
159 |
160 | func SetEnv(key, value string) {
161 |
162 | err := os.Setenv(key, value)
163 | if err != nil {
164 | return
165 | }
166 | }
167 | func GetEnvVariable(envName, oldEnvName string) string {
168 | value := os.Getenv(envName)
169 | if value == "" {
170 | value = os.Getenv(oldEnvName)
171 | if value != "" {
172 | err := os.Setenv(envName, value)
173 | if err != nil {
174 | return value
175 | }
176 | Warn("%s is deprecated, please use %s instead! ", oldEnvName, envName)
177 | }
178 | }
179 | return value
180 | }
181 |
182 | // CheckEnvVars checks if all the specified environment variables are set
183 | func CheckEnvVars(vars []string) error {
184 | missingVars := []string{}
185 |
186 | for _, v := range vars {
187 | if os.Getenv(v) == "" {
188 | missingVars = append(missingVars, v)
189 | }
190 | }
191 |
192 | if len(missingVars) > 0 {
193 | return fmt.Errorf("missing environment variables: %v", missingVars)
194 | }
195 |
196 | return nil
197 | }
198 |
199 | // MakeDir create directory
200 | func MakeDir(dirPath string) error {
201 | err := os.Mkdir(dirPath, 0700)
202 | if err != nil {
203 | return err
204 | }
205 | return nil
206 | }
207 |
208 | // MakeDirAll create directory
209 | func MakeDirAll(dirPath string) error {
210 | err := os.MkdirAll(dirPath, 0700)
211 | if err != nil {
212 | return err
213 | }
214 | return nil
215 | }
216 | func GetIntEnv(envName string) int {
217 | val := os.Getenv(envName)
218 | if val == "" {
219 | return 0
220 | }
221 | ret, err := strconv.Atoi(val)
222 | if err != nil {
223 | Error("Error: %v", err)
224 | }
225 | return ret
226 | }
227 |
228 | func EnvWithDefault(envName string, defaultValue string) string {
229 | value := os.Getenv(envName)
230 | if value == "" {
231 | return defaultValue
232 | }
233 | return value
234 | }
235 |
236 | // IsValidCronExpression verify cronExpression and returns boolean
237 | func IsValidCronExpression(cronExpr string) bool {
238 | // Parse the cron expression
239 | _, err := cron.ParseStandard(cronExpr)
240 | return err == nil
241 | }
242 |
243 | // CronNextTime returns cronExpression next time
244 | func CronNextTime(cronExpr string) time.Time {
245 | // Parse the cron expression
246 | schedule, err := cron.ParseStandard(cronExpr)
247 | if err != nil {
248 | Error("Error parsing cron expression: %s", err)
249 | return time.Time{}
250 | }
251 | // Get the current time
252 | now := time.Now()
253 | // Get the next scheduled time
254 | next := schedule.Next(now)
255 | return next
256 | }
257 |
// ConvertBytes renders a byte count as a human-readable string: raw bytes
// below 1 MiB, otherwise "X.XX MiB" or "X.XX GiB".
func ConvertBytes(bytes uint64) string {
	const (
		mib uint64 = 1 << 20
		gib uint64 = 1 << 30
	)
	if bytes >= gib {
		return fmt.Sprintf("%.2f GiB", float64(bytes)/float64(gib))
	}
	if bytes >= mib {
		return fmt.Sprintf("%.2f MiB", float64(bytes)/float64(mib))
	}
	return fmt.Sprintf("%d bytes", bytes)
}
273 |
--------------------------------------------------------------------------------
/pkg/remote.go:
--------------------------------------------------------------------------------
1 | /*
2 | MIT License
3 |
4 | Copyright (c) 2023 Jonas Kaninda
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 | */
24 |
25 | package pkg
26 |
27 | import (
28 | "fmt"
29 | "github.com/jkaninda/go-storage/pkg/ftp"
30 | "github.com/jkaninda/go-storage/pkg/ssh"
31 | goutils "github.com/jkaninda/go-utils"
32 | "github.com/jkaninda/mysql-bkup/utils"
33 |
34 | "os"
35 | "path/filepath"
36 | "time"
37 | )
38 |
// sshBackup dumps the database, optionally encrypts the archive, uploads it
// to the configured remote SSH/SFTP server, optionally prunes old remote
// backups, then sends a success notification and clears the temp directory.
// Failures after the dump are fatal (process exits via utils.Fatal).
func sshBackup(db *dbConfig, config *BackupConfig) {
	utils.Info("Backup database to Remote server")
	// Backup database
	err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
	if err != nil {
		recoverMode(err, "Error backing up database")
		return
	}
	finalFileName := config.backupFileName
	if config.encryption {
		// Encryption produces a new artifact with a ".gpg" suffix; upload that one.
		encryptBackup(config)
		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
	}
	utils.Info("Uploading backup archive to remote storage ... ")
	sshConfig, err := loadSSHConfig()
	if err != nil {
		utils.Fatal("Error loading ssh config: %s", err)
	}

	sshStorage, err := ssh.NewStorage(ssh.Config{
		Host:         sshConfig.hostName,
		Port:         sshConfig.port,
		User:         sshConfig.user,
		Password:     sshConfig.password,
		IdentifyFile: sshConfig.identifyFile,
		RemotePath:   config.remotePath,
		LocalPath:    tmpPath,
	})
	if err != nil {
		utils.Fatal("Error creating SSH storage: %s", err)
	}
	err = sshStorage.Copy(finalFileName)
	if err != nil {
		utils.Fatal("Error copying backup file: %s", err)
	}
	// Get backup info (size is read from the local copy before it is deleted)
	fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
	if err != nil {
		utils.Error("Error: %s", err)
	}
	backupSize = fileInfo.Size()
	utils.Info("Backup name is %s", finalFileName)
	utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
	utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))

	// Delete backup file from tmp folder
	err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
	if err != nil {
		utils.Error("Error deleting file: %v", err)

	}
	if config.prune {
		// Remove remote backups older than the configured retention window.
		err := sshStorage.Prune(config.backupRetention)
		if err != nil {
			utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
		}

	}
	utils.Info("Uploading backup archive to remote storage ... done ")
	duration := goutils.FormatDuration(time.Since(startTime), 0)

	// Send notification
	utils.NotifySuccess(&utils.NotificationData{
		File:           finalFileName,
		BackupSize:     utils.ConvertBytes(uint64(backupSize)),
		Database:       db.dbName,
		Storage:        config.storage,
		BackupLocation: filepath.Join(config.remotePath, finalFileName),
		Duration:       duration,
	})
	// Delete temp
	deleteTemp()
	utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)

}
114 | func remoteRestore(db *dbConfig, conf *RestoreConfig) {
115 | utils.Info("Restore database from remote server")
116 | sshConfig, err := loadSSHConfig()
117 | if err != nil {
118 | utils.Fatal("Error loading ssh config: %s", err)
119 | }
120 |
121 | sshStorage, err := ssh.NewStorage(ssh.Config{
122 | Host: sshConfig.hostName,
123 | Port: sshConfig.port,
124 | User: sshConfig.user,
125 | Password: sshConfig.password,
126 | IdentifyFile: sshConfig.identifyFile,
127 | RemotePath: conf.remotePath,
128 | LocalPath: tmpPath,
129 | })
130 | if err != nil {
131 | utils.Fatal("Error creating SSH storage: %s", err)
132 | }
133 | err = sshStorage.CopyFrom(conf.file)
134 | if err != nil {
135 | utils.Fatal("Error copying backup file: %s", err)
136 | }
137 | RestoreDatabase(db, conf)
138 | }
139 | func ftpRestore(db *dbConfig, conf *RestoreConfig) {
140 | utils.Info("Restore database from FTP server")
141 | ftpConfig := loadFtpConfig()
142 | ftpStorage, err := ftp.NewStorage(ftp.Config{
143 | Host: ftpConfig.host,
144 | Port: ftpConfig.port,
145 | User: ftpConfig.user,
146 | Password: ftpConfig.password,
147 | RemotePath: conf.remotePath,
148 | LocalPath: tmpPath,
149 | })
150 | if err != nil {
151 | utils.Fatal("Error creating SSH storage: %s", err)
152 | }
153 | err = ftpStorage.CopyFrom(conf.file)
154 | if err != nil {
155 | utils.Fatal("Error copying backup file: %s", err)
156 | }
157 | RestoreDatabase(db, conf)
158 | }
159 | func ftpBackup(db *dbConfig, config *BackupConfig) {
160 | utils.Info("Backup database to the remote FTP server")
161 |
162 | // Backup database
163 | err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
164 | if err != nil {
165 | recoverMode(err, "Error backing up database")
166 | return
167 | }
168 | finalFileName := config.backupFileName
169 | if config.encryption {
170 | encryptBackup(config)
171 | finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
172 | }
173 | utils.Info("Uploading backup archive to the remote FTP server ... ")
174 | utils.Info("Backup name is %s", finalFileName)
175 | ftpConfig := loadFtpConfig()
176 | ftpStorage, err := ftp.NewStorage(ftp.Config{
177 | Host: ftpConfig.host,
178 | Port: ftpConfig.port,
179 | User: ftpConfig.user,
180 | Password: ftpConfig.password,
181 | RemotePath: config.remotePath,
182 | LocalPath: tmpPath,
183 | })
184 | if err != nil {
185 | utils.Fatal("Error creating SSH storage: %s", err)
186 | }
187 | err = ftpStorage.Copy(finalFileName)
188 | if err != nil {
189 | utils.Fatal("Error copying backup file: %s", err)
190 | }
191 | utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
192 | // Get backup info
193 | fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
194 | if err != nil {
195 | utils.Error("Error: %s", err)
196 | }
197 | backupSize = fileInfo.Size()
198 | // Delete backup file from tmp folder
199 | err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
200 | if err != nil {
201 | utils.Error("Error deleting file: %v", err)
202 |
203 | }
204 | if config.prune {
205 | err := ftpStorage.Prune(config.backupRetention)
206 | if err != nil {
207 | utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
208 | }
209 |
210 | }
211 | utils.Info("Backup name is %s", finalFileName)
212 | utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
213 | utils.Info("Uploading backup archive to the remote FTP server ... done ")
214 | duration := goutils.FormatDuration(time.Since(startTime), 0)
215 |
216 | // Send notification
217 | utils.NotifySuccess(&utils.NotificationData{
218 | File: finalFileName,
219 | BackupSize: utils.ConvertBytes(uint64(backupSize)),
220 | Database: db.dbName,
221 | Storage: config.storage,
222 | BackupLocation: filepath.Join(config.remotePath, finalFileName),
223 | Duration: duration,
224 | })
225 | // Delete temp
226 | deleteTemp()
227 | utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
228 | }
229 |
--------------------------------------------------------------------------------
/.github/workflows/tests.yml:
--------------------------------------------------------------------------------
1 | name: Tests
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | - nightly
8 | pull_request:
9 | branches:
10 | - main
11 | env:
12 | IMAGE_NAME: mysql-bkup
13 |
14 | jobs:
15 | test:
16 | runs-on: ubuntu-latest
17 | services:
18 | mysql:
19 | image: mysql:9
20 | env:
21 | MYSQL_ROOT_PASSWORD: password
22 | MYSQL_DATABASE: testdb
23 | MYSQL_USER: user
24 | MYSQL_PASSWORD: password
25 | ports:
26 | - 3306:3306
27 | options: >-
28 | --health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword"
29 | --health-interval=10s
30 | --health-timeout=5s
31 | --health-retries=5
32 | mysql8:
33 | image: mysql:8
34 | env:
35 | MYSQL_ROOT_PASSWORD: password
36 | MYSQL_DATABASE: testdb
37 | MYSQL_USER: user
38 | MYSQL_PASSWORD: password
39 | ports:
40 | - 3308:3306
41 | options: >-
42 | --health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword"
43 | --health-interval=10s
44 | --health-timeout=5s
45 | --health-retries=5
46 | mysql5:
47 | image: mysql:5
48 | env:
49 | MYSQL_ROOT_PASSWORD: password
50 | MYSQL_DATABASE: testdb
51 | MYSQL_USER: user
52 | MYSQL_PASSWORD: password
53 | ports:
54 | - 3305:3306
55 | options: >-
56 | --health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword"
57 | --health-interval=10s
58 | --health-timeout=5s
59 | --health-retries=5
60 | steps:
61 | - name: Checkout repository
62 | uses: actions/checkout@v4
63 |
64 | - name: Set up Docker Buildx
65 | uses: docker/setup-buildx-action@v3
66 | - name: Create Minio container
67 | run: |
68 | docker run -d --rm --name minio \
69 | --network host \
70 | -p 9000:9000 \
71 | -e MINIO_ACCESS_KEY=minioadmin \
72 | -e MINIO_SECRET_KEY=minioadmin \
73 | -e MINIO_REGION_NAME="eu" \
74 | minio/minio server /data
75 | echo "Create Minio container completed"
76 | - name: Install MinIO Client (mc)
77 | run: |
78 | curl -O https://dl.min.io/client/mc/release/linux-amd64/mc
79 | chmod +x mc
80 | sudo mv mc /usr/local/bin/
81 |
82 | - name: Wait for MinIO to be ready
83 | run: sleep 5
84 |
85 | - name: Configure MinIO Client
86 | run: |
87 | mc alias set local http://localhost:9000 minioadmin minioadmin
88 | mc alias list
89 |
90 | - name: Create MinIO Bucket
91 | run: |
92 | mc mb local/backups
93 | echo "Bucket backups created successfully."
94 | # Build the Docker image
95 | - name: Build Docker Image
96 | run: |
97 | docker buildx build --build-arg appVersion=test -t ${{ env.IMAGE_NAME }}:latest --load .
98 |
99 | - name: Verify Docker images
100 | run: |
101 | docker images
102 |
103 | - name: Wait for MySQL to be ready
104 | run: |
105 | docker run --rm --network host mysql:9 mysqladmin ping -h 127.0.0.1 -uuser -ppassword --wait
106 | - name: Test restore
107 | run: |
108 | docker run --rm --name ${{ env.IMAGE_NAME }} \
109 | -v ./migrations:/backup/ \
110 | --network host \
111 | -e DB_HOST=127.0.0.1 \
112 | -e DB_USERNAME=root \
113 | -e DB_PASSWORD=password \
114 | -e DB_NAME=testdb \
115 | ${{ env.IMAGE_NAME }}:latest restore -f init.sql
116 | echo "Database restore completed"
117 | - name: Test restore Mysql8
118 | run: |
119 | docker run --rm --name ${{ env.IMAGE_NAME }} \
120 | -v ./migrations:/backup/ \
121 | --network host \
122 | -e DB_HOST=127.0.0.1 \
123 | -e DB_PORT=3308 \
124 | -e DB_USERNAME=root \
125 | -e DB_PASSWORD=password \
126 | -e DB_NAME=testdb \
127 | ${{ env.IMAGE_NAME }}:latest restore -f init.sql
128 | echo "Test restore Mysql8 completed"
129 | - name: Test restore Mysql5
130 | run: |
131 | docker run --rm --name ${{ env.IMAGE_NAME }} \
132 | -v ./migrations:/backup/ \
133 | --network host \
134 | -e DB_HOST=127.0.0.1 \
135 | -e DB_PORT=3305 \
136 | -e DB_USERNAME=root \
137 | -e DB_PASSWORD=password \
138 | -e DB_NAME=testdb \
139 | ${{ env.IMAGE_NAME }}:latest restore -f init.sql
140 | echo "Test restore Mysql5 completed"
141 | - name: Test backup
142 | run: |
143 | docker run --rm --name ${{ env.IMAGE_NAME }} \
144 | -v ./migrations:/backup/ \
145 | --network host \
146 | -e DB_HOST=127.0.0.1 \
147 | -e DB_USERNAME=user \
148 | -e DB_PASSWORD=password \
149 | -e DB_NAME=testdb \
150 | ${{ env.IMAGE_NAME }}:latest backup
151 | echo "Database backup completed"
152 | - name: Test backup Mysql8
153 | run: |
154 | docker run --rm --name ${{ env.IMAGE_NAME }} \
155 | -v ./migrations:/backup/ \
156 | --network host \
157 | -e DB_PORT=3308 \
158 | -e DB_HOST=127.0.0.1 \
159 | -e DB_USERNAME=user \
160 | -e DB_PASSWORD=password \
161 | -e DB_NAME=testdb \
162 | ${{ env.IMAGE_NAME }}:latest backup
163 | echo "Test backup Mysql8 completed"
164 | - name: Test backup Mysql5
165 | run: |
166 | docker run --rm --name ${{ env.IMAGE_NAME }} \
167 | -v ./migrations:/backup/ \
168 | --network host \
169 | -e DB_PORT=3305 \
170 | -e DB_HOST=127.0.0.1 \
171 | -e DB_USERNAME=user \
172 | -e DB_PASSWORD=password \
173 | -e DB_NAME=testdb \
174 | ${{ env.IMAGE_NAME }}:latest backup
175 | echo "Test backup Mysql5 completed"
176 | - name: Test encrypted backup
177 | run: |
178 | docker run --rm --name ${{ env.IMAGE_NAME }} \
179 | -v ./migrations:/backup/ \
180 | --network host \
181 | -e DB_HOST=127.0.0.1 \
182 | -e DB_USERNAME=user \
183 | -e DB_PASSWORD=password \
184 | -e GPG_PASSPHRASE=password \
185 | ${{ env.IMAGE_NAME }}:latest backup -d testdb --disable-compression --custom-name encrypted-bkup
186 | echo "Database encrypted backup completed"
187 | - name: Test restore encrypted backup | testdb -> testdb2
188 | run: |
189 | docker run --rm --name ${{ env.IMAGE_NAME }} \
190 | -v ./migrations:/backup/ \
191 | --network host \
192 | -e DB_HOST=127.0.0.1 \
193 | -e DB_USERNAME=root \
194 | -e DB_PASSWORD=password \
195 | -e GPG_PASSPHRASE=password \
196 | -e DB_NAME=testdb2 \
197 | ${{ env.IMAGE_NAME }}:latest restore -f /backup/encrypted-bkup.sql.gpg
198 | echo "Test restore encrypted backup completed"
199 | - name: Test migrate database testdb -> testdb3
200 | run: |
201 | docker run --rm --name ${{ env.IMAGE_NAME }} \
202 | -v ./migrations:/backup/ \
203 | --network host \
204 | -e DB_HOST=127.0.0.1 \
205 | -e DB_USERNAME=root \
206 | -e DB_PASSWORD=password \
207 | -e GPG_PASSPHRASE=password \
208 | -e DB_NAME=testdb \
209 | -e TARGET_DB_HOST=127.0.0.1 \
210 | -e TARGET_DB_PORT=3306 \
211 | -e TARGET_DB_NAME=testdb3 \
212 | -e TARGET_DB_USERNAME=root \
213 | -e TARGET_DB_PASSWORD=password \
214 | ${{ env.IMAGE_NAME }}:latest migrate
215 | echo "Test migrate database testdb -> testdb3 completed"
216 | - name: Test backup all databases
217 | run: |
218 | docker run --rm --name ${{ env.IMAGE_NAME }} \
219 | -v ./migrations:/backup/ \
220 | --network host \
221 | -e DB_HOST=127.0.0.1 \
222 | -e DB_USERNAME=root \
223 | -e DB_PASSWORD=password \
224 | -e DB_NAME=testdb \
225 | ${{ env.IMAGE_NAME }}:latest backup --all-databases
226 | echo "Database backup completed"
227 | - name: Test multiple backup
228 | run: |
229 | docker run --rm --name ${{ env.IMAGE_NAME }} \
230 | -v ./migrations:/backup/ \
231 | --network host \
232 | -e DB_HOST=127.0.0.1 \
233 | -e TESTDB2_DB_USERNAME=root \
234 | -e TESTDB2_DB_PASSWORD=password \
235 | -e TESTDB2_DB_HOST=127.0.0.1 \
236 | ${{ env.IMAGE_NAME }}:latest backup -c /backup/test_config.yaml
237 | echo "Database backup completed"
238 | - name: Test backup Minio (s3)
239 | run: |
240 | docker run --rm --name ${{ env.IMAGE_NAME }} \
241 | --network host \
242 | -e DB_HOST=127.0.0.1 \
243 | -e DB_USERNAME=user \
244 | -e DB_PASSWORD=password \
245 | -e DB_NAME=testdb \
246 | -e AWS_S3_ENDPOINT="http://127.0.0.1:9000" \
247 | -e AWS_S3_BUCKET_NAME=backups \
248 | -e AWS_ACCESS_KEY=minioadmin \
249 | -e AWS_SECRET_KEY=minioadmin \
250 | -e AWS_DISABLE_SSL="true" \
251 | -e AWS_REGION="eu" \
252 | -e AWS_FORCE_PATH_STYLE="true" ${{ env.IMAGE_NAME }}:latest backup -s s3 --custom-name minio-backup
253 | echo "Test backup Minio (s3) completed"
254 | - name: Test restore Minio (s3)
255 | run: |
256 | docker run --rm --name ${{ env.IMAGE_NAME }} \
257 | --network host \
258 | -e DB_HOST=127.0.0.1 \
259 | -e DB_USERNAME=user \
260 | -e DB_PASSWORD=password \
261 | -e DB_NAME=testdb \
262 | -e AWS_S3_ENDPOINT="http://127.0.0.1:9000" \
263 | -e AWS_S3_BUCKET_NAME=backups \
264 | -e AWS_ACCESS_KEY=minioadmin \
265 | -e AWS_SECRET_KEY=minioadmin \
266 | -e AWS_DISABLE_SSL="true" \
267 | -e AWS_REGION="eu" \
268 | -e AWS_FORCE_PATH_STYLE="true" ${{ env.IMAGE_NAME }}:latest restore -s s3 -f minio-backup.sql.gz
269 | echo "Test backup Minio (s3) completed"
270 | - name: Test scheduled backup
271 | run: |
272 | docker run -d --rm --name ${{ env.IMAGE_NAME }} \
273 | -v ./migrations:/backup/ \
274 | --network host \
275 | -e DB_HOST=127.0.0.1 \
276 | -e DB_USERNAME=user \
277 | -e DB_PASSWORD=password \
278 | -e DB_NAME=testdb \
279 | ${{ env.IMAGE_NAME }}:latest backup -e "@every 10s"
280 |
281 | echo "Waiting for backup to be done..."
282 | sleep 25
283 | docker logs ${{ env.IMAGE_NAME }}
284 | echo "Test scheduled backup completed"
285 | # Cleanup: Stop and remove containers
286 | - name: Clean up
287 | run: |
288 | docker stop ${{ env.IMAGE_NAME }} || true
289 | docker rm ${{ env.IMAGE_NAME }} || true
--------------------------------------------------------------------------------
/docs/reference/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Configuration Reference
3 | layout: default
4 | nav_order: 3
5 | ---
6 |
7 | # Configuration Reference
8 |
9 | MySQL backup, restore, and migration processes can be configured using **environment variables** or **CLI flags**.
10 |
11 | ## CLI Utility Usage
12 |
13 | The `mysql-bkup` CLI provides commands and options to manage MySQL backups efficiently.
14 |
15 | | Option | Short Flag | Description |
16 | |-------------------------|------------|-----------------------------------------------------------------------------------------|
17 | | `mysql-bkup` | `bkup` | CLI tool for managing MySQL backups, restoration, and migration. |
18 | | `backup` | | Executes a backup operation. |
19 | | `restore` | | Restores a database from a backup file. |
20 | | `migrate` | | Migrates a database from one instance to another. |
21 | | `--storage` | `-s` | Specifies the storage type (`local`, `s3`, `ssh`, etc.). Default: `local`. |
22 | | `--file` | `-f` | Defines the backup file name for restoration. |
23 | | `--path` | | Sets the storage path (e.g., `/custom_path` for S3 or `/home/foo/backup` for SSH). |
24 | | `--config` | `-c` | Provides a configuration file for multi-database backups (e.g., `/backup/config.yaml`). |
25 | | `--dbname` | `-d` | Specifies the database name to back up or restore. |
26 | | `--port` | `-p` | Defines the database port. Default: `3306`. |
27 | | `--disable-compression` | | Disables compression for database backups. |
28 | | `--cron-expression` | `-e` | Schedules backups using a cron expression (e.g., `0 0 * * *` or `@daily`). |
29 | | `--all-databases` | `-a` | Backs up all databases separately (e.g., `backup --all-databases`). |
| `--all-in-one`          | `-A`       | Backs up all databases into a single file (e.g., `backup --all-in-one`).                |
| `--custom-name`         |            | Sets a custom backup name for a one-time backup.                                        |
32 | | `--help` | `-h` | Displays the help message and exits. |
33 | | `--version` | `-V` | Shows version information and exits. |
34 |
35 | ---
36 |
37 | ## Environment Variables
38 |
39 | | Name | Requirement | Description |
40 | |--------------------------------|--------------------------------------|----------------------------------------------------------------------------|
41 | | `DB_PORT` | Optional (default: `3306`) | Database port number. |
42 | | `DB_HOST` | Required | Database host. |
43 | | `DB_NAME` | Optional (if provided via `-d` flag) | Database name. |
44 | | `DB_USERNAME` | Required | Database username. |
45 | | `DB_PASSWORD` | Required | Database password. |
46 | | `DB_SSL_CA` | Optional | Database client CA certificate file |
47 | | `DB_SSL_MODE` | Optional(`0 or 1`) default: `0` | Database client Enable CA validation |
48 | | `AWS_ACCESS_KEY` | Required for S3 storage | AWS S3 Access Key. |
49 | | `AWS_SECRET_KEY` | Required for S3 storage | AWS S3 Secret Key. |
| `AWS_S3_BUCKET_NAME`           | Required for S3 storage              | AWS S3 Bucket Name.                                                        |
51 | | `AWS_REGION` | Required for S3 storage | AWS Region. |
52 | | `AWS_DISABLE_SSL` | Optional | Disable SSL for S3 storage. |
53 | | `AWS_FORCE_PATH_STYLE` | Optional | Force path-style access for S3 storage. |
54 | | `FILE_NAME` | Optional (if provided via `--file`) | File name for restoration (e.g., `.sql`, `.sql.gz`). |
55 | | `GPG_PASSPHRASE` | Optional | GPG passphrase for encrypting/decrypting backups. |
56 | | `GPG_PUBLIC_KEY` | Optional | GPG public key for encrypting backups (e.g., `/config/public_key.asc`). |
57 | | `BACKUP_CRON_EXPRESSION` | Optional (flag `-e`) | Cron expression for scheduled backups. |
58 | | `BACKUP_RETENTION_DAYS` | Optional | Delete backups older than the specified number of days. |
59 | | `BACKUP_CONFIG_FILE` | Optional (flag `-c`) | Configuration file for multi database backup. (e.g: `/backup/config.yaml`) |
60 | | `SSH_HOST` | Required for SSH storage | SSH remote hostname or IP. |
61 | | `SSH_USER` | Required for SSH storage | SSH remote username. |
62 | | `SSH_PASSWORD` | Optional | SSH remote user's password. |
63 | | `SSH_IDENTIFY_FILE` | Optional | SSH remote user's private key. |
64 | | `SSH_PORT` | Optional (default: `22`) | SSH remote server port. |
65 | | `REMOTE_PATH` | Required for SSH/FTP storage | Remote path (e.g., `/home/toto/backup`). |
66 | | `FTP_HOST` | Required for FTP storage | FTP hostname. |
67 | | `FTP_PORT` | Optional (default: `21`) | FTP server port. |
68 | | `FTP_USER` | Required for FTP storage | FTP username. |
69 | | `FTP_PASSWORD` | Required for FTP storage | FTP user password. |
70 | | `TARGET_DB_HOST` | Required for migration | Target database host. |
| `TARGET_DB_PORT`               | Optional (default: `3306`)           | Target database port.                                                      |
72 | | `TARGET_DB_NAME` | Required for migration | Target database name. |
73 | | `TARGET_DB_USERNAME` | Required for migration | Target database username. |
74 | | `TARGET_DB_PASSWORD` | Required for migration | Target database password. |
75 | | `TARGET_DB_URL` | Optional | Target database URL in JDBC URI format. |
76 | | `TG_TOKEN` | Required for Telegram notifications | Telegram token (`BOT-ID:BOT-TOKEN`). |
77 | | `TG_CHAT_ID` | Required for Telegram notifications | Telegram Chat ID. |
78 | | `TZ` | Optional | Time zone for scheduling. |
79 | | `AZURE_STORAGE_CONTAINER_NAME` | Required for Azure Blob Storage | Azure storage container name. |
80 | | `AZURE_STORAGE_ACCOUNT_NAME` | Required for Azure Blob Storage | Azure storage account name. |
81 | | `AZURE_STORAGE_ACCOUNT_KEY` | Required for Azure Blob Storage | Azure storage account key. |
82 |
83 | ---
84 |
85 | ## Scheduled Backups
86 |
87 | ### Running in Scheduled Mode
88 |
89 | - **Docker**: Use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable to schedule backups.
90 | - **Kubernetes**: Use a `CronJob` resource for scheduled backups.
91 |
92 | ### Cron Syntax
93 |
94 | The cron syntax consists of five fields:
95 |
96 | ```conf
97 | * * * * * command
98 | ```
99 |
100 | | Field | Description | Values |
101 | |---------------|------------------------------|----------------|
102 | | Minute | Minute of the hour | `0-59` |
103 | | Hour | Hour of the day | `0-23` |
104 | | Day of Month | Day of the month | `1-31` |
105 | | Month | Month of the year | `1-12` |
106 | | Day of Week | Day of the week (0 = Sunday) | `0-7` |
107 |
108 | #### Examples
109 |
110 | - **Every 30 minutes**: `*/30 * * * *`
111 | - **Every hour at minute 0**: `0 * * * *`
112 | - **Every day at 1:00 AM**: `0 1 * * *`
113 |
114 | ### Predefined Schedules
115 |
116 | | Entry | Description | Equivalent To |
117 | |----------------------------|--------------------------------------------|---------------|
118 | | `@yearly` (or `@annually`) | Run once a year, midnight, Jan. 1st | `0 0 1 1 *` |
119 | | `@monthly` | Run once a month, midnight, first of month | `0 0 1 * *` |
120 | | `@weekly` | Run once a week, midnight between Sat/Sun | `0 0 * * 0` |
121 | | `@daily` (or `@midnight`) | Run once a day, midnight | `0 0 * * *` |
122 | | `@hourly` | Run once an hour, beginning of hour | `0 * * * *` |
123 |
124 | ### Intervals
125 |
126 | You can also schedule backups at fixed intervals using the format:
127 |
128 | ```conf
@every <duration>
130 | ```
131 |
132 | - Example: `@every 1h30m10s` runs the backup every 1 hour, 30 minutes, and 10 seconds.
133 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MYSQL-BKUP
2 |
3 | **MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**.
4 | It supports a variety of storage options and ensures data security through GPG encryption.
5 |
6 | MYSQL-BKUP is designed for seamless deployment on **Docker** and **Kubernetes**, simplifying MySQL backup, restoration, and migration across environments.
7 | It is a lightweight, multi-architecture solution compatible with **Docker**, **Docker Swarm**, **Kubernetes**, and other container orchestration platforms.
8 |
9 |
10 |
11 |
12 |
13 | [](https://github.com/jkaninda/mysql-bkup/actions/workflows/tests.yml)
14 | [](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
15 | [](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
16 | 
17 | 
18 |
19 |
20 | ## Features
21 |
22 | - **Flexible Storage Backends:**
23 | - Local filesystem
24 | - Amazon S3 & S3-compatible storage (e.g., MinIO, Wasabi)
25 | - FTP
26 | - SSH-compatible storage
27 | - Azure Blob storage
28 |
29 | - **Data Security:**
30 | - Backups can be encrypted using **GPG** to ensure confidentiality.
31 |
32 | - **Deployment Flexibility:**
33 | - Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
34 | - Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
35 | - Supports recurring backups of MySQL databases when deployed:
36 | - On Docker for automated backup schedules.
37 | - As a **Job** or **CronJob** on Kubernetes.
38 |
39 | - **Notifications:**
40 | - Get real-time updates on backup success or failure via:
41 | - **Telegram**
42 | - **Email**
43 |
44 | ## 💡Use Cases
45 |
46 | - **Scheduled Backups**: Automate recurring backups using Docker or Kubernetes.
47 | - **Disaster Recovery:** Quickly restore backups to a clean MySQL instance.
48 | - **Database Migration**: Seamlessly move data across environments using the built-in `migrate` feature.
49 | - **Secure Archiving:** Keep backups encrypted and safely stored in the cloud or remote servers.
50 |
51 |
52 | ## ✅ Verified Platforms:
53 | MYSQL-BKUP has been tested and runs successfully on:
54 |
55 | - Docker
56 | - Docker Swarm
57 | - Kubernetes
58 | - OpenShift
59 |
## Documentation is found at https://jkaninda.github.io/mysql-bkup
61 |
62 |
63 | ## Links:
64 |
65 | - [Docker Hub](https://hub.docker.com/r/jkaninda/mysql-bkup)
66 | - [Github](https://github.com/jkaninda/mysql-bkup)
67 |
68 | ## PostgreSQL solution :
69 |
70 | - [PostgreSQL](https://github.com/jkaninda/pg-bkup)
71 |
72 |
73 | ## Quickstart
74 |
75 | ### Simple Backup Using Docker CLI
76 |
77 | To perform a one-time backup, bind your local volume to `/backup` in the container and run the `backup` command:
78 |
79 | ```shell
80 | docker run --rm --network your_network_name \
81 | -v $PWD/backup:/backup/ \
82 | -e "DB_HOST=dbhost" \
83 | -e "DB_PORT=3306" \
84 | -e "DB_USERNAME=username" \
85 | -e "DB_PASSWORD=password" \
86 | jkaninda/mysql-bkup backup -d database_name
87 | ```
88 |
89 | Alternatively, use an environment file (`--env-file`) for configuration:
90 |
91 | ```shell
92 | docker run --rm --network your_network_name \
93 | --env-file your-env-file \
94 | -v $PWD/backup:/backup/ \
95 | jkaninda/mysql-bkup backup -d database_name
96 | ```
97 |
98 | ### Backup All Databases
99 |
100 | To back up all databases on the server, use the `--all-databases` or `-a` flag. By default, this creates individual backup files for each database.
101 |
102 | ```shell
103 | docker run --rm --network your_network_name \
104 | -v $PWD/backup:/backup/ \
105 | -e "DB_HOST=dbhost" \
106 | -e "DB_PORT=3306" \
107 | -e "DB_USERNAME=username" \
108 | -e "DB_PASSWORD=password" \
109 | jkaninda/mysql-bkup backup --all-databases --disable-compression
110 | ```
111 |
112 | > **Note:** Use the `--all-in-one` or `-A` flag to combine backups into a single file.
113 |
114 | ---
115 |
116 | ### Simple Restore Using Docker CLI
117 |
118 | To restore a database, bind your local volume to `/backup` and run the `restore` command:
119 |
120 | ```shell
121 | docker run --rm --network your_network_name \
122 | -v $PWD/backup:/backup/ \
123 | -e "DB_HOST=dbhost" \
124 | -e "DB_PORT=3306" \
125 | -e "DB_USERNAME=username" \
126 | -e "DB_PASSWORD=password" \
127 | jkaninda/mysql-bkup restore -d database_name -f backup_file.sql.gz
128 | ```
129 |
130 | ---
131 |
132 | ### Backup with Docker Compose
133 |
134 | Below is an example of a `docker-compose.yml` file for running a one-time backup:
135 |
136 | ```yaml
137 | services:
  mysql-bkup:
139 | # In production, pin your image tag to a specific release version instead of `latest`.
140 | # See available releases: https://github.com/jkaninda/mysql-bkup/releases
141 | image: jkaninda/mysql-bkup
142 | container_name: mysql-bkup
143 | command: backup
144 | volumes:
145 | - ./backup:/backup
146 | environment:
147 | - DB_PORT=3306
148 | - DB_HOST=mysql
149 | - DB_NAME=foo
150 | - DB_USERNAME=bar
151 | - DB_PASSWORD=password
152 | - TZ=Europe/Paris
153 | networks:
154 | - web
155 |
156 | networks:
157 | web:
158 | ```
159 |
160 | ---
161 |
162 | ### Recurring Backups with Docker
163 |
164 | You can schedule recurring backups using the `--cron-expression` or `-e` flag:
165 |
166 | ```shell
167 | docker run --rm --network network_name \
168 | -v $PWD/backup:/backup/ \
169 | -e "DB_HOST=hostname" \
170 | -e "DB_USERNAME=user" \
171 | -e "DB_PASSWORD=password" \
172 | jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m"
173 | ```
174 |
175 | For predefined schedules, refer to the [documentation](https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules).
176 |
177 | ---
178 |
179 | ## Deploy on Kubernetes
180 |
181 | For Kubernetes, you can deploy `mysql-bkup` as a Job or CronJob. Below are examples for both.
182 |
183 | ### Kubernetes Backup Job
184 |
185 | This example defines a one-time backup job:
186 |
187 | ```yaml
188 | apiVersion: batch/v1
189 | kind: Job
190 | metadata:
191 | name: backup-job
192 | spec:
193 | ttlSecondsAfterFinished: 100
194 | template:
195 | spec:
196 | containers:
197 | - name: mysql-bkup
198 | # Pin the image tag to a specific release version in production.
199 | # See available releases: https://github.com/jkaninda/mysql-bkup/releases
200 | image: jkaninda/mysql-bkup
201 | command: ["backup", "-d", "dbname"]
202 | resources:
203 | limits:
204 | memory: "128Mi"
205 | cpu: "500m"
206 | env:
207 | - name: DB_HOST
208 | value: "mysql"
209 | - name: DB_USERNAME
210 | value: "user"
211 | - name: DB_PASSWORD
212 | value: "password"
213 | volumeMounts:
214 | - mountPath: /backup
215 | name: backup
216 | volumes:
217 | - name: backup
218 | hostPath:
219 | path: /home/toto/backup # Directory location on the host
220 | type: Directory # Optional field
221 | restartPolicy: Never
222 | ```
223 |
224 | ### Kubernetes CronJob for Scheduled Backups
225 |
226 | For scheduled backups, use a `CronJob`:
227 |
228 | ```yaml
229 | apiVersion: batch/v1
230 | kind: CronJob
231 | metadata:
  name: mysql-bkup-cronjob
233 | spec:
234 | schedule: "0 2 * * *" # Runs daily at 2 AM
235 | jobTemplate:
236 | spec:
237 | template:
238 | spec:
239 | containers:
            - name: mysql-bkup
241 | image: jkaninda/mysql-bkup
242 | command: ["backup", "-d", "dbname"]
243 | env:
244 | - name: DB_HOST
245 | value: "mysql"
246 | - name: DB_USERNAME
247 | value: "user"
248 | - name: DB_PASSWORD
249 | value: "password"
250 | volumeMounts:
251 | - mountPath: /backup
252 | name: backup
253 | volumes:
254 | - name: backup
255 | hostPath:
256 | path: /home/toto/backup
257 | type: Directory
258 | restartPolicy: OnFailure
259 | ```
260 |
261 | ---
262 |
263 | ## 🚀 Why Use MYSQL-BKUP?
264 |
265 | **MYSQL-BKUP** isn't just another MySQL backup tool, it's a robust, production-ready solution purpose-built for modern DevOps workflows.
266 |
267 | Here’s why developers, sysadmins, and DevOps choose **MYSQL-BKUP**:
268 |
269 | ### ✅ All-in-One Backup, Restore & Migration
270 |
Whether you're backing up a single database, restoring critical data, or migrating across environments, MYSQL-BKUP handles it all with a **single, unified CLI** — no scripting gymnastics required.
272 |
273 |
274 | ### 🔄 Works Everywhere You Deploy
275 |
276 | Designed to be cloud-native:
277 |
278 | * **Runs seamlessly on Docker, Docker Swarm, and Kubernetes**
279 | * Supports **CronJobs** for automated scheduled backups
280 | * Compatible with GitOps and CI/CD workflows
281 |
282 | ### ☁️ Flexible Storage Integrations
283 |
284 | Store your backups **anywhere**:
285 |
286 | * Local disks
287 | * Amazon S3, MinIO, Wasabi, Azure Blob, FTP, SSH
288 |
289 | ### 🔒 Enterprise-Grade Security
290 |
291 | * **GPG Encryption**: Protect sensitive data with optional encryption before storing backups locally or in the cloud.
292 | * **Secure Storage** Options: Supports S3, Azure Blob, SFTP, and SSH with encrypted transfers, keeping backups safe from unauthorized access.
293 |
294 | ### 📬 Instant Notifications
295 |
296 | Stay in the loop with real-time notifications via **Telegram** and **Email**. Know immediately when a backup succeeds—or fails.
297 |
298 | ### 🏃♂️ Lightweight and Fast
299 |
300 | Written in **Go**, MYSQL-BKUP is fast, multi-arch compatible (`amd64`, `arm64`, `arm/v7`), and optimized for minimal memory and CPU usage. Ideal for both cloud and edge deployments.
301 |
302 | ### 🧪 Tested. Verified. Trusted.
303 |
304 | Actively maintained with **automated testing**, **Docker image size optimizations**, and verified support across major container platforms.
305 |
306 | ---
307 | ## Available image registries
308 |
309 | This Docker image is published to both Docker Hub and the GitHub container registry.
310 | Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:
311 |
312 | ```
313 | docker pull jkaninda/mysql-bkup
314 | docker pull ghcr.io/jkaninda/mysql-bkup
315 | ```
316 |
317 | Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
318 |
319 | ## References
320 |
321 | We created this image as a simpler and more lightweight alternative to existing solutions. Here’s why:
322 |
323 | - **Lightweight:** Written in Go, the image is optimized for performance and minimal resource usage.
324 | - **Multi-Architecture Support:** Supports `arm64` and `arm/v7` architectures.
325 | - **Docker Swarm Support:** Fully compatible with Docker in Swarm mode.
326 | - **Kubernetes Support:** Designed to work seamlessly with Kubernetes.
327 |
328 |
329 | ## License
330 |
331 | This project is licensed under the MIT License. See the LICENSE file for details.
332 |
333 | ## Authors
334 |
335 | **Jonas Kaninda**
336 | -
337 |
338 | ## Copyright
339 |
340 | Copyright (c) [2023] [Jonas Kaninda]
341 |
--------------------------------------------------------------------------------
/docs/how-tos/deploy-on-kubernetes.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Deploy on Kubernetes
3 | layout: default
4 | parent: How Tos
5 | nav_order: 9
6 | ---
7 |
8 | # Deploy on Kubernetes
9 |
10 | To deploy MySQL Backup on Kubernetes, you can use a `Job` for one-time backups or restores, and a `CronJob` for recurring backups.
11 |
12 | Below are examples for different use cases.
13 |
14 | ---
15 |
16 | ## Backup Job to S3 Storage
17 |
18 | This example demonstrates how to configure a Kubernetes `Job` to back up a MySQL database to an S3-compatible storage.
19 |
20 | ```yaml
21 | apiVersion: batch/v1
22 | kind: Job
23 | metadata:
24 | name: backup
25 | spec:
26 | template:
27 | spec:
28 | containers:
29 | - name: mysql-bkup
30 | # In production, lock your image tag to a specific release version
31 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
32 | # for available releases.
33 | image: jkaninda/mysql-bkup
34 | command:
35 | - /bin/sh
36 | - -c
37 | - backup --storage s3
38 | resources:
39 | limits:
40 | memory: "128Mi"
41 | cpu: "500m"
42 | env:
43 | - name: DB_PORT
44 | value: "3306"
45 | - name: DB_HOST
46 | value: ""
47 | - name: DB_NAME
48 | value: ""
49 | - name: DB_USERNAME
50 | value: ""
51 | # Use Kubernetes Secrets for sensitive data like passwords
52 | - name: DB_PASSWORD
53 | value: ""
54 | - name: AWS_S3_ENDPOINT
55 | value: "https://s3.amazonaws.com"
56 | - name: AWS_S3_BUCKET_NAME
57 | value: "xxx"
58 | - name: AWS_REGION
59 | value: "us-west-2"
60 | - name: AWS_ACCESS_KEY
61 | value: "xxxx"
62 | - name: AWS_SECRET_KEY
63 | value: "xxxx"
64 | - name: AWS_DISABLE_SSL
65 | value: "false"
66 | - name: AWS_FORCE_PATH_STYLE
67 | value: "false"
68 | restartPolicy: Never
69 | ```
70 |
71 | ---
72 |
73 | ## Backup Job to SSH Remote Server
74 |
75 | This example demonstrates how to configure a Kubernetes `Job` to back up a MySQL database to an SSH remote server.
76 |
77 | ```yaml
78 | apiVersion: batch/v1
79 | kind: Job
80 | metadata:
81 | name: backup
82 | spec:
83 | ttlSecondsAfterFinished: 100
84 | template:
85 | spec:
86 | containers:
87 | - name: mysql-bkup
88 | # In production, lock your image tag to a specific release version
89 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
90 | # for available releases.
91 | image: jkaninda/mysql-bkup
92 | command:
93 | - /bin/sh
94 | - -c
95 | - backup --storage ssh --disable-compression
96 | resources:
97 | limits:
98 | memory: "128Mi"
99 | cpu: "500m"
100 | env:
101 | - name: DB_PORT
102 | value: "3306"
103 | - name: DB_HOST
104 | value: ""
105 | - name: DB_NAME
106 | value: "dbname"
107 | - name: DB_USERNAME
108 |               value: "mysql"
109 | # Use Kubernetes Secrets for sensitive data like passwords
110 | - name: DB_PASSWORD
111 | value: ""
112 | - name: SSH_HOST_NAME
113 | value: "xxx"
114 | - name: SSH_PORT
115 | value: "22"
116 | - name: SSH_USER
117 | value: "xxx"
118 | - name: SSH_PASSWORD
119 | value: "xxxx"
120 | - name: SSH_REMOTE_PATH
121 | value: "/home/toto/backup"
122 | # Optional: Required if you want to encrypt your backup
123 | - name: GPG_PASSPHRASE
124 | value: "xxxx"
125 | restartPolicy: Never
126 | ```
127 |
128 | ---
129 |
130 | ## Restore Job
131 |
132 | This example demonstrates how to configure a Kubernetes `Job` to restore a MySQL database from a backup stored on an SSH remote server.
133 |
134 | ```yaml
135 | apiVersion: batch/v1
136 | kind: Job
137 | metadata:
138 | name: restore-job
139 | spec:
140 | ttlSecondsAfterFinished: 100
141 | template:
142 | spec:
143 | containers:
144 | - name: mysql-bkup
145 | # In production, lock your image tag to a specific release version
146 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
147 | # for available releases.
148 | image: jkaninda/mysql-bkup
149 | command:
150 | - /bin/sh
151 | - -c
152 | - restore --storage ssh --file store_20231219_022941.sql.gz
153 | resources:
154 | limits:
155 | memory: "128Mi"
156 | cpu: "500m"
157 | env:
158 | - name: DB_PORT
159 | value: "3306"
160 | - name: DB_HOST
161 | value: ""
162 | - name: DB_NAME
163 | value: "dbname"
164 | - name: DB_USERNAME
165 |               value: "mysql"
166 | # Use Kubernetes Secrets for sensitive data like passwords
167 | - name: DB_PASSWORD
168 | value: ""
169 | - name: SSH_HOST_NAME
170 | value: "xxx"
171 | - name: SSH_PORT
172 | value: "22"
173 | - name: SSH_USER
174 | value: "xxx"
175 | - name: SSH_PASSWORD
176 | value: "xxxx"
177 | - name: SSH_REMOTE_PATH
178 | value: "/home/toto/backup"
179 | # Optional: Required if your backup was encrypted
180 | #- name: GPG_PASSPHRASE
181 | # value: "xxxx"
182 | restartPolicy: Never
183 | ```
184 |
185 | ---
186 |
187 | ## Recurring Backup with CronJob
188 |
189 | This example demonstrates how to configure a Kubernetes `CronJob` for recurring backups to an SSH remote server.
190 |
191 | ```yaml
192 | apiVersion: batch/v1
193 | kind: CronJob
194 | metadata:
195 | name: backup-job
196 | spec:
197 | schedule: "* * * * *"
198 | jobTemplate:
199 | spec:
200 | template:
201 | spec:
202 | containers:
203 | - name: mysql-bkup
204 | # In production, lock your image tag to a specific release version
205 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
206 | # for available releases.
207 | image: jkaninda/mysql-bkup
208 | command:
209 | - /bin/sh
210 | - -c
211 | - backup --storage ssh --disable-compression
212 | resources:
213 | limits:
214 | memory: "128Mi"
215 | cpu: "500m"
216 | env:
217 | - name: DB_PORT
218 | value: "3306"
219 | - name: DB_HOST
220 | value: ""
221 | - name: DB_NAME
222 | value: "test"
223 | - name: DB_USERNAME
224 |                 value: "mysql"
225 | # Use Kubernetes Secrets for sensitive data like passwords
226 | - name: DB_PASSWORD
227 | value: ""
228 | - name: SSH_HOST_NAME
229 | value: "192.168.1.16"
230 | - name: SSH_PORT
231 | value: "2222"
232 | - name: SSH_USER
233 | value: "jkaninda"
234 | - name: SSH_REMOTE_PATH
235 | value: "/config/backup"
236 | - name: SSH_PASSWORD
237 | value: "password"
238 | # Optional: Required if you want to encrypt your backup
239 | #- name: GPG_PASSPHRASE
240 | # value: "xxx"
241 | restartPolicy: Never
242 | ```
243 |
244 | ---
245 |
246 | ## Kubernetes Rootless Deployment
247 |
248 | This example demonstrates how to run the backup container in a rootless environment, suitable for platforms like OpenShift.
249 |
250 | ```yaml
251 | apiVersion: batch/v1
252 | kind: CronJob
253 | metadata:
254 | name: backup-job
255 | spec:
256 | schedule: "* * * * *"
257 | jobTemplate:
258 | spec:
259 | template:
260 | spec:
261 | securityContext:
262 | runAsUser: 1000
263 | runAsGroup: 3000
264 | fsGroup: 2000
265 | containers:
266 | - name: mysql-bkup
267 | # In production, lock your image tag to a specific release version
268 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
269 | # for available releases.
270 | image: jkaninda/mysql-bkup
271 | command:
272 | - /bin/sh
273 | - -c
274 | - backup --storage ssh --disable-compression
275 | resources:
276 | limits:
277 | memory: "128Mi"
278 | cpu: "500m"
279 | env:
280 | - name: DB_PORT
281 | value: "3306"
282 | - name: DB_HOST
283 | value: ""
284 | - name: DB_NAME
285 | value: "test"
286 | - name: DB_USERNAME
287 |                 value: "mysql"
288 | # Use Kubernetes Secrets for sensitive data like passwords
289 | - name: DB_PASSWORD
290 | value: ""
291 | - name: SSH_HOST_NAME
292 | value: "192.168.1.16"
293 | - name: SSH_PORT
294 | value: "2222"
295 | - name: SSH_USER
296 | value: "jkaninda"
297 | - name: SSH_REMOTE_PATH
298 | value: "/config/backup"
299 | - name: SSH_PASSWORD
300 | value: "password"
301 | # Optional: Required if you want to encrypt your backup
302 | #- name: GPG_PASSPHRASE
303 | # value: "xxx"
304 | restartPolicy: OnFailure
305 | ```
306 |
307 | ---
308 |
309 | ## Migrate Database
310 |
311 | This example demonstrates how to configure a Kubernetes `Job` to migrate a MySQL database from one server to another.
312 |
313 | ```yaml
314 | apiVersion: batch/v1
315 | kind: Job
316 | metadata:
317 | name: migrate-db
318 | spec:
319 | ttlSecondsAfterFinished: 100
320 | template:
321 | spec:
322 | containers:
323 | - name: mysql-bkup
324 | # In production, lock your image tag to a specific release version
325 | # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
326 | # for available releases.
327 | image: jkaninda/mysql-bkup
328 | command:
329 | - /bin/sh
330 | - -c
331 | - migrate
332 | resources:
333 | limits:
334 | memory: "128Mi"
335 | cpu: "500m"
336 | env:
337 | ## Source Database
338 | - name: DB_HOST
339 |               value: "mysql"
340 | - name: DB_PORT
341 | value: "3306"
342 | - name: DB_NAME
343 | value: "dbname"
344 | - name: DB_USERNAME
345 | value: "username"
346 | - name: DB_PASSWORD
347 | value: "password"
348 | ## Target Database
349 | - name: TARGET_DB_HOST
350 |               value: "target-mysql"
351 | - name: TARGET_DB_PORT
352 | value: "3306"
353 | - name: TARGET_DB_NAME
354 | value: "dbname"
355 | - name: TARGET_DB_USERNAME
356 | value: "username"
357 | - name: TARGET_DB_PASSWORD
358 | value: "password"
359 | restartPolicy: Never
360 | ```
361 |
362 | ---
363 |
364 | ## Key Notes
365 |
366 | - **Security**: Always use Kubernetes Secrets for sensitive data like passwords and access keys.
367 | - **Resource Limits**: Adjust resource limits (`memory` and `cpu`) based on your workload requirements.
368 | - **Cron Schedule**: Use standard cron expressions for scheduling recurring backups.
369 | - **Rootless Deployment**: The image supports running in rootless environments, making it suitable for platforms like OpenShift.
370 |
--------------------------------------------------------------------------------
/docs/old-version/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | layout: page
3 | title: Old version
4 | permalink: /old-version/
5 | ---
6 |
7 | This is the documentation of mysql-backup for all old versions below `v1.0`.
8 | In the old version, S3 storage was mounted using s3fs, so we decided to migrate to the official AWS SDK.
9 |
10 | ## Storage:
11 | - local
12 | - s3
13 | - Object storage
14 |
15 | ## Volumes:
16 |
17 | - /s3mnt => S3 mounting path
18 | - /backup => local storage mounting path
19 |
20 | ### Usage
21 |
22 | | Options | Shorts | Usage |
23 | |-----------------------|--------|------------------------------------------------------------------------|
24 | | mysql-bkup | bkup | CLI utility |
25 | | backup | | Backup database operation |
26 | | restore | | Restore database operation |
27 | | history | | Show the history of backup |
28 | | --storage | -s | Storage. local or s3 (default: local) |
29 | | --file | -f | File name to restore |
30 | | --path | | S3 path without file name. eg: /custom_path |
31 | | --dbname | -d | Database name |
32 | | --port | -p | Database port (default: 3306) |
33 | | --mode | -m | Execution mode. default or scheduled (default: default) |
34 | | --disable-compression | | Disable database backup compression |
35 | | --prune | | Delete old backup, default disabled |
36 | | --keep-last | | Delete old backup created more than specified days ago, default 7 days |
37 | | --period | | Crontab period for scheduled mode only. (default: "0 1 * * *") |
38 | | --help | -h | Print this help message and exit |
39 | | --version | -V | Print version information and exit |
40 |
41 |
42 | ## Environment variables
43 |
44 | | Name | Requirement | Description |
45 | |-------------|--------------------------------------------------|------------------------------------------------------|
46 | | DB_PORT | Optional, default 3306 | Database port number |
47 | | DB_HOST | Required | Database host |
48 | | DB_NAME | Optional if it was provided from the -d flag | Database name |
49 | | DB_USERNAME | Required | Database user name |
50 | | DB_PASSWORD | Required | Database password |
51 | | ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
52 | | SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
53 | | BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
54 | | S3_ENDPOINT | Optional, required for S3 storage | AWS S3 Endpoint |
55 | | FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
56 |
57 |
58 | ## Note:
59 |
60 | Creating a user for backup tasks who has read-only access is recommended!
61 |
62 | > create read-only user
63 |
64 |
65 | ## Backup database :
66 |
67 | Simple backup usage
68 |
69 | ```sh
70 | bkup backup
71 | ```
72 |
73 | ### S3
74 |
75 | ```sh
76 | mysql-bkup backup --storage s3
77 | ```
78 | ## Docker run:
79 |
80 | ```sh
81 | docker run --rm --network your_network_name \
82 | --name mysql-bkup -v $PWD/backup:/backup/ \
83 | -e "DB_HOST=database_host_name" \
84 | -e "DB_USERNAME=username" \
85 | -e "DB_PASSWORD=password" jkaninda/mysql-bkup:v0.7 mysql-bkup backup -d database_name
86 | ```
87 |
88 | ## Docker compose file:
89 | ```yaml
90 | version: '3'
91 | services:
92 | postgres:
93 | image: postgres:14.5
94 | container_name: postgres
95 | restart: unless-stopped
96 | volumes:
97 | - ./postgres:/var/lib/postgresql/data
98 | environment:
99 | POSTGRES_DB: bkup
100 | POSTGRES_PASSWORD: password
101 | POSTGRES_USER: bkup
102 | mysql-bkup:
103 | image: jkaninda/mysql-bkup:v0.7
104 | container_name: mysql-bkup
105 | depends_on:
106 | - postgres
107 | command:
108 | - /bin/sh
109 | - -c
110 | - mysql-bkup backup -d bkup
111 | volumes:
112 | - ./backup:/backup
113 | environment:
114 | - DB_PORT=3306
115 | - DB_HOST=postgres
116 | - DB_NAME=bkup
117 | - DB_USERNAME=bkup
118 | - DB_PASSWORD=password
119 | ```
120 | ## Restore database :
121 |
122 | Simple database restore operation usage
123 |
124 | ```sh
125 | mysql-bkup restore --file database_20231217_115621.sql --dbname database_name
126 | ```
127 |
128 | ```sh
129 | mysql-bkup restore -f database_20231217_115621.sql -d database_name
130 | ```
131 | ### S3
132 |
133 | ```sh
134 | mysql-bkup restore --storage s3 --file database_20231217_115621.sql --dbname database_name
135 | ```
136 |
137 | ## Docker run:
138 |
139 | ```sh
140 | docker run --rm --network your_network_name \
141 | --name mysql-bkup \
142 | -v $PWD/backup:/backup/ \
143 | -e "DB_HOST=database_host_name" \
144 | -e "DB_USERNAME=username" \
145 | -e "DB_PASSWORD=password" \
146 | jkaninda/mysql-bkup:v0.7 mysql-bkup restore -d database_name -f store_20231219_022941.sql.gz
147 | ```
148 |
149 | ## Docker compose file:
150 |
151 | ```yaml
152 | version: '3'
153 | services:
154 | mysql-bkup:
155 | image: jkaninda/mysql-bkup:v0.7
156 | container_name: mysql-bkup
157 | command:
158 | - /bin/sh
159 | - -c
160 | - mysql-bkup restore --file database_20231217_115621.sql -d database_name
161 | volumes:
162 | - ./backup:/backup
163 | environment:
164 | #- FILE_NAME=database_20231217_040238.sql.gz # Optional if file name is set from command
165 | - DB_PORT=3306
166 | - DB_HOST=postgres
167 | - DB_USERNAME=user_name
168 | - DB_PASSWORD=password
169 | ```
170 | ## Run
171 |
172 | ```sh
173 | docker-compose up -d
174 | ```
175 | ## Backup to S3
176 |
177 | ```sh
178 | docker run --rm --privileged \
179 | --device /dev/fuse --name mysql-bkup \
180 | -e "DB_HOST=db_hostname" \
181 | -e "DB_USERNAME=username" \
182 | -e "DB_PASSWORD=password" \
183 | -e "ACCESS_KEY=your_access_key" \
184 | -e "SECRET_KEY=your_secret_key" \
185 | -e "BUCKET_NAME=your_bucket_name" \
186 | -e "S3_ENDPOINT=https://s3.us-west-2.amazonaws.com" \
187 | jkaninda/mysql-bkup:v0.7 mysql-bkup backup -s s3 -d database_name
188 | ```
189 | > To change the S3 backup path, add the flag `--path /my_customPath`. The default path is `/mysql-bkup`.
190 |
191 | Simple S3 backup usage
192 |
193 | ```sh
194 | mysql-bkup backup --storage s3 --dbname mydatabase
195 | ```
196 | ```yaml
197 | mysql-bkup:
198 | image: jkaninda/mysql-bkup:v0.7
199 | container_name: mysql-bkup
200 | privileged: true
201 | devices:
202 | - "/dev/fuse"
203 | command:
204 | - /bin/sh
205 | - -c
206 | - mysql-bkup restore --storage s3 -f database_20231217_115621.sql.gz --dbname database_name
207 | environment:
208 | - DB_PORT=3306
209 | - DB_HOST=mysql
210 | - DB_USERNAME=user_name
211 | - DB_PASSWORD=password
212 | - ACCESS_KEY=${ACCESS_KEY}
213 | - SECRET_KEY=${SECRET_KEY}
214 | - BUCKET_NAME=${BUCKET_NAME}
215 | - S3_ENDPOINT=${S3_ENDPOINT}
216 |
217 | ```
218 | ## Run in Scheduled mode
219 |
220 | This tool can be run as CronJob in Kubernetes for a regular backup which makes deployment on Kubernetes easy as Kubernetes has CronJob resources.
221 | For Docker, you need to run it in scheduled mode by adding `--mode scheduled` flag and specify the periodical backup time by adding `--period "0 1 * * *"` flag.
222 |
223 | Make an automated backup on Docker
224 |
225 | ## Syntax of crontab (field description)
226 |
227 | The syntax is:
228 |
229 | - 1: Minute (0-59)
230 | - 2: Hours (0-23)
231 | - 3: Day (1-31)
232 | - 4: Month (1-12 [12 == December])
233 | - 5: Day of the week (0-7 [7 or 0 == Sunday])
234 |
235 | Easy to remember format:
236 |
237 | ```conf
238 | * * * * * command to be executed
239 | ```
240 |
241 | ```conf
242 | - - - - -
243 | | | | | |
244 | | | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
245 | | | | ------- Month (1 - 12)
246 | | | --------- Day of month (1 - 31)
247 | | ----------- Hour (0 - 23)
248 | ------------- Minute (0 - 59)
249 | ```
250 |
251 | > At every 30th minute
252 |
253 | ```conf
254 | */30 * * * *
255 | ```
256 | > “At minute 0” — every hour
257 | ```conf
258 | 0 * * * *
259 | ```
260 |
261 | > “At 01:00” — every day
262 |
263 | ```conf
264 | 0 1 * * *
265 | ```
266 |
267 | ## Example of scheduled mode
268 |
269 | > Docker run :
270 |
271 | ```sh
272 | docker run --rm --name mysql-bkup \
273 | -v $BACKUP_DIR:/backup/ \
274 | -e "DB_HOST=$DB_HOST" \
275 | -e "DB_USERNAME=$DB_USERNAME" \
276 | -e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup:v0.7 mysql-bkup backup --dbname $DB_NAME --mode scheduled --period "0 1 * * *"
277 | ```
278 |
279 | > With Docker compose
280 |
281 | ```yaml
282 | version: "3"
283 | services:
284 | mysql-bkup:
285 | image: jkaninda/mysql-bkup:v0.7
286 | container_name: mysql-bkup
287 | privileged: true
288 | devices:
289 | - "/dev/fuse"
290 | command:
291 | - /bin/sh
292 | - -c
293 | - mysql-bkup backup --storage s3 --path /mys3_custom_path --dbname database_name --mode scheduled --period "*/30 * * * *"
294 | environment:
295 | - DB_PORT=3306
296 | - DB_HOST=postgreshost
297 | - DB_USERNAME=userName
298 | - DB_PASSWORD=${DB_PASSWORD}
299 | - ACCESS_KEY=${ACCESS_KEY}
300 | - SECRET_KEY=${SECRET_KEY}
301 | - BUCKET_NAME=${BUCKET_NAME}
302 | - S3_ENDPOINT=${S3_ENDPOINT}
303 | ```
304 |
305 | ## Kubernetes CronJob
306 |
307 | For Kubernetes, you don't need to run it in scheduled mode.
308 |
309 | Simple Kubernetes CronJob usage:
310 |
311 | ```yaml
312 | apiVersion: batch/v1
313 | kind: CronJob
314 | metadata:
315 | name: bkup-job
316 | spec:
317 | schedule: "0 1 * * *"
318 | jobTemplate:
319 | spec:
320 | template:
321 | spec:
322 | containers:
323 | - name: mysql-bkup
324 | image: jkaninda/mysql-bkup:v0.7
325 | securityContext:
326 | privileged: true
327 | command:
328 | - /bin/sh
329 | - -c
330 | - mysql-bkup backup -s s3 --path /custom_path
331 | env:
332 | - name: DB_PORT
333 | value: "3306"
334 | - name: DB_HOST
335 | value: ""
336 | - name: DB_NAME
337 | value: ""
338 | - name: DB_USERNAME
339 | value: ""
340 | # Please use secret!
341 | - name: DB_PASSWORD
342 | value: ""
343 | - name: ACCESS_KEY
344 | value: ""
345 | - name: SECRET_KEY
346 | value: ""
347 | - name: BUCKET_NAME
348 | value: ""
349 | - name: S3_ENDPOINT
350 | value: "https://s3.us-west-2.amazonaws.com"
351 | restartPolicy: Never
352 | ```
353 |
354 | ## Authors
355 |
356 | **Jonas Kaninda**
357 | -
358 |
359 |
--------------------------------------------------------------------------------