├── .github ├── stale.yml └── workflows │ ├── ci.yaml │ ├── deploy.yaml │ └── release.yaml ├── .gitignore ├── CONTRIBUTORS.md ├── Dockerfile ├── Makefile ├── README.md ├── cmd ├── common_test.go ├── dump.go ├── dump_test.go ├── prune.go ├── prune_test.go ├── restore.go ├── restore_test.go ├── root.go ├── testdata │ ├── config.yml │ └── pattern.yml └── tracer.go ├── docs ├── backup.md ├── configuration.md ├── container_considerations.md ├── contributing.md ├── database_address.md ├── format.md ├── logging.md ├── prune.md ├── restore.md ├── scheduling.md └── security.md ├── entrypoint ├── examples ├── configs │ ├── local.yaml │ ├── remote.yaml │ └── telemetry.yaml └── docker-compose.yaml ├── go.mod ├── go.sum ├── main.go ├── pkg ├── archive │ └── tar.go ├── compression │ ├── bzip2.go │ ├── compressor.go │ ├── gzip.go │ └── none.go ├── config │ ├── process.go │ ├── process_test.go │ └── testdata │ │ └── config.yml ├── core │ ├── const.go │ ├── dump.go │ ├── dumpoptions.go │ ├── dumpresults.go │ ├── executor.go │ ├── prune.go │ ├── prune_test.go │ ├── pruneoptions.go │ ├── restore.go │ ├── restoreoptions.go │ ├── scripts.go │ ├── timer.go │ └── timer_test.go ├── database │ ├── connection.go │ ├── dump.go │ ├── dumpwriter.go │ ├── mysql │ │ ├── date.go │ │ ├── dump.go │ │ ├── sanitize.go │ │ ├── table.go │ │ └── view.go │ ├── restore.go │ └── schemas.go ├── encrypt │ ├── aes256cbc.go │ ├── age.go │ ├── chacha20poly1305.go │ ├── const.go │ ├── encryptor.go │ ├── encryptor_test.go │ ├── pbkdf2aes256cbc.go │ └── smime.go ├── internal │ ├── remote │ │ └── certs.go │ └── test │ │ ├── README.md │ │ └── remote.go ├── remote │ ├── connection.go │ ├── const.go │ ├── get.go │ └── get_test.go ├── storage │ ├── credentials │ │ └── creds.go │ ├── file │ │ └── file.go │ ├── parse.go │ ├── s3 │ │ ├── reader.go │ │ └── s3.go │ ├── smb │ │ └── smb.go │ └── storage.go └── util │ ├── namedreader.go │ ├── parse.go │ └── tracer.go ├── scripts.d └── post-backup │ └── rename_backup.sh.example └── test ├── README.md ├── backup_log_containers.go ├── backup_nolog_containers.go ├── backup_teardown_test.go ├── backup_test.go ├── ctr ├── Dockerfile ├── passdb.tdb ├── secrets.tdb ├── smb.conf └── smbusers └── package_noteardown_test.go /.github/stale.yml: -------------------------------------------------------------------------------- 1 | # Configuration for probot-stale - https://github.com/probot/stale 2 | 3 | # Number of days of inactivity before an Issue or Pull Request becomes stale 4 | daysUntilStale: 60 5 | 6 | # Number of days of inactivity before an Issue or Pull Request with the stale label is closed. 7 | # Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. 8 | daysUntilClose: 7 9 | 10 | # Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable 11 | exemptLabels: 12 | - pinned 13 | - "[Status] Maybe Later" 14 | 15 | # Set to true to ignore issues in a project (defaults to false) 16 | exemptProjects: false 17 | 18 | # Set to true to ignore issues in a milestone (defaults to false) 19 | exemptMilestones: false 20 | 21 | # Set to true to ignore issues with an assignee (defaults to false) 22 | exemptAssignees: false 23 | 24 | # Label to use when marking as stale 25 | staleLabel: wontfix 26 | 27 | # Comment to post when marking as stale. Set to `false` to disable 28 | markComment: > 29 | This issue has been automatically marked as stale because it has not had 30 | recent activity. 
It will be closed if no further activity occurs. Thank you 31 | for your contributions. 32 | 33 | # Comment to post when removing the stale label. 34 | # unmarkComment: > 35 | # Your comment here. 36 | 37 | # Comment to post when closing a stale Issue or Pull Request. 38 | # closeComment: > 39 | # Your comment here. 40 | 41 | # Limit the number of actions per hour, from 1-30. Default is 30 42 | limitPerRun: 30 43 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration 2 | on: 3 | pull_request: 4 | types: [opened, synchronize, reopened] 5 | paths-ignore: 6 | - '**/*.md' 7 | push: 8 | branches: 9 | - master 10 | - main 11 | paths-ignore: 12 | - '**/*.md' 13 | 14 | env: 15 | IMAGE_NAME: "databack/mysql-backup" 16 | 17 | jobs: 18 | report: 19 | name: Report 20 | runs-on: ubuntu-latest 21 | outputs: 22 | non_docs_changed: ${{ steps.fileschanged.outputs.non_doc_files_changed }} 23 | steps: 24 | - name: ref 25 | run: echo ${{ github.ref }} 26 | - name: event_name 27 | run: echo ${{ github.event_name }} 28 | - name: checkout 29 | uses: actions/checkout@v4 30 | with: 31 | fetch-depth: ${{ github.event_name == 'pull_request' && 2 || 0 }} 32 | - id: fileschanged 33 | run: | 34 | case '${{ github.event_name }}' in 35 | push) 36 | firstCommit='${{ github.event.before }}' 37 | lastCommit='${{ github.event.after }}' 38 | ;; 39 | pull_request) 40 | firstCommit='${{ github.event.pull_request.base.sha }}' 41 | lastCommit='${{ github.event.pull_request.head.sha }}' 42 | ;; 43 | esac 44 | changedFiles=$(git diff --name-only --diff-filter=d "${firstCommit}" "${lastCommit}") 45 | echo "Files changed: $changedFiles" 46 | NON_MD_FILES=$(echo "$changedFiles" | grep -v '\.md$' || true) 47 | if [ -n "$NON_MD_FILES" ]; then 48 | echo "non_doc_files_changed=true" >> $GITHUB_OUTPUT 49 | else 50 | echo "non_doc_files_changed=false" >> $GITHUB_OUTPUT 51 | fi 52 | ci: 53 | name: CI 54 | runs-on: ubuntu-latest 55 | needs: [ report ] 56 | if: needs.report.outputs.non_docs_changed == 'true' 57 | steps: 58 | - name: checkout 59 | uses: actions/checkout@v4 60 | - uses: actions/setup-go@v5 61 | with: 62 | go-version: '1.23' 63 | - name: golangci-lint 64 | uses: golangci/golangci-lint-action@v7 65 | with: 66 | version: v2.1.2 67 | - name: Build 68 | run: go build -o dist/mysql-backup -v . 
69 | - name: vet 70 | run: make vet 71 | - name: Test 72 | run: make test 73 | - name: Integration Test 74 | run: make integration_test 75 | - name: Set up QEMU 76 | uses: docker/setup-qemu-action@v3 77 | - name: Set up Docker Buildx 78 | uses: docker/setup-buildx-action@v3 79 | - name: Build and push 80 | id: docker_build 81 | uses: docker/build-push-action@v6 82 | with: 83 | push: false 84 | platforms: linux/amd64,linux/arm64 85 | tags: | 86 | ${{env.IMAGE_NAME}}:${{github.sha}} 87 | - name: test 88 | run: make test 89 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yaml: -------------------------------------------------------------------------------- 1 | name: Deploy 2 | on: 3 | push: 4 | branches: 5 | - master 6 | - main 7 | paths-ignore: 8 | - '**/*.md' 9 | 10 | env: 11 | IMAGE_NAME: "databack/mysql-backup" 12 | 13 | jobs: 14 | report: 15 | name: Report 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: ref 19 | run: echo ${{ github.ref }} 20 | - name: event_name 21 | run: echo ${{ github.event_name }} 22 | deploy: 23 | name: Deploy Image 24 | runs-on: ubuntu-latest 25 | steps: 26 | - name: checkout 27 | uses: actions/checkout@v4 28 | - name: Set up QEMU 29 | uses: docker/setup-qemu-action@v3 30 | - name: Set up Docker Buildx 31 | uses: docker/setup-buildx-action@v3 32 | - name: Login to DockerHub 33 | uses: docker/login-action@v3 34 | with: 35 | username: ${{ secrets.DOCKER_USERNAME }} 36 | password: ${{ secrets.DOCKER_PASSWORD }} 37 | - name: Build and push 38 | id: docker_build 39 | uses: docker/build-push-action@v6 40 | with: 41 | push: true 42 | platforms: linux/amd64,linux/arm64 43 | tags: | 44 | ${{env.IMAGE_NAME}}:${{github.sha}} 45 | ${{env.IMAGE_NAME}}:master 46 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | push: 4 | tags: 5 | - 'v*' 6 | 7 | env: 8 | IMAGE_NAME: databack/mysql-backup 9 | 10 | jobs: 11 | report: 12 | name: Report 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: ref 16 | run: echo ${{ github.ref }} 17 | - name: event_name 18 | run: echo ${{ github.event_name }} 19 | release: 20 | name: Release 21 | runs-on: ubuntu-latest 22 | steps: 23 | - name: Checkout 24 | uses: actions/checkout@v4 25 | - name: Set up Go 26 | uses: actions/setup-go@v5 27 | with: 28 | go-version: 1.23 29 | - name: Build for all platforms 30 | run: | 31 | make build-all 32 | - name: Release 33 | uses: softprops/action-gh-release@v2 34 | with: 35 | files: | 36 | dist/* 37 | - name: Set up QEMU 38 | uses: docker/setup-qemu-action@v3 39 | - name: Set up Docker Buildx 40 | uses: docker/setup-buildx-action@v3 41 | - name: Login to DockerHub 42 | uses: docker/login-action@v3 43 | with: 44 | username: ${{ secrets.DOCKER_USERNAME }} 45 | password: ${{ secrets.DOCKER_PASSWORD }} 46 | - name: Docker meta 47 | id: meta 48 | uses: docker/metadata-action@v5 49 | with: 50 | # list of Docker images to use as base name for tags 51 | images: | 52 | ${{env.IMAGE_NAME}} 53 | # generate Docker tags based on the following events/attributes 54 | # for any semver, including rc and alpha, etc. 
take the tag as is 55 | # for ones that are exactly X.Y.Z, also tag it as latest 56 | tags: | 57 | type=semver,pattern={{version}} 58 | type=semver,pattern=v{{major}}.{{minor}}.{{patch}},value=latest 59 | - name: Build and push semver tag 60 | id: docker_build_push_semver 61 | uses: docker/build-push-action@v6 62 | with: 63 | push: true 64 | platforms: linux/amd64,linux/arm64 65 | tags: | 66 | ${{ steps.meta.outputs.tags }} 67 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | dist/ 2 | tmp/ 3 | -------------------------------------------------------------------------------- /CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # Contributors 2 | 3 | * Avi Deitcher 4 | * Juan Baptiste 5 | * Luke Barton 6 | * Anish Dhanka 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # mysql backup image 2 | FROM golang:1.23.7-alpine3.21 AS build 3 | 4 | COPY . /src/mysql-backup 5 | WORKDIR /src/mysql-backup 6 | 7 | RUN mkdir /out && go build -o /out/mysql-backup . 8 | 9 | # we would do from scratch, but we need basic utilities in order to support pre/post scripts 10 | FROM alpine:3.20 AS runtime 11 | LABEL org.opencontainers.image.authors="https://github.com/databacker" 12 | 13 | # set us up to run as non-root user 14 | RUN apk add --no-cache bash && \ 15 | addgroup -g 1005 appuser && \ 16 | adduser -u 1005 -G appuser -D appuser 17 | 18 | USER appuser 19 | 20 | COPY --from=build /out/mysql-backup /mysql-backup 21 | COPY entrypoint /entrypoint 22 | 23 | ENV DB_DUMP_PRE_BACKUP_SCRIPTS="/scripts.d/pre-backup/" 24 | ENV DB_DUMP_POST_BACKUP_SCRIPTS="/scripts.d/post-backup/" 25 | ENV DB_DUMP_PRE_RESTORE_SCRIPTS="/scripts.d/pre-restore/" 26 | ENV DB_DUMP_POST_RESTORE_SCRIPTS="/scripts.d/post-restore/" 27 | 28 | # start 29 | ENTRYPOINT ["/entrypoint"] 30 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: build push test 2 | 3 | TAG ?= $(shell git log -n 1 --pretty=format:"%H") 4 | IMAGE ?= databack/mysql-backup 5 | BUILDIMAGE ?= $(IMAGE):build 6 | TARGET ?= $(IMAGE):$(TAG) 7 | OCIPLATFORMS ?= linux/amd64,linux/arm64 8 | LOCALPLATFORMS ?= linux/386 linux/amd64 linux/arm64 darwin/amd64 darwin/arm64 windows/amd64 windows/arm64 windows/386 9 | DIST ?= dist 10 | GOOS?=$(shell uname -s | tr '[:upper:]' '[:lower:]') 11 | GOARCH?=$(shell uname -m) 12 | BIN ?= $(DIST)/mysql-backup-$(GOOS)-$(GOARCH) 13 | 14 | build-docker: 15 | docker buildx build -t $(BUILDIMAGE) --platform $(OCIPLATFORMS) . 16 | 17 | .PRECIOUS: $(foreach platform,$(LOCALPLATFORMS),$(DIST)/mysql-backup-$(subst /,-,$(platform))) 18 | 19 | build-all: $(foreach platform,$(LOCALPLATFORMS),build-local-$(subst /,-,$(platform))) 20 | 21 | build-local-%: $(DIST)/mysql-backup-%; 22 | 23 | $(DIST): 24 | mkdir -p $@ 25 | 26 | $(DIST)/mysql-backup-%: GOOS=$(word 1,$(subst -, ,$*)) 27 | $(DIST)/mysql-backup-%: GOARCH=$(word 2,$(subst -, ,$*)) 28 | $(DIST)/mysql-backup-%: $(DIST) 29 | GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o $@ . 
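# note: the two GOOS/GOARCH lines above are Make target-specific variables derived from the target stem, so e.g. `make dist/mysql-backup-linux-arm64` cross-compiles for linux/arm64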
30 | 31 | build-local: $(BIN) 32 | 33 | push: build-docker 34 | docker tag $(BUILDIMAGE) $(TARGET) 35 | docker push $(TARGET) 36 | 37 | integration_test: 38 | go test -v ./test --tags=integration 39 | 40 | integration_test_debug: 41 | dlv --wd=./test test ./test --build-flags="-tags=integration" 42 | 43 | vet: 44 | go vet --tags=integration ./... 45 | 46 | test: unit_test integration_test 47 | 48 | unit_test: 49 | go test -v ./... 50 | 51 | .PHONY: clean-test-stop clean-test-remove clean-test 52 | clean-test-stop: 53 | @echo Kill Containers 54 | $(eval IDS:=$(strip $(shell docker ps --filter label=mysqltest -q))) 55 | @if [ -n "$(IDS)" ]; then docker kill $(IDS); fi 56 | @echo 57 | 58 | clean-test-remove: 59 | @echo Remove Containers 60 | $(eval IDS:=$(shell docker ps -a --filter label=mysqltest -q)) 61 | @if [ -n "$(IDS)" ]; then docker rm $(IDS); fi 62 | @echo 63 | 64 | clean-test: clean-test-stop clean-test-remove 65 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # mysql-backup 2 | 3 | Back up mysql databases to... anywhere! 4 | 5 | ## Overview 6 | 7 | mysql-backup is a simple way to do MySQL database backups and restores, as well as manage your backups. 8 | 9 | It has the following features: 10 | 11 | * dump and restore 12 | * dump to local filesystem or to SMB server 13 | * select database user and password 14 | * connect to any container running on the same system 15 | * select how often to run a dump 16 | * select when to start the first dump, whether time of day or relative to container start time 17 | * prune backups older than a specific time period or quantity 18 | 19 | Please see [CONTRIBUTORS.md](./CONTRIBUTORS.md) for a list of contributors. 20 | 21 | ## Versions 22 | 23 | This is the latest version, based on the complete rebuild of the codebase in golang for the 1.0.0 release, 24 | completed in late 2023. 25 | 26 | ## Support 27 | 28 | Support is available at the [databack Slack channel](http://databack.slack.com); register [here](https://join.slack.com/t/databack/shared_invite/zt-1cnbo2zfl-0dQS895icOUQy31RAruf7w). We accept issues here and general support questions on Slack. 29 | 30 | If you are interested in commercial support, please contact us via Slack above. 31 | 32 | ## Running `mysql-backup` 33 | 34 | `mysql-backup` is available both as a single standalone binary and as a container image. 35 | 36 | ## Backup 37 | 38 | To run a backup, launch `mysql-backup` - as a container or as a binary - with the correct parameters.
39 | 40 | For example: 41 | 42 | ````bash 43 | docker run -d --restart=always -e DB_DUMP_FREQUENCY=60 -e DB_DUMP_BEGIN=2330 -e DB_DUMP_TARGET=/local/file/path -e DB_SERVER=my-db-address -v /local/file/path:/db databack/mysql-backup dump 44 | 45 | # or 46 | 47 | mysql-backup dump --frequency=60 --begin=2330 --target=/local/file/path --server=my-db-address 48 | 49 | # or to connect to a local mysqld via the unix domain socket as the current user 50 | 51 | mysql-backup dump --frequency=60 --begin=2330 --target=/local/file/path --server=/run/mysqld/mysqld.sock 52 | ```` 53 | 54 | Or `mysql-backup --config-file=/path/to/config/file.yaml` where `/path/to/config/file.yaml` is a file 55 | with the following contents: 56 | 57 | ```yaml 58 | server: my-db-address 59 | dump: 60 | frequency: 60 61 | begin: 2330 62 | target: /local/file/path 63 | ``` 64 | 65 | The above will run a dump every 60 minutes, beginning at the next 2330 local time, from the database accessible in the container `my-db-address`. 66 | 67 | ````bash 68 | docker run -d --restart=always -e DB_USER=user123 -e DB_PASS=pass123 -e DB_DUMP_FREQUENCY=60 -e DB_DUMP_BEGIN=2330 -e DB_DUMP_TARGET=/db -e DB_SERVER=my-db-address -v /local/file/path:/db databack/mysql-backup dump 69 | 70 | # or 71 | 72 | mysql-backup dump --user=user123 --pass=pass123 --frequency=60 --begin=2330 --target=/local/file/path --server=my-db-address --port=3306 73 | ```` 74 | 75 | See [backup](./docs/backup.md) for a more detailed description of performing backups. 76 | 77 | See [configuration](./docs/configuration.md) for a detailed list of all configuration options. 78 | 79 | 80 | ## Restore 81 | 82 | To perform a restore, you simply run the process in reverse. You still connect to a database, but instead of the 83 | dump command, you pass it the restore command. Instead of a dump target, you pass it a restore target. 84 | 85 | ### Dump Restore 86 | 87 | If you wish to restore into an existing database, you can use mysql-backup to do so. 88 | 89 | You need only the following environment variables: 90 | 91 | __You should consider the [use of `--env-file=`](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables-e-env-env-file) to keep your secrets out of your shell history__ 92 | 93 | * `DB_SERVER`: hostname or unix domain socket path (starting with a slash) to connect to database. Required. 94 | * `DB_PORT`: port to use to connect to database. Optional, defaults to `3306` 95 | * `DB_USER`: username for the database 96 | * `DB_PASS`: password for the database 97 | * `DB_DUMP_INCLUDE`: names of databases to restore separated by spaces. Required if `SINGLE_DATABASE=true`. 98 | * `SINGLE_DATABASE`: If set to `true`, `DB_DUMP_INCLUDE` is required and must contain exactly one database name. The mysql command will then run with the `--database=$DB_DUMP_INCLUDE` flag. This avoids the need for a `USE <database>;` statement, which is useful when restoring from a file saved with `SINGLE_DATABASE` set to `true`. 99 | * `DB_RESTORE_TARGET`: path to the actual restore file, which should be a compressed dump file. The target can be an absolute path (which should be volume mounted), or an SMB or S3 URL, similar to the dump target. 100 | * `DB_DEBUG`: if `true`, dumps copious output to the container logs while restoring. 101 | * To use the S3 driver, `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_DEFAULT_REGION` will need to be defined. 102 | 103 | Examples: 104 | 105 | 1.
Restore from a local file: `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=/backup/db_backup_201509271627.gz -v /local/path:/backup databack/mysql-backup restore` 106 | 2. Restore from a local file using SSL: `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=/backup/db_backup_201509271627.gz -e RESTORE_OPTS="--ssl-cert /certs/client-cert.pem --ssl-key /certs/client-key.pem" -v /local/path:/backup -v /local/certs:/certs databack/mysql-backup restore` 107 | 3. Restore from an SMB file: `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=smb://smbserver/share1/backup/db_backup_201509271627.gz databack/mysql-backup restore` 108 | 4. Restore from an S3 file: `docker run -e DB_SERVER=gotodb.example.com -e AWS_ACCESS_KEY_ID=awskeyid -e AWS_SECRET_ACCESS_KEY=secret -e AWS_REGION=eu-central-1 -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=s3://bucket/path/db_backup_201509271627.gz databack/mysql-backup restore` 109 | 110 | ### Restore specific databases 111 | If you have multiple schemas in your database, you can choose to restore only some of them. 112 | 113 | To do this, you must restore using `DB_DUMP_INCLUDE` to specify the schemas you want restored. 114 | 115 | When doing this, schemas will be restored with their original name. To restore under other names, you must use `SINGLE_DATABASE=true` on both dump and restore, and you can only do it one schema at a time. 116 | 117 | #### Examples: 118 | 1. Dump a multi-schema database and restore only some of them: 119 | * `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -v /local/path:/backup databack/mysql-backup dump` 120 | * `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=/backup/db_backup_201509271627.gz -e DB_DUMP_INCLUDE="database1 database3" -v /local/path:/backup databack/mysql-backup restore` 121 | 2. Dump and restore a schema under a different name: 122 | * `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e SINGLE_DATABASE=true -e DB_DUMP_INCLUDE=database1 -v /local/path:/backup databack/mysql-backup dump` 123 | * `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=/backup/db_backup_201509271627.gz -e SINGLE_DATABASE=true -e DB_DUMP_INCLUDE=newdatabase1 -v /local/path:/backup databack/mysql-backup restore` 124 | 125 | See [restore](./docs/restore.md) for a more detailed description of performing restores. 126 | 127 | See [configuration](./docs/configuration.md) for a detailed list of all configuration options. 128 | 129 | ## License 130 | Released under the MIT License.
131 | Copyright Avi Deitcher https://github.com/deitch 132 | -------------------------------------------------------------------------------- /cmd/common_test.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "reflect" 6 | 7 | "github.com/databacker/mysql-backup/pkg/core" 8 | "github.com/go-test/deep" 9 | log "github.com/sirupsen/logrus" 10 | "github.com/stretchr/testify/mock" 11 | ) 12 | 13 | type mockExecs struct { 14 | mock.Mock 15 | logger *log.Logger 16 | } 17 | 18 | func newMockExecs() *mockExecs { 19 | m := &mockExecs{} 20 | return m 21 | } 22 | 23 | func (m *mockExecs) Dump(ctx context.Context, opts core.DumpOptions) (core.DumpResults, error) { 24 | args := m.Called(opts) 25 | return core.DumpResults{}, args.Error(0) 26 | } 27 | 28 | func (m *mockExecs) Restore(ctx context.Context, opts core.RestoreOptions) error { 29 | args := m.Called(opts) 30 | return args.Error(0) 31 | } 32 | 33 | func (m *mockExecs) Prune(ctx context.Context, opts core.PruneOptions) error { 34 | args := m.Called(opts) 35 | return args.Error(0) 36 | } 37 | func (m *mockExecs) Timer(timerOpts core.TimerOptions, cmd func() error) error { 38 | args := m.Called(timerOpts) 39 | err := args.Error(0) 40 | if err != nil { 41 | return err 42 | } 43 | return cmd() 44 | } 45 | 46 | func (m *mockExecs) SetLogger(logger *log.Logger) { 47 | m.logger = logger 48 | } 49 | 50 | func (m *mockExecs) GetLogger() *log.Logger { 51 | return m.logger 52 | } 53 | 54 | func equalIgnoreFields(a, b interface{}, fields []string) bool { 55 | va := reflect.ValueOf(a) 56 | vb := reflect.ValueOf(b) 57 | 58 | // Check if both a and b are struct types 59 | if va.Kind() != reflect.Struct || vb.Kind() != reflect.Struct { 60 | return false 61 | } 62 | 63 | // Make a map of fields to ignore for quick lookup 64 | ignoreMap := make(map[string]bool) 65 | for _, f := range fields { 66 | ignoreMap[f] = true 67 | } 68 | 69 | // Compare fields that are not in the ignore list 70 | for i := 0; i < va.NumField(); i++ { 71 | field := va.Type().Field(i).Name 72 | if !ignoreMap[field] { 73 | vaField := va.Field(i).Interface() 74 | vbField := vb.Field(i).Interface() 75 | diff := deep.Equal(vaField, vbField) 76 | if diff != nil { 77 | return false 78 | } 79 | } 80 | } 81 | 82 | return true 83 | } 84 | -------------------------------------------------------------------------------- /cmd/prune.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | 8 | "github.com/google/uuid" 9 | "github.com/spf13/cobra" 10 | "github.com/spf13/viper" 11 | 12 | "github.com/databacker/mysql-backup/pkg/core" 13 | "github.com/databacker/mysql-backup/pkg/util" 14 | ) 15 | 16 | func pruneCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, error) { 17 | if cmdConfig == nil { 18 | return nil, fmt.Errorf("cmdConfig is nil") 19 | } 20 | var v *viper.Viper 21 | var cmd = &cobra.Command{ 22 | Use: "prune", 23 | Short: "prune older backups", 24 | Long: `Prune older backups based on a retention period. Can be number of backups or time-based. 25 | For time-based, the format is: 1d, 1w, 1m, 1y for days, weeks, months, years, respectively. 26 | For number-based, the format is: 1c, 2c, 3c, etc. for the count of backups to keep. 27 | 28 | For time-based, prune always converts the time to hours, and then rounds up. 
This means that 2d is treated as 48h, and 29 | any backups must be at least 48 full hours old to be pruned. 30 | `, 31 | PreRun: func(cmd *cobra.Command, args []string) { 32 | bindFlags(cmd, v) 33 | }, 34 | RunE: func(cmd *cobra.Command, args []string) error { 35 | cmdConfig.logger.Debug("starting prune") 36 | ctx := context.Background() 37 | // this is the tracer that we will use throughout the entire run 38 | defer func() { 39 | tp := getTracerProvider() 40 | _ = tp.ForceFlush(ctx) 41 | _ = tp.Shutdown(ctx) 42 | }() 43 | tracer := getTracer("prune") 44 | ctx = util.ContextWithTracer(ctx, tracer) 45 | _, startupSpan := tracer.Start(ctx, "startup") 46 | retention := v.GetString("retention") 47 | targetURLs := v.GetStringSlice("target") 48 | 49 | targets, err := parseTargets(targetURLs, cmdConfig) 50 | if err != nil { 51 | return fmt.Errorf("error parsing targets: %v", err) 52 | } 53 | if len(targets) == 0 { 54 | return fmt.Errorf("no targets specified") 55 | } 56 | 57 | if retention == "" && cmdConfig.configuration != nil && cmdConfig.configuration.Prune.Retention != nil { 58 | retention = *cmdConfig.configuration.Prune.Retention 59 | } 60 | 61 | // timer options 62 | timerOpts := parseTimerOptions(v, cmdConfig.configuration) 63 | 64 | var executor execs 65 | executor = &core.Executor{} 66 | if passedExecs != nil { 67 | executor = passedExecs 68 | } 69 | executor.SetLogger(cmdConfig.logger) 70 | // done with the startup 71 | startupSpan.End() 72 | 73 | if err := executor.Timer(timerOpts, func() error { 74 | uid := uuid.New() 75 | return executor.Prune(ctx, core.PruneOptions{Targets: targets, Retention: retention, Run: uid}) 76 | }); err != nil { 77 | return fmt.Errorf("error running prune: %w", err) 78 | } 79 | executor.GetLogger().Info("Pruning complete") 80 | return nil 81 | }, 82 | } 83 | // target - where the backup is 84 | v = viper.New() 85 | v.SetEnvPrefix("db_restore") 86 | v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) 87 | v.AutomaticEnv() 88 | 89 | flags := cmd.Flags() 90 | flags.String("target", "", "full URL target to the directory where the backups are stored. Can be a file URL, or a reference to a target in the configuration file, e.g. `config://targetname`.") 91 | 92 | // retention 93 | flags.String("retention", "", "Retention period for backups. REQUIRED. Can be number of backups or time-based. For time-based, the format is: 1d, 1w, 1m, 1y for days, weeks, months, years, respectively. For number-based, the format is: 1c, 2c, 3c, etc. for the count of backups to keep.") 94 | 95 | // frequency 96 | flags.Int("frequency", defaultFrequency, "how often to run prunes, in minutes") 97 | 98 | // begin 99 | flags.String("begin", defaultBegin, "What time to do the first prune. Must be in one of two formats: Absolute: HHMM, e.g. `2330` or `0415`; or Relative: +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` (in an hour and a half)") 100 | 101 | // cron 102 | flags.String("cron", "", "Set the prune schedule using standard [crontab syntax](https://en.wikipedia.org/wiki/Cron), a single line.") 103 | 104 | // once 105 | flags.Bool("once", false, "Override all other settings and run the prune once immediately and exit. Useful if you use an external scheduler (e.g.
as part of an orchestration solution like Cattle or Docker Swarm or [kubernetes cron jobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/)) and don't want the container to do the scheduling internally.") 106 | 107 | return cmd, nil 108 | } 109 | -------------------------------------------------------------------------------- /cmd/prune_test.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "io" 5 | "net/url" 6 | "testing" 7 | 8 | "github.com/databacker/mysql-backup/pkg/core" 9 | "github.com/databacker/mysql-backup/pkg/storage" 10 | "github.com/databacker/mysql-backup/pkg/storage/file" 11 | "github.com/stretchr/testify/mock" 12 | ) 13 | 14 | func TestPruneCmd(t *testing.T) { 15 | t.Parallel() 16 | fileTarget := "file:///foo/bar" 17 | fileTargetURL, _ := url.Parse(fileTarget) 18 | 19 | tests := []struct { 20 | name string 21 | args []string // "prune" will be prepended automatically 22 | config string 23 | wantErr bool 24 | expectedPruneOptions core.PruneOptions 25 | expectedTimerOptions core.TimerOptions 26 | }{ 27 | {"invalid target URL", []string{"--target", "def"}, "", true, core.PruneOptions{}, core.TimerOptions{}}, 28 | {"file URL", []string{"--target", fileTarget, "--retention", "1h"}, "", false, core.PruneOptions{Targets: []storage.Storage{file.New(*fileTargetURL)}, Retention: "1h"}, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin}}, 29 | {"config file", []string{"--config-file", "testdata/config.yml"}, "", false, core.PruneOptions{Targets: []storage.Storage{file.New(*fileTargetURL)}, Retention: "1h"}, core.TimerOptions{Frequency: defaultFrequency, Begin: defaultBegin}}, 30 | } 31 | 32 | for _, tt := range tests { 33 | t.Run(tt.name, func(t *testing.T) { 34 | m := newMockExecs() 35 | m.On("Prune", mock.MatchedBy(func(pruneOpts core.PruneOptions) bool { 36 | if equalIgnoreFields(pruneOpts, tt.expectedPruneOptions, []string{"Run"}) { 37 | return true 38 | } 39 | t.Errorf("pruneOpts compare failed: %#v %#v", pruneOpts, tt.expectedPruneOptions) 40 | return false 41 | })).Return(nil) 42 | m.On("Timer", tt.expectedTimerOptions).Return(nil) 43 | cmd, err := rootCmd(m) 44 | if err != nil { 45 | t.Fatal(err) 46 | } 47 | cmd.SetOutput(io.Discard) 48 | cmd.SetArgs(append([]string{"prune"}, tt.args...)) 49 | err = cmd.Execute() 50 | switch { 51 | case err == nil && tt.wantErr: 52 | t.Fatal("missing error") 53 | case err != nil && !tt.wantErr: 54 | t.Fatal(err) 55 | case err == nil: 56 | m.AssertExpectations(t) 57 | } 58 | }) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /cmd/restore.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | 8 | "github.com/google/uuid" 9 | "github.com/spf13/cobra" 10 | "github.com/spf13/viper" 11 | 12 | "github.com/databacker/api/go/api" 13 | "github.com/databacker/mysql-backup/pkg/compression" 14 | "github.com/databacker/mysql-backup/pkg/core" 15 | "github.com/databacker/mysql-backup/pkg/storage" 16 | "github.com/databacker/mysql-backup/pkg/util" 17 | ) 18 | 19 | func restoreCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, error) { 20 | if cmdConfig == nil { 21 | return nil, fmt.Errorf("cmdConfig is nil") 22 | } 23 | var v *viper.Viper 24 | var cmd = &cobra.Command{ 25 | Use: "restore", 26 | Short: "restore a dump", 27 | Long: `Restore a database dump from a given location.`, 28
| PreRun: func(cmd *cobra.Command, args []string) { 29 | bindFlags(cmd, v) 30 | }, 31 | Args: cobra.MinimumNArgs(1), 32 | RunE: func(cmd *cobra.Command, args []string) error { 33 | cmdConfig.logger.Debug("starting restore") 34 | ctx := context.Background() 35 | tracer := getTracer("restore") 36 | defer func() { 37 | tp := getTracerProvider() 38 | _ = tp.ForceFlush(ctx) 39 | _ = tp.Shutdown(ctx) 40 | }() 41 | ctx = util.ContextWithTracer(ctx, tracer) 42 | _, startupSpan := tracer.Start(ctx, "startup") 43 | targetFile := args[0] 44 | target := v.GetString("target") 45 | // get database names and mappings 46 | databasesMap := make(map[string]string) 47 | databases := strings.TrimSpace(v.GetString("database")) 48 | if databases != "" { 49 | for _, db := range strings.Split(databases, ",") { 50 | parts := strings.SplitN(db, ":", 2) 51 | if len(parts) != 2 { 52 | return fmt.Errorf("invalid database mapping: %s", db) 53 | } 54 | databasesMap[parts[0]] = parts[1] 55 | } 56 | } 57 | 58 | // compression algorithm: check config, then CLI/env var overrides 59 | var ( 60 | compressionAlgo string 61 | compressor compression.Compressor 62 | err error 63 | ) 64 | if cmdConfig.configuration != nil && cmdConfig.configuration.Dump.Compression != nil { 65 | compressionAlgo = *cmdConfig.configuration.Dump.Compression 66 | } 67 | compressionVar := v.GetString("compression") 68 | if compressionVar != "" { 69 | compressionAlgo = compressionVar 70 | } 71 | if compressionAlgo != "" { 72 | compressor, err = compression.GetCompressor(compressionAlgo) 73 | if err != nil { 74 | return fmt.Errorf("failure to get compression '%s': %v", compressionAlgo, err) 75 | } 76 | } 77 | 78 | // target URL can reference one from the config file, or an absolute one 79 | // if it's not in the config file, it's an absolute one 80 | // if it is in the config file, it's a reference to one of the targets in the config file 81 | u, err := util.SmartParse(target) 82 | if err != nil { 83 | return fmt.Errorf("invalid target url: %v", err) 84 | } 85 | var store storage.Storage 86 | if u.Scheme == "config" { 87 | // get the target name 88 | targetName := u.Host 89 | // get the target from the config file 90 | if cmdConfig.configuration == nil { 91 | return fmt.Errorf("no configuration file found") 92 | } 93 | var targetStructures map[string]api.Target 94 | if cmdConfig.configuration.Targets != nil { 95 | targetStructures = *cmdConfig.configuration.Targets 96 | } 97 | 98 | if target, ok := targetStructures[targetName]; !ok { 99 | return fmt.Errorf("target %s not found in configuration", targetName) 100 | } else { 101 | if store, err = storage.FromTarget(target); err != nil { 102 | return fmt.Errorf("error creating storage for target %s: %v", targetName, err) 103 | } 104 | } 105 | // need to add the path to the specific target file 106 | } else { 107 | // parse the target URL 108 | store, err = storage.ParseURL(target, cmdConfig.creds) 109 | if err != nil { 110 | return fmt.Errorf("invalid target url: %v", err) 111 | } 112 | } 113 | var executor execs 114 | executor = &core.Executor{} 115 | if passedExecs != nil { 116 | executor = passedExecs 117 | } 118 | executor.SetLogger(cmdConfig.logger) 119 | 120 | // at this point, any errors should not have usage 121 | cmd.SilenceUsage = true 122 | uid := uuid.New() 123 | restoreOpts := core.RestoreOptions{ 124 | Target: store, 125 | TargetFile: targetFile, 126 | Compressor: compressor, 127 | DatabasesMap: databasesMap, 128 | DBConn: cmdConfig.dbconn, 129 | Run: uid, 130 | } 131 | startupSpan.End() 132 |
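// run the restore with the options assembled above; a failure aborts the command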
if err := executor.Restore(ctx, restoreOpts); err != nil { 133 | return fmt.Errorf("error restoring: %v", err) 134 | } 135 | executor.GetLogger().Info("Restore complete") 136 | return nil 137 | }, 138 | } 139 | // target - where the backup is 140 | v = viper.New() 141 | v.SetEnvPrefix("db_restore") 142 | v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) 143 | v.AutomaticEnv() 144 | 145 | flags := cmd.Flags() 146 | flags.String("target", "", "full URL target to the backup that you wish to restore") 147 | if err := cmd.MarkFlagRequired("target"); err != nil { 148 | return nil, err 149 | } 150 | 151 | // compression 152 | flags.String("compression", defaultCompression, "Compression to use. Supported are: `gzip`, `bzip2`, `none`") 153 | 154 | // specific database to which to restore 155 | flags.String("database", "", "Mapping of from:to database names to which to restore, comma-separated, e.g. foo:bar,buz:qux. Replaces the `USE <database>` clauses in a backup file. If blank, uses the file as is.") 156 | 157 | // pre-restore scripts 158 | flags.String("pre-restore-scripts", "", "Directory wherein any file ending in `.sh` will be run after retrieving the dump file but before the restore.") 159 | 160 | // post-restore scripts 161 | flags.String("post-restore-scripts", "", "Directory wherein any file ending in `.sh` will be run post-restore.") 162 | 163 | return cmd, nil 164 | } 165 | -------------------------------------------------------------------------------- /cmd/restore_test.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "io" 5 | "net/url" 6 | "testing" 7 | 8 | "github.com/databacker/mysql-backup/pkg/compression" 9 | "github.com/databacker/mysql-backup/pkg/core" 10 | "github.com/databacker/mysql-backup/pkg/database" 11 | "github.com/databacker/mysql-backup/pkg/storage/file" 12 | "github.com/stretchr/testify/mock" 13 | ) 14 | 15 | func TestRestoreCmd(t *testing.T) { 16 | t.Parallel() 17 | 18 | fileTarget := "file:///foo/bar" 19 | fileTargetURL, _ := url.Parse(fileTarget) 20 | 21 | tests := []struct { 22 | name string 23 | args []string // "restore" will be prepended automatically 24 | config string 25 | wantErr bool 26 | expectedRestoreOptions core.RestoreOptions 27 | //expectedTarget storage.Storage 28 | //expectedTargetFile string 29 | //expectedDbconn database.Connection 30 | //expectedDatabasesMap map[string]string 31 | //expectedCompressor compression.Compressor 32 | }{ 33 | {"missing server and target options", []string{""}, "", true, core.RestoreOptions{}}, 34 | {"invalid target URL", []string{"--server", "abc", "--target", "def"}, "", true, core.RestoreOptions{}}, 35 | {"valid URL missing dump filename", []string{"--server", "abc", "--target", "file:///foo/bar"}, "", true, core.RestoreOptions{}}, 36 | {"valid file URL", []string{"--server", "abc", "--target", fileTarget, "filename.tgz", "--verbose", "2"}, "", false, core.RestoreOptions{Target: file.New(*fileTargetURL), TargetFile: "filename.tgz", DBConn: database.Connection{Host: "abc", Port: defaultPort}, DatabasesMap: map[string]string{}, Compressor: &compression.GzipCompressor{}}}, 37 | } 38 | 39 | for _, tt := range tests { 40 | t.Run(tt.name, func(t *testing.T) { 41 | m := newMockExecs() 42 | m.On("Restore", mock.MatchedBy(func(restoreOpts core.RestoreOptions) bool { 43 | if equalIgnoreFields(restoreOpts, tt.expectedRestoreOptions, []string{"Run"}) { 44 | return true 45 | } 46 | t.Errorf("restoreOpts compare failed: %#v %#v", restoreOpts, tt.expectedRestoreOptions) 47 |
return false 48 | })).Return(nil) 49 | cmd, err := rootCmd(m) 50 | if err != nil { 51 | t.Fatal(err) 52 | } 53 | cmd.SetOutput(io.Discard) 54 | cmd.SetArgs(append([]string{"restore"}, tt.args...)) 55 | err = cmd.Execute() 56 | switch { 57 | case err == nil && tt.wantErr: 58 | t.Fatal("missing error") 59 | case err != nil && !tt.wantErr: 60 | t.Fatal(err) 61 | case err == nil: 62 | m.AssertExpectations(t) 63 | } 64 | 65 | }) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /cmd/testdata/config.yml: -------------------------------------------------------------------------------- 1 | version: config.databack.io/v1 2 | kind: local 3 | 4 | spec: 5 | database: 6 | server: abcd 7 | port: 3306 8 | credentials: 9 | username: user2 10 | password: xxxx2 11 | 12 | targets: 13 | local: 14 | type: file 15 | url: file:///foo/bar 16 | other: 17 | type: file 18 | url: /foo/bar 19 | 20 | dump: 21 | targets: 22 | - local 23 | 24 | prune: 25 | retention: "1h" -------------------------------------------------------------------------------- /cmd/testdata/pattern.yml: -------------------------------------------------------------------------------- 1 | version: config.databack.io/v1 2 | kind: local 3 | 4 | spec: 5 | database: 6 | server: abcd 7 | port: 3306 8 | credentials: 9 | username: user2 10 | password: xxxx2 11 | 12 | targets: 13 | local: 14 | type: file 15 | url: file:///foo/bar 16 | other: 17 | type: file 18 | url: /foo/bar 19 | 20 | dump: 21 | filenamePattern: "foo_{{ .now }}.{{ .compression }}" 22 | targets: 23 | - local 24 | 25 | prune: 26 | retention: "1h" -------------------------------------------------------------------------------- /cmd/tracer.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | 6 | "go.opentelemetry.io/otel" 7 | sdktrace "go.opentelemetry.io/otel/sdk/trace" 8 | "go.opentelemetry.io/otel/trace" 9 | ) 10 | 11 | const ( 12 | appName = "mysql-backup" 13 | ) 14 | 15 | // getTracer gets a global tracer for the application, which incorporates both the name of the application 16 | // and the command that is being run. 17 | func getTracer(cmd string) trace.Tracer { 18 | return getTracerProvider().Tracer(fmt.Sprintf("%s/%s", appName, cmd)) 19 | } 20 | 21 | func getTracerProvider() *sdktrace.TracerProvider { 22 | tp, ok := otel.GetTracerProvider().(*sdktrace.TracerProvider) 23 | if !ok { 24 | return nil 25 | } 26 | return tp 27 | } 28 | -------------------------------------------------------------------------------- /docs/container_considerations.md: -------------------------------------------------------------------------------- 1 | # Container Considerations 2 | 3 | There are certain special considerations when running in a container. 4 | 5 | ## Permissions 6 | 7 | By default, the backup/restore process does **not** run as root (UID 0). Whenever possible, you should run processes (not just in containers) as users other than root. In this case, it runs as username `appuser` with UID/GID `1005`. 8 | 9 | In most scenarios, this will not affect your backup process negatively. However, if you are using the "Local" dump target, i.e. your `DB_DUMP_TARGET` starts with `/` - and, most likely, is a volume mounted into the container - you can run into permissions issues. For example, if your mounted directory is owned by root on the host, then the backup process will be unable to write to it.
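A quick way to check write access, assuming your data directory is mounted at `/db` (the host path is illustrative), is to override the entrypoint and run a shell as the image's default user:

```bash
# runs as appuser (UID 1005); "Permission denied" here means the backup process will hit the same issue
docker run --rm --entrypoint /bin/sh -v /local/file/path:/db databack/mysql-backup -c 'touch /db/.writetest && echo writable'
```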
10 | 11 | In this case, you have two options: 12 | 13 | * Run the container as root, `docker run --user 0 ...` or, in `docker-compose.yml`, `user: "0"` 14 | * Ensure your mounted directory is writable as UID or GID `1005`. 15 | 16 | ## Nice 17 | 18 | mysql backups can be resource intensive. When running using the CLI, it is up to you to use 19 | `nice`/`ionice` to control it, if you so desire. If running in a container, you can tell the 20 | container to be "nicer" by setting `NICE=true`. 21 | 22 | For more information, see https://13rac1.com/articles/2013/03/forcing-mysqldump-always-be-nice-cpu-and-io/ 23 | 24 | ## File Dump Target 25 | 26 | When backing up, the dump target is the location where the dump should be placed. When running in a container, 27 | it defaults to `/backup` in the container. Of course, having the backup in the container does not help very much, so we very strongly recommend you volume mount it outside somewhere. For example: 28 | 29 | ```bash 30 | docker run -v /path/to/backup:/mybackup -e DB_DUMP_TARGET=/mybackup ... 31 | ``` -------------------------------------------------------------------------------- /docs/contributing.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ## Build Process 4 | 5 | This github repo is the source for the mysql-backup image. The actual image is stored on the docker hub at `databack/mysql-backup`, and is rebuilt automatically with each commit to the source via webhooks. 6 | 7 | There are two builds: one for the version based on the git tag, and another for the particular version number. 8 | 9 | ## Tests 10 | 11 | The tests all run in docker containers, to avoid the need to install anything other than `make` and `docker`, and can even run over remote docker connections, avoiding any local bind-mounts. To run all tests: 12 | 13 | ``` 14 | make test 15 | ``` 16 | 17 | To run with debugging: 18 | 19 | ``` 20 | make test DEBUG=debug 21 | ``` 22 | 23 | The above will generate _copious_ output, so you might want to redirect stdout and stderr to a file. 24 | 25 | This runs each of the several testing targets, each of which is a script in `test/test_*.sh`, which sets up tests, builds containers, runs the tests, and collects the output. 26 | -------------------------------------------------------------------------------- /docs/database_address.md: -------------------------------------------------------------------------------- 1 | # Connecting to the Database 2 | 3 | In order to perform the actual dump or restore, `mysql-backup` needs to connect to the database. You **must** pass the database address via configuration. For example, to set the address to `my-db-address`: 4 | 5 | * Environment variable: `DB_SERVER=my-db-address` 6 | * CLI flag: `--server=my-db-address` 7 | * Config file: 8 | ```yaml 9 | db-server: my-db-address 10 | ``` 11 | 12 | The address itself, in the above example `my-db-address`, can be a hostname, IP address, or path to a unix domain socket, as long as it is 13 | accessible from where `mysql-backup` runs. 14 | 15 | The default port is `3306`, the normal default port for mysql. You can override the default port of `3306` via 16 | configuration.
For example, to set the port to `3456`: 17 | 18 | * Environment variable: `DB_PORT=3456` 19 | * CLI flag: `--port=3456` 20 | * Config file: 21 | ```yaml 22 | db-port: 3456 23 | ``` 24 | -------------------------------------------------------------------------------- /docs/format.md: -------------------------------------------------------------------------------- 1 | The dump file is a tar.gz with one file per database. 2 | 3 | The backup file _always_ dumps at the database server level, i.e. it will 4 | call `USE <database>` for each database to be backed up, 5 | and will include `CREATE DATABASE` and `USE` statements in the backup file. 6 | 7 | This is equivalent to passing [`--databases <names>`](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html#option_mysqldump_databases) to `mysqldump`: 8 | 9 | > With this option, it treats all name arguments as database names. CREATE DATABASE and USE statements are included in the output before each new database. 10 | 11 | Or [`--all-databases`](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html#option_mysqldump_all-databases): 12 | 13 | > This is the same as using the --databases option and naming all the databases on the command line. 14 | 15 | 16 | -------------------------------------------------------------------------------- /docs/logging.md: -------------------------------------------------------------------------------- 1 | # Logging 2 | 3 | Logging is provided on standard out (stdout) and standard error (stderr). The log level can be set 4 | using `--v` to the following levels: 5 | 6 | - `--v=0` (default): log level set to `INFO` 7 | - `--v=1`: log level set to `DEBUG` 8 | - `--v=2`: log level set to `TRACE` 9 | 10 | Log output and basic metrics can be sent to a remote service, using the 11 | [configuration options](./configuration.md). 12 | 13 | The remote log service includes the following information: 14 | 15 | - backup start timestamp 16 | - backup config, including command-line options (scrubbed of sensitive data) 17 | - backup logs 18 | - backup success or failure timestamp and duration 19 | 20 | Log levels up to debug are sent to the remote service. Trace logs are not sent to the remote service, and are 21 | used for local debugging only. 22 | -------------------------------------------------------------------------------- /docs/prune.md: -------------------------------------------------------------------------------- 1 | # Pruning 2 | 3 | Pruning is the process of removing backups that are no longer needed. 4 | 5 | mysql-backup does **not** do this by default; it is up to you to enable this feature, if you want it. 6 | 7 | ## Launching Pruning 8 | 9 | Pruning happens only in the following scenarios: 10 | 11 | * During pruning runs 12 | * During backup runs 13 | 14 | It does not occur during restore runs. 15 | 16 | ### Pruning Runs 17 | 18 | You can start `mysql-backup` with the command `prune` to run a pruning operation. It will prune any backups that are no longer needed. 19 | 20 | Like backups, it can run once and then exit, or it can run on a schedule. 21 | 22 | It uses the same configuration options for scheduling as backups; see the [scheduling](./scheduling.md) documentation for more information, 23 | specifically the section about [Scheduling Options](./scheduling.md#scheduling-options). 24 | 25 | ### Backup Runs 26 | 27 | When running `mysql-backup` in backup mode, it can _optionally_ also prune older backups before each backup run. 28 | When enabled, it will prune any backups that fit the pruning criteria.
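For example, a one-shot prune from the command line might look like this (the target path is illustrative; the retention format is described under Pruning Criteria below):

```bash
# prune once, immediately, keeping only backups from the last four weeks
mysql-backup prune --target=file:///backup --retention=4w --once
```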
29 | 30 | ## Pruning Criteria 31 | 32 | Pruning can be on the basis of the _age_ of a specific backup, or the _number_ of backups. Both are set by the configuration setting: 33 | 34 | * Environment variable: `DB_DUMP_RETENTION=<value>` 35 | * CLI flag: `dump --retention=<value>` or `prune --retention=<value>` 36 | * Config file: 37 | ```yaml 38 | prune: 39 | retention: <value> 40 | ``` 41 | 42 | The value of retention is always an integer followed by a letter. The letter can be one of: 43 | 44 | * `h` - hours, e.g. `2h` 45 | * `d` - days, e.g. `3d` 46 | * `w` - weeks, e.g. `4w` 47 | * `m` - months, e.g. `3m` 48 | * `y` - years, e.g. `5y` 49 | * `c` - count, how many backups to keep, e.g. `10c`; this could have been simply `10`, but was kept as `c` to avoid accidental confusion with the other options. 50 | 51 | Most of these are interchangeable, e.g. `3d` is the same as `72h`, and `4w` is the same as `28d` is the same as `672h`. 52 | 53 | When calculating whether or not to prune, `mysql-backup` __always__ converts the amount to hours, and then errs on the side of caution. 54 | For example, if provided `7d`, it will convert that to `168h`, and then prune any backups older than 168 full hours. If a backup is 167 hours and 59 minutes old, it 55 | will not be pruned. 56 | 57 | ## Determining backup age 58 | 59 | Pruning depends on the name of the backup file, rather than the timestamp on the target filesystem, as the latter can be unreliable. 60 | This means that the filename must be of a known pattern. 61 | 62 | As of this writing, pruning only works for backup files whose filename uses the default naming scheme, as described in 63 | ["Dump File" in backup documentation](./backup.md#dump-file). We hope to support custom filenames in the future. 64 | -------------------------------------------------------------------------------- /docs/restore.md: -------------------------------------------------------------------------------- 1 | # Restoring 2 | 3 | Restoring uses the same database, SMB and S3 configuration options as [backup](./backup.md). 4 | 5 | Like dump, you point it at a target, which is a location for backups, select a backup file, 6 | and it will restore the database from that file in that target. 7 | The primary difference is the use of a restore target, instead of a dump target. This follows the same syntax as 8 | the dump target, but instead of a dump _directory_, it is the actual restore _file_, which should be a 9 | compressed dump file. 10 | 11 | In order to restore, you need the following: 12 | 13 | * A storage target - directory, SMB or S3 - to restore from 14 | * A dump file in the storage target, which can come from any of your targets or a local file (which itself is a target) 15 | * A database to restore to, along with access credentials 16 | * Optionally, pre- and post-restore processing scripts 17 | 18 | ## Configuring restore 19 | 20 | `restore` must **always** have one argument, the name of the file in the target from which to restore. E.g.: 21 | 22 | ```bash 23 | $ restore db_backup_201509271627.gz 24 | ``` 25 | 26 | You can provide the target via environment variables, CLI or the config file.
27 | 28 | ### Environment variables and CLI 29 | 30 | From a local file: 31 | 32 | * Environment variable: `DB_RESTORE_TARGET=/backup/ restore db_backup_201509271627.gz` 33 | * Command line: `restore --target=/backup/ db_backup_201509271627.gz` 34 | 35 | From S3: 36 | 37 | * Environment variable: `DB_RESTORE_TARGET=s3://mybucket/ restore db_backup_201509271627.gz` 38 | * Command line: `restore --target=s3://mybucket/ db_backup_201509271627.gz` 39 | 40 | From SMB: 41 | 42 | * Environment variable: `DB_RESTORE_TARGET=smb://myserver/myshare/ restore db_backup_201509271627.gz` 43 | * Command line: `restore --target=smb://myserver/myshare/ db_backup_201509271627.gz` 44 | 45 | The credentials are provided using the same CLI flags and/or environment variables as described in [backup](./backup.md). 46 | 47 | ### Config file 48 | 49 | A config file may already contain much useful information: 50 | 51 | * targets and their credentials 52 | * database connectivity information and credentials 53 | * pre- and post-restore processing scripts 54 | 55 | In order to restore from a config file, you provide a `--target` that references one of the existing targets. The URL 56 | begins with `config://` as the scheme, followed by the name of the target. For example, if you have a target named 57 | `mybucket`, then you can restore from it with: 58 | 59 | ```bash 60 | $ mysql-backup restore --target=config://mybucket/ db_backup_201509271627.gz 61 | ``` 62 | 63 | Since the target is `config://`, it will use the configuration information for that target from the config file. 64 | It references the target named `mybucket`, including the provided configuration and credentials. Within that target, 65 | it then retrieves the file named `db_backup_201509271627.gz` and restores it. 66 | 67 | As you did not specify a database, it will use the database information from the config file as well. 68 | 69 | ### Restore when using docker-compose 70 | 71 | `docker-compose` automagically creates a network when started. `docker run` simply attaches to the bridge network. If you are trying to communicate with a mysql container started by docker-compose, you'll need to specify the network in your command arguments. You can use `docker network ls` to see what network is being used, or you can declare a network in your docker-compose.yml. 72 | 73 | #### Example: 74 | 75 | `docker run -e DB_SERVER=gotodb.example.com -e DB_USER=user123 -e DB_PASS=pass123 -e DB_RESTORE_TARGET=/backup/ -v /local/path:/backup --network="skynet" databack/mysql-backup restore db_backup_201509271627.gz` 76 | 77 | ### Using docker secrets 78 | 79 | Environment variables used in this image can be passed in files as well. This is useful when you are using docker secrets for storing sensitive information. 80 | 81 | As you can set an environment variable with `-e ENVIRONMENT_VARIABLE=value`, you can also use `-e ENVIRONMENT_VARIABLE_FILE=/path/to/file`. Contents of that file will be assigned to the environment variable. 82 | 83 | **Example:** 84 | 85 | ```bash 86 | $ docker run -d \ 87 | -e DB_HOST_FILE=/run/secrets/DB_HOST \ 88 | -e DB_USER_FILE=/run/secrets/DB_USER \ 89 | -e DB_PASS_FILE=/run/secrets/DB_PASS \ 90 | -v /local/file/path:/db \ 91 | databack/mysql-backup 92 | ``` 93 | 94 | ### Restore pre and post processing 95 | 96 | As with backup pre- and post-processing, there is pre- and post-restore processing. 97 | 98 | This is useful if you need to restore a backup file that includes some files along with the database dump.
99 | For example, to restore a _WordPress_ install, you would uncompress a tarball containing 100 | the db backup and a second tarball with the contents of a WordPress install on 101 | `pre-restore`. Then on `post-restore`, uncompress the WordPress files into the container's web server root directory. 102 | 103 | In order to perform pre-restore processing, set the pre-restore processing directory, and `mysql-backup` 104 | will execute any file that ends in `.sh`. For example: 105 | 106 | * Environment variable: `DB_DUMP_PRE_RESTORE_SCRIPTS=/scripts.d/pre-restore` 107 | * Command line: `restore --pre-restore-scripts=/scripts.d/pre-restore` 108 | * Config file: 109 | ```yaml 110 | restore: 111 | scripts: 112 | pre-restore: /scripts.d/pre-restore 113 | ``` 114 | 115 | When running in a container, these are set automatically to `/scripts.d/pre-restore` and `/scripts.d/post-restore` 116 | respectively. 117 | 118 | For an example, take a look at the post-backup examples; all variables defined for post-backup scripts are available for pre-processing too. Also don't forget to add the same host volumes for `pre-restore` and `post-restore` directories as described for post-backup processing. 119 | 120 | ### Restoring to a different database 121 | 122 | The dump files normally contain a `CREATE DATABASE <name>` statement, to create the database if it 123 | does not exist, followed by a `USE <name>;` statement, which tells MySQL which database to continue the restore into. 124 | 125 | Sometimes, you wish to restore a dump file to a different database. 126 | For example, you dumped a database named `FOO`, and wish to restore it to a database named `BAR`. 127 | The dump file will have: 128 | 129 | ```sql 130 | CREATE DATABASE `FOO`; 131 | USE `FOO`; 132 | ``` 133 | 134 | `mysql-backup` can be instructed to restore `FOO` into `BAR` instead, as well as ensuring `BAR` exists. 135 | Use the `--database` option to provide a mapping of `FROM` to `TO` database names. 136 | 137 | Continuing our example, to restore a dump file that has `USE FOO;` in it: 138 | 139 | * Environment variable: `DB_RESTORE_DATABASE=FOO:BAR` 140 | * Command line: `restore --database=FOO:BAR` 141 | 142 | You can have multiple mappings by separating them with commas. For example: 143 | 144 | * Environment variable: `DB_RESTORE_DATABASE=FOO:BAR,BAZ:QUX` 145 | * Command line: `restore --database=FOO:BAR,BAZ:QUX` 146 | 147 | Database names are case-insensitive, as they are in mysql. 148 | 149 | There is no config file support for mappings. 150 | 151 | When the restore runs, it will do the following: 152 | 153 | 1. If the dump file has `USE <from>;` in it, it will be replaced with `USE <to>;`, where `<to>` is the `TO` database name. 154 | 1. Run the restore, which will restore into the `TO` database name. 155 | 156 | If the dump file does *not* have the `USE <name>;` statement in it, for example, if it was created with 157 | `mysql-backup dump --no-database-name`, then it simply restores as is. Be careful with this. 158 | -------------------------------------------------------------------------------- /docs/scheduling.md: -------------------------------------------------------------------------------- 1 | # Backup Scheduling 2 | 3 | `mysql-backup` can be run either once, doing a backup and exiting, or as a long-running task, 4 | backing up on schedule. 5 | 6 | There are several options for scheduling how often a backup should run: 7 | 8 | * run just once and exit.
-------------------------------------------------------------------------------- /docs/scheduling.md: -------------------------------------------------------------------------------- 1 | # Backup Scheduling 2 | 3 | `mysql-backup` can be run either once, doing a backup and exiting, or as a long-running task, 4 | backing up on schedule. 5 | 6 | There are several options for scheduling how often a backup should run: 7 | 8 | * run just once and exit. 9 | * run every X minutes, optionally delaying the first run by a certain amount of time. 10 | * run on a schedule. 11 | 12 | 13 | ## Order of Priority 14 | 15 | The scheduling options have an order of priority: 16 | 17 | 1. If run once is set, it will run immediately and exit, ignoring all other scheduling options. 18 | 2. If cron is set, it runs according to the cron schedule, ignoring frequency and delayed start. 19 | 3. Frequency and optionally delayed start are used. 20 | 21 | ## Scheduling Options 22 | 23 | ### Run once 24 | 25 | You can set it to run just once via: 26 | 27 | * Environment variable: `DB_DUMP_ONCE=true` 28 | * CLI flag: `dump --once` 29 | * Config file: 30 | ```yaml 31 | dump: 32 | run-once: true 33 | ``` 34 | 35 | If you set it to run just once, the backup will run once and then exit. 36 | 37 | **This overrides all other scheduling options**. 38 | 39 | This is useful for one-offs, or if `mysql-backup` is being run via an external scheduler, such as cron 40 | or [kubernetes cron jobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/), and you thus 41 | do not want `mysql-backup` to do the scheduling internally. 42 | 43 | ### Cron Scheduling 44 | 45 | You can set a cron schedule via: 46 | 47 | * Environment variable: `CRON_SCHEDULE=0 * * * *` 48 | * CLI flag: `dump --cron="0 * * * *"` 49 | * Config file: 50 | ```yaml 51 | dump: 52 | cron: 0 * * * * 53 | ``` 54 | 55 | The cron schedule option uses standard [crontab syntax](https://en.wikipedia.org/wiki/Cron), as a 56 | single line. 57 | 58 | If a cron-scheduled backup is still running when the next backup window begins, that next run is skipped. For example, if your cron line schedules a backup every hour, and the backup that starts at 13:00 finishes at 14:05, the next backup will not run immediately, but rather at 15:00. 59 | 60 | ### Frequency and Delayed Start 61 | 62 | If neither run once nor cron is set, then `mysql-backup` will use the frequency and optional delayed start options. 63 | 64 | The value for each is in minutes. Thus, you can set the backup to run every hour by setting the frequency to `60`. 65 | Similarly, you can delay the start by 2 hours by setting the delayed start to `120`. 66 | 67 | You can set the frequency via: 68 | 69 | * Environment variable: `DB_DUMP_FREQUENCY=60` 70 | * CLI flag: `dump --frequency=60` 71 | * Config file: 72 | ```yaml 73 | dump: 74 | frequency: 60 75 | ``` 76 | 77 | You can set the delayed start via: 78 | 79 | * Environment variable: `DB_DUMP_DELAY=120` 80 | * CLI flag: `dump --delay=120` 81 | * Config file: 82 | ```yaml 83 | dump: 84 | delay: 120 85 | ``` 86 |
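Putting frequency and delayed start together, here is a minimal sketch; the values, paths, and credentials are illustrative, and the remaining flags follow the conventions described elsewhere in these docs. It takes its first backup two hours after the container starts, then backs up every hour:

```bash
docker run -d \
  -e DB_SERVER=mysql \
  -e DB_USER=user123 \
  -e DB_PASS=pass123 \
  -e DB_DUMP_TARGET=/db \
  -e DB_DUMP_FREQUENCY=60 \
  -e DB_DUMP_DELAY=120 \
  -v /local/backups:/db \
  databack/mysql-backup dump
```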
-------------------------------------------------------------------------------- /docs/security.md: -------------------------------------------------------------------------------- 1 | # Security 2 | 3 | ## Database and Targets 4 | 5 | `mysql-backup` uses standard libraries for accessing remote services, including the database to back up 6 | or restore, and targets for saving backups, restoring backups, or pruning. 7 | 8 | ## Logs 9 | 10 | Logs should never include credentials or other secrets, even at the most detailed level, such as `trace`. If, despite our efforts, 11 | you see confidential information in logs, please report an issue immediately. 12 | 13 | ## Telemetry 14 | 15 | Remote telemetry services store your logs, as well as details about when backups occurred, if there were any errors, 16 | and how long they took. This means that the telemetry services know: 17 | 18 | * The names of the databases you back up 19 | * The names of the targets you use 20 | * The times of backups 21 | * The duration of backups 22 | * The success or failure of backups 23 | * Backup logs. As described above in [Logs](#logs), logs should never include credentials or other secrets. 24 | 25 | Telemetry services do not store your credentials or other secrets, nor do they store the contents of your backups. 26 | They _do_ know the names of your database tables, as those appear in the logs. 27 | 28 | ## Remote Configuration 29 | 30 | Remote configuration services store your configuration, including the names of your databases and targets, as well as 31 | credentials. However, they hold that data only in encrypted form, such that only you can decrypt it. When you load configuration 32 | into the remote service, it is encrypted locally to you, and then stored as an encrypted blob. The remote service never 33 | sees your unencrypted data. 34 | 35 | The data is decrypted by `mysql-backup` locally on your machine, when you retrieve the configuration. 36 | 37 | Your access token to the remote service, stored in your local configuration file, is a 38 | [Curve25519 private key](https://en.wikipedia.org/wiki/Curve25519), which authenticates 39 | you to the remote service. The remote service never sees this key, only the public key, which is used to verify your identity. 40 | 41 | This key is then used to decrypt the configuration blob, which is used to configure `mysql-backup`. 42 | 43 | In configuration files, the key is stored base64-encoded. 44 | -------------------------------------------------------------------------------- /entrypoint: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | NICE_CMD= 4 | # if NICE is set to true, run the backup at reduced CPU and I/O priority 5 | if [ "$NICE" = "true" ]; then 6 | NICE_CMD="nice -n19 ionice -c2" 7 | fi 8 | 9 | ${NICE_CMD} /mysql-backup "$@" 10 | -------------------------------------------------------------------------------- /examples/configs/local.yaml: -------------------------------------------------------------------------------- 1 | # sample configuration file for config entirely local, not using remote config service 2 | # can be overridden by command-line arguments 3 | 4 | # standard reference of type and version 5 | version: config.databack.io/v1 6 | kind: local 7 | 8 | spec: 9 | 10 | # set logging level, one of: error,warning,info,debug,trace; default is info 11 | logging: info 12 | 13 | # dump, or backup, configuration 14 | dump: 15 | include: # optional, otherwise will do all tables except system tables 16 | - table1 17 | - table2 18 | exclude: # optional, otherwise will do all tables except system tables 19 | - table3 20 | - table4 21 | safechars: true # defaults to false 22 | noDatabaseName: false # remove the `USE <database>` statement from backup files, defaults to false 23 | # schedule to dump, can use one of: cron, frequency, once. If frequency is set, begin will be checked 24 | schedule: 25 | once: true # run only once and exit; ignores all other scheduling. Defaults to false 26 | cron: "0 10 * * *" 27 | frequency: 1440 # in minutes 28 | begin: +25 # What time to do the first dump. Must be in one of two formats: Absolute: HHMM, e.g. `2330` or `0415`; or Relative: +MM, i.e. how many minutes after starting the container, e.g. 
`+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half" 29 | compression: gzip # defaults to gzip 30 | compact: true # defaults to false 31 | maxAllowedPacket: 4194304 # defaults to 4194304 32 | filenamePattern: db_backup_{{ .now }}.{{ .compression }} 33 | scripts: 34 | preBackup: /path/to/prescripts/ 35 | postBackup: /path/to/postscripts/ 36 | # list of named targets to backup to, from the index below 37 | targets: 38 | - s3 39 | - file 40 | - otherfile 41 | - smbshare 42 | 43 | restore: 44 | scripts: 45 | preRestore: /path/to/prescripts/ 46 | postRestore: /path/to/postscripts/ 47 | 48 | # database configuration 49 | database: 50 | server: host 51 | port: port 52 | credentials: 53 | username: user 54 | password: password 55 | 56 | # targets. Each target is a location, as well as credentials and config, as needed 57 | targets: 58 | s3: 59 | type: s3 60 | url: s3://bucket.us-west.amazonaws.com/databackup 61 | details: 62 | region: us-west-1 63 | endpoint: https://s3.us-west-1.amazonaws.com 64 | accessKeyId: access_key_id 65 | secretAccessKey: secret_access_key 66 | file: 67 | type: file 68 | url: file:///tmp/databackup 69 | otherfile: 70 | type: file 71 | url: /tmp/databackup 72 | smbshare: 73 | type: smb 74 | url: smb://cifshost:2125/databackup 75 | details: 76 | domain: mydomain 77 | username: user 78 | password: password 79 | -------------------------------------------------------------------------------- /examples/configs/remote.yaml: -------------------------------------------------------------------------------- 1 | # sample configuration file for config entirely from remote service. When retrieving from remote, 2 | # will also be told how to handle telemetry. 3 | # can be overridden by command-line arguments 4 | 5 | # standard reference of type and version 6 | version: config.databack.io/v1 7 | kind: remote 8 | 9 | # receives the config from the config service, so nothing else needed 10 | spec: 11 | url: https://config.databack.io 12 | # sha256 fingerprint of certificates for the server, or one or more of the certificates in the signing chain; unneeded if the server is using a certificate signed by a well-known CA 13 | # this is a sample fingerprint only 14 | # DO NOT USE THIS FINGERPRINT; GET THE ACTUAL ONE FROM YOUR REMOTE SERVER! 15 | certificates: sha256:69729b8e15a86efc177a57afb7171dfc64add28c2fca8cf1507e34453ccb1470 16 | # base64-encoded Curve25519 private key for authentication to the server, as well as decrypting the provided configuration 17 | # this is a sample key only 18 | # DO NOT USE THIS KEY; GENERATE YOUR OWN! 19 | credentials: BwMqVfr1myxqX8tikIPYCyNtpHgMLIg/2nUE+pLQnTE= 20 | -------------------------------------------------------------------------------- /examples/configs/telemetry.yaml: -------------------------------------------------------------------------------- 1 | # sample configuration file for telemetry service only; everything else is local 2 | # can be overridden by command-line arguments or remote, if configured 3 | 4 | # only needed if registered to send logs and results to a telemetry service 5 | # and not defined in the config service. 
Normally, you can just use the config 6 | # to get the telemetry info 7 | 8 | # standard reference of type and version 9 | version: config.databack.io/v1 10 | kind: local 11 | 12 | spec: 13 | 14 | # set logging level, one of: error,warning,info,debug,trace; default is info 15 | logging: info 16 | 17 | telemetry: 18 | url: https://telemetry.databack.io 19 | # sha256 fingerprint of certificate for the telemetry server, or one of the certificates in the signing chain; unneeded if the server is using a certificate signed by a well-known CA 20 | # this is a sample fingerprint only 21 | # DO NOT USE THIS FINGERPRINT; GET THE ACTUAL ONE FROM YOUR REMOTE SERVER! 22 | certificates: sha256:69729b8e15a86efc177a57afb7171dfc64add28c2fca8cf1507e34453ccb1470 23 | # base64-encoded Curve25519 private key for authentication to the telemetry server 24 | # this is a sample key only 25 | # DO NOT USE THIS KEY; GENERATE YOUR OWN! 26 | credentials: BwMqVfr1myxqX8tikIPYCyNtpHgMLIg/2nUE+pLQnTE= 27 | # only needed if required by endpoint 28 | 29 | # dump, or backup, configuration 30 | dump: 31 | include: # optional, otherwise will do all tables except system tables 32 | - table1 33 | - table2 34 | exclude: # optional, otherwise will do all tables except system tables 35 | - table3 36 | - table4 37 | safechars: true # defaults to false 38 | noDatabaseName: false # remove the `USE ` statement from backup files, defaults to false 39 | # schedule to dump, can use one of: cron, frequency, once. If frequency is set, begin will be checked 40 | schedule: 41 | once: true # run only once and exit; ignores all other scheduling. Defaults to false 42 | cron: "0 10 * * *" 43 | frequency: 1440 # in minutes 44 | begin: 25 # minutes from initialization 45 | compression: gzip # defaults to gzip 46 | filenamePattern: db_backup_{{ .now }}.{{ .compression }} 47 | scripts: 48 | preBackup: /path/to/prescripts/ 49 | postBackup: /path/to/postscripts/ 50 | # list of named targets to backup to, from the index below 51 | targets: 52 | - s3 53 | - file 54 | - otherfile 55 | - smbshare 56 | 57 | restore: 58 | scripts: 59 | preRestore: /path/to/prescripts/ 60 | postRestore: /path/to/postscripts/ 61 | 62 | # database configuration 63 | database: 64 | server: host 65 | port: port 66 | credentials: 67 | username: user 68 | password: password 69 | 70 | # targets. 
Each target is a location, as well as credentials and config, as needed 71 | targets: 72 | s3: 73 | type: s3 74 | url: s3://bucket.us-west.amazonaws.com/databackup 75 | region: us-west-1 76 | endpoint: https://s3.us-west-1.amazonaws.com 77 | credentials: 78 | accessKeyId: access_key_id 79 | secretAccessKey: secret_access_key 80 | file: 81 | type: file 82 | url: file:///tmp/databackup 83 | otherfile: 84 | type: file 85 | url: /tmp/databackup 86 | smbshare: 87 | type: smb 88 | url: smb://cifshost:2125/databackup 89 | credentials: 90 | domain: mydomain 91 | username: user 92 | password: password 93 | -------------------------------------------------------------------------------- /examples/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.4' 2 | volumes: 3 | dbmysql: 4 | wordpress: 5 | backup: 6 | 7 | services: 8 | mysql: 9 | image: mysql:8.2.0 10 | environment: 11 | MYSQL_DATABASE: wordpress 12 | MYSQL_USER: wordpress 13 | MYSQL_PASSWORD: wordpress 14 | MYSQL_ROOT_PASSWORD: wordpress_root 15 | TZ: UTC 16 | volumes: 17 | - dbmysql:/var/lib/mysql 18 | command: --datadir=/var/lib/mysql/data --character-set-server=utf8 --collation-server=utf8_bin 19 | healthcheck: 20 | test: mysqladmin ping -h localhost 21 | start_period: 10s 22 | interval: 10s 23 | timeout: 5s 24 | retries: 5 25 | 26 | wordpress: 27 | image: wordpress 28 | environment: 29 | - WORDPRESS_DB_HOST=mysql 30 | - WORDPRESS_DB_USER=wordpress 31 | - WORDPRESS_DB_PASSWORD=wordpress 32 | - WORDPRESS_DB_NAME=wordpress 33 | volumes: 34 | - wordpress:/var/www/html 35 | ports: 36 | - "81:80" 37 | 38 | mysql-backup: 39 | image: databack/mysql-backup:1.0.0 # use appropriate image tags 40 | environment: 41 | DB_SERVER: mysql 42 | DB_USER: wordpress 43 | DB_PASS: wordpress 44 | DB_DUMP_TARGET: s3://dumps/WORDPRESS 45 | DB_DUMP_BEGIN: 1345 46 | AWS_ACCESS_KEY_ID: access 47 | AWS_SECRET_ACCESS_KEY: SUPERKEYPOWERSON 48 | AWS_ENDPOINT_URL: https://minio.mydomain.local 49 | COMPRESSION: bzip2 50 | command: dump 51 | depends_on: 52 | mysql: 53 | condition: service_healthy 54 | 55 | mysql-backup-multiple-targets: 56 | image: databack/mysql-backup:1.0.0 # use appropriate image tags 57 | environment: 58 | DB_SERVER: mysql 59 | DB_USER: wordpress 60 | DB_PASS: wordpress 61 | DB_DUMP_TARGET: "/db s3://dumps/WORDPRESS" 62 | DB_DUMP_BEGIN: 1345 63 | AWS_ACCESS_KEY_ID: access 64 | AWS_SECRET_ACCESS_KEY: SUPERKEYPOWERSON 65 | AWS_ENDPOINT_URL: https://minio.mydomain.local 66 | COMPRESSION: bzip2 67 | TZ: UTC 68 | command: dump 69 | depends_on: 70 | mysql: 71 | condition: service_healthy 72 | volumes: 73 | - backup:/db 74 | 75 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/databacker/mysql-backup 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.23.1 6 | 7 | require ( 8 | github.com/aws/aws-sdk-go-v2 v1.32.3 9 | github.com/aws/aws-sdk-go-v2/config v1.28.1 10 | github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35 11 | github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 12 | github.com/docker/docker v26.1.5+incompatible 13 | github.com/docker/go-connections v0.4.0 14 | github.com/go-sql-driver/mysql v1.7.1 15 | github.com/johannesboyne/gofakes3 v0.0.0-20230506070712-04da935ef877 16 | github.com/moby/moby v26.1.0+incompatible 17 | github.com/robfig/cron/v3 v3.0.1 18 | github.com/sirupsen/logrus v1.9.3 19 | github.com/spf13/cobra v1.8.0 20 | github.com/spf13/pflag 
v1.0.5 21 | github.com/spf13/viper v1.6.3 22 | github.com/stretchr/testify v1.9.0 23 | gopkg.in/yaml.v3 v3.0.1 24 | ) 25 | 26 | require ( 27 | github.com/aws/aws-sdk-go-v2/credentials v1.17.42 28 | github.com/cloudsoda/go-smb2 v0.0.0-20231106205947-b0758ecc4c67 29 | github.com/dsnet/compress v0.0.1 30 | github.com/go-test/deep v1.1.0 31 | ) 32 | 33 | require ( 34 | github.com/databacker/api/go/api v0.0.0-20250423183243-7775066c265e 35 | github.com/google/go-cmp v0.6.0 36 | go.opentelemetry.io/otel v1.31.0 37 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 38 | go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 39 | go.opentelemetry.io/otel/sdk v1.31.0 40 | go.opentelemetry.io/otel/trace v1.31.0 41 | ) 42 | 43 | require ( 44 | filippo.io/age v1.2.1 // indirect 45 | github.com/InfiniteLoopSpace/go_S-MIME v0.0.0-20181221134359-3f58f9a4b2b6 // indirect 46 | github.com/cenkalti/backoff/v4 v4.2.1 // indirect 47 | github.com/containerd/log v0.1.0 // indirect 48 | github.com/distribution/reference v0.6.0 // indirect 49 | github.com/felixge/httpsnoop v1.0.3 // indirect 50 | github.com/github/smimesign v0.2.0 // indirect 51 | github.com/go-logr/logr v1.4.2 // indirect 52 | github.com/go-logr/stdr v1.2.2 // indirect 53 | github.com/golang/protobuf v1.5.4 // indirect 54 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect 55 | github.com/moby/docker-image-spec v1.3.1 // indirect 56 | github.com/moby/sys/user v0.3.0 // indirect 57 | github.com/moby/sys/userns v0.1.0 // indirect 58 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect 59 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect 60 | go.opentelemetry.io/otel/metric v1.31.0 // indirect 61 | go.opentelemetry.io/proto/otlp v1.0.0 // indirect 62 | google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f // indirect 63 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect 64 | google.golang.org/grpc v1.59.0 // indirect 65 | google.golang.org/protobuf v1.35.2 // indirect 66 | ) 67 | 68 | require ( 69 | github.com/Microsoft/go-winio v0.6.2 // indirect 70 | github.com/aws/aws-sdk-go v1.44.256 // indirect 71 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 // indirect 72 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18 // indirect 73 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 // indirect 74 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 // indirect 75 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect 76 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 // indirect 77 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect 78 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 // indirect 79 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 // indirect 80 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 // indirect 81 | github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 // indirect 82 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3 // indirect 83 | github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 // indirect 84 | github.com/aws/smithy-go v1.22.0 // indirect 85 | github.com/containerd/containerd v1.7.27 // indirect 86 | github.com/davecgh/go-spew v1.1.1 // indirect 87 | github.com/docker/go-units v0.5.0 // indirect 88 | github.com/fsnotify/fsnotify v1.6.0 // indirect 89 | github.com/geoffgarside/ber v1.1.0 // indirect 90 | github.com/gogo/protobuf v1.3.2 
// indirect 91 | github.com/google/uuid v1.6.0 92 | github.com/hashicorp/hcl v1.0.0 // indirect 93 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 94 | github.com/klauspost/compress v1.16.7 // indirect 95 | github.com/magiconair/properties v1.8.1 // indirect 96 | github.com/mitchellh/mapstructure v1.1.2 // indirect 97 | github.com/moby/patternmatcher v0.5.0 // indirect 98 | github.com/moby/sys/sequential v0.5.0 // indirect 99 | github.com/moby/term v0.5.0 // indirect 100 | github.com/morikuni/aec v1.0.0 // indirect 101 | github.com/opencontainers/go-digest v1.0.0 // indirect 102 | github.com/opencontainers/image-spec v1.1.0 // indirect 103 | github.com/pelletier/go-toml v1.9.5 // indirect 104 | github.com/pkg/errors v0.9.1 // indirect 105 | github.com/pmezard/go-difflib v1.0.0 // indirect 106 | github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect 107 | github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 // indirect 108 | github.com/spf13/afero v1.2.2 // indirect 109 | github.com/spf13/cast v1.3.0 // indirect 110 | github.com/spf13/jwalterweatherman v1.0.0 // indirect 111 | github.com/stretchr/objx v0.5.2 // indirect 112 | github.com/subosito/gotenv v1.2.0 // indirect 113 | golang.org/x/crypto v0.37.0 114 | golang.org/x/net v0.38.0 // indirect 115 | golang.org/x/sys v0.32.0 // indirect 116 | golang.org/x/text v0.24.0 // indirect 117 | golang.org/x/tools v0.22.0 // indirect 118 | gopkg.in/ini.v1 v1.51.0 // indirect 119 | gopkg.in/yaml.v2 v2.4.0 // indirect 120 | gotest.tools/v3 v3.4.0 // indirect 121 | ) 122 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/databacker/mysql-backup/cmd" 5 | ) 6 | 7 | func main() { 8 | cmd.Execute() 9 | } 10 | -------------------------------------------------------------------------------- /pkg/archive/tar.go: -------------------------------------------------------------------------------- 1 | package archive 2 | 3 | import ( 4 | "archive/tar" 5 | "fmt" 6 | "io" 7 | "os" 8 | "path/filepath" 9 | "strings" 10 | ) 11 | 12 | func Tar(src string, writer io.WriteCloser) error { 13 | 14 | // ensure the src actually exists before trying to tar it 15 | if _, err := os.Stat(src); err != nil { 16 | return fmt.Errorf("unable to tar files - %v", err.Error()) 17 | } 18 | 19 | tw := tar.NewWriter(writer) 20 | // defers are executed via a stack, so LIFO 21 | // important we close the tw before the underlying writer 22 | defer func() { _ = tw.Close(); _ = writer.Close() }() 23 | 24 | // walk path 25 | return filepath.Walk(src, func(file string, fi os.FileInfo, err error) error { 26 | 27 | // return on any error 28 | if err != nil { 29 | return err 30 | } 31 | 32 | // return on non-regular files (thanks to [kumo](https://medium.com/@komuw/just-like-you-did-fbdd7df829d3) for this suggested update) 33 | if !fi.Mode().IsRegular() { 34 | return nil 35 | } 36 | 37 | // create a new dir/file header 38 | header, err := tar.FileInfoHeader(fi, fi.Name()) 39 | if err != nil { 40 | return err 41 | } 42 | 43 | // update the name to correctly reflect the desired destination when untaring 44 | header.Name = strings.TrimPrefix(strings.ReplaceAll(file, src, ""), string(filepath.Separator)) 45 | 46 | // write the header 47 | if err := tw.WriteHeader(header); err != nil { 48 | return err 49 | } 50 | 51 | // open files for taring 52 | f, err := os.Open(file) 53 | if err != nil 
{ 54 | return err 55 | } 56 | 57 | // copy file data into tar writer 58 | if _, err := io.Copy(tw, f); err != nil { 59 | return err 60 | } 61 | 62 | // manually close here after each file operation; defering would cause each file close 63 | // to wait until all operations have completed. 64 | _ = f.Close() 65 | 66 | return nil 67 | }) 68 | } 69 | 70 | func Untar(r io.Reader, dst string) error { 71 | tr := tar.NewReader(r) 72 | 73 | for { 74 | header, err := tr.Next() 75 | 76 | switch { 77 | 78 | // if no more files are found return 79 | case err == io.EOF: 80 | return nil 81 | 82 | // return any other error 83 | case err != nil: 84 | return err 85 | 86 | // if the header is nil, just skip it (not sure how this happens) 87 | case header == nil: 88 | continue 89 | } 90 | 91 | // the target location where the dir/file should be created 92 | target := filepath.Join(dst, header.Name) 93 | 94 | // the following switch could also be done using fi.Mode(), not sure if there 95 | // a benefit of using one vs. the other. 96 | // fi := header.FileInfo() 97 | 98 | // check the file type 99 | switch header.Typeflag { 100 | 101 | // if its a dir and it doesn't exist create it 102 | case tar.TypeDir: 103 | if _, err := os.Stat(target); err != nil { 104 | if err := os.MkdirAll(target, 0755); err != nil { 105 | return err 106 | } 107 | } 108 | 109 | // if it's a file create it 110 | case tar.TypeReg: 111 | f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) 112 | if err != nil { 113 | return err 114 | } 115 | 116 | // copy over contents 117 | if _, err := io.Copy(f, tr); err != nil { 118 | return err 119 | } 120 | 121 | // manually close here after each file operation; defering would cause each file close 122 | // to wait until all operations have completed. 
123 | _ = f.Close() 124 | } 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /pkg/compression/bzip2.go: -------------------------------------------------------------------------------- 1 | package compression 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/dsnet/compress/bzip2" 7 | ) 8 | 9 | var _ Compressor = &Bzip2Compressor{} 10 | 11 | type Bzip2Compressor struct { 12 | } 13 | 14 | func (b *Bzip2Compressor) Uncompress(in io.Reader) (io.Reader, error) { 15 | return bzip2.NewReader(in, nil) 16 | } 17 | 18 | func (b *Bzip2Compressor) Compress(out io.Writer) (io.WriteCloser, error) { 19 | return bzip2.NewWriter(out, nil) 20 | } 21 | func (b *Bzip2Compressor) Extension() string { 22 | return "tbz2" 23 | } 24 | -------------------------------------------------------------------------------- /pkg/compression/compressor.go: -------------------------------------------------------------------------------- 1 | package compression 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | ) 7 | 8 | type Compressor interface { 9 | Uncompress(in io.Reader) (io.Reader, error) 10 | Compress(out io.Writer) (io.WriteCloser, error) 11 | Extension() string 12 | } 13 | 14 | func GetCompressor(name string) (Compressor, error) { 15 | switch name { 16 | case "gzip": 17 | return &GzipCompressor{}, nil 18 | case "bzip2": 19 | return &Bzip2Compressor{}, nil 20 | case "none": 21 | return &NoCompressor{}, nil 22 | default: 23 | return nil, fmt.Errorf("unknown compression format: %s", name) 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /pkg/compression/gzip.go: -------------------------------------------------------------------------------- 1 | package compression 2 | 3 | import ( 4 | "compress/gzip" 5 | "io" 6 | ) 7 | 8 | var _ Compressor = &GzipCompressor{} 9 | 10 | type GzipCompressor struct { 11 | } 12 | 13 | func (g *GzipCompressor) Uncompress(in io.Reader) (io.Reader, error) { 14 | return gzip.NewReader(in) 15 | } 16 | 17 | func (g *GzipCompressor) Compress(out io.Writer) (io.WriteCloser, error) { 18 | return gzip.NewWriter(out), nil 19 | } 20 | func (g *GzipCompressor) Extension() string { 21 | return "tgz" 22 | } 23 | -------------------------------------------------------------------------------- /pkg/compression/none.go: -------------------------------------------------------------------------------- 1 | package compression 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | var _ Compressor = &NoCompressor{} 8 | 9 | type NoCompressor struct { 10 | } 11 | 12 | func (n *NoCompressor) Uncompress(in io.Reader) (io.Reader, error) { 13 | return in, nil 14 | } 15 | 16 | func (n *NoCompressor) Compress(out io.Writer) (io.WriteCloser, error) { 17 | return &nopWriteCloser{out}, nil 18 | } 19 | func (n *NoCompressor) Extension() string { 20 | return "tar" 21 | } 22 | 23 | type nopWriteCloser struct { 24 | io.Writer 25 | } 26 | 27 | func (n *nopWriteCloser) Close() error { 28 | return nil 29 | } 30 | -------------------------------------------------------------------------------- /pkg/config/process_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "bytes" 5 | "crypto/aes" 6 | "crypto/cipher" 7 | "crypto/ecdh" 8 | "crypto/ed25519" 9 | cryptorand "crypto/rand" 10 | "crypto/sha256" 11 | "encoding/base64" 12 | "errors" 13 | "io" 14 | "net/http" 15 | "os" 16 | "strings" 17 | "testing" 18 | 19 | utiltest "github.com/databacker/mysql-backup/pkg/internal/test" 20 | 
"golang.org/x/crypto/hkdf" 21 | "golang.org/x/crypto/nacl/box" 22 | "gopkg.in/yaml.v3" 23 | 24 | "github.com/databacker/api/go/api" 25 | "github.com/google/go-cmp/cmp" 26 | ) 27 | 28 | func TestGetRemoteConfig(t *testing.T) { 29 | configFile := "./testdata/config.yml" 30 | content, err := os.ReadFile(configFile) 31 | if err != nil { 32 | t.Fatalf("failed to read config file: %v", err) 33 | } 34 | var validConfig api.Config 35 | if err := yaml.Unmarshal(content, &validConfig); err != nil { 36 | t.Fatalf("failed to unmarshal config: %v", err) 37 | } 38 | // start the server before the tests 39 | server, fingerprint, clientKeys, err := utiltest.StartServer(1, func(w http.ResponseWriter, r *http.Request) { 40 | var buf bytes.Buffer 41 | f, err := os.Open(configFile) 42 | if err != nil { 43 | w.WriteHeader(http.StatusInternalServerError) 44 | _, _ = w.Write([]byte(err.Error())) 45 | return 46 | } 47 | if _, err = buf.ReadFrom(f); err != nil { 48 | w.WriteHeader(http.StatusInternalServerError) 49 | _, _ = w.Write([]byte(err.Error())) 50 | return 51 | } 52 | if _, err := w.Write(buf.Bytes()); err != nil { 53 | w.WriteHeader(http.StatusInternalServerError) 54 | _, _ = w.Write([]byte(err.Error())) 55 | return 56 | } 57 | w.WriteHeader(http.StatusOK) 58 | }) 59 | if err != nil { 60 | t.Fatalf("failed to start server: %v", err) 61 | } 62 | defer server.Close() 63 | tests := []struct { 64 | name string 65 | url string 66 | err string 67 | config api.Config 68 | }{ 69 | {"no url", "", "unsupported protocol scheme", api.Config{}}, 70 | {"invalid server", "https://foo.bar/com", "no such host", api.Config{}}, 71 | {"no path", "https://google.com/foo/bar/abc", "invalid config file", api.Config{}}, 72 | {"nothing listening", "https://localhost:12345/foo/bar/abc", "connection refused", api.Config{}}, 73 | {"valid", server.URL, "", validConfig}, 74 | } 75 | for _, tt := range tests { 76 | t.Run(tt.name, func(t *testing.T) { 77 | creds := base64.StdEncoding.EncodeToString(clientKeys[0]) 78 | spec := api.RemoteSpec{ 79 | URL: &tt.url, 80 | Certificates: &[]string{fingerprint}, 81 | Credentials: &creds, 82 | } 83 | conf, err := getRemoteConfig(spec) 84 | switch { 85 | case tt.err == "" && err != nil: 86 | t.Fatalf("unexpected error: %v", err) 87 | case tt.err != "" && err == nil: 88 | t.Fatalf("expected error: %s", tt.err) 89 | case tt.err != "" && !strings.Contains(err.Error(), tt.err): 90 | t.Fatalf("mismatched error: %s, got: %v", tt.err, err) 91 | default: 92 | diff := cmp.Diff(tt.config, conf) 93 | if diff != "" { 94 | t.Fatalf("mismatched config: %s", diff) 95 | } 96 | } 97 | }) 98 | } 99 | 100 | } 101 | 102 | func TestDecryptConfig(t *testing.T) { 103 | configFile := "./testdata/config.yml" 104 | content, err := os.ReadFile(configFile) 105 | if err != nil { 106 | t.Fatalf("failed to read config file: %v", err) 107 | } 108 | var validConfig api.Config 109 | if err := yaml.Unmarshal(content, &validConfig); err != nil { 110 | t.Fatalf("failed to unmarshal config: %v", err) 111 | } 112 | 113 | senderCurve := ecdh.X25519() 114 | senderPrivateKey, err := senderCurve.GenerateKey(cryptorand.Reader) 115 | if err != nil { 116 | t.Fatalf("failed to generate sender random seed: %v", err) 117 | } 118 | senderPublicKey := senderPrivateKey.PublicKey() 119 | senderPublicKeyBytes := senderPublicKey.Bytes() 120 | 121 | recipientCurve := ecdh.X25519() 122 | recipientPrivateKey, err := recipientCurve.GenerateKey(cryptorand.Reader) 123 | if err != nil { 124 | t.Fatalf("failed to generate recipient random seed: %v", err) 125 | 
} 126 | recipientPublicKey := recipientPrivateKey.PublicKey() 127 | recipientPublicKeyBytes := recipientPublicKey.Bytes() 128 | 129 | var recipientPublicKeyArray, senderPrivateKeyArray [32]byte 130 | copy(recipientPublicKeyArray[:], recipientPublicKeyBytes) 131 | copy(senderPrivateKeyArray[:], senderPrivateKey.Bytes()) 132 | 133 | senderPublicKeyB64 := base64.StdEncoding.EncodeToString(senderPublicKeyBytes) 134 | 135 | recipientPublicKeyB64 := base64.StdEncoding.EncodeToString(recipientPublicKeyBytes) 136 | 137 | // compute the shared secret using the sender's private key and the recipient's public key 138 | var sharedSecret [32]byte 139 | box.Precompute(&sharedSecret, &recipientPublicKeyArray, &senderPrivateKeyArray) 140 | 141 | // Derive the symmetric key using HKDF with the shared secret 142 | hkdfReader := hkdf.New(sha256.New, sharedSecret[:], nil, []byte(api.SymmetricKey)) 143 | symmetricKey := make([]byte, 32) // AES-GCM requires 32 bytes 144 | if _, err := hkdfReader.Read(symmetricKey); err != nil { 145 | t.Fatalf("failed to derive symmetric key: %v", err) 146 | } 147 | 148 | // Create AES cipher block 149 | block, err := aes.NewCipher(symmetricKey) 150 | if err != nil { 151 | t.Fatalf("failed to create AES cipher") 152 | } 153 | // Create GCM instance 154 | aesGCM, err := cipher.NewGCM(block) 155 | if err != nil { 156 | t.Fatalf("failed to create AES-GCM") 157 | } 158 | 159 | // Generate a random nonce 160 | nonce := make([]byte, aesGCM.NonceSize()) 161 | _, err = cryptorand.Read(nonce) 162 | if err != nil { 163 | t.Fatalf("failed to generate nonce") 164 | } 165 | 166 | // Encrypt the plaintext 167 | ciphertext := aesGCM.Seal(nil, nonce, content, nil) 168 | 169 | // Embed the nonce in the ciphertext 170 | fullCiphertext := append(nonce, ciphertext...) 
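// At this point the envelope layout is nonce || ciphertext (base64-encoded below);
// the AES-256-GCM key was derived above from the X25519 shared secret via HKDF-SHA256.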
171 | 172 | algo := api.EncryptedSpecAlgorithmAes256Gcm 173 | data := base64.StdEncoding.EncodeToString(fullCiphertext) 174 | 175 | // this is a valid spec, we want to be able to change fields 176 | // without modifying the original, so we have a utility function after 177 | validSpec := api.EncryptedSpec{ 178 | Algorithm: &algo, 179 | Data: &data, 180 | RecipientPublicKey: &recipientPublicKeyB64, 181 | SenderPublicKey: &senderPublicKeyB64, 182 | } 183 | 184 | // copy a spec, changing specific fields 185 | copyModifySpec := func(opts ...func(*api.EncryptedSpec)) api.EncryptedSpec { 186 | copy := validSpec 187 | for _, opt := range opts { 188 | opt(©) 189 | } 190 | return copy 191 | } 192 | 193 | unusedSeed := make([]byte, ed25519.SeedSize) 194 | if _, err := io.ReadFull(cryptorand.Reader, unusedSeed); err != nil { 195 | t.Fatalf("failed to generate sender random seed: %v", err) 196 | } 197 | 198 | // recipient private key credentials 199 | recipientCreds := []string{base64.StdEncoding.EncodeToString(recipientPrivateKey.Bytes())} 200 | unusedCreds := []string{base64.StdEncoding.EncodeToString(unusedSeed)} 201 | 202 | tests := []struct { 203 | name string 204 | inSpec api.EncryptedSpec 205 | credentials []string 206 | config api.Config 207 | err error 208 | }{ 209 | {"no algorithm", copyModifySpec(func(s *api.EncryptedSpec) { s.Algorithm = nil }), recipientCreds, api.Config{}, errors.New("empty algorithm")}, 210 | {"no data", copyModifySpec(func(s *api.EncryptedSpec) { s.Data = nil }), recipientCreds, api.Config{}, errors.New("empty data")}, 211 | {"bad base64 data", copyModifySpec(func(s *api.EncryptedSpec) { data := "abcdef"; s.Data = &data }), recipientCreds, api.Config{}, errors.New("failed to decode encrypted data: illegal base64 data")}, 212 | {"short encrypted data", copyModifySpec(func(s *api.EncryptedSpec) { 213 | data := base64.StdEncoding.EncodeToString([]byte("abcdef")) 214 | s.Data = &data 215 | }), recipientCreds, api.Config{}, errors.New("invalid encrypted data length")}, 216 | {"invalid encrypted data", copyModifySpec(func(s *api.EncryptedSpec) { 217 | bad := nonce 218 | bad = append(bad, 1, 2, 3, 4) 219 | data := base64.StdEncoding.EncodeToString(bad) 220 | s.Data = &data 221 | }), recipientCreds, api.Config{}, errors.New("failed to decrypt data: cipher: message authentication failed")}, 222 | {"empty credentials", validSpec, nil, api.Config{}, errors.New("no private key found that matches public key")}, 223 | {"unmatched credentials", validSpec, unusedCreds, api.Config{}, errors.New("no private key found that matches public key")}, 224 | {"success with just one credential", validSpec, recipientCreds, validConfig, nil}, 225 | {"success with multiple credentials", validSpec, append(recipientCreds, unusedCreds...), validConfig, nil}, 226 | } 227 | for _, tt := range tests { 228 | t.Run(tt.name, func(t *testing.T) { 229 | conf, err := decryptConfig(tt.inSpec, tt.credentials) 230 | switch { 231 | case err == nil && tt.err != nil: 232 | t.Fatalf("expected error: %v", tt.err) 233 | case err != nil && tt.err == nil: 234 | t.Fatalf("unexpected error: %v", err) 235 | case err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error()): 236 | t.Fatalf("mismatched error: %v", err) 237 | } 238 | diff := cmp.Diff(tt.config, conf) 239 | if diff != "" { 240 | t.Fatalf("mismatched config: %s", diff) 241 | } 242 | }) 243 | } 244 | } 245 | -------------------------------------------------------------------------------- /pkg/config/testdata/config.yml: 
-------------------------------------------------------------------------------- 1 | version: config.databack.io/v1 2 | kind: local 3 | 4 | spec: 5 | database: 6 | server: abcd 7 | port: 3306 8 | credentials: 9 | username: user2 10 | password: xxxx2 11 | 12 | targets: 13 | local: 14 | type: file 15 | url: file:///foo/bar 16 | other: 17 | type: file 18 | url: /foo/bar 19 | 20 | dump: 21 | targets: 22 | - local 23 | 24 | prune: 25 | retention: "1h" -------------------------------------------------------------------------------- /pkg/core/const.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | const ( 4 | DefaultFilenamePattern = "db_backup_{{ .now }}.{{ .compression }}" 5 | ) 6 | -------------------------------------------------------------------------------- /pkg/core/dump.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "path" 8 | "path/filepath" 9 | "strings" 10 | "text/template" 11 | "time" 12 | 13 | log "github.com/sirupsen/logrus" 14 | "go.opentelemetry.io/otel/attribute" 15 | "go.opentelemetry.io/otel/codes" 16 | 17 | "github.com/databacker/mysql-backup/pkg/archive" 18 | "github.com/databacker/mysql-backup/pkg/database" 19 | "github.com/databacker/mysql-backup/pkg/util" 20 | ) 21 | 22 | // Dump run a single dump, based on the provided opts 23 | func (e *Executor) Dump(ctx context.Context, opts DumpOptions) (DumpResults, error) { 24 | results := DumpResults{Start: time.Now()} 25 | tracer := util.GetTracerFromContext(ctx) 26 | ctx, span := tracer.Start(ctx, "dump") 27 | defer func() { 28 | results.End = time.Now() 29 | span.End() 30 | }() 31 | 32 | targets := opts.Targets 33 | safechars := opts.Safechars 34 | dbnames := opts.DBNames 35 | dbconn := opts.DBConn 36 | compressor := opts.Compressor 37 | encryptor := opts.Encryptor 38 | compact := opts.Compact 39 | triggers := opts.Triggers 40 | routines := opts.Routines 41 | suppressUseDatabase := opts.SuppressUseDatabase 42 | maxAllowedPacket := opts.MaxAllowedPacket 43 | filenamePattern := opts.FilenamePattern 44 | logger := e.Logger.WithField("run", opts.Run.String()) 45 | logger.Level = e.Logger.Level 46 | 47 | now := time.Now() 48 | results.Time = now 49 | 50 | timepart := now.Format(time.RFC3339) 51 | logger.Infof("beginning dump %s", timepart) 52 | if safechars { 53 | timepart = strings.ReplaceAll(timepart, ":", "-") 54 | } 55 | results.Timestamp = timepart 56 | span.SetAttributes(attribute.String("timestamp", timepart)) 57 | 58 | // sourceFilename: file that the uploader looks for when performing the upload 59 | // targetFilename: the remote file that is actually uploaded 60 | sourceFilename := fmt.Sprintf("db_backup_%s.%s", timepart, compressor.Extension()) 61 | targetFilename, err := ProcessFilenamePattern(filenamePattern, now, timepart, compressor.Extension()) 62 | if err != nil { 63 | return results, fmt.Errorf("failed to process filename pattern: %v", err) 64 | } 65 | span.SetAttributes(attribute.String("source-filename", sourceFilename), attribute.String("target-filename", targetFilename)) 66 | 67 | // create a temporary working directory 68 | tmpdir, err := os.MkdirTemp("", "databacker_backup") 69 | if err != nil { 70 | return results, fmt.Errorf("failed to make temporary working directory: %v", err) 71 | } 72 | defer func() { _ = os.RemoveAll(tmpdir) }() 73 | // execute pre-backup scripts if any 74 | if err := preBackup(ctx, timepart, path.Join(tmpdir, 
sourceFilename), tmpdir, opts.PreBackupScripts, logger.Level == log.DebugLevel); err != nil { 75 | return results, fmt.Errorf("error running pre-backup scripts: %v", err) 76 | } 77 | 78 | // do the dump(s) 79 | workdir, err := os.MkdirTemp("", "databacker_cache") 80 | if err != nil { 81 | return results, fmt.Errorf("failed to make temporary cache directory: %v", err) 82 | } 83 | defer func() { _ = os.RemoveAll(workdir) }() 84 | 85 | dw := make([]database.DumpWriter, 0) 86 | 87 | // do we back up all schemas, or just provided ones 88 | span.SetAttributes(attribute.Bool("provided-schemas", len(dbnames) != 0)) 89 | if len(dbnames) == 0 { 90 | if dbnames, err = database.GetSchemas(dbconn); err != nil { 91 | return results, fmt.Errorf("failed to list database schemas: %v", err) 92 | } 93 | } 94 | span.SetAttributes(attribute.StringSlice("actual-schemas", dbnames)) 95 | for _, s := range dbnames { 96 | outFile := path.Join(workdir, fmt.Sprintf("%s_%s.sql", s, timepart)) 97 | f, err := os.Create(outFile) 98 | if err != nil { 99 | return results, fmt.Errorf("failed to create dump file '%s': %v", outFile, err) 100 | } 101 | dw = append(dw, database.DumpWriter{ 102 | Schemas: []string{s}, 103 | Writer: f, 104 | }) 105 | } 106 | results.DumpStart = time.Now() 107 | dbDumpCtx, dbDumpSpan := tracer.Start(ctx, "database_dump") 108 | if err := database.Dump(dbDumpCtx, dbconn, database.DumpOpts{ 109 | Compact: compact, 110 | Triggers: triggers, 111 | Routines: routines, 112 | SuppressUseDatabase: suppressUseDatabase, 113 | MaxAllowedPacket: maxAllowedPacket, 114 | }, dw); err != nil { 115 | dbDumpSpan.SetStatus(codes.Error, err.Error()) 116 | dbDumpSpan.End() 117 | return results, fmt.Errorf("failed to dump database: %v", err) 118 | } 119 | results.DumpEnd = time.Now() 120 | dbDumpSpan.SetStatus(codes.Ok, "completed") 121 | dbDumpSpan.End() 122 | 123 | // create the tar writer to archive it all together 124 | // the archive is written to tmpdir, while the dump files live in workdir, so the tar cannot include its own output 125 | _, tarSpan := tracer.Start(ctx, "output_tar") 126 | outFile := path.Join(tmpdir, sourceFilename) 127 | f, err := os.OpenFile(outFile, os.O_CREATE|os.O_WRONLY, 0o644) 128 | if err != nil { 129 | tarSpan.SetStatus(codes.Error, err.Error()) 130 | tarSpan.End() 131 | return results, fmt.Errorf("failed to open output file '%s': %v", outFile, err) 132 | } 133 | defer func() { _ = f.Close() }() 134 | cw, err := compressor.Compress(f) 135 | if err != nil { 136 | tarSpan.SetStatus(codes.Error, err.Error()) 137 | tarSpan.End() 138 | return results, fmt.Errorf("failed to create compressor: %v", err) 139 | } 140 | if encryptor != nil { 141 | cw, err = encryptor.Encrypt(cw) 142 | if err != nil { 143 | tarSpan.SetStatus(codes.Error, err.Error()) 144 | tarSpan.End() 145 | return results, fmt.Errorf("failed to create encryptor: %v", err) 146 | } 147 | } 148 | if err := archive.Tar(workdir, cw); err != nil { 149 | tarSpan.SetStatus(codes.Error, err.Error()) 150 | tarSpan.End() 151 | return results, fmt.Errorf("error creating the compressed archive: %v", err) 152 | } 153 | // close the output file explicitly, so it is fully flushed before the post-backup scripts and uploads read it 154 | _ = f.Close() 155 | tarSpan.SetStatus(codes.Ok, "completed") 156 | tarSpan.End() 157 | 158 | // execute post-backup scripts if any 159 | if err := postBackup(ctx, timepart, path.Join(tmpdir, sourceFilename), tmpdir, opts.PostBackupScripts, logger.Level == log.DebugLevel); err != nil { 160 | return results, fmt.Errorf("error running post-backup scripts: %v", err) 161 | } 162 | 163 | // upload to each destination 164 | 
uploadCtx, uploadSpan := tracer.Start(ctx, "upload") 165 | for _, t := range targets { 166 | uploadResult := UploadResult{Target: t.URL(), Start: time.Now()} 167 | targetCleanFilename := t.Clean(targetFilename) 168 | logger.Debugf("uploading via protocol %s from %s to %s", t.Protocol(), sourceFilename, targetCleanFilename) 169 | copied, err := t.Push(uploadCtx, targetCleanFilename, filepath.Join(tmpdir, sourceFilename), logger) 170 | if err != nil { 171 | uploadSpan.SetStatus(codes.Error, err.Error()) 172 | uploadSpan.End() 173 | return results, fmt.Errorf("failed to push file: %v", err) 174 | } 175 | logger.Debugf("completed copying %d bytes", copied) 176 | uploadResult.Filename = targetCleanFilename 177 | uploadResult.End = time.Now() 178 | results.Uploads = append(results.Uploads, uploadResult) 179 | } 180 | uploadSpan.SetStatus(codes.Ok, "completed") 181 | uploadSpan.End() 182 | 183 | return results, nil 184 | } 185 | 186 | // run pre-backup scripts, if they exist 187 | func preBackup(ctx context.Context, timestamp, dumpfile, dumpdir, preBackupDir string, debug bool) error { 188 | // construct any additional environment 189 | env := map[string]string{ 190 | "NOW": timestamp, 191 | "DUMPFILE": dumpfile, 192 | "DUMPDIR": dumpdir, 193 | "DB_DEBUG": fmt.Sprintf("%v", debug), 194 | } 195 | ctx, span := util.GetTracerFromContext(ctx).Start(ctx, "pre-backup") 196 | defer span.End() 197 | return runScripts(ctx, preBackupDir, env) 198 | } 199 | 200 | func postBackup(ctx context.Context, timestamp, dumpfile, dumpdir, postBackupDir string, debug bool) error { 201 | // construct any additional environment 202 | env := map[string]string{ 203 | "NOW": timestamp, 204 | "DUMPFILE": dumpfile, 205 | "DUMPDIR": dumpdir, 206 | "DB_DEBUG": fmt.Sprintf("%v", debug), 207 | } 208 | ctx, span := util.GetTracerFromContext(ctx).Start(ctx, "post-backup") 209 | defer span.End() 210 | return runScripts(ctx, postBackupDir, env) 211 | } 212 | 213 | // ProcessFilenamePattern takes a template pattern and processes it with the current time. 214 | // Passes the timestamp as a string, because it sometimes gets changed for safechars. 
215 | func ProcessFilenamePattern(pattern string, now time.Time, timestamp, ext string) (string, error) { 216 | if pattern == "" { 217 | pattern = DefaultFilenamePattern 218 | } 219 | tmpl, err := template.New("filename").Parse(pattern) 220 | if err != nil { 221 | return "", fmt.Errorf("failed to parse filename pattern: %v", err) 222 | } 223 | var buf strings.Builder 224 | if err := tmpl.Execute(&buf, map[string]string{ 225 | "now": timestamp, 226 | "year": now.Format("2006"), 227 | "month": now.Format("01"), 228 | "day": now.Format("02"), 229 | "hour": now.Format("15"), 230 | "minute": now.Format("04"), 231 | "second": now.Format("05"), 232 | "compression": ext, 233 | }); err != nil { 234 | return "", fmt.Errorf("failed to execute filename pattern: %v", err) 235 | } 236 | return buf.String(), nil 237 | } 238 | -------------------------------------------------------------------------------- /pkg/core/dumpoptions.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "github.com/databacker/mysql-backup/pkg/compression" 5 | "github.com/databacker/mysql-backup/pkg/database" 6 | "github.com/databacker/mysql-backup/pkg/encrypt" 7 | "github.com/databacker/mysql-backup/pkg/storage" 8 | "github.com/google/uuid" 9 | ) 10 | 11 | type DumpOptions struct { 12 | Targets []storage.Storage 13 | Safechars bool 14 | DBNames []string 15 | DBConn database.Connection 16 | Compressor compression.Compressor 17 | Encryptor encrypt.Encryptor 18 | Exclude []string 19 | PreBackupScripts string 20 | PostBackupScripts string 21 | Compact bool 22 | Triggers bool 23 | Routines bool 24 | SuppressUseDatabase bool 25 | MaxAllowedPacket int 26 | Run uuid.UUID 27 | FilenamePattern string 28 | } 29 | -------------------------------------------------------------------------------- /pkg/core/dumpresults.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import "time" 4 | 5 | // DumpResults lists results of the dump. 
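// Start and End bracket the entire run, while DumpStart and DumpEnd cover only the
// database dump phase itself; Uploads records one UploadResult per configured target.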
6 | type DumpResults struct { 7 | Start time.Time 8 | End time.Time 9 | Time time.Time 10 | Timestamp string 11 | DumpStart time.Time 12 | DumpEnd time.Time 13 | Uploads []UploadResult 14 | } 15 | 16 | // UploadResult lists results of an individual upload 17 | type UploadResult struct { 18 | Target string 19 | Filename string 20 | Start time.Time 21 | End time.Time 22 | } 23 | -------------------------------------------------------------------------------- /pkg/core/executor.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | log "github.com/sirupsen/logrus" 5 | ) 6 | 7 | type Executor struct { 8 | Logger *log.Logger 9 | } 10 | 11 | func (e *Executor) SetLogger(logger *log.Logger) { 12 | e.Logger = logger 13 | } 14 | 15 | func (e *Executor) GetLogger() *log.Logger { 16 | return e.Logger 17 | } 18 | -------------------------------------------------------------------------------- /pkg/core/prune.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "path" 8 | "regexp" 9 | "slices" 10 | "strconv" 11 | "time" 12 | 13 | "github.com/databacker/mysql-backup/pkg/storage" 14 | "github.com/databacker/mysql-backup/pkg/util" 15 | "github.com/sirupsen/logrus" 16 | "go.opentelemetry.io/otel/attribute" 17 | "go.opentelemetry.io/otel/codes" 18 | ) 19 | 20 | // filenameRE is a regular expression to match a backup filename 21 | var filenameRE = regexp.MustCompile(`^db_backup_(\d{4})-(\d{2})-(\d{2})T(\d{2})[:-](\d{2})[:-](\d{2})Z\.\w+$`) 22 | 23 | // Prune prune older backups 24 | func (e *Executor) Prune(ctx context.Context, opts PruneOptions) error { 25 | tracer := util.GetTracerFromContext(ctx) 26 | tracerCtx, span := tracer.Start(ctx, "prune") 27 | defer span.End() 28 | logger := e.Logger.WithField("run", opts.Run.String()) 29 | logger.Level = e.Logger.Level 30 | logger.Info("beginning prune") 31 | var ( 32 | now = opts.Now 33 | ) 34 | if now.IsZero() { 35 | now = time.Now() 36 | } 37 | if len(opts.Targets) == 0 { 38 | return errors.New("no targets") 39 | } 40 | 41 | retainHours, err1 := convertToHours(opts.Retention) 42 | retainCount, err2 := convertToCount(opts.Retention) 43 | if (err1 != nil && err2 != nil) || (retainHours <= 0 && retainCount <= 0) { 44 | return fmt.Errorf("invalid retention string: %s", opts.Retention) 45 | } 46 | 47 | for _, target := range opts.Targets { 48 | if err := pruneTarget(tracerCtx, logger, target, now, retainHours, retainCount); err != nil { 49 | return fmt.Errorf("failed to prune target %s: %v", target.URL(), err) 50 | } 51 | } 52 | 53 | return nil 54 | } 55 | 56 | // pruneTarget prunes an individual target 57 | func pruneTarget(ctx context.Context, logger *logrus.Entry, target storage.Storage, now time.Time, retainHours, retainCount int) error { 58 | var ( 59 | pruned int 60 | candidates, ignored, invalidDate []string 61 | ) 62 | ctx, span := util.GetTracerFromContext(ctx).Start(ctx, fmt.Sprintf("pruneTarget %s", target.URL())) 63 | defer span.End() 64 | 65 | logger.Debugf("pruning target %s", target.URL()) 66 | files, err := target.ReadDir(ctx, "", logger) 67 | if err != nil { 68 | span.SetStatus(codes.Error, fmt.Sprintf("failed to read directory: %v", err)) 69 | return fmt.Errorf("failed to read directory: %v", err) 70 | } 71 | 72 | // create a slice with the filenames and their calculated times - these are *not* the timestamp times, but the times calculated from the filenames 73 | var 
filesWithTimes []fileWithTime 74 | 75 | for _, fileInfo := range files { 76 | filename := fileInfo.Name() 77 | // this should be the basename, but sometimes it is a full path, like in S3, so we will be careful to trim 78 | // to basename. If it already is basename, nothing should be affected 79 | baseFilename := path.Base(filename) 80 | matches := filenameRE.FindStringSubmatch(baseFilename) 81 | if matches == nil { 82 | logger.Debugf("ignoring filename that is not standard backup pattern: %s", filename) 83 | ignored = append(ignored, filename) 84 | continue 85 | } 86 | logger.Debugf("checking filename that is standard backup pattern: %s", filename) 87 | 88 | // Parse the date from the filename 89 | year, month, day, hour, minute, second := matches[1], matches[2], matches[3], matches[4], matches[5], matches[6] 90 | dateTimeStr := fmt.Sprintf("%s-%s-%sT%s:%s:%sZ", year, month, day, hour, minute, second) 91 | filetime, err := time.Parse(time.RFC3339, dateTimeStr) 92 | if err != nil { 93 | logger.Debugf("Error parsing date from filename %s: %v; ignoring", filename, err) 94 | invalidDate = append(invalidDate, filename) 95 | continue 96 | } 97 | filesWithTimes = append(filesWithTimes, fileWithTime{ 98 | filename: filename, 99 | filetime: filetime, 100 | }) 101 | } 102 | 103 | switch { 104 | case retainHours > 0: 105 | // if we had retainHours, we go through all of the files and find any whose timestamp is older than now-retainHours 106 | for _, f := range filesWithTimes { 107 | // Check if the file is within 'retain' hours from 'now' 108 | age := now.Sub(f.filetime).Hours() 109 | if age < float64(retainHours) { 110 | logger.Debugf("file %s is %f hours old", f.filename, age) 111 | logger.Debugf("keeping file %s", f.filename) 112 | continue 113 | } 114 | logger.Debugf("Adding candidate file: %s", f.filename) 115 | candidates = append(candidates, f.filename) 116 | } 117 | case retainCount > 0: 118 | // if we had retainCount, we sort all of the files by timestamp, and add to the list all except the retainCount most recent 119 | slices.SortFunc(filesWithTimes, func(i, j fileWithTime) int { 120 | switch { 121 | case i.filetime.Before(j.filetime): 122 | return -1 123 | case i.filetime.After(j.filetime): 124 | return 1 125 | } 126 | return 0 127 | }) 128 | slices.Reverse(filesWithTimes) 129 | if retainCount < len(filesWithTimes) { 130 | for i := 0 + retainCount; i < len(filesWithTimes); i++ { 131 | logger.Debugf("Adding candidate file %s:", filesWithTimes[i].filename) 132 | candidates = append(candidates, filesWithTimes[i].filename) 133 | } 134 | } 135 | default: 136 | span.SetStatus(codes.Error, "invalid retention time") 137 | return fmt.Errorf("invalid retention time %d count %d hours", retainCount, retainHours) 138 | } 139 | 140 | // we have the list, remove them all 141 | span.SetAttributes(attribute.StringSlice("candidates", candidates), attribute.StringSlice("ignored", ignored), attribute.StringSlice("invalidDate", invalidDate)) 142 | for _, filename := range candidates { 143 | if err := target.Remove(ctx, filename, logger); err != nil { 144 | return fmt.Errorf("failed to remove file %s: %v", filename, err) 145 | } 146 | pruned++ 147 | } 148 | logger.Debugf("pruning %d files from target %s", pruned, target.URL()) 149 | span.SetStatus(codes.Ok, fmt.Sprintf("pruned %d files", pruned)) 150 | return nil 151 | } 152 | 153 | // convertToHours takes a string with format "" and converts it to hours. 154 | // The unit can be 'h' (hours), 'd' (days), 'w' (weeks), 'm' (months), 'y' (years). 
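// For example: "2h" -> 2, "5d" -> 120, "3w" -> 504, "1m" -> 720, "1y" -> 8760.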
155 | // Assumes 30 days in a month and 365 days in a year for conversion. 156 | func convertToHours(input string) (int, error) { 157 | re := regexp.MustCompile(`^(\d+)([hdwmy])$`) 158 | matches := re.FindStringSubmatch(input) 159 | 160 | if matches == nil { 161 | return 0, fmt.Errorf("invalid format: %s", input) 162 | } 163 | 164 | value, err := strconv.Atoi(matches[1]) 165 | if err != nil { 166 | return 0, fmt.Errorf("invalid number: %s", matches[1]) 167 | } 168 | 169 | unit := matches[2] 170 | switch unit { 171 | case "h": 172 | return value, nil 173 | case "d": 174 | return value * 24, nil 175 | case "w": 176 | return value * 24 * 7, nil 177 | case "m": 178 | return value * 24 * 30, nil // Approximation 179 | case "y": 180 | return value * 24 * 365, nil // Approximation 181 | default: 182 | return 0, errors.New("invalid unit") 183 | } 184 | } 185 | 186 | // convertToCount takes a string with format "" and converts it to count. 187 | // The unit can be 'c' (count) 188 | func convertToCount(input string) (int, error) { 189 | re := regexp.MustCompile(`^(\d+)([c])$`) 190 | matches := re.FindStringSubmatch(input) 191 | 192 | if matches == nil { 193 | return 0, fmt.Errorf("invalid format: %s", input) 194 | } 195 | 196 | value, err := strconv.Atoi(matches[1]) 197 | if err != nil { 198 | return 0, fmt.Errorf("invalid number: %s", matches[1]) 199 | } 200 | 201 | unit := matches[2] 202 | switch unit { 203 | case "c": 204 | return value, nil 205 | default: 206 | return 0, errors.New("invalid unit") 207 | } 208 | } 209 | 210 | type fileWithTime struct { 211 | filename string 212 | filetime time.Time 213 | } 214 | -------------------------------------------------------------------------------- /pkg/core/prune_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "net/http/httptest" 8 | "os" 9 | "path" 10 | "slices" 11 | "testing" 12 | "time" 13 | 14 | "github.com/databacker/mysql-backup/pkg/storage" 15 | "github.com/databacker/mysql-backup/pkg/storage/credentials" 16 | 17 | "github.com/johannesboyne/gofakes3" 18 | "github.com/johannesboyne/gofakes3/backend/s3mem" 19 | log "github.com/sirupsen/logrus" 20 | "github.com/stretchr/testify/assert" 21 | ) 22 | 23 | func TestConvertToHours(t *testing.T) { 24 | tests := []struct { 25 | input string 26 | output int 27 | err error 28 | }{ 29 | {"2h", 2, nil}, 30 | {"3w", 3 * 7 * 24, nil}, 31 | {"5d", 5 * 24, nil}, 32 | {"1m", 30 * 24, nil}, 33 | {"1y", 365 * 24, nil}, 34 | {"100x", 0, fmt.Errorf("invalid format: 100x")}, 35 | } 36 | for _, tt := range tests { 37 | hours, err := convertToHours(tt.input) 38 | switch { 39 | case (err == nil && tt.err != nil) || (err != nil && tt.err == nil): 40 | t.Errorf("expected error %v, got %v", tt.err, err) 41 | case err != nil && tt.err != nil && err.Error() != tt.err.Error(): 42 | t.Errorf("expected error %v, got %v", tt.err, err) 43 | case hours != tt.output: 44 | t.Errorf("input %s expected %d, got %d", tt.input, tt.output, hours) 45 | } 46 | } 47 | } 48 | 49 | func TestPrune(t *testing.T) { 50 | // we use a fixed list of file before, and a subset of them for after 51 | // db_backup_YYYY-MM-DDTHH:mm:ssZ. 
52 | // our list of timestamps should give us these files, with the following ages: 53 | // 0.25h, 1h, 2h, 3h, 24h (1d), 36h (1.5d), 48h (2d), 60h (2.5d), 72h (3d), 54 | // 167h (1w-1h), 168h (1w), 240h (10d), 336h (2w), 504h (3w), 576h (24d), 55 | // 744h (31d), 720h (1m), 1000h (~1.4m), 1440h (2m), 1800h (2.5m), 2160h (3m), 56 | // 8760h (1y), 12000h (~1.4y), 17520h (2y) 57 | // we use a fixed starting time to make it consistent. 58 | now := time.Date(2021, 1, 1, 0, 30, 0, 0, time.UTC) 59 | hoursAgo := []float32{0.25, 1, 2, 3, 24, 36, 48, 60, 72, 167, 168, 240, 336, 504, 576, 744, 720, 1000, 1440, 1800, 2160, 8760, 12000, 17520} 60 | // convert to filenames 61 | var filenames, safefilenames []string 62 | for _, h := range hoursAgo { 63 | // convert the time diff into a duration, do not forget the negative 64 | duration, err := time.ParseDuration(fmt.Sprintf("-%fh", h)) 65 | if err != nil { 66 | t.Fatalf("failed to parse duration: %v", err) 67 | } 68 | // convert it into a time.Time, 69 | // and subtract another 30 mins, so each file is h hours + 30 minutes old 70 | relativeTime := now.Add(duration).Add(-30 * time.Minute) 71 | // convert that into the filename 72 | filename := fmt.Sprintf("db_backup_%sZ.gz", relativeTime.Format("2006-01-02T15:04:05")) 73 | filenames = append(filenames, filename) 74 | safefilename := fmt.Sprintf("db_backup_%sZ.gz", relativeTime.Format("2006-01-02T15-04-05")) 75 | safefilenames = append(safefilenames, safefilename) 76 | } 77 | tests := []struct { 78 | name string 79 | opts PruneOptions 80 | beforeFiles []string 81 | afterFiles []string 82 | err error 83 | }{ 84 | {"no targets", PruneOptions{Retention: "1h", Now: now}, nil, nil, fmt.Errorf("no targets")}, 85 | {"invalid format", PruneOptions{Retention: "100x", Now: now}, filenames, filenames[0:1], fmt.Errorf("invalid retention string: 100x")}, 86 | // 1 hour - file[1] is 1h+30m = 1.5h, so it should be pruned 87 | {"1 hour", PruneOptions{Retention: "1h", Now: now}, filenames, filenames[0:1], nil}, 88 | // 2 hours - file[2] is 2h+30m = 2.5h, so it should be pruned 89 | {"2 hours", PruneOptions{Retention: "2h", Now: now}, filenames, filenames[0:2], nil}, 90 | // 2 days - file[6] is 48h+30m = 48.5h, so it should be pruned 91 | {"2 days", PruneOptions{Retention: "2d", Now: now}, filenames, filenames[0:6], nil}, 92 | // 3 weeks - file[13] is 504h+30m = 504.5h, so it should be pruned 93 | {"3 weeks", PruneOptions{Retention: "3w", Now: now}, filenames, filenames[0:13], nil}, 94 | // 2 most recent files 95 | {"2 most recent", PruneOptions{Retention: "2c", Now: now}, filenames, filenames[0:2], nil}, 96 | 97 | // repeat for safe file names 98 | {"1 hour safe names", PruneOptions{Retention: "1h", Now: now}, safefilenames, safefilenames[0:1], nil}, 99 | // 2 hours - file[2] is 2h+30m = 2.5h, so it should be pruned 100 | {"2 hours safe names", PruneOptions{Retention: "2h", Now: now}, safefilenames, safefilenames[0:2], nil}, 101 | // 2 days - file[6] is 48h+30m = 48.5h, so it should be pruned 102 | {"2 days safe names", PruneOptions{Retention: "2d", Now: now}, safefilenames, safefilenames[0:6], nil}, 103 | // 3 weeks - file[13] is 504h+30m = 504.5h, so it should be pruned 104 | {"3 weeks safe names", PruneOptions{Retention: "3w", Now: now}, safefilenames, safefilenames[0:13], nil}, 105 | // 2 most recent files 106 | {"2 most recent safe names", PruneOptions{Retention: "2c", Now: now}, safefilenames, safefilenames[0:2], nil}, 107 | } 108 | for _, targetType := range []string{"file", "s3"} { 109 | t.Run(targetType, func(t *testing.T) { 110 | for _, tt := range
tests { 111 | t.Run(tt.name, func(t *testing.T) { 112 | ctx := context.Background() 113 | logger := log.New() 114 | logger.Out = io.Discard 115 | // create a temporary directory 116 | // create beforeFiles in the directory and create a target, but only if there are beforeFiles 117 | // this lets us also test no targets, which should generate an error 118 | if len(tt.beforeFiles) > 0 { 119 | var ( 120 | store storage.Storage 121 | err error 122 | ) 123 | switch targetType { 124 | case "file": 125 | // add our tempdir as the target 126 | workDir := t.TempDir() 127 | store, err = storage.ParseURL(fmt.Sprintf("file://%s", workDir), credentials.Creds{}) 128 | if err != nil { 129 | t.Errorf("failed to parse file url: %v", err) 130 | return 131 | } 132 | case "s3": 133 | bucketName := "mytestbucket" 134 | s3backend := s3mem.New() 135 | // create the bucket we will use for tests 136 | if err := s3backend.CreateBucket(bucketName); err != nil { 137 | t.Errorf("failed to create bucket: %v", err) 138 | return 139 | } 140 | s3 := gofakes3.New(s3backend) 141 | s3server := httptest.NewServer(s3.Server()) 142 | defer s3server.Close() 143 | s3url := s3server.URL 144 | store, err = storage.ParseURL(fmt.Sprintf("s3://%s/%s", bucketName, bucketName), credentials.Creds{AWS: credentials.AWSCreds{ 145 | Endpoint: s3url, 146 | AccessKeyID: "abcdefg", 147 | SecretAccessKey: "1234567", 148 | Region: "us-east-1", 149 | PathStyle: true, 150 | }}) 151 | if err != nil { 152 | t.Errorf("failed to parse s3 url: %v", err) 153 | return 154 | } 155 | default: 156 | t.Errorf("unknown target type: %s", targetType) 157 | return 158 | } 159 | 160 | tt.opts.Targets = append(tt.opts.Targets, store) 161 | 162 | for _, filename := range tt.beforeFiles { 163 | // we need an empty file to push 164 | srcDir := t.TempDir() 165 | srcFile := fmt.Sprintf("%s/%s", srcDir, "src") 166 | if err := os.WriteFile(srcFile, nil, 0644); err != nil { 167 | t.Errorf("failed to create file %s: %v", srcFile, err) 168 | return 169 | } 170 | 171 | // now push that same empty file each time; we do not care about contents, only that the target file exists 172 | if _, err := store.Push(ctx, filename, srcFile, log.NewEntry(logger)); err != nil { 173 | t.Errorf("failed to create file %s: %v", filename, err) 174 | return 175 | } 176 | } 177 | } 178 | 179 | // run Prune 180 | executor := Executor{ 181 | Logger: logger, 182 | } 183 | err := executor.Prune(ctx, tt.opts) 184 | switch { 185 | case (err == nil && tt.err != nil) || (err != nil && tt.err == nil): 186 | t.Errorf("expected error %v, got %v", tt.err, err) 187 | case err != nil && tt.err != nil && err.Error() != tt.err.Error(): 188 | t.Errorf("expected error %v, got %v", tt.err, err) 189 | case err != nil: 190 | return 191 | } 192 | // check files match 193 | files, err := tt.opts.Targets[0].ReadDir(ctx, "", log.NewEntry(logger)) 194 | if err != nil { 195 | t.Errorf("failed to read directory: %v", err) 196 | return 197 | } 198 | var afterFiles []string 199 | for _, file := range files { 200 | afterFiles = append(afterFiles, path.Base(file.Name())) 201 | } 202 | afterFilesSorted, ttAfterFilesSorted := slices.Clone(afterFiles), slices.Clone(tt.afterFiles) 203 | slices.Sort(afterFilesSorted) 204 | slices.Sort(ttAfterFilesSorted) 205 | assert.ElementsMatch(t, ttAfterFilesSorted, afterFilesSorted) 206 | }) 207 | } 208 | }) 209 | } 210 | } 211 | -------------------------------------------------------------------------------- /pkg/core/pruneoptions.go: 
-------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/databacker/mysql-backup/pkg/storage" 7 | "github.com/google/uuid" 8 | ) 9 | 10 | type PruneOptions struct { 11 | Targets []storage.Storage 12 | Retention string 13 | Now time.Time 14 | Run uuid.UUID 15 | } 16 | -------------------------------------------------------------------------------- /pkg/core/restore.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "os" 8 | "path" 9 | 10 | "github.com/databacker/mysql-backup/pkg/archive" 11 | "github.com/databacker/mysql-backup/pkg/database" 12 | "github.com/databacker/mysql-backup/pkg/util" 13 | "go.opentelemetry.io/otel/attribute" 14 | "go.opentelemetry.io/otel/codes" 15 | ) 16 | 17 | const ( 18 | preRestoreDir = "/scripts.d/pre-restore" 19 | postRestoreDir = "/scripts.d/post-restore" 20 | tmpRestoreFile = "/tmp/restorefile" 21 | ) 22 | 23 | // Restore restores a specific backup into the database 24 | func (e *Executor) Restore(ctx context.Context, opts RestoreOptions) error { 25 | tracer := util.GetTracerFromContext(ctx) 26 | ctx, span := tracer.Start(ctx, "restore") 27 | defer span.End() 28 | logger := e.Logger.WithField("run", opts.Run.String()) 29 | logger.Level = e.Logger.Level 30 | 31 | logger.Info("beginning restore") 32 | // execute pre-restore scripts if any 33 | if err := preRestore(ctx, opts.Target.URL()); err != nil { 34 | return fmt.Errorf("error running pre-restore: %v", err) 35 | } 36 | 37 | logger.Debugf("restoring via %s protocol, temporary file location %s", opts.Target.Protocol(), tmpRestoreFile) 38 | 39 | _, pullSpan := tracer.Start(ctx, "pull file") 40 | pullSpan.SetAttributes( 41 | attribute.String("target", opts.Target.URL()), 42 | attribute.String("targetfile", opts.TargetFile), 43 | attribute.String("tmpfile", tmpRestoreFile), 44 | ) 45 | copied, err := opts.Target.Pull(ctx, opts.TargetFile, tmpRestoreFile, logger) 46 | if err != nil { 47 | pullSpan.RecordError(err) 48 | pullSpan.End() 49 | return fmt.Errorf("failed to pull target %s: %v", opts.Target, err) 50 | } 51 | pullSpan.SetAttributes( 52 | attribute.Int64("copied", copied), 53 | ) 54 | pullSpan.SetStatus(codes.Ok, "completed") 55 | pullSpan.End() 56 | logger.Debugf("completed copying %d bytes", copied) 57 | 58 | // successfully downloaded the file; now restore it 59 | tmpdir, err := os.MkdirTemp("", "restore") 60 | if err != nil { 61 | return fmt.Errorf("unable to create temporary working directory: %v", err) 62 | } 63 | defer func() { _ = os.RemoveAll(tmpdir) }() 64 | f, err := os.Open(tmpRestoreFile) 65 | if err != nil { 66 | return fmt.Errorf("unable to read the temporary download file: %v", err) 67 | } 68 | defer func() { _ = f.Close() }() 69 | defer func() { _ = os.Remove(tmpRestoreFile) }() 70 | 71 | // create my tar reader to put the files in the directory 72 | _, tarSpan := tracer.Start(ctx, "input_tar") 73 | cr, err := opts.Compressor.Uncompress(f) 74 | if err != nil { 75 | tarSpan.SetStatus(codes.Error, fmt.Sprintf("unable to create an uncompressor: %v", err)) 76 | tarSpan.End() 77 | return fmt.Errorf("unable to create an uncompressor: %v", err) 78 | } 79 | if err := archive.Untar(cr, tmpdir); err != nil { 80 | tarSpan.SetStatus(codes.Error, fmt.Sprintf("error extracting the file: %v", err)) 81 | tarSpan.End() 82 | return fmt.Errorf("error extracting the file: %v", err) 83 | } 84 | tarSpan.SetStatus(codes.Ok,
"completed") 85 | tarSpan.End() 86 | 87 | // run through each file and apply it 88 | dbRestoreCtx, dbRestoreSpan := tracer.Start(ctx, "database_restore") 89 | files, err := os.ReadDir(tmpdir) 90 | if err != nil { 91 | dbRestoreSpan.SetStatus(codes.Error, fmt.Sprintf("failed to find extracted files to restore: %v", err)) 92 | dbRestoreSpan.End() 93 | return fmt.Errorf("failed to find extracted files to restore: %v", err) 94 | } 95 | var ( 96 | readers = make([]io.ReadSeeker, 0) 97 | fileNames []string 98 | ) 99 | for _, f := range files { 100 | // ignore directories 101 | if f.IsDir() { 102 | continue 103 | } 104 | file, err := os.Open(path.Join(tmpdir, f.Name())) 105 | if err != nil { 106 | continue 107 | } 108 | defer func() { _ = file.Close() }() 109 | readers = append(readers, file) 110 | fileNames = append(fileNames, f.Name()) 111 | } 112 | dbRestoreSpan.SetAttributes(attribute.StringSlice("files", fileNames)) 113 | if err := database.Restore(dbRestoreCtx, opts.DBConn, opts.DatabasesMap, readers); err != nil { 114 | dbRestoreSpan.SetStatus(codes.Error, fmt.Sprintf("failed to restore database: %v", err)) 115 | dbRestoreSpan.End() 116 | return fmt.Errorf("failed to restore database: %v", err) 117 | } 118 | dbRestoreSpan.SetStatus(codes.Ok, "completed") 119 | dbRestoreSpan.End() 120 | 121 | // execute post-restore scripts if any 122 | if err := postRestore(ctx, opts.Target.URL()); err != nil { 123 | return fmt.Errorf("error running post-restove: %v", err) 124 | } 125 | return nil 126 | } 127 | 128 | // run pre-restore scripts, if they exist 129 | func preRestore(ctx context.Context, target string) error { 130 | // construct any additional environment 131 | env := map[string]string{ 132 | "DB_RESTORE_TARGET": target, 133 | } 134 | ctx, span := util.GetTracerFromContext(ctx).Start(ctx, "pre-restore") 135 | defer span.End() 136 | return runScripts(ctx, preRestoreDir, env) 137 | } 138 | 139 | func postRestore(ctx context.Context, target string) error { 140 | // construct any additional environment 141 | env := map[string]string{ 142 | "DB_RESTORE_TARGET": target, 143 | } 144 | ctx, span := util.GetTracerFromContext(ctx).Start(ctx, "post-restore") 145 | defer span.End() 146 | return runScripts(ctx, postRestoreDir, env) 147 | } 148 | -------------------------------------------------------------------------------- /pkg/core/restoreoptions.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "github.com/databacker/mysql-backup/pkg/compression" 5 | "github.com/databacker/mysql-backup/pkg/database" 6 | "github.com/databacker/mysql-backup/pkg/storage" 7 | "github.com/google/uuid" 8 | ) 9 | 10 | type RestoreOptions struct { 11 | Target storage.Storage 12 | TargetFile string 13 | DBConn database.Connection 14 | DatabasesMap map[string]string 15 | Compressor compression.Compressor 16 | Run uuid.UUID 17 | } 18 | -------------------------------------------------------------------------------- /pkg/core/scripts.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io/fs" 7 | "os" 8 | "os/exec" 9 | "path" 10 | 11 | "github.com/databacker/mysql-backup/pkg/util" 12 | "go.opentelemetry.io/otel/codes" 13 | ) 14 | 15 | // runScripts run scripts in a directory with a given environment. 
16 | func runScripts(ctx context.Context, dir string, env map[string]string) error { 17 | tracer := util.GetTracerFromContext(ctx) 18 | 19 | files, err := os.ReadDir(dir) 20 | // if the directory does not exist, do not worry about it 21 | if err != nil && os.IsNotExist(err) { 22 | return nil 23 | } 24 | if err != nil { 25 | return fmt.Errorf("error reading scripts directory %s: %v", dir, err) 26 | } 27 | for _, f := range files { 28 | _, span := tracer.Start(ctx, f.Name()) 29 | if err := runScript(ctx, dir, f, env); err != nil { 30 | span.SetStatus(codes.Error, err.Error()) 31 | span.End() 32 | return err 33 | } 34 | span.SetStatus(codes.Ok, "completed") 35 | span.End() 36 | } 37 | return nil 38 | } 39 | 40 | func runScript(ctx context.Context, dir string, f fs.DirEntry, env map[string]string) error { 41 | // ignore directories and any files we cannot execute 42 | fi, err := f.Info() 43 | if err != nil { 44 | return fmt.Errorf("error getting file info %s: %v", f.Name(), err) 45 | } 46 | if f.IsDir() || fi.Mode()&0111 == 0 { 47 | return nil 48 | } 49 | // execute the file 50 | envSlice := os.Environ() 51 | for k, v := range env { 52 | envSlice = append(envSlice, fmt.Sprintf("%s=%s", k, v)) 53 | } 54 | cmd := exec.Command(path.Join(dir, f.Name())) 55 | cmd.Env = envSlice 56 | if err := cmd.Run(); err != nil { 57 | return fmt.Errorf("error running file %s: %v", f.Name(), err) 58 | } 59 | return nil 60 | } 61 | -------------------------------------------------------------------------------- /pkg/core/timer.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "regexp" 7 | "strconv" 8 | "time" 9 | 10 | "github.com/robfig/cron/v3" 11 | ) 12 | 13 | type TimerOptions struct { 14 | Once bool 15 | Cron string 16 | Begin string 17 | Frequency int 18 | } 19 | 20 | type Update struct { 21 | // Last indicates whether this is the last update, with no more to come. 22 | // If true, perform this action and then end. 23 | Last bool 24 | } 25 | 26 | func sendTimer(c chan Update, last bool) { 27 | // make the channel write non-blocking 28 | select { 29 | case c <- Update{Last: last}: 30 | default: 31 | } 32 | } 33 | 34 | // Timer starts a timer that tells when to run an activity, based on its options. 35 | // Each time to run an activity is indicated via a message in a channel.
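// Illustrative examples (not from the source): TimerOptions{Once: true} fires exactly
// once; TimerOptions{Frequency: 60} fires every 60 minutes; TimerOptions{Cron: "0 0 * * *"}
// fires at midnight UTC.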
36 | func Timer(opts TimerOptions) (<-chan Update, error) { 37 | var ( 38 | delay time.Duration 39 | err error 40 | ) 41 | 42 | // parse the options to determine our delays 43 | if opts.Cron != "" { 44 | // calculate delay until next cron moment as defined 45 | now := time.Now().UTC() 46 | delay, err = waitForCron(opts.Cron, now) 47 | if err != nil { 48 | return nil, fmt.Errorf("invalid cron format '%s': %v", opts.Cron, err) 49 | } 50 | } else if opts.Begin != "" { 51 | // calculate delay based on begin time 52 | now := time.Now().UTC() 53 | delay, err = waitForBeginTime(opts.Begin, now) 54 | if err != nil { 55 | return nil, fmt.Errorf("invalid begin option '%s': %v", opts.Begin, err) 56 | } 57 | } 58 | 59 | // if delay is 0, this will do nothing, so it does not hurt 60 | time.Sleep(delay) 61 | 62 | c := make(chan Update) 63 | go func(opts TimerOptions) { 64 | // when this goroutine ends, close the channel 65 | defer close(c) 66 | 67 | // if once, ignore all delays and go 68 | if opts.Once { 69 | sendTimer(c, true) 70 | return 71 | } 72 | 73 | // create our delay and timer loop and go 74 | for { 75 | lastRun := time.Now().UTC() 76 | 77 | // not once - run the first backup 78 | sendTimer(c, false) 79 | 80 | if opts.Cron != "" { 81 | now := time.Now().UTC() 82 | delay, _ = waitForCron(opts.Cron, now) 83 | } else { 84 | // calculate how long until the next run 85 | // just take our last start time, and add the frequency until it is past our 86 | // current time. We cannot just take the last time and add, 87 | // because it might have been during a backup run 88 | now := time.Now().UTC() 89 | diff := int(now.Sub(lastRun).Minutes()) 90 | // make sure we at least wait one full frequency 91 | if diff == 0 { 92 | diff += opts.Frequency 93 | } 94 | passed := diff % opts.Frequency 95 | delay = time.Duration(opts.Frequency-passed) * time.Minute 96 | } 97 | 98 | // if delay is 0, this will do nothing, so it does not hurt 99 | time.Sleep(delay) 100 | } 101 | }(opts) 102 | return c, nil 103 | } 104 | 105 | func waitForBeginTime(begin string, from time.Time) (time.Duration, error) { 106 | 107 | // calculate how long to wait 108 | minsRe, err := regexp.Compile(`^\+([0-9]+)$`) 109 | if err != nil { 110 | return time.Duration(0), fmt.Errorf("invalid matcher for checking begin delay options: %v", err) 111 | } 112 | timeRe, err := regexp.Compile(`^([0-9][0-9])([0-9][0-9])$`) 113 | if err != nil { 114 | return time.Duration(0), fmt.Errorf("invalid matcher for checking begin delay options: %v", err) 115 | } 116 | 117 | // first look for +MM, which means delay MM minutes 118 | delayMinsParts := minsRe.FindStringSubmatch(begin) 119 | startTimeParts := timeRe.FindStringSubmatch(begin) 120 | 121 | var delay time.Duration 122 | switch { 123 | case len(delayMinsParts) > 1: 124 | delayMins, err := strconv.Atoi(delayMinsParts[1]) 125 | if err != nil { 126 | return time.Duration(0), fmt.Errorf("invalid format for begin delay '%s': %v", begin, err) 127 | } 128 | delay = time.Duration(delayMins) * time.Minute 129 | case len(startTimeParts) > 2: 130 | hour, err := strconv.Atoi(startTimeParts[1]) 131 | if err != nil { 132 | return time.Duration(0), fmt.Errorf("invalid format for begin delay '%s': %v", begin, err) 133 | } 134 | minute, err := strconv.Atoi(startTimeParts[2]) 135 | if err != nil { 136 | return time.Duration(0), fmt.Errorf("invalid format for begin delay '%s': %v", begin, err) 137 | } 138 | 139 | // convert that start time into a Duration to wait 140 | today := time.Date(from.Year(), from.Month(),
from.Day(), hour, minute, from.Second(), from.Nanosecond(), time.UTC) 141 | if today.After(from) { 142 | delay = today.Sub(from) 143 | } else { 144 | // add one day 145 | delay = today.Add(24 * time.Hour).Sub(from) 146 | } 147 | default: 148 | return time.Duration(0), fmt.Errorf("invalid format for begin delay '%s'", begin) 149 | } 150 | return delay, nil 151 | } 152 | 153 | // waitForCron given the current time and a cron string, calculate the Duration 154 | // until the next time we will match the cron 155 | func waitForCron(cronExpr string, from time.Time) (time.Duration, error) { 156 | sched, err := cron.ParseStandard(cronExpr) 157 | if err != nil { 158 | return time.Duration(0), err 159 | } 160 | // sched.Next() returns the next time that the cron expression will match, beginning in 1ns; 161 | // we allow matching current time, so we do it from 1ns 162 | next := sched.Next(from.Add(-1 * time.Nanosecond)) 163 | return next.Sub(from), nil 164 | } 165 | 166 | // Timer runs a command on a timer 167 | func (e *Executor) Timer(timerOpts TimerOptions, cmd func() error) error { 168 | c, err := Timer(timerOpts) 169 | if err != nil { 170 | e.Logger.Errorf("error creating timer: %v", err) 171 | os.Exit(1) 172 | } 173 | // block and wait for it 174 | for update := range c { 175 | if err := cmd(); err != nil { 176 | return fmt.Errorf("error running command: %w", err) 177 | } 178 | if update.Last { 179 | break 180 | } 181 | } 182 | return nil 183 | } 184 | -------------------------------------------------------------------------------- /pkg/core/timer_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestWaitForCron(t *testing.T) { 10 | tests := []struct { 11 | name string 12 | cron string 13 | from string 14 | wait time.Duration 15 | err error 16 | }{ 17 | {"current minute", "1 * * * *", "2018-10-10T10:01:00Z", 0, nil}, 18 | {"next minute", "1 * * * *", "2018-10-10T10:00:00Z", 1 * time.Minute, nil}, 19 | {"next day by hour", "* 1 * * *", "2018-10-10T10:00:00Z", 15 * time.Hour, nil}, 20 | {"current minute but seconds in", "1 * * * *", "2018-10-10T10:01:10Z", 59*time.Minute + 50*time.Second, nil}, // this line tests that we use the current minute, and not wait for "-10" 21 | {"midnight next day", "0 0 * * *", "2021-11-30T10:00:00Z", 14 * time.Hour, nil}, 22 | {"first day next month in next year", "0 0 1 * *", "2020-12-30T10:00:00Z", 14*time.Hour + 24*time.Hour, nil}, // this line tests that we can handle rolling month correctly 23 | } 24 | for _, tt := range tests { 25 | t.Run(tt.name, func(t *testing.T) { 26 | from, err := time.Parse(time.RFC3339, tt.from) 27 | if err != nil { 28 | t.Fatalf("unable to parse from %s: %v", tt.from, err) 29 | } 30 | result, err := waitForCron(tt.cron, from) 31 | switch { 32 | case (err != nil && tt.err == nil) || (err == nil && tt.err != nil) || (err != nil && tt.err != nil && err.Error() != tt.err.Error()): 33 | t.Errorf("waitForCron(%s, %s) error = %v, wantErr %v", tt.cron, tt.from, err, tt.err) 34 | case result != tt.wait: 35 | t.Errorf("waitForCron(%s, %s) = %v, want %v", tt.cron, tt.from, result, tt.wait) 36 | } 37 | }) 38 | } 39 | } 40 | 41 | func TestWaitForBeginTime(t *testing.T) { 42 | tests := []struct { 43 | name string 44 | begin string 45 | from string 46 | wait time.Duration 47 | err error 48 | }{ 49 | {"wait one minute", "+1", "2018-10-10T10:00:00Z", 1 * time.Minute, nil}, 50 | {"wait 999999 minutes", "+999999", 
"2018-10-10T10:00:00Z", 999999 * time.Minute, nil}, 51 | {"wait until 10:23", "1023", "2018-10-10T10:00:00Z", 23 * time.Minute, nil}, 52 | {"wait until 23:59", "2359", "2018-10-10T10:00:00Z", 13*time.Hour + 59*time.Minute, nil}, 53 | {"wait until 9:59", "0959", "2018-10-10T10:00:00Z", 23*time.Hour + 59*time.Minute, nil}, 54 | {"pass time over 24h", "2401", "2018-10-10T10:00:00Z", 14*time.Hour + time.Minute, nil}, //time.Time accepts values outside the usual ranges 55 | {"fail text", "today", "2018-10-10T10:00:00Z", time.Duration(0), fmt.Errorf("invalid format for begin delay 'today'")}, 56 | {"fail number", "1", "2018-10-10T10:00:00Z", time.Duration(0), fmt.Errorf("invalid format for begin delay '1'")}, 57 | {"fail +hour", "+1h", "2018-10-10T10:00:00Z", time.Duration(0), fmt.Errorf("invalid format for begin delay '+1h'")}, 58 | {"fail too long time", "12345", "2018-10-10T10:00:00Z", time.Duration(0), fmt.Errorf("invalid format for begin delay '12345'")}, 59 | } 60 | for _, tt := range tests { 61 | t.Run(tt.name, func(t *testing.T) { 62 | from, err := time.Parse(time.RFC3339, tt.from) 63 | if err != nil { 64 | t.Fatalf("unable to parse from %s: %v", tt.from, err) 65 | } 66 | result, err := waitForBeginTime(tt.begin, from) 67 | switch { 68 | case (err != nil && tt.err == nil) || (err == nil && tt.err != nil) || (err != nil && tt.err != nil && err.Error() != tt.err.Error()): 69 | t.Errorf("waitForBeginTime(%s, %s) error = %v, wantErr %v", tt.begin, tt.from, err, tt.err) 70 | case result != tt.wait: 71 | t.Errorf("waitForBeginTime(%s, %s) = %v, want %v", tt.begin, tt.from, result, tt.wait) 72 | } 73 | }) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /pkg/database/connection.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | mysql "github.com/go-sql-driver/mysql" 8 | ) 9 | 10 | type Connection struct { 11 | User string 12 | Pass string 13 | Host string 14 | Port int 15 | } 16 | 17 | func (c Connection) MySQL() string { 18 | config := mysql.NewConfig() 19 | config.User = c.User 20 | config.Passwd = c.Pass 21 | if strings.HasPrefix(c.Host, "/") { 22 | config.Net = "unix" 23 | config.Addr = c.Host 24 | } else { 25 | config.Net = "tcp" 26 | config.Addr = fmt.Sprintf("%s:%d", c.Host, c.Port) 27 | } 28 | config.ParseTime = true 29 | config.TLSConfig = "preferred" 30 | return config.FormatDSN() 31 | } 32 | -------------------------------------------------------------------------------- /pkg/database/dump.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "fmt" 7 | 8 | "github.com/databacker/mysql-backup/pkg/database/mysql" 9 | ) 10 | 11 | type DumpOpts struct { 12 | Compact bool 13 | Triggers bool 14 | Routines bool 15 | SuppressUseDatabase bool 16 | MaxAllowedPacket int 17 | } 18 | 19 | func Dump(ctx context.Context, dbconn Connection, opts DumpOpts, writers []DumpWriter) error { 20 | 21 | // TODO: dump data for each writer: 22 | // per schema 23 | // mysqldump --databases ${onedb} $MYSQLDUMP_OPTS 24 | // all at once 25 | // mysqldump -A $MYSQLDUMP_OPTS 26 | // all at once limited to some databases 27 | // mysqldump --databases $DB_DUMP_INCLUDE $MYSQLDUMP_OPTS 28 | for _, writer := range writers { 29 | db, err := sql.Open("mysql", dbconn.MySQL()) 30 | if err != nil { 31 | return fmt.Errorf("failed to open connection to database: %v", err) 
32 | } 33 | defer func() { _ = db.Close() }() 34 | for _, schema := range writer.Schemas { 35 | dumper := &mysql.Data{ 36 | Out: writer.Writer, 37 | Connection: db, 38 | Schema: schema, 39 | Host: dbconn.Host, 40 | Compact: opts.Compact, 41 | Triggers: opts.Triggers, 42 | Routines: opts.Routines, 43 | SuppressUseDatabase: opts.SuppressUseDatabase, 44 | MaxAllowedPacket: opts.MaxAllowedPacket, 45 | } 46 | if err := dumper.Dump(); err != nil { 47 | return fmt.Errorf("failed to dump database %s: %v", schema, err) 48 | } 49 | } 50 | } 51 | 52 | return nil 53 | } 54 | -------------------------------------------------------------------------------- /pkg/database/dumpwriter.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | type DumpWriter struct { 8 | Schemas []string 9 | Writer io.Writer 10 | } 11 | -------------------------------------------------------------------------------- /pkg/database/mysql/date.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import ( 4 | "database/sql/driver" 5 | "fmt" 6 | "time" 7 | ) 8 | 9 | // NullDate represents a time.Time that may be null. 10 | // NullDate implements the Scanner interface so 11 | // it can be used as a scan destination, similar to NullString. 12 | // It is distinct from sql.NullTime in that it can output formats only as a date 13 | type NullDate struct { 14 | Date time.Time 15 | Valid bool 16 | } 17 | 18 | // Scan implements the Scanner interface. 19 | func (n *NullDate) Scan(value any) error { 20 | if value == nil { 21 | n.Date, n.Valid = time.Time{}, false 22 | return nil 23 | } 24 | switch s := value.(type) { 25 | case string: 26 | t, err := time.Parse("2006-01-02", s) 27 | if err != nil { 28 | return err 29 | } 30 | n.Date = t 31 | n.Valid = true 32 | return nil 33 | case time.Time: 34 | n.Date = s 35 | n.Valid = true 36 | return nil 37 | } 38 | n.Valid = false 39 | return fmt.Errorf("unknown type %T for NullDate", value) 40 | } 41 | 42 | // Value implements the driver Valuer interface. 
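// (added note: a NULL date is returned as nil, while a valid one is rendered in the
// "2006-01-02" date-only format, mirroring the Scan method above)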
43 | func (n NullDate) Value() (driver.Value, error) { 44 | if !n.Valid { 45 | return nil, nil 46 | } 47 | return n.Date.Format("2006-01-02"), nil 48 | } 49 | -------------------------------------------------------------------------------- /pkg/database/mysql/sanitize.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import "strings" 4 | 5 | var lazyMySQLReplacer *strings.Replacer 6 | 7 | // sanitize MySQL based on 8 | // https://dev.mysql.com/doc/refman/8.0/en/string-literals.html table 9.1 9 | // needs to be placed in either a single or a double quoted string 10 | func sanitize(input string) string { 11 | if lazyMySQLReplacer == nil { 12 | lazyMySQLReplacer = strings.NewReplacer( 13 | "\x00", "\\0", 14 | "'", "\\'", 15 | "\"", "\\\"", 16 | "\b", "\\b", 17 | "\n", "\\n", 18 | "\r", "\\r", 19 | // "\t", "\\t", Tab literals are acceptable in reads 20 | "\x1A", "\\Z", // ASCII 26 == x1A 21 | "\\", "\\\\", 22 | // "%", "\\%", 23 | // "_", "\\_", 24 | ) 25 | } 26 | return lazyMySQLReplacer.Replace(input) 27 | } 28 | -------------------------------------------------------------------------------- /pkg/database/mysql/view.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "strings" 9 | "text/template" 10 | ) 11 | 12 | var _ Table = &view{} 13 | 14 | type view struct { 15 | baseTable 16 | charset string 17 | collation string 18 | } 19 | 20 | var viewFullTemplate0, viewCompactTemplate0, viewFullTemplate1, viewCompactTemplate1 *template.Template 21 | 22 | func init() { 23 | tmpl, err := template.New("mysqldumpView").Funcs(template.FuncMap{ 24 | "sub": sub, 25 | "esc": esc, 26 | }).Parse(viewTmpl0) 27 | if err != nil { 28 | panic(fmt.Errorf("could not parse view template: %w", err)) 29 | } 30 | viewFullTemplate0 = tmpl 31 | 32 | tmpl, err = template.New("mysqldumpView").Funcs(template.FuncMap{ 33 | "sub": sub, 34 | "esc": esc, 35 | }).Parse(viewTmpl1) 36 | if err != nil { 37 | panic(fmt.Errorf("could not parse view template: %w", err)) 38 | } 39 | viewFullTemplate1 = tmpl 40 | 41 | tmpl, err = template.New("mysqldumpViewCompact").Funcs(template.FuncMap{ 42 | "sub": sub, 43 | "esc": esc, 44 | }).Parse(viewTmplCompact0) 45 | if err != nil { 46 | panic(fmt.Errorf("could not parse view compact template: %w", err)) 47 | } 48 | viewCompactTemplate0 = tmpl 49 | 50 | tmpl, err = template.New("mysqldumpViewCompact").Funcs(template.FuncMap{ 51 | "sub": sub, 52 | "esc": esc, 53 | }).Parse(viewTmplCompact1) 54 | if err != nil { 55 | panic(fmt.Errorf("could not parse view compact template: %w", err)) 56 | } 57 | viewCompactTemplate1 = tmpl 58 | } 59 | 60 | func (v *view) CreateSQL() ([]string, error) { 61 | var tableReturn, tableSQL, charSetClient, collationConnection sql.NullString 62 | if err := v.data.tx.QueryRow("SHOW CREATE VIEW "+esc(v.Name())).Scan(&tableReturn, &tableSQL, &charSetClient, &collationConnection); err != nil { 63 | return nil, err 64 | } 65 | 66 | if tableReturn.String != v.Name() { 67 | return nil, errors.New("returned view is not the same as requested view") 68 | } 69 | 70 | // this comes in one string, which we need to break down into 3 parts for the template 71 | // CREATE ALGORITHM=UNDEFINED DEFINER=`testadmin`@`%` SQL SECURITY DEFINER VIEW `view1` AS select `t1`.`id` AS `id`,`t1`.`name` AS `name` from `t1` 72 | // becomes: 73 | // CREATE ALGORITHM=UNDEFINED 74 | // DEFINER=`testadmin`@`%` SQL SECURITY 
DEFINER 75 | // VIEW `view1` AS select `t1`.`id` AS `id`,`t1`.`name` AS `name` from `t1` 76 | in := tableSQL.String 77 | indexDefiner := strings.Index(in, "DEFINER") 78 | indexView := strings.Index(in, "VIEW") 79 | 80 | parts := make([]string, 3) 81 | parts[0] = strings.TrimSpace(in[:indexDefiner]) 82 | parts[1] = strings.TrimSpace(in[indexDefiner:indexView]) 83 | parts[2] = strings.TrimSpace(in[indexView:]) 84 | 85 | v.charset = charSetClient.String 86 | v.collation = collationConnection.String 87 | 88 | return parts, nil 89 | } 90 | 91 | // SELECT TABLE_NAME,CHARACTER_SET_CLIENT,COLLATION_CONNECTION FROM INFORMATION_SCHEMA.VIEWS; 92 | func (v *view) Init() error { 93 | if err := v.initColumnData(); err != nil { 94 | return fmt.Errorf("failed to initialize column data for view %s: %w", v.name, err) 95 | } 96 | var tableName, charSetClient, collationConnection sql.NullString 97 | 98 | if err := v.data.tx.QueryRow("SELECT TABLE_NAME,CHARACTER_SET_CLIENT,COLLATION_CONNECTION FROM INFORMATION_SCHEMA.VIEWS WHERE table_name = '"+v.name+"'").Scan(&tableName, &charSetClient, &collationConnection); err != nil { 99 | return fmt.Errorf("failed to get view information schema for view %s: %w", v.name, err) 100 | } 101 | if tableName.String != v.name { 102 | return fmt.Errorf("returned view name %s is not the same as requested view %s", tableName.String, v.name) 103 | } 104 | if !charSetClient.Valid { 105 | return fmt.Errorf("returned charset is not valid for view %s", v.name) 106 | } 107 | if !collationConnection.Valid { 108 | return fmt.Errorf("returned collation is not valid for view %s", v.name) 109 | } 110 | v.charset = charSetClient.String 111 | v.collation = collationConnection.String 112 | return nil 113 | } 114 | 115 | func (v *view) Execute(out io.Writer, compact bool, part int) error { 116 | var tmpl *template.Template 117 | switch part { 118 | case 0: 119 | tmpl = viewFullTemplate0 120 | if compact { 121 | tmpl = viewCompactTemplate0 122 | } 123 | case 1: 124 | tmpl = viewFullTemplate1 125 | if compact { 126 | tmpl = viewCompactTemplate1 127 | } 128 | default: 129 | return fmt.Errorf("invalid part %d for view %s", part, v.name) 130 | } 131 | return tmpl.Execute(out, v) 132 | } 133 | 134 | func (v *view) Charset() string { 135 | return v.charset 136 | } 137 | 138 | func (v *view) Collation() string { 139 | return v.collation 140 | } 141 | 142 | // takes a Table, but is a view 143 | const viewTmpl0 = ` 144 | -- 145 | -- Temporary view structure for view {{ esc .Name }} 146 | -- 147 | 148 | DROP TABLE IF EXISTS {{ esc .Name }}; 149 | /*!50001 DROP VIEW IF EXISTS {{ esc .Name }}*/; 150 | SET @saved_cs_client = @@character_set_client; 151 | /*!50503 SET character_set_client = utf8mb4 */; 152 | /*!50001 CREATE VIEW {{ esc .Name }} AS SELECT 153 | {{ $columns := .Columns }}{{ range $index, $column := .Columns }} 1 AS {{ esc $column }}{{ if ne $index (sub (len $columns) 1) }},{{ printf "%c" 10 }}{{ else }}*/;{{ end }}{{ end }} 154 | SET character_set_client = @saved_cs_client; 155 | ` 156 | 157 | const viewTmpl1 = ` 158 | -- 159 | -- Current Database: {{ esc .Database }} 160 | -- 161 | 162 | USE {{ esc .Database }}; 163 | 164 | -- 165 | -- Final view structure for view {{ esc .Name }} 166 | -- 167 | 168 | /*!50001 DROP VIEW IF EXISTS {{ esc .Name }}*/; 169 | /*!50001 SET @saved_cs_client = @@character_set_client */; 170 | /*!50001 SET @saved_cs_results = @@character_set_results */; 171 | /*!50001 SET @saved_col_connection = @@collation_connection */; 172 | /*!50001 SET character_set_client = {{ 
.Charset }} */; 173 | /*!50001 SET character_set_results = {{ .Charset }} */; 174 | /*!50001 SET collation_connection = {{ .Collation }} */; 175 | /*!50001 {{ $sql := .CreateSQL }}{{ index $sql 0 }} */ 176 | /*!50013 {{ index $sql 1 }} */ 177 | /*!50001 {{ index $sql 2 }} */; 178 | /*!50001 SET character_set_client = @saved_cs_client */; 179 | /*!50001 SET character_set_results = @saved_cs_results */; 180 | /*!50001 SET collation_connection = @saved_col_connection */; 181 | ` 182 | 183 | const viewTmplCompact0 = ` 184 | SET @saved_cs_client = @@character_set_client; 185 | /*!50503 SET character_set_client = utf8mb4 */; 186 | /*!50001 CREATE VIEW {{ esc .Name }} AS SELECT 187 | {{ $columns := .Columns }}{{ range $index, $column := .Columns }} 1 AS {{ esc $column }}{{ if ne $index (sub (len $columns) 1) }},{{ printf "%c" 10 }}{{ else }}*/;{{ end }}{{ end }} 188 | SET character_set_client = @saved_cs_client; 189 | ` 190 | 191 | const viewTmplCompact1 = ` 192 | USE {{ esc .Database }}; 193 | /*!50001 DROP VIEW IF EXISTS {{ esc .Name }}*/; 194 | /*!50001 SET @saved_cs_client = @@character_set_client */; 195 | /*!50001 SET @saved_cs_results = @@character_set_results */; 196 | /*!50001 SET @saved_col_connection = @@collation_connection */; 197 | /*!50001 SET character_set_client = {{ .Charset }} */; 198 | /*!50001 SET character_set_results = {{ .Charset }} */; 199 | /*!50001 SET collation_connection = {{ .Collation }} */; 200 | /*!50001 {{ $sql := .CreateSQL }}{{ index $sql 0 }} */ 201 | /*!50013 {{ index $sql 1 }} */ 202 | /*!50001 {{ index $sql 2 }} */; 203 | /*!50001 SET character_set_client = @saved_cs_client */; 204 | /*!50001 SET character_set_results = @saved_cs_results */; 205 | /*!50001 SET collation_connection = @saved_col_connection */; 206 | ` 207 | -------------------------------------------------------------------------------- /pkg/database/restore.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "database/sql" 7 | "fmt" 8 | "io" 9 | "regexp" 10 | ) 11 | 12 | const ( 13 | // default max buffer size for the Scanner; counterpart of the dump-side max allowed packet 14 | defaultMaxAllowedPacket = 4194304 15 | ) 16 | 17 | var ( 18 | useRegex = regexp.MustCompile(`(?i)^(USE\s*` + "`" + `)([^\s]+)(` + "`" + `\s*;)$`) 19 | createRegex = regexp.MustCompile(`(?i)^(CREATE\s+DATABASE\s*(\/\*.*\*\/\s*)?` + "`" + `)([^\s]+)(` + "`" + `\s*(\s*\/\*.*\*\/\s*)?\s*;$)`) 20 | ) 21 | 22 | func Restore(ctx context.Context, dbconn Connection, databasesMap map[string]string, readers []io.ReadSeeker) error { 23 | db, err := sql.Open("mysql", dbconn.MySQL()) 24 | if err != nil { 25 | return fmt.Errorf("failed to open connection to database: %v", err) 26 | } 27 | defer func() { _ = db.Close() }() 28 | 29 | // load data into database by reading from each reader 30 | for _, r := range readers { 31 | tx, err := db.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable}) 32 | if err != nil { 33 | return fmt.Errorf("failed to restore database: %w", err) 34 | } 35 | scanBuf := []byte{} 36 | scanner := bufio.NewScanner(r) 37 | // increase the buffer size 38 | scanner.Buffer(scanBuf, defaultMaxAllowedPacket) //TODO should be a configurable option like with dump 39 | var current string 40 | for scanner.Scan() { 41 | line := scanner.Text() 42 | if line == "" { 43 | continue 44 | } 45 | current += line + "\n" 46 | if line[len(line)-1] != ';' { 47 | continue 48 | } 49 | // if we have the line that sets the database, and we need to
replace, replace it 50 | if createRegex.MatchString(current) { 51 | dbName := createRegex.FindStringSubmatch(current)[3] 52 | if newName, ok := databasesMap[dbName]; ok { 53 | current = createRegex.ReplaceAllString(current, fmt.Sprintf("${1}%s${4}", newName)) 54 | } 55 | } 56 | if useRegex.MatchString(current) { 57 | dbName := useRegex.FindStringSubmatch(current)[2] 58 | if newName, ok := databasesMap[dbName]; ok { 59 | current = useRegex.ReplaceAllString(current, fmt.Sprintf("${1}%s${3}", newName)) 60 | } 61 | } 62 | // we hit a break, so we have the entire transaction 63 | if _, err := tx.Exec(current); err != nil { 64 | _ = tx.Rollback() 65 | return fmt.Errorf("failed to restore database: %w", err) 66 | } 67 | current = "" 68 | } 69 | if err := scanner.Err(); err != nil { 70 | return fmt.Errorf("failed to restore database: %w", err) 71 | } 72 | if err := tx.Commit(); err != nil { 73 | return fmt.Errorf("failed to restore database: %w", err) 74 | } 75 | } 76 | 77 | return nil 78 | } 79 | -------------------------------------------------------------------------------- /pkg/database/schemas.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | ) 7 | 8 | var ( 9 | excludeSchemaList = []string{"information_schema", "performance_schema", "sys", "mysql"} 10 | excludeSchemas = map[string]bool{} 11 | ) 12 | 13 | func init() { 14 | for _, schema := range excludeSchemaList { 15 | excludeSchemas[schema] = true 16 | } 17 | } 18 | 19 | func GetSchemas(dbconn Connection) ([]string, error) { 20 | db, err := sql.Open("mysql", dbconn.MySQL()) 21 | if err != nil { 22 | return nil, fmt.Errorf("failed to open connection to database: %v", err) 23 | } 24 | defer func() { _ = db.Close() }() 25 | 26 | // TODO: get list of schemas 27 | // mysql -h $DB_SERVER -P $DB_PORT $DBUSER $DBPASS -N -e 'show databases' 28 | rows, err := db.Query("show databases") 29 | if err != nil { 30 | return nil, fmt.Errorf("could not get schemas: %v", err) 31 | } 32 | defer func() { _ = rows.Close() }() 33 | 34 | names := []string{} 35 | for rows.Next() { 36 | var name string 37 | err := rows.Scan(&name) 38 | if err != nil { 39 | return nil, fmt.Errorf("error getting database name: %v", err) 40 | } 41 | if _, ok := excludeSchemas[name]; ok { 42 | continue 43 | } 44 | names = append(names, name) 45 | } 46 | 47 | return names, nil 48 | } 49 | -------------------------------------------------------------------------------- /pkg/encrypt/aes256cbc.go: -------------------------------------------------------------------------------- 1 | package encrypt 2 | 3 | import ( 4 | "bytes" 5 | "crypto/aes" 6 | "crypto/cipher" 7 | "crypto/rand" 8 | "fmt" 9 | "io" 10 | ) 11 | 12 | var _ Encryptor = &AES256CBC{} 13 | 14 | type AES256CBC struct { 15 | key []byte 16 | iv []byte 17 | prependIV bool 18 | } 19 | 20 | func NewAES256CBC(key, iv []byte, prependIV bool) (*AES256CBC, error) { 21 | if len(key) != 32 { 22 | return nil, fmt.Errorf("key must be 32 bytes") 23 | } 24 | return &AES256CBC{ 25 | key: key, 26 | iv: iv, 27 | prependIV: prependIV, 28 | }, nil 29 | } 30 | 31 | type cbcEncryptWriter struct { 32 | writer io.Writer 33 | mode cipher.BlockMode 34 | buf []byte 35 | closed bool 36 | } 37 | 38 | func (w *cbcEncryptWriter) Write(p []byte) (int, error) { 39 | total := 0 40 | for len(p) > 0 { 41 | n := aes.BlockSize - len(w.buf) 42 | if n > len(p) { 43 | n = len(p) 44 | } 45 | w.buf = append(w.buf, p[:n]...) 
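// (added note: w.buf accumulates partial input until a full 16-byte AES block is
// available; each complete block is encrypted and flushed below, and any final
// remainder is padded and flushed in Close)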
46 | p = p[n:] 47 | total += n 48 | 49 | if len(w.buf) == aes.BlockSize { 50 | block := make([]byte, aes.BlockSize) 51 | w.mode.CryptBlocks(block, w.buf) 52 | if _, err := w.writer.Write(block); err != nil { 53 | return total, err 54 | } 55 | w.buf = w.buf[:0] 56 | } 57 | } 58 | return total, nil 59 | } 60 | 61 | func (w *cbcEncryptWriter) Close() error { 62 | if w.closed { 63 | return nil 64 | } 65 | w.closed = true 66 | 67 | // PKCS#7 padding 68 | padLen := aes.BlockSize - len(w.buf)%aes.BlockSize 69 | padding := bytes.Repeat([]byte{byte(padLen)}, padLen) 70 | w.buf = append(w.buf, padding...) 71 | 72 | // Encrypt all remaining blocks in w.buf 73 | out := make([]byte, len(w.buf)) 74 | w.mode.CryptBlocks(out, w.buf) 75 | 76 | // Write to the underlying writer 77 | _, err := w.writer.Write(out) 78 | return err 79 | } 80 | 81 | type cbcDecryptWriter struct { 82 | writer io.Writer 83 | block cipher.Block 84 | mode cipher.BlockMode 85 | buf []byte 86 | iv []byte 87 | closed bool 88 | readIV bool 89 | } 90 | 91 | func (w *cbcDecryptWriter) Write(p []byte) (int, error) { 92 | total := 0 93 | w.buf = append(w.buf, p...) 94 | 95 | if !w.readIV && len(w.buf) >= aes.BlockSize { 96 | w.iv = w.buf[:aes.BlockSize] 97 | w.mode = cipher.NewCBCDecrypter(w.block, w.iv) 98 | w.buf = w.buf[aes.BlockSize:] 99 | w.readIV = true 100 | } 101 | 102 | for len(w.buf) >= aes.BlockSize*2 { 103 | block := w.buf[:aes.BlockSize] 104 | dst := make([]byte, aes.BlockSize) 105 | w.mode.CryptBlocks(dst, block) 106 | if _, err := w.writer.Write(dst); err != nil { 107 | return total, err 108 | } 109 | w.buf = w.buf[aes.BlockSize:] 110 | total += aes.BlockSize 111 | } 112 | 113 | return total, nil 114 | } 115 | 116 | func (w *cbcDecryptWriter) Close() error { 117 | if w.closed { 118 | return nil 119 | } 120 | w.closed = true 121 | 122 | if len(w.buf) != aes.BlockSize { 123 | return fmt.Errorf("incomplete final block") 124 | } 125 | block := make([]byte, aes.BlockSize) 126 | w.mode.CryptBlocks(block, w.buf) 127 | 128 | // Remove PKCS#7 padding 129 | padLen := int(block[len(block)-1]) 130 | if padLen <= 0 || padLen > aes.BlockSize { 131 | return fmt.Errorf("invalid padding") 132 | } 133 | for _, b := range block[len(block)-padLen:] { 134 | if int(b) != padLen { 135 | return fmt.Errorf("invalid padding content") 136 | } 137 | } 138 | _, err := w.writer.Write(block[:len(block)-padLen]) 139 | return err 140 | } 141 | func (s *AES256CBC) Name() string { 142 | return string(AlgoDirectAES256CBC) 143 | } 144 | 145 | func (s *AES256CBC) Description() string { 146 | return "AES-256-CBC output format with IV prepended; should work with `openssl enc -d -aes-256-cbc -K <key-in-hex> -iv <iv-in-hex>`, with the IV taken from the first 16 bytes of the output."
147 | } 148 | 149 | func (s *AES256CBC) Decrypt(out io.Writer) (io.WriteCloser, error) { 150 | if len(s.key) != 32 { 151 | return nil, fmt.Errorf("key must be 32 bytes") 152 | } 153 | 154 | block, err := aes.NewCipher(s.key) 155 | if err != nil { 156 | return nil, err 157 | } 158 | 159 | // The returned WriteCloser will buffer input until it receives at least one full block 160 | return &cbcDecryptWriter{ 161 | writer: out, 162 | block: block, 163 | }, nil 164 | } 165 | 166 | func (s *AES256CBC) Encrypt(out io.Writer) (io.WriteCloser, error) { 167 | if len(s.key) != 32 { 168 | return nil, fmt.Errorf("key must be 32 bytes") 169 | } 170 | 171 | block, err := aes.NewCipher(s.key) 172 | if err != nil { 173 | return nil, err 174 | } 175 | 176 | iv := s.iv 177 | if len(iv) == 0 { 178 | iv = make([]byte, aes.BlockSize) 179 | if _, err := rand.Read(iv); err != nil { 180 | return nil, fmt.Errorf("failed to generate IV: %w", err) 181 | } 182 | } 183 | if s.prependIV { 184 | if _, err := out.Write(iv); err != nil { 185 | return nil, fmt.Errorf("failed to write IV to output: %w", err) 186 | } 187 | } 188 | 189 | mode := cipher.NewCBCEncrypter(block, iv) 190 | 191 | return &cbcEncryptWriter{ 192 | writer: out, 193 | mode: mode, 194 | buf: make([]byte, 0, aes.BlockSize), 195 | }, nil 196 | } 197 | -------------------------------------------------------------------------------- /pkg/encrypt/age.go: -------------------------------------------------------------------------------- 1 | package encrypt 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | 8 | "filippo.io/age" 9 | ) 10 | 11 | var _ Encryptor = &AgeChacha20Poly1305{} 12 | 13 | type AgeChacha20Poly1305 struct { 14 | recipientPubKey string 15 | identityKey string 16 | } 17 | 18 | func NewAgeChacha20Poly1305(recipientPubKey []byte) (*AgeChacha20Poly1305, error) { 19 | key := string(recipientPubKey) 20 | return &AgeChacha20Poly1305{recipientPubKey: key}, nil 21 | } 22 | 23 | func (a *AgeChacha20Poly1305) Name() string { 24 | return string(AlgoAgeChacha20Poly1305) 25 | } 26 | 27 | func (a *AgeChacha20Poly1305) Description() string { 28 | return "age format with encryption using chacha20 and poly1305." 
29 | } 30 | 31 | func (a *AgeChacha20Poly1305) Decrypt(out io.Writer) (io.WriteCloser, error) { 32 | identity, err := age.ParseX25519Identity(a.identityKey) 33 | if err != nil { 34 | return nil, fmt.Errorf("invalid age identity: %w", err) 35 | } 36 | 37 | // Buffer encrypted input until Close() 38 | buf := &bytes.Buffer{} 39 | return &ageDecryptWriter{ 40 | identity: identity, 41 | buf: buf, 42 | out: out, 43 | }, nil 44 | } 45 | 46 | func (a *AgeChacha20Poly1305) Encrypt(out io.Writer) (io.WriteCloser, error) { 47 | // Parse the recipient's X25519 public key 48 | recipient, err := age.ParseX25519Recipient(a.recipientPubKey) 49 | if err != nil { 50 | return nil, fmt.Errorf("invalid recipient public key: %w", err) 51 | } 52 | 53 | // Create an age Writer that encrypts to the recipient 54 | ageWriter, err := age.Encrypt(out, recipient) 55 | if err != nil { 56 | return nil, fmt.Errorf("failed to initialize age writer: %w", err) 57 | } 58 | 59 | return ageWriter, nil // already an io.WriteCloser 60 | } 61 | 62 | type ageDecryptWriter struct { 63 | identity *age.X25519Identity 64 | buf *bytes.Buffer 65 | out io.Writer 66 | closed bool 67 | } 68 | 69 | func (w *ageDecryptWriter) Write(p []byte) (int, error) { 70 | return w.buf.Write(p) 71 | } 72 | 73 | func (w *ageDecryptWriter) Close() error { 74 | if w.closed { 75 | return nil 76 | } 77 | w.closed = true 78 | 79 | reader, err := age.Decrypt(w.buf, w.identity) 80 | if err != nil { 81 | return fmt.Errorf("age decryption failed: %w", err) 82 | } 83 | 84 | _, err = io.Copy(w.out, reader) 85 | return err 86 | } 87 | -------------------------------------------------------------------------------- /pkg/encrypt/chacha20poly1305.go: -------------------------------------------------------------------------------- 1 | package encrypt 2 | 3 | import ( 4 | "bytes" 5 | "crypto/cipher" 6 | "crypto/rand" 7 | "fmt" 8 | "io" 9 | 10 | "golang.org/x/crypto/chacha20poly1305" 11 | ) 12 | 13 | var _ Encryptor = &Chacha20Poly1305{} 14 | 15 | type Chacha20Poly1305 struct { 16 | key []byte 17 | } 18 | 19 | func NewChacha20Poly1305(key []byte) (*Chacha20Poly1305, error) { 20 | if len(key) != 32 { 21 | return nil, fmt.Errorf("key length must be 32 bytes for Chacha20Poly1305, not %d", len(key)) 22 | } 23 | return &Chacha20Poly1305{key: key}, nil 24 | } 25 | 26 | func (s *Chacha20Poly1305) Name() string { 27 | return string(AlgoChacha20Poly1305) 28 | } 29 | 30 | func (s *Chacha20Poly1305) Description() string { 31 | return "Chacha20-Poly1305 encryption." 
32 | } 33 | 34 | func (s *Chacha20Poly1305) Decrypt(out io.Writer) (io.WriteCloser, error) { 35 | if len(s.key) != chacha20poly1305.KeySize { 36 | return nil, fmt.Errorf("key must be 32 bytes") 37 | } 38 | 39 | aead, err := chacha20poly1305.New(s.key) 40 | if err != nil { 41 | return nil, fmt.Errorf("failed to create chacha20poly1305: %w", err) 42 | } 43 | 44 | return &chacha20DecryptWriter{ 45 | aead: aead, 46 | out: out, 47 | buf: &bytes.Buffer{}, 48 | }, nil 49 | } 50 | 51 | func (s *Chacha20Poly1305) Encrypt(out io.Writer) (io.WriteCloser, error) { 52 | if len(s.key) != chacha20poly1305.KeySize { 53 | return nil, fmt.Errorf("key must be 32 bytes") 54 | } 55 | 56 | aead, err := chacha20poly1305.New(s.key) 57 | if err != nil { 58 | return nil, fmt.Errorf("failed to create chacha20poly1305: %w", err) 59 | } 60 | 61 | nonce := make([]byte, chacha20poly1305.NonceSize) 62 | if _, err := rand.Read(nonce); err != nil { 63 | return nil, fmt.Errorf("failed to generate nonce: %w", err) 64 | } 65 | 66 | // Write the nonce first 67 | if _, err := out.Write(nonce); err != nil { 68 | return nil, fmt.Errorf("failed to write nonce: %w", err) 69 | } 70 | 71 | return &chacha20EncryptWriter{ 72 | aead: aead, 73 | nonce: nonce, 74 | out: out, 75 | buffer: &bytes.Buffer{}, 76 | }, nil 77 | } 78 | 79 | type chacha20EncryptWriter struct { 80 | aead cipher.AEAD 81 | nonce []byte 82 | out io.Writer 83 | buffer *bytes.Buffer 84 | closed bool 85 | } 86 | 87 | func (w *chacha20EncryptWriter) Write(p []byte) (int, error) { 88 | return w.buffer.Write(p) 89 | } 90 | 91 | func (w *chacha20EncryptWriter) Close() error { 92 | if w.closed { 93 | return nil 94 | } 95 | w.closed = true 96 | 97 | ciphertext := w.aead.Seal(nil, w.nonce, w.buffer.Bytes(), nil) 98 | _, err := w.out.Write(ciphertext) 99 | return err 100 | } 101 | 102 | type chacha20DecryptWriter struct { 103 | aead cipher.AEAD 104 | out io.Writer 105 | buf *bytes.Buffer 106 | closed bool 107 | } 108 | 109 | func (w *chacha20DecryptWriter) Write(p []byte) (int, error) { 110 | return w.buf.Write(p) 111 | } 112 | 113 | func (w *chacha20DecryptWriter) Close() error { 114 | if w.closed { 115 | return nil 116 | } 117 | w.closed = true 118 | 119 | data := w.buf.Bytes() 120 | if len(data) < chacha20poly1305.NonceSize { 121 | return fmt.Errorf("missing nonce or ciphertext") 122 | } 123 | 124 | nonce := data[:chacha20poly1305.NonceSize] 125 | ciphertext := data[chacha20poly1305.NonceSize:] 126 | 127 | plaintext, err := w.aead.Open(nil, nonce, ciphertext, nil) 128 | if err != nil { 129 | return fmt.Errorf("decryption failed: %w", err) 130 | } 131 | 132 | _, err = w.out.Write(plaintext) 133 | return err 134 | } 135 | -------------------------------------------------------------------------------- /pkg/encrypt/const.go: -------------------------------------------------------------------------------- 1 | package encrypt 2 | 3 | import "github.com/databacker/api/go/api" 4 | 5 | const ( 6 | AlgoSMimeAES256CBC = api.EncryptionAlgorithmSmimeAes256Cbc 7 | AlgoDirectAES256CBC = api.EncryptionAlgorithmAes256Cbc 8 | AlgoPBKDF2AES256CBC = api.EncryptionAlgorithmPbkdf2Aes256Cbc 9 | AlgoAgeChacha20Poly1305 = api.EncryptionAlgorithmAgeChacha20Poly1305 10 | AlgoChacha20Poly1305 = api.EncryptionAlgorithmChacha20Poly1305 11 | ) 12 | 13 | var All = []string{ 14 | string(AlgoSMimeAES256CBC), 15 | string(AlgoDirectAES256CBC), 16 | string(AlgoPBKDF2AES256CBC), 17 | string(AlgoAgeChacha20Poly1305), 18 | string(AlgoChacha20Poly1305), 19 | } 20 | 
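A minimal usage sketch for this package (illustrative only; the algorithm name and the 32-byte key requirement follow the constructors above, and GetEncryptor is defined in encryptor.go below):

package main

import (
	"bytes"
	"crypto/rand"
	"log"

	"github.com/databacker/mysql-backup/pkg/encrypt"
)

func main() {
	// 32-byte key, as required by NewChacha20Poly1305 and NewAES256CBC.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		log.Fatalf("failed to generate key: %v", err)
	}
	enc, err := encrypt.GetEncryptor(string(encrypt.AlgoChacha20Poly1305), key)
	if err != nil {
		log.Fatalf("failed to get encryptor: %v", err)
	}
	var out bytes.Buffer
	w, err := enc.Encrypt(&out)
	if err != nil {
		log.Fatalf("failed to start encryption: %v", err)
	}
	if _, err := w.Write([]byte("dump contents")); err != nil {
		log.Fatalf("failed to write: %v", err)
	}
	// Close seals and flushes the payload; for chacha20poly1305 the nonce was
	// already written to out when Encrypt was called.
	if err := w.Close(); err != nil {
		log.Fatalf("failed to finish encryption: %v", err)
	}
	log.Printf("encrypted %d bytes", out.Len())
}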
-------------------------------------------------------------------------------- /pkg/encrypt/encryptor.go: -------------------------------------------------------------------------------- 1 | package encrypt 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | 7 | "github.com/databacker/api/go/api" 8 | ) 9 | 10 | type Encryptor interface { 11 | Name() string 12 | Description() string 13 | Decrypt(out io.Writer) (io.WriteCloser, error) 14 | Encrypt(out io.Writer) (io.WriteCloser, error) 15 | } 16 | 17 | func GetEncryptor(name string, key []byte) (Encryptor, error) { 18 | var ( 19 | enc Encryptor 20 | err error 21 | ) 22 | nameEnum := api.EncryptionAlgorithm(name) 23 | switch nameEnum { 24 | case AlgoSMimeAES256CBC: 25 | enc, err = NewSMimeAES256CBC(key) 26 | case AlgoDirectAES256CBC: 27 | enc, err = NewAES256CBC(key, nil, true) 28 | case AlgoPBKDF2AES256CBC: 29 | enc, err = NewPBKDF2AES256CBC(key) 30 | case AlgoAgeChacha20Poly1305: 31 | enc, err = NewAgeChacha20Poly1305(key) 32 | case AlgoChacha20Poly1305: 33 | enc, err = NewChacha20Poly1305(key) 34 | default: 35 | return nil, fmt.Errorf("unknown encryption format: %s", name) 36 | } 37 | return enc, err 38 | } 39 | -------------------------------------------------------------------------------- /pkg/encrypt/pbkdf2aes256cbc.go: -------------------------------------------------------------------------------- 1 | package encrypt 2 | 3 | import ( 4 | "crypto/rand" 5 | "crypto/sha256" 6 | "fmt" 7 | "io" 8 | 9 | "golang.org/x/crypto/pbkdf2" 10 | ) 11 | 12 | const ( 13 | pbkdf2KeyLen = 32 14 | pbkdf2SaltSize = 16 15 | pbkdf2Iterations = 10000 16 | ) 17 | 18 | var _ Encryptor = &PBKDF2AES256CBC{} 19 | 20 | type PBKDF2AES256CBC struct { 21 | passphrase []byte 22 | } 23 | 24 | func NewPBKDF2AES256CBC(passphrase []byte) (*PBKDF2AES256CBC, error) { 25 | if len(passphrase) == 0 { 26 | return nil, fmt.Errorf("passphrase cannot be empty") 27 | } 28 | return &PBKDF2AES256CBC{passphrase: passphrase}, nil 29 | } 30 | 31 | func (s *PBKDF2AES256CBC) Name() string { 32 | return string(AlgoPBKDF2AES256CBC) 33 | } 34 | 35 | func (s *PBKDF2AES256CBC) Description() string { 36 | return "PBKDF2 with AES256-CBC encryption. 
Should work with `openssl enc -d -aes-256-cbc -pbkdf2 -pass pass:<passphrase>`" 37 | } 38 | 39 | func (s *PBKDF2AES256CBC) Decrypt(out io.Writer) (io.WriteCloser, error) { 40 | // Return a WriteCloser that buffers the salt, derives the key, and then streams decryption 41 | pr := &pbkdf2DecryptReader{ 42 | passphrase: s.passphrase, 43 | out: out, 44 | buf: make([]byte, 0, pbkdf2SaltSize), 45 | } 46 | return pr, nil 47 | } 48 | 49 | func (s *PBKDF2AES256CBC) Encrypt(out io.Writer) (io.WriteCloser, error) { 50 | // Step 1: Generate a random salt (used by OpenSSL) 51 | salt := make([]byte, 8) 52 | if _, err := rand.Read(salt); err != nil { 53 | return nil, fmt.Errorf("failed to generate salt: %w", err) 54 | } 55 | 56 | // Step 2: Derive 48 bytes (a 32-byte key plus a 16-byte IV) using PBKDF2 with SHA-256 57 | keyComplete := pbkdf2.Key(s.passphrase, salt, pbkdf2Iterations, 48, sha256.New) 58 | key := keyComplete[:pbkdf2KeyLen] 59 | iv := keyComplete[32:] 60 | 61 | // Step 3: Write OpenSSL's "Salted__" header and the salt to the output stream 62 | if _, err := out.Write([]byte("Salted__")); err != nil { 63 | return nil, fmt.Errorf("failed to write salt header: %w", err) 64 | } 65 | if _, err := out.Write(salt); err != nil { 66 | return nil, fmt.Errorf("failed to write salt: %w", err) 67 | } 68 | 69 | // Step 4: Delegate to AES256CBC using the derived key 70 | aes, err := NewAES256CBC(key, iv, false) 71 | if err != nil { 72 | return nil, err 73 | } 74 | return aes.Encrypt(out) 75 | } 76 | 77 | type pbkdf2DecryptReader struct { 78 | passphrase []byte 79 | out io.Writer 80 | buf []byte 81 | aes io.WriteCloser 82 | err error 83 | closed bool 84 | } 85 | 86 | func (r *pbkdf2DecryptReader) Write(p []byte) (int, error) { 87 | if r.aes != nil { 88 | return r.aes.Write(p) 89 | } 90 | 91 | // Buffer salt 92 | needed := pbkdf2SaltSize - len(r.buf) 93 | if needed > len(p) { 94 | r.buf = append(r.buf, p...) 95 | return len(p), nil 96 | } 97 | 98 | r.buf = append(r.buf, p[:needed]...)
99 | 	// Derive the key and IV from the 8-byte salt (skipping the "Salted__" header), mirroring Encrypt
100 | 	keyComplete := pbkdf2.Key(r.passphrase, r.buf[8:], pbkdf2Iterations, 48, sha256.New)
101 | 	key, iv := keyComplete[:pbkdf2KeyLen], keyComplete[pbkdf2KeyLen:]
102 | 	// Initialize AES decryption with the derived key and IV
103 | 	aes, err := NewAES256CBC(key, iv, false)
104 | 	if err != nil {
105 | 		r.err = err
106 | 		return 0, err
107 | 	}
108 | 
109 | 	r.aes, err = aes.Decrypt(r.out)
110 | 	if err != nil {
111 | 		r.err = err
112 | 		return 0, err
113 | 	}
114 | 
115 | 	// Write remaining data after the salt
116 | 	n, err := r.aes.Write(p[needed:])
117 | 	return needed + n, err
118 | }
119 | 
120 | func (r *pbkdf2DecryptReader) Close() error {
121 | 	if r.closed {
122 | 		return nil
123 | 	}
124 | 	r.closed = true
125 | 	if r.aes != nil {
126 | 		return r.aes.Close()
127 | 	}
128 | 	return r.err
129 | }
130 | 
-------------------------------------------------------------------------------- /pkg/encrypt/smime.go: --------------------------------------------------------------------------------
1 | package encrypt
2 | 
3 | import (
4 | 	"bytes"
5 | 	"crypto/x509"
6 | 	"encoding/pem"
7 | 	"fmt"
8 | 	"io"
9 | 
10 | 	cms "github.com/InfiniteLoopSpace/go_S-MIME/cms"
11 | )
12 | 
13 | var _ Encryptor = &SMimeAES256CBC{}
14 | 
15 | type SMimeAES256CBC struct {
16 | 	recipientCert *x509.Certificate
17 | }
18 | 
19 | func NewSMimeAES256CBC(certPEM []byte) (*SMimeAES256CBC, error) {
20 | 	block, _ := pem.Decode(certPEM)
21 | 	if block == nil {
22 | 		return nil, fmt.Errorf("failed to decode recipient cert PEM")
23 | 	}
24 | 	cert, err := x509.ParseCertificate(block.Bytes)
25 | 	if err != nil {
26 | 		return nil, fmt.Errorf("invalid recipient cert: %w", err)
27 | 	}
28 | 	return &SMimeAES256CBC{recipientCert: cert}, nil
29 | }
30 | 
31 | func (s *SMimeAES256CBC) Name() string {
32 | 	return string(AlgoSMimeAES256CBC)
33 | }
34 | 
35 | func (s *SMimeAES256CBC) Description() string {
36 | 	return "SMIME with AES256-CBC encryption. 
Should work with `openssl smime -decrypt -inform DER -recip <cert> -inkey <key>`"
37 | }
38 | 
39 | func (s *SMimeAES256CBC) Decrypt(out io.Writer) (io.WriteCloser, error) {
40 | 	return nil, fmt.Errorf("decrypt not implemented for SMIME")
41 | }
42 | 
43 | func (s *SMimeAES256CBC) Encrypt(out io.Writer) (io.WriteCloser, error) {
44 | 	buf := &bytes.Buffer{}
45 | 	return &streamingEncryptWriter{
46 | 		recipient: s.recipientCert,
47 | 		plaintext: buf,
48 | 		out:       out,
49 | 	}, nil
50 | }
51 | 
52 | // streamingEncryptWriter accepts streamed writes, but buffers the entire plaintext
53 | // in memory and performs the CMS encryption only on Close.
54 | type streamingEncryptWriter struct {
55 | 	recipient *x509.Certificate
56 | 	plaintext *bytes.Buffer
57 | 	out       io.Writer
58 | 	closed    bool
59 | }
60 | 
61 | func (w *streamingEncryptWriter) Write(p []byte) (int, error) {
62 | 	return w.plaintext.Write(p)
63 | }
64 | 
65 | func (w *streamingEncryptWriter) Close() error {
66 | 	if w.closed {
67 | 		return nil
68 | 	}
69 | 	w.closed = true
70 | 
71 | 	// Create a new S/MIME wrapper
72 | 	s, err := cms.New()
73 | 	if err != nil {
74 | 		return fmt.Errorf("failed to create smime instance: %w", err)
75 | 	}
76 | 
77 | 	// Encrypt the buffered plaintext using AES-256-CBC and wrap in CMS
78 | 	der, err := s.Encrypt(w.plaintext.Bytes(), []*x509.Certificate{w.recipient})
79 | 	if err != nil {
80 | 		return fmt.Errorf("S/MIME encryption failed: %w", err)
81 | 	}
82 | 
83 | 	_, err = w.out.Write(der)
84 | 	return err
85 | }
86 | 
-------------------------------------------------------------------------------- /pkg/internal/remote/certs.go: --------------------------------------------------------------------------------
1 | package remote
2 | 
3 | import (
4 | 	"crypto/ed25519"
5 | 	"crypto/rand"
6 | 	"crypto/tls"
7 | 	"crypto/x509"
8 | 	"crypto/x509/pkix"
9 | 	"encoding/pem"
10 | 	"fmt"
11 | 	"math/big"
12 | 	"time"
13 | )
14 | 
15 | const (
16 | 	clientOrg    = "client.databack.io"
17 | 	certValidity = 5 * time.Minute
18 | 	DigestSha256 = "sha256"
19 | )
20 | 
21 | // SelfSignedCertFromPrivateKey creates a self-signed certificate from an ed25519 private key
22 | func SelfSignedCertFromPrivateKey(privateKey ed25519.PrivateKey, hostname string) (*tls.Certificate, error) {
23 | 	if privateKey == nil || len(privateKey) != ed25519.PrivateKeySize {
24 | 		return nil, fmt.Errorf("invalid private key")
25 | 	}
26 | 	publicKey := privateKey.Public()
27 | 
28 | 	// Create a template for the certificate
29 | 	template := x509.Certificate{
30 | 		SerialNumber: big.NewInt(1),
31 | 		Subject: pkix.Name{
32 | 			Organization: []string{clientOrg},
33 | 		},
34 | 		NotBefore: time.Now(),
35 | 		NotAfter:  time.Now().Add(certValidity),
36 | 
37 | 		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
38 | 		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
39 | 		BasicConstraintsValid: true,
40 | 	}
41 | 	if hostname != "" {
42 | 		template.DNSNames = append(template.DNSNames, hostname)
43 | 	}
44 | 
45 | 	// Self-sign the certificate
46 | 	certBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey, privateKey)
47 | 	if err != nil {
48 | 		return nil, fmt.Errorf("failed to create certificate: %w", err)
49 | 	}
50 | 
51 | 	// Encode the certificate to PEM for use in the key pair below
52 | 	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certBytes})
53 | 
54 | 	// Create the TLS certificate to use in tls.Config
55 | 	marshaledPrivateKey, err := x509.MarshalPKCS8PrivateKey(privateKey)
56 | 	if err != nil {
57 | 		return nil, fmt.Errorf("failed to marshal private key: %w", err)
58 | 	}
59 | 	cert, err := tls.X509KeyPair(certPEM, pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: marshaledPrivateKey}))
60 | 	return 
&cert, err 61 | } 62 | -------------------------------------------------------------------------------- /pkg/internal/test/README.md: -------------------------------------------------------------------------------- 1 | # test package 2 | 3 | Contains common utilities used in tests across other packages. Part of `pkg/internal/` to ensure it is not 4 | exported elsewhere. 5 | -------------------------------------------------------------------------------- /pkg/internal/test/remote.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "crypto" 5 | "crypto/ed25519" 6 | cryptorand "crypto/rand" 7 | "crypto/sha256" 8 | "crypto/tls" 9 | "crypto/x509" 10 | "fmt" 11 | "io" 12 | "net/http" 13 | "net/http/httptest" 14 | 15 | utilremote "github.com/databacker/mysql-backup/pkg/internal/remote" 16 | ) 17 | 18 | func StartServer(clientKeyCount int, handler http.HandlerFunc) (server *httptest.Server, serverFingerprint string, clientKeys [][]byte, err error) { 19 | // Generate new private keys for each of the clients 20 | var clientPublicKeys []crypto.PublicKey 21 | for i := 0; i < clientKeyCount; i++ { 22 | clientSeed := make([]byte, ed25519.SeedSize) 23 | if _, err := io.ReadFull(cryptorand.Reader, clientSeed); err != nil { 24 | return nil, "", nil, fmt.Errorf("failed to generate client random seed: %w", err) 25 | } 26 | clientKeys = append(clientKeys, clientSeed) 27 | clientKey := ed25519.NewKeyFromSeed(clientSeed) 28 | clientPublicKeys = append(clientPublicKeys, clientKey.Public()) 29 | } 30 | 31 | serverSeed := make([]byte, ed25519.SeedSize) 32 | if _, err := io.ReadFull(cryptorand.Reader, serverSeed); err != nil { 33 | return nil, "", nil, fmt.Errorf("failed to generate server random seed: %w", err) 34 | } 35 | serverKey := ed25519.NewKeyFromSeed(serverSeed) 36 | 37 | // Create a self-signed certificate from the private key 38 | serverCert, err := utilremote.SelfSignedCertFromPrivateKey(serverKey, "127.0.0.1") 39 | if err != nil { 40 | return nil, "", nil, fmt.Errorf("failed to create self-signed certificate: %v", err) 41 | } 42 | serverFingerprint = fmt.Sprintf("%s:%s", utilremote.DigestSha256, fmt.Sprintf("%x", sha256.Sum256(serverCert.Certificate[0]))) 43 | 44 | // Start a local HTTPS server 45 | server = httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 46 | // Check if the client's public key is in the known list 47 | peerCerts := r.TLS.PeerCertificates 48 | if len(peerCerts) == 0 { 49 | w.WriteHeader(http.StatusForbidden) 50 | return 51 | } 52 | peerPublicKey := peerCerts[0].PublicKey.(ed25519.PublicKey) 53 | // make sure the client's public key is in the list of known keys 54 | var matched bool 55 | for _, publicKey := range clientPublicKeys { 56 | if peerPublicKey.Equal(publicKey.(ed25519.PublicKey)) { 57 | matched = true 58 | break 59 | } 60 | } 61 | if !matched { 62 | w.WriteHeader(http.StatusForbidden) 63 | return 64 | } 65 | // was any custom handler passed? 
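		// if not, fall through: net/http sends an implicit 200 OK when the handler
		// returns without writing a status, which is what the happy-path tests expect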
66 | 		if handler != nil {
67 | 			handler(w, r)
68 | 		}
69 | 	}))
70 | 	server.TLS = &tls.Config{
71 | 		ClientAuth:   tls.RequestClientCert,
72 | 		ClientCAs:    x509.NewCertPool(),
73 | 		Certificates: []tls.Certificate{*serverCert},
74 | 	}
75 | 	server.StartTLS()
76 | 	return server, serverFingerprint, clientKeys, nil
77 | }
78 | 
-------------------------------------------------------------------------------- /pkg/remote/connection.go: --------------------------------------------------------------------------------
1 | package remote
2 | 
3 | type Connection struct {
4 | 	URL string `yaml:"url"`
5 | 	// Certificate digests of the certificate of the remote server or one that signed it in the chain.
6 | 	// Value starts with the hash algorithm (e.g. sha256:) followed by the digest.
7 | 	// Only known listed algorithms are supported; others are considered an error.
8 | 	Certificates []string `yaml:"certificates"` // e.g. sha256:69729b8e15a86efc177a57afb7171dfc64add28c2fca8cf1507e34453ccb1470
9 | 	// Credentials to use to authenticate to the remote server.
10 | 	// Format of the credentials is a base64-encoded 32-byte ed25519 seed (the "seed" form of the private key).
11 | 	Credentials string `yaml:"credentials"` // e.g. BwMqVfr1myxqX8tikIPYCyNtpHgMLIg/2nUE+pLQnTE=
12 | }
13 | 
-------------------------------------------------------------------------------- /pkg/remote/const.go: --------------------------------------------------------------------------------
1 | package remote
2 | 
-------------------------------------------------------------------------------- /pkg/remote/get.go: --------------------------------------------------------------------------------
1 | package remote
2 | 
3 | import (
4 | 	"context"
5 | 	"crypto/ed25519"
6 | 	"crypto/sha256"
7 | 	"crypto/tls"
8 | 	"crypto/x509"
9 | 	"encoding/base64"
10 | 	"fmt"
11 | 	"net"
12 | 	"net/http"
13 | 	"net/url"
14 | 	"strings"
15 | 	"time"
16 | 
17 | 	utilremote "github.com/databacker/mysql-backup/pkg/internal/remote"
18 | )
19 | 
20 | var (
21 | 	validAlgos     = []string{utilremote.DigestSha256}
22 | 	validAlgosHash = map[string]bool{}
23 | )
24 | 
25 | func init() {
26 | 	for _, algo := range validAlgos {
27 | 		validAlgosHash[algo] = true
28 | 	}
29 | }
30 | 
31 | // OpenConnection opens a connection to a TLS server, given the URL, digests of acceptable certs, and an ed25519 seed for authentication.
32 | // The credentials should be a base64-encoded 32-byte ed25519 private-key seed (what the ed25519 package calls the
33 | // "seed"), not the expanded 64-byte private key.
34 | // The certs should be a list of fingerprints in the format "algo:hex-fingerprint".
35 | func OpenConnection(urlString string, certs []string, credentials string) (resp *http.Response, err error) {
36 | 	// open a connection to the URL.
37 | 	// Uses mTLS, but rather than verifying the CA that signed the client cert,
38 | 	// the server is expected to accept a self-signed cert and then check whether the client's public key is in a known good list. 
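	// An illustrative call (hypothetical URL; the fingerprint and credential formats
	// match the examples in connection.go):
	//
	//	resp, err := OpenConnection("https://config.example.com/v1/config",
	//		[]string{"sha256:69729b8e15a86efc177a57afb7171dfc64add28c2fca8cf1507e34453ccb1470"},
	//		"BwMqVfr1myxqX8tikIPYCyNtpHgMLIg/2nUE+pLQnTE=")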
39 | 	u, err := url.Parse(urlString)
40 | 	if err != nil {
41 | 		return nil, fmt.Errorf("error parsing URL: %w", err)
42 | 	}
43 | 	client, err := GetTLSClient(u.Hostname(), certs, credentials)
44 | 	if err != nil {
45 | 		return nil, fmt.Errorf("error creating TLS client: %w", err)
46 | 	}
47 | 
48 | 	req, err := http.NewRequest(http.MethodGet, urlString, nil)
49 | 	if err != nil {
50 | 		return nil, fmt.Errorf("error creating HTTP request: %w", err)
51 | 	}
52 | 
53 | 	return client.Do(req)
54 | }
55 | 
56 | // GetTLSClient gets a TLS client for a connection to a TLS server, given the server name, digests of acceptable certs, and an ed25519 seed for authentication.
57 | // The credentials should be a base64-encoded 32-byte ed25519 private-key seed (what the ed25519 package calls the
58 | // "seed"), not the expanded 64-byte private key.
59 | // The certs should be a list of fingerprints in the format "algo:hex-fingerprint".
60 | func GetTLSClient(serverName string, certs []string, credentials string) (client *http.Client, err error) {
61 | 	tlsConfig, err := GetTLSConfig(serverName, certs, credentials)
62 | 	if err != nil {
63 | 		return nil, fmt.Errorf("error creating TLS config: %w", err)
64 | 	}
65 | 	client = &http.Client{
66 | 		Transport: &http.Transport{
67 | 			// Configure TLS via DialTLS
68 | 			DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
69 | 				return tls.Dial(network, addr, tlsConfig)
70 | 			},
71 | 		},
72 | 	}
73 | 	return client, nil
74 | }
75 | 
76 | // GetTLSConfig gets the TLS config for a TLS client connecting to a TLS server, given the digests of acceptable certs and an ed25519 seed for authentication.
77 | // The credentials should be a base64-encoded 32-byte ed25519 private-key seed (what the ed25519 package calls the
78 | // "seed"), not the expanded 64-byte private key.
79 | // The certs should be a list of fingerprints in the format "algo:hex-fingerprint".
80 | func GetTLSConfig(serverName string, certs []string, credentials string) (tlsConfig *tls.Config, err error) {
81 | 	// Uses mTLS, but rather than verifying the CA that signed the client cert,
82 | 	// the server is expected to accept a self-signed cert and then check whether the client's public key is in a known good list. 
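	// trustedCertsByAlgo ends up shaped like {"sha256": {"69729b8e...": true}},
	// i.e. one set of hex fingerprints per supported digest algorithm
	// (currently only sha256, per validAlgos above).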
83 | 
84 | 	var trustedCertsByAlgo = map[string]map[string]bool{}
85 | 	for _, fingerprint := range certs {
86 | 		parts := strings.SplitN(fingerprint, ":", 2)
87 | 		if len(parts) != 2 {
88 | 			return nil, fmt.Errorf("invalid fingerprint format from configuration: %s", fingerprint)
89 | 		}
90 | 		algo, fp := parts[0], parts[1]
91 | 		if !validAlgosHash[algo] {
92 | 			return nil, fmt.Errorf("invalid algorithm in fingerprint: %s", fingerprint)
93 | 		}
94 | 		if trustedCertsByAlgo[algo] == nil {
95 | 			trustedCertsByAlgo[algo] = map[string]bool{}
96 | 		}
97 | 		trustedCertsByAlgo[algo][fp] = true
98 | 	}
99 | 	// decode our base64-encoded ed25519 seed and build the client key from it
100 | 	keyBytes, err := base64.StdEncoding.DecodeString(credentials)
101 | 	if err != nil {
102 | 		return nil, fmt.Errorf("error decoding credentials: %w", err)
103 | 	}
104 | 	if len(keyBytes) != ed25519.SeedSize {
105 | 		return nil, fmt.Errorf("invalid key size %d, must be %d", len(keyBytes), ed25519.SeedSize)
106 | 	}
107 | 
108 | 	key := ed25519.NewKeyFromSeed(keyBytes)
109 | 	clientCert, err := utilremote.SelfSignedCertFromPrivateKey(key, "")
110 | 	if err != nil {
111 | 		return nil, fmt.Errorf("error creating client certificate: %w", err)
112 | 	}
113 | 
114 | 	tlsConfig = &tls.Config{
115 | 		ServerName:         serverName,
116 | 		InsecureSkipVerify: true, // disable standard verification; Go has no way to run it and fall back to a custom check on failure, so we re-implement it in VerifyPeerCertificate
117 | 		VerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
118 | 			// If verifiedChains is not empty, then the normal verification has passed.
119 | 			if len(verifiedChains) > 0 {
120 | 				return nil
121 | 			}
122 | 
123 | 			certs := make([]*x509.Certificate, len(rawCerts))
124 | 			for i, asn1Data := range rawCerts {
125 | 				cert, err := x509.ParseCertificate(asn1Data)
126 | 				if err != nil {
127 | 					return fmt.Errorf("failed to parse certificate: %v", err)
128 | 				}
129 | 				certs[i] = cert
130 | 			}
131 | 
132 | 			// Try to verify the certificate chain using the system pool
133 | 			opts := x509.VerifyOptions{
134 | 				Intermediates: x509.NewCertPool(),
135 | 				DNSName:       tlsConfig.ServerName,
136 | 			}
137 | 			for i, cert := range certs {
138 | 				// skip the first cert, because it's the one we're trying to verify
139 | 				if i == 0 {
140 | 					continue
141 | 				}
142 | 				// add every other cert as a valid intermediate
143 | 				opts.Intermediates.AddCert(cert)
144 | 			}
145 | 
146 | 			// if one of the certs is valid and verified, accept it
147 | 			if _, err := certs[0].Verify(opts); err == nil {
148 | 				return nil
149 | 			}
150 | 
151 | 			// the cert presented by the server was not signed by a known CA, so fall back to our own list
152 | 			for _, rawCert := range rawCerts {
153 | 				fingerprint := fmt.Sprintf("%x", sha256.Sum256(rawCert))
154 | 				if trustedFingerprints, ok := trustedCertsByAlgo[utilremote.DigestSha256]; ok {
155 | 					if _, ok := trustedFingerprints[fingerprint]; ok {
156 | 						if validateCert(certs[0], tlsConfig.ServerName) {
157 | 							return nil
158 | 						}
159 | 					}
160 | 				}
161 | 			}
162 | 
163 | 			// not in the system pool nor in the approved list
164 | 			return fmt.Errorf("certificate not trusted")
165 | 		},
166 | 		Certificates: []tls.Certificate{*clientCert},
167 | 	}
168 | 	return tlsConfig, nil
169 | }
170 | 
171 | // validateCert: given a cert whose fingerprint or signature we have decided to trust, make sure its properties are correct:
172 | // - still within its validity period
173 | // - hostname matches
174 | // - appropriate key usage
175 | func validateCert(cert *x509.Certificate, hostname string) bool {
176 | 	// valid date
177 | 	now := time.Now()
178 | 	if now.Before(cert.NotBefore) || 
now.After(cert.NotAfter) { 179 | return false 180 | } 181 | 182 | // valid hostname or IP 183 | var validHostname bool 184 | for _, dnsName := range cert.DNSNames { 185 | if dnsName == hostname { 186 | validHostname = true 187 | break 188 | } 189 | } 190 | if hostname == cert.Subject.CommonName { 191 | validHostname = true 192 | } 193 | if !validHostname { 194 | return false 195 | } 196 | 197 | // check keyusage 198 | if cert.KeyUsage&x509.KeyUsageDigitalSignature == 0 { 199 | return false 200 | } 201 | return true 202 | } 203 | -------------------------------------------------------------------------------- /pkg/remote/get_test.go: -------------------------------------------------------------------------------- 1 | package remote 2 | 3 | import ( 4 | "crypto/ed25519" 5 | cryptorand "crypto/rand" 6 | "crypto/x509" 7 | "encoding/base64" 8 | "io" 9 | "net/http" 10 | "testing" 11 | 12 | utilremote "github.com/databacker/mysql-backup/pkg/internal/remote" 13 | utiltest "github.com/databacker/mysql-backup/pkg/internal/test" 14 | ) 15 | 16 | func TestSelfSignedCertFromPrivateKey(t *testing.T) { 17 | // Generate a new private key 18 | publicKey, privateKey, err := ed25519.GenerateKey(nil) 19 | if err != nil { 20 | t.Fatalf("failed to generate private key: %v", err) 21 | } 22 | 23 | tests := []struct { 24 | name string 25 | privateKey ed25519.PrivateKey 26 | expectError bool 27 | }{ 28 | { 29 | name: "valid private key", 30 | privateKey: privateKey, 31 | expectError: false, 32 | }, 33 | { 34 | name: "nil private key", 35 | privateKey: nil, 36 | expectError: true, 37 | }, 38 | } 39 | 40 | for _, test := range tests { 41 | t.Run(test.name, func(t *testing.T) { 42 | // Call the function with the private key 43 | cert, err := utilremote.SelfSignedCertFromPrivateKey(test.privateKey, "") 44 | if (err != nil) != test.expectError { 45 | t.Fatalf("selfSignedCertFromPrivateKey returned an error: %v", err) 46 | } 47 | 48 | if !test.expectError { 49 | // Check if the returned certificate is not nil 50 | if cert == nil { 51 | t.Fatalf("selfSignedCertFromPrivateKey returned a nil certificate") 52 | } 53 | 54 | // Parse the certificate 55 | parsedCert, err := x509.ParseCertificate(cert.Certificate[0]) 56 | if err != nil { 57 | t.Fatalf("failed to parse certificate: %v", err) 58 | } 59 | 60 | // Check if the certificate's public key matches the private key's public key 61 | if !publicKey.Equal(parsedCert.PublicKey.(ed25519.PublicKey)) { 62 | t.Fatalf("public key in certificate does not match private key's public key") 63 | } 64 | } 65 | }) 66 | } 67 | } 68 | 69 | func TestOpenConnection(t *testing.T) { 70 | // Generate a private key that is not in the list of known keys 71 | clientSeedUnknown := make([]byte, ed25519.SeedSize) 72 | if _, err := io.ReadFull(cryptorand.Reader, clientSeedUnknown); err != nil { 73 | t.Fatalf("failed to generate random seed: %v", err) 74 | } 75 | server, fingerprint, clientKeys, err := utiltest.StartServer(1, nil) 76 | if err != nil { 77 | t.Fatalf("failed to start server: %v", err) 78 | } 79 | defer server.Close() 80 | 81 | tests := []struct { 82 | name string 83 | clientPrivateKey []byte 84 | certs []string 85 | expectError bool 86 | expectedStatus int 87 | }{ 88 | { 89 | name: "client key in list", 90 | clientPrivateKey: clientKeys[0], 91 | certs: []string{fingerprint}, 92 | expectError: false, 93 | expectedStatus: http.StatusOK, 94 | }, 95 | { 96 | name: "client key not in list", 97 | clientPrivateKey: clientSeedUnknown, 98 | certs: []string{fingerprint}, 99 | expectError: false, 
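			// the TLS handshake itself succeeds, because the server only *requests*
			// a client cert (tls.RequestClientCert); the unknown key is rejected at
			// the HTTP layer with a 403 rather than as a connection error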
100 | expectedStatus: http.StatusForbidden, 101 | }, 102 | { 103 | name: "no certs", 104 | clientPrivateKey: clientKeys[0], 105 | certs: []string{}, 106 | expectError: true, 107 | expectedStatus: http.StatusForbidden, 108 | }, 109 | } 110 | 111 | for _, tt := range tests { 112 | t.Run(tt.name, func(t *testing.T) { 113 | // Call openConnection 114 | b64EncodedClientKey := base64.StdEncoding.EncodeToString(tt.clientPrivateKey) 115 | resp, err := OpenConnection(server.URL, tt.certs, b64EncodedClientKey) 116 | switch { 117 | case err != nil && !tt.expectError: 118 | t.Errorf("openConnection returned an unexpected error: %v", err) 119 | case err == nil && tt.expectError: 120 | t.Errorf("openConnection did not return an expected error: %v", err) 121 | case err == nil && resp.StatusCode != tt.expectedStatus: 122 | t.Errorf("openConnection returned an unexpected status code: %d", resp.StatusCode) 123 | } 124 | }) 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /pkg/storage/credentials/creds.go: -------------------------------------------------------------------------------- 1 | package credentials 2 | 3 | type Creds struct { 4 | SMB SMBCreds 5 | AWS AWSCreds 6 | } 7 | 8 | type SMBCreds struct { 9 | Username string 10 | Password string 11 | Domain string 12 | } 13 | 14 | type AWSCreds struct { 15 | AccessKeyID string 16 | SecretAccessKey string 17 | Endpoint string 18 | PathStyle bool 19 | Region string 20 | } 21 | -------------------------------------------------------------------------------- /pkg/storage/file/file.go: -------------------------------------------------------------------------------- 1 | package file 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "io/fs" 8 | "net/url" 9 | "os" 10 | "path" 11 | "path/filepath" 12 | 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | type File struct { 17 | url url.URL 18 | path string 19 | } 20 | 21 | func New(u url.URL) *File { 22 | return &File{u, u.Path} 23 | } 24 | 25 | func (f *File) Pull(ctx context.Context, source, target string, logger *log.Entry) (int64, error) { 26 | return copyFile(path.Join(f.path, source), target) 27 | } 28 | 29 | func (f *File) Push(ctx context.Context, target, source string, logger *log.Entry) (int64, error) { 30 | return copyFile(source, filepath.Join(f.path, target)) 31 | } 32 | 33 | func (f *File) Clean(filename string) string { 34 | return filename 35 | } 36 | 37 | func (f *File) Protocol() string { 38 | return "file" 39 | } 40 | 41 | func (f *File) URL() string { 42 | return f.url.String() 43 | } 44 | 45 | func (f *File) ReadDir(ctx context.Context, dirname string, logger *log.Entry) ([]fs.FileInfo, error) { 46 | 47 | entries, err := os.ReadDir(filepath.Join(f.path, dirname)) 48 | if err != nil { 49 | return nil, err 50 | } 51 | var files []fs.FileInfo 52 | for _, entry := range entries { 53 | info, err := entry.Info() 54 | if err != nil { 55 | return nil, err 56 | } 57 | files = append(files, info) 58 | } 59 | return files, nil 60 | } 61 | 62 | func (f *File) Remove(ctx context.Context, target string, logger *log.Entry) error { 63 | return os.Remove(filepath.Join(f.path, target)) 64 | } 65 | 66 | // copyFile copy a file from to as efficiently as possible 67 | func copyFile(from, to string) (int64, error) { 68 | src, err := os.Open(from) 69 | if err != nil { 70 | return 0, fmt.Errorf("failed to open source file %s: %w", from, err) 71 | } 72 | defer func() { _ = src.Close() }() 73 | 74 | dst, err := os.Create(to) 75 | if err != nil { 76 | return 0, 
fmt.Errorf("failed to create target file %s: %w", to, err) 77 | } 78 | defer func() { _ = dst.Close() }() 79 | n, err := io.Copy(dst, src) 80 | return n, err 81 | } 82 | -------------------------------------------------------------------------------- /pkg/storage/parse.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/databacker/api/go/api" 7 | "github.com/databacker/mysql-backup/pkg/storage/credentials" 8 | "github.com/databacker/mysql-backup/pkg/storage/file" 9 | "github.com/databacker/mysql-backup/pkg/storage/s3" 10 | "github.com/databacker/mysql-backup/pkg/storage/smb" 11 | "github.com/databacker/mysql-backup/pkg/util" 12 | "gopkg.in/yaml.v3" 13 | ) 14 | 15 | func ParseURL(url string, creds credentials.Creds) (Storage, error) { 16 | // parse the target URL 17 | u, err := util.SmartParse(url) 18 | if err != nil { 19 | return nil, fmt.Errorf("invalid target url%v", err) 20 | } 21 | 22 | // do the upload 23 | var store Storage 24 | switch u.Scheme { 25 | case "file": 26 | store = file.New(*u) 27 | case "smb": 28 | opts := []smb.Option{} 29 | if creds.SMB.Domain != "" { 30 | opts = append(opts, smb.WithDomain(creds.SMB.Domain)) 31 | } 32 | if creds.SMB.Username != "" { 33 | opts = append(opts, smb.WithUsername(creds.SMB.Username)) 34 | } 35 | if creds.SMB.Password != "" { 36 | opts = append(opts, smb.WithPassword(creds.SMB.Password)) 37 | } 38 | store = smb.New(*u, opts...) 39 | case "s3": 40 | opts := []s3.Option{} 41 | if creds.AWS.Endpoint != "" { 42 | opts = append(opts, s3.WithEndpoint(creds.AWS.Endpoint)) 43 | } 44 | if creds.AWS.Region != "" { 45 | opts = append(opts, s3.WithRegion(creds.AWS.Region)) 46 | } 47 | if creds.AWS.AccessKeyID != "" { 48 | opts = append(opts, s3.WithAccessKeyId(creds.AWS.AccessKeyID)) 49 | } 50 | if creds.AWS.SecretAccessKey != "" { 51 | opts = append(opts, s3.WithSecretAccessKey(creds.AWS.SecretAccessKey)) 52 | } 53 | if creds.AWS.PathStyle { 54 | opts = append(opts, s3.WithPathStyle()) 55 | } 56 | store = s3.New(*u, opts...) 57 | default: 58 | return nil, fmt.Errorf("unknown url protocol: %s", u.Scheme) 59 | } 60 | return store, nil 61 | } 62 | 63 | // FromTarget parse an api.Target and return something that implements the Storage interface 64 | func FromTarget(target api.Target) (store Storage, err error) { 65 | u, err := util.SmartParse(target.URL) 66 | if err != nil { 67 | return nil, err 68 | } 69 | switch target.Type { 70 | case api.TargetTypeS3: 71 | var spec api.S3 72 | specBytes, err := yaml.Marshal(target.Spec) 73 | if err != nil { 74 | return nil, fmt.Errorf("error marshalling spec part of target: %w", err) 75 | } 76 | if err := yaml.Unmarshal(specBytes, &spec); err != nil { 77 | return nil, fmt.Errorf("parsed yaml had kind S3, but spec invalid") 78 | } 79 | 80 | opts := []s3.Option{} 81 | if spec.Region != nil && *spec.Region != "" { 82 | opts = append(opts, s3.WithRegion(*spec.Region)) 83 | } 84 | if spec.Endpoint != nil && *spec.Endpoint != "" { 85 | opts = append(opts, s3.WithEndpoint(*spec.Endpoint)) 86 | } 87 | if spec.AccessKeyID != nil && *spec.AccessKeyID != "" { 88 | opts = append(opts, s3.WithAccessKeyId(*spec.AccessKeyID)) 89 | } 90 | if spec.SecretAccessKey != nil && *spec.SecretAccessKey != "" { 91 | opts = append(opts, s3.WithSecretAccessKey(*spec.SecretAccessKey)) 92 | } 93 | store = s3.New(*u, opts...) 
94 | case api.TargetTypeSmb: 95 | var spec api.SMB 96 | specBytes, err := yaml.Marshal(target.Spec) 97 | if err != nil { 98 | return nil, fmt.Errorf("error marshalling spec part of target: %w", err) 99 | } 100 | if err := yaml.Unmarshal(specBytes, &spec); err != nil { 101 | return nil, fmt.Errorf("parsed yaml had kind SMB, but spec invalid") 102 | } 103 | 104 | opts := []smb.Option{} 105 | if spec.Domain != nil && *spec.Domain != "" { 106 | opts = append(opts, smb.WithDomain(*spec.Domain)) 107 | } 108 | if spec.Username != nil && *spec.Username != "" { 109 | opts = append(opts, smb.WithUsername(*spec.Username)) 110 | } 111 | if spec.Password != nil && *spec.Password != "" { 112 | opts = append(opts, smb.WithPassword(*spec.Password)) 113 | } 114 | store = smb.New(*u, opts...) 115 | case api.TargetTypeFile: 116 | store, err = ParseURL(target.URL, credentials.Creds{}) 117 | if err != nil { 118 | return nil, err 119 | } 120 | default: 121 | return nil, fmt.Errorf("unknown target type: %s", target.Type) 122 | } 123 | return store, nil 124 | } 125 | -------------------------------------------------------------------------------- /pkg/storage/s3/reader.go: -------------------------------------------------------------------------------- 1 | package s3 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | type CountingReader interface { 8 | io.Reader 9 | Bytes() int64 10 | } 11 | 12 | func NewCountingReader(r io.Reader) CountingReader { 13 | return &countingReader{r: r} 14 | } 15 | 16 | type countingReader struct { 17 | r io.Reader 18 | bytes int64 19 | } 20 | 21 | func (cr *countingReader) Read(p []byte) (int, error) { 22 | n, err := cr.r.Read(p) 23 | cr.bytes += int64(n) 24 | return n, err 25 | } 26 | 27 | func (cr *countingReader) Bytes() int64 { 28 | return cr.bytes 29 | } 30 | -------------------------------------------------------------------------------- /pkg/storage/s3/s3.go: -------------------------------------------------------------------------------- 1 | package s3 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io/fs" 7 | "net/url" 8 | "os" 9 | "path" 10 | "path/filepath" 11 | "strings" 12 | "time" 13 | 14 | "github.com/aws/aws-sdk-go-v2/aws" 15 | "github.com/aws/aws-sdk-go-v2/config" 16 | "github.com/aws/aws-sdk-go-v2/credentials" 17 | "github.com/aws/aws-sdk-go-v2/feature/s3/manager" 18 | "github.com/aws/aws-sdk-go-v2/service/s3" 19 | log "github.com/sirupsen/logrus" 20 | ) 21 | 22 | type S3 struct { 23 | url url.URL 24 | // pathStyle option is not really used, but may be required 25 | // at some point; see https://aws.amazon.com/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story/ 26 | pathStyle bool 27 | region string 28 | endpoint string 29 | accessKeyId string 30 | secretAccessKey string 31 | } 32 | 33 | type Option func(s *S3) 34 | 35 | func WithPathStyle() Option { 36 | return func(s *S3) { 37 | s.pathStyle = true 38 | } 39 | } 40 | func WithRegion(region string) Option { 41 | return func(s *S3) { 42 | s.region = region 43 | } 44 | } 45 | func WithEndpoint(endpoint string) Option { 46 | return func(s *S3) { 47 | s.endpoint = endpoint 48 | } 49 | } 50 | func WithAccessKeyId(accessKeyId string) Option { 51 | return func(s *S3) { 52 | s.accessKeyId = accessKeyId 53 | } 54 | } 55 | func WithSecretAccessKey(secretAccessKey string) Option { 56 | return func(s *S3) { 57 | s.secretAccessKey = secretAccessKey 58 | } 59 | } 60 | 61 | func New(u url.URL, opts ...Option) *S3 { 62 | s := &S3{url: u} 63 | for _, opt := range opts { 64 | opt(s) 65 | } 66 | return s 67 | } 68 | 69 | func (s *S3) 
Pull(ctx context.Context, source, target string, logger *log.Entry) (int64, error) {
70 | 	// get the s3 client
71 | 	client, err := s.getClient(logger)
72 | 	if err != nil {
73 | 		return 0, fmt.Errorf("failed to get AWS client: %v", err)
74 | 	}
75 | 
76 | 	bucket, key := s.url.Hostname(), path.Join(s.url.Path, source)
77 | 
78 | 	// Create a downloader with the session and default options
79 | 	downloader := manager.NewDownloader(client)
80 | 
81 | 	// Create a file to write the S3 Object contents to.
82 | 	f, err := os.Create(target)
83 | 	if err != nil {
84 | 		return 0, fmt.Errorf("failed to create target restore file %q, %v", target, err)
85 | 	}
86 | 	defer func() { _ = f.Close() }()
87 | 
88 | 	// Write the contents of the S3 Object to the file
89 | 	n, err := downloader.Download(ctx, f, &s3.GetObjectInput{
90 | 		Bucket: aws.String(bucket),
91 | 		Key:    aws.String(key),
92 | 	})
93 | 	if err != nil {
94 | 		return 0, fmt.Errorf("failed to download file, %v", err)
95 | 	}
96 | 	return n, nil
97 | }
98 | 
99 | func (s *S3) Push(ctx context.Context, target, source string, logger *log.Entry) (int64, error) {
100 | 	// get the s3 client
101 | 	client, err := s.getClient(logger)
102 | 	if err != nil {
103 | 		return 0, fmt.Errorf("failed to get AWS client: %v", err)
104 | 	}
105 | 	bucket, key := s.url.Hostname(), s.url.Path
106 | 
107 | 	// Create an uploader with the session and default options
108 | 	uploader := manager.NewUploader(client)
109 | 
110 | 	// Open the source file whose contents will be uploaded.
111 | 	f, err := os.Open(source)
112 | 	if err != nil {
113 | 		return 0, fmt.Errorf("failed to read input file %q, %v", source, err)
114 | 	}
115 | 	defer func() { _ = f.Close() }()
116 | 	countingReader := NewCountingReader(f)
117 | 
118 | 	// S3 always prepends a /, so if it already has one, it would become //
119 | 	// For some services, that is ok, but for others, it causes issues. 
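	// e.g. a URL path of "/backups" and target "db.tgz" join to "/backups/db.tgz";
	// trimming the leading slash stores the object under the key "backups/db.tgz"
	// rather than under an empty leading path segment.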
120 | 	key = strings.TrimPrefix(path.Join(key, target), "/")
121 | 
122 | 	// Write the contents of the file to the S3 object
123 | 	_, err = uploader.Upload(ctx, &s3.PutObjectInput{
124 | 		Bucket: aws.String(bucket),
125 | 		Key:    aws.String(key),
126 | 		Body:   countingReader,
127 | 	})
128 | 	if err != nil {
129 | 		return 0, fmt.Errorf("failed to upload file, %v", err)
130 | 	}
131 | 	return countingReader.Bytes(), nil
132 | }
133 | 
134 | func (s *S3) Clean(filename string) string {
135 | 	return filename
136 | }
137 | 
138 | func (s *S3) Protocol() string {
139 | 	return "s3"
140 | }
141 | 
142 | func (s *S3) URL() string {
143 | 	return s.url.String()
144 | }
145 | 
146 | func (s *S3) ReadDir(ctx context.Context, dirname string, logger *log.Entry) ([]fs.FileInfo, error) {
147 | 	// get the s3 client
148 | 	client, err := s.getClient(logger)
149 | 	if err != nil {
150 | 		return nil, fmt.Errorf("failed to get AWS client: %v", err)
151 | 	}
152 | 
153 | 	// Call ListObjectsV2 with the bucket and prefix;
154 | 	// ensure that there is no leading /
155 | 	p := strings.TrimPrefix(filepath.Join(s.url.Path, dirname), "/")
156 | 	result, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{Bucket: aws.String(s.url.Hostname()), Prefix: aws.String(p)})
157 | 	if err != nil {
158 | 		return nil, fmt.Errorf("failed to list objects, %v", err)
159 | 	}
160 | 
161 | 	// Convert s3.Object to fs.FileInfo
162 | 	var files []fs.FileInfo
163 | 	for _, item := range result.Contents {
164 | 		files = append(files, &s3FileInfo{
165 | 			name:         *item.Key,
166 | 			lastModified: *item.LastModified,
167 | 			size:         *item.Size,
168 | 		})
169 | 	}
170 | 
171 | 	return files, nil
172 | }
173 | 
174 | func (s *S3) Remove(ctx context.Context, target string, logger *log.Entry) error {
175 | 	// Get the AWS client
176 | 	client, err := s.getClient(logger)
177 | 	if err != nil {
178 | 		return fmt.Errorf("failed to get AWS client: %v", err)
179 | 	}
180 | 
181 | 	// Call DeleteObject with the bucket and the key of the object to delete
182 | 	_, err = client.DeleteObject(ctx, &s3.DeleteObjectInput{
183 | 		Bucket: aws.String(s.url.Hostname()),
184 | 		Key:    aws.String(target),
185 | 	})
186 | 	if err != nil {
187 | 		return fmt.Errorf("failed to delete object, %v", err)
188 | 	}
189 | 
190 | 	return nil
191 | }
192 | 
193 | func (s *S3) getClient(logger *log.Entry) (*s3.Client, error) {
194 | 	// Get the AWS config
195 | 	var configOpts []func(*config.LoadOptions) error // global client options
196 | 	if logger.Level == log.TraceLevel {
197 | 		configOpts = append(configOpts, config.WithClientLogMode(aws.LogRequestWithBody|aws.LogResponse))
198 | 	}
199 | 	if s.region != "" {
200 | 		configOpts = append(configOpts, config.WithRegion(s.region))
201 | 	}
202 | 	if s.accessKeyId != "" {
203 | 		configOpts = append(configOpts, config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
204 | 			s.accessKeyId,
205 | 			s.secretAccessKey,
206 | 			"",
207 | 		)))
208 | 	}
209 | 	cfg, err := config.LoadDefaultConfig(context.TODO(),
210 | 		configOpts...,
211 | 	)
212 | 	if err != nil {
213 | 		return nil, fmt.Errorf("failed to load AWS config: %v", err)
214 | 	}
215 | 
216 | 	// Get the S3 client
217 | 	var s3opts []func(*s3.Options) // s3 client options
218 | 	if s.endpoint != "" {
219 | 		cleanEndpoint := getEndpoint(s.endpoint)
220 | 		s3opts = append(s3opts,
221 | 			func(o *s3.Options) {
222 | 				o.BaseEndpoint = &cleanEndpoint
223 | 			},
224 | 		)
225 | 	}
226 | 	if s.pathStyle {
227 | 		s3opts = append(s3opts,
228 | 			func(o *s3.Options) {
229 | 				o.UsePathStyle = true
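				// path-style addressing puts the bucket in the URL path
				// (https://endpoint/bucket/key) rather than in the hostname,
				// which many non-AWS S3-compatible servers require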
}, 231 | ) 232 | } 233 | 234 | // Create a new S3 service client 235 | return s3.NewFromConfig(cfg, s3opts...), nil 236 | } 237 | 238 | // getEndpoint returns a clean (for AWS client) endpoint. Normally, this is unchanged, 239 | // but for some reason, the lookup gets flaky when the endpoint is 127.0.0.1, 240 | // so in that case, set it to localhost explicitly. 241 | func getEndpoint(endpoint string) string { 242 | e := endpoint 243 | u, err := url.Parse(endpoint) 244 | if err == nil { 245 | if u.Hostname() == "127.0.0.1" { 246 | port := u.Port() 247 | u.Host = "localhost" 248 | if port != "" { 249 | u.Host += ":" + port 250 | } 251 | e = u.String() 252 | } 253 | } 254 | return e 255 | } 256 | 257 | type s3FileInfo struct { 258 | name string 259 | lastModified time.Time 260 | size int64 261 | } 262 | 263 | func (s s3FileInfo) Name() string { return s.name } 264 | func (s s3FileInfo) Size() int64 { return s.size } 265 | func (s s3FileInfo) Mode() os.FileMode { return 0 } // Not applicable in S3 266 | func (s s3FileInfo) ModTime() time.Time { return s.lastModified } 267 | func (s s3FileInfo) IsDir() bool { return false } // Not applicable in S3 268 | func (s s3FileInfo) Sys() interface{} { return nil } // Not applicable in S3 269 | -------------------------------------------------------------------------------- /pkg/storage/smb/smb.go: -------------------------------------------------------------------------------- 1 | package smb 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "net" 8 | "net/url" 9 | "os" 10 | "path/filepath" 11 | "strings" 12 | 13 | "github.com/cloudsoda/go-smb2" 14 | log "github.com/sirupsen/logrus" 15 | ) 16 | 17 | const ( 18 | defaultSMBPort = "445" 19 | ) 20 | 21 | type SMB struct { 22 | url url.URL 23 | domain string 24 | username string 25 | password string 26 | } 27 | 28 | type Option func(s *SMB) 29 | 30 | func WithDomain(domain string) Option { 31 | return func(s *SMB) { 32 | s.domain = domain 33 | } 34 | } 35 | func WithUsername(username string) Option { 36 | return func(s *SMB) { 37 | s.username = username 38 | } 39 | } 40 | func WithPassword(password string) Option { 41 | return func(s *SMB) { 42 | s.password = password 43 | } 44 | } 45 | 46 | func New(u url.URL, opts ...Option) *SMB { 47 | s := &SMB{url: u} 48 | for _, opt := range opts { 49 | opt(s) 50 | } 51 | return s 52 | } 53 | 54 | func (s *SMB) Pull(ctx context.Context, source, target string, logger *log.Entry) (int64, error) { 55 | var ( 56 | copied int64 57 | err error 58 | ) 59 | err = s.exec(s.url, func(fs *smb2.Share, sharepath string) error { 60 | smbFilename := fmt.Sprintf("%s%c%s", sharepath, smb2.PathSeparator, filepath.Base(strings.ReplaceAll(target, ":", "-"))) 61 | smbFilename = strings.TrimPrefix(smbFilename, fmt.Sprintf("%c", smb2.PathSeparator)) 62 | 63 | to, err := os.Create(target) 64 | if err != nil { 65 | return err 66 | } 67 | defer func() { _ = to.Close() }() 68 | from, err := fs.Open(smbFilename) 69 | if err != nil { 70 | return err 71 | } 72 | defer func() { _ = from.Close() }() 73 | copied, err = io.Copy(to, from) 74 | return err 75 | }) 76 | return copied, err 77 | } 78 | 79 | func (s *SMB) Push(ctx context.Context, target, source string, logger *log.Entry) (int64, error) { 80 | var ( 81 | copied int64 82 | err error 83 | ) 84 | err = s.exec(s.url, func(fs *smb2.Share, sharepath string) error { 85 | smbFilename := fmt.Sprintf("%s%c%s", sharepath, smb2.PathSeparator, target) 86 | smbFilename = strings.TrimPrefix(smbFilename, fmt.Sprintf("%c", smb2.PathSeparator)) 87 | 
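		// open the local source file and copy it into a newly created file on the share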
from, err := os.Open(source) 88 | if err != nil { 89 | return err 90 | } 91 | defer func() { _ = from.Close() }() 92 | to, err := fs.Create(smbFilename) 93 | if err != nil { 94 | return err 95 | } 96 | defer func() { _ = to.Close() }() 97 | copied, err = io.Copy(to, from) 98 | return err 99 | }) 100 | return copied, err 101 | } 102 | 103 | func (s *SMB) Clean(filename string) string { 104 | return strings.ReplaceAll(filename, ":", "-") 105 | } 106 | 107 | func (s *SMB) Protocol() string { 108 | return "smb" 109 | } 110 | 111 | func (s *SMB) URL() string { 112 | return s.url.String() 113 | } 114 | 115 | func (s *SMB) ReadDir(ctx context.Context, dirname string, logger *log.Entry) ([]os.FileInfo, error) { 116 | var ( 117 | err error 118 | infos []os.FileInfo 119 | ) 120 | err = s.exec(s.url, func(fs *smb2.Share, sharepath string) error { 121 | infos, err = fs.ReadDir(sharepath) 122 | return err 123 | }) 124 | return infos, err 125 | } 126 | 127 | func (s *SMB) Remove(ctx context.Context, target string, logger *log.Entry) error { 128 | return s.exec(s.url, func(fs *smb2.Share, sharepath string) error { 129 | smbFilename := fmt.Sprintf("%s%c%s", sharepath, smb2.PathSeparator, filepath.Base(strings.ReplaceAll(target, ":", "-"))) 130 | smbFilename = strings.TrimPrefix(smbFilename, fmt.Sprintf("%c", smb2.PathSeparator)) 131 | return fs.Remove(smbFilename) 132 | }) 133 | } 134 | 135 | func (s *SMB) exec(u url.URL, command func(fs *smb2.Share, sharepath string) error) error { 136 | var ( 137 | username = s.username 138 | password = s.password 139 | domain = s.domain 140 | ) 141 | 142 | hostname, port, path := u.Hostname(), u.Port(), u.Path 143 | // set default port 144 | if port == "" { 145 | port = defaultSMBPort 146 | } 147 | host := fmt.Sprintf("%s:%s", hostname, port) 148 | share, sharepath := parseSMBPath(path) 149 | if s.username == "" && u.User != nil { 150 | username = u.User.Username() 151 | password, _ = u.User.Password() 152 | username, domain = parseSMBDomain(username) 153 | } 154 | 155 | conn, err := net.Dial("tcp", host) 156 | if err != nil { 157 | return err 158 | } 159 | defer func() { _ = conn.Close() }() 160 | 161 | d := &smb2.Dialer{ 162 | Initiator: &smb2.NTLMInitiator{ 163 | Domain: domain, 164 | User: username, 165 | Password: password, 166 | }, 167 | } 168 | 169 | smbConn, err := d.Dial(conn) 170 | if err != nil { 171 | return err 172 | } 173 | defer func() { 174 | _ = smbConn.Logoff() 175 | }() 176 | 177 | fs, err := smbConn.Mount(share) 178 | if err != nil { 179 | return err 180 | } 181 | defer func() { 182 | _ = fs.Umount() 183 | }() 184 | return command(fs, sharepath) 185 | } 186 | 187 | // parseSMBDomain parse a username to get an SMB domain 188 | // nolint: unused 189 | func parseSMBDomain(username string) (user, domain string) { 190 | parts := strings.SplitN(username, ";", 2) 191 | if len(parts) < 2 { 192 | return username, "" 193 | } 194 | // if we reached this point, we have a username that has a domain in it 195 | return parts[1], parts[0] 196 | } 197 | 198 | // parseSMBPath parse an smb path into its constituent parts 199 | func parseSMBPath(path string) (share, sharepath string) { 200 | sep := "/" 201 | parts := strings.Split(path, sep) 202 | if len(parts) <= 1 { 203 | return path, "" 204 | } 205 | // if the path started with a slash, it might have an empty string as the first element 206 | if parts[0] == "" { 207 | parts = parts[1:] 208 | } 209 | // ensure no leading / as it messes up SMB 210 | return parts[0], strings.Join(parts[1:], sep) 211 | } 212 | 
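All three backends above (file, S3, SMB) are constructed through `storage.ParseURL` in `pkg/storage/parse.go` and then driven through the common `Storage` interface defined next in `pkg/storage/storage.go`. A minimal sketch of that flow; the host, share, and file names are invented for illustration:

```go
package main

import (
	"context"

	log "github.com/sirupsen/logrus"

	"github.com/databacker/mysql-backup/pkg/storage"
	"github.com/databacker/mysql-backup/pkg/storage/credentials"
)

func main() {
	// Hypothetical share: parseSMBPath splits "/backups/nightly" into
	// share "backups" and sharepath "nightly".
	store, err := storage.ParseURL("smb://host/backups/nightly", credentials.Creds{
		SMB: credentials.SMBCreds{Username: "user", Password: "pass"},
	})
	if err != nil {
		log.Fatalf("parse target: %v", err)
	}

	logger := log.WithField("example", true)
	n, err := store.Push(context.Background(), "db_backup.tgz", "/tmp/db_backup.tgz", logger)
	if err != nil {
		log.Fatalf("push: %v", err)
	}
	logger.Infof("pushed %d bytes over %s", n, store.Protocol())
}
```

Because every backend satisfies the same interface, the dump, prune, and restore paths never need to know which protocol they are writing to.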
-------------------------------------------------------------------------------- /pkg/storage/storage.go: --------------------------------------------------------------------------------
1 | package storage
2 | 
3 | import (
4 | 	"context"
5 | 	"io/fs"
6 | 
7 | 	log "github.com/sirupsen/logrus"
8 | )
9 | 
10 | type Storage interface {
11 | 	Protocol() string
12 | 	URL() string
13 | 	Clean(filename string) string
14 | 	Push(ctx context.Context, target, source string, logger *log.Entry) (int64, error)
15 | 	Pull(ctx context.Context, source, target string, logger *log.Entry) (int64, error)
16 | 	ReadDir(ctx context.Context, dirname string, logger *log.Entry) ([]fs.FileInfo, error)
17 | 	// Remove removes a particular file
18 | 	Remove(ctx context.Context, target string, logger *log.Entry) error
19 | }
20 | 
-------------------------------------------------------------------------------- /pkg/util/namedreader.go: --------------------------------------------------------------------------------
1 | package util
2 | 
3 | import (
4 | 	"io"
5 | )
6 | 
7 | // NamedReader pairs a reader with the name of whatever is being read.
8 | type NamedReader struct {
9 | 	Name string
10 | 	io.ReaderAt
11 | 	io.ReadSeeker
12 | }
13 | 
-------------------------------------------------------------------------------- /pkg/util/parse.go: --------------------------------------------------------------------------------
1 | package util
2 | 
3 | import (
4 | 	"net/url"
5 | 	"strings"
6 | )
7 | 
8 | // SmartParse parses a URL, converting a bare leading "/" into a "file:///" URL
9 | func SmartParse(raw string) (*url.URL, error) {
10 | 	if strings.HasPrefix(raw, "/") {
11 | 		raw = "file://" + raw
12 | 	}
13 | 
14 | 	return url.Parse(raw)
15 | }
16 | 
-------------------------------------------------------------------------------- /pkg/util/tracer.go: --------------------------------------------------------------------------------
1 | package util
2 | 
3 | import (
4 | 	"context"
5 | 
6 | 	"go.opentelemetry.io/otel"
7 | 	"go.opentelemetry.io/otel/trace"
8 | )
9 | 
10 | type contextKey string
11 | 
12 | const (
13 | 	tracerKey contextKey = "mysql-backup-tracer-name"
14 | )
15 | 
16 | // ContextWithTracer adds a tracer to the context, using a key known only internally to this package.
17 | func ContextWithTracer(ctx context.Context, tracer trace.Tracer) context.Context {
18 | 	return context.WithValue(ctx, tracerKey, tracer)
19 | }
20 | 
21 | // GetTracerFromContext retrieves a tracer from the context, or returns a default tracer if none is found.
22 | func GetTracerFromContext(ctx context.Context) trace.Tracer {
23 | 	tracerAny := ctx.Value(tracerKey)
24 | 	if tracerAny == nil {
25 | 		return otel.Tracer("default")
26 | 	}
27 | 	tracer, ok := tracerAny.(trace.Tracer)
28 | 	if !ok {
29 | 		return otel.Tracer("default")
30 | 	}
31 | 
32 | 	return tracer
33 | }
34 | 
-------------------------------------------------------------------------------- /scripts.d/post-backup/rename_backup.sh.example: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Rename backup file.
3 | if [[ -n "$DB_DEBUG" ]]; then
4 | 	set -x
5 | fi
6 | 
7 | if [ -e "${DUMPFILE}" ];
8 | then
9 | 	now=$(date +"%Y-%m-%d-%H_%M")
10 | 	new_name=db_backup-${now}.gz
11 | 	old_name=$(basename "${DUMPFILE}")
12 | 	echo "Renaming backup file from ${old_name} to ${new_name}"
13 | 	mv "${DUMPFILE}" "${DESTDIR}/${new_name}"
14 | else
15 | 	echo "ERROR: Backup file ${DUMPFILE} does not exist!" 
16 | fi
17 | 
-------------------------------------------------------------------------------- /test/README.md: --------------------------------------------------------------------------------
1 | # Integration Tests
2 | 
3 | This folder contains integration tests. They are executed only if the go tag `integration` is set, e.g.
4 | 
5 | ```bash
6 | go test -tags=integration
7 | ```
8 | 
9 | As part of the process, it starts mysql, smb and s3 containers, and then runs the tests against them.
10 | When it is done, it tears them down.
11 | 
12 | If you wish to keep the containers, for example, for inspection, then run it with the `keepcontainers` tag, e.g.
13 | 
14 | ```bash
15 | go test -tags=integration,keepcontainers
16 | ```
17 | 
18 | If you wish to see the logs from the various containers - smb, s3, mysql - before they are torn down, then run it
19 | with the `logs` tag, e.g.
20 | 
21 | ```bash
22 | go test -tags=integration,logs
23 | ```
24 | 
25 | ## How it works
26 | 
27 | There are three containers started:
28 | 
29 | * mysql
30 | * smb
31 | * s3
32 | 
33 | These are all started using the golang docker API. Each of these has their port exposed to the host machine.
34 | The startup process lets docker pick the port, and then finds it.
35 | 
36 | At that point, each test in the list of tests is run by invoking `mysql-backup` directly on the host machine,
37 | pointing it at the various targets. `mysql-backup` is **not** invoked as a subprocess, but rather as a library call.
38 | This does leave the possibility of a bug in how the CLI calls the library, but we accept that risk as reasonable.
39 | 
40 | Because the SMB and S3 containers save to local directories, the place to check the results needs to be mounted into
41 | the containers.
42 | 
43 | On startup, the test creates a temporary working directory, henceforth called `base`. All files are saved to somewhere
44 | inside base, whether as a file target for backups (a target of `file://dir` or `/dir`), or for an S3 or SMB target inside
45 | their respective containers, or for storing pre/post backup/restore scripts.
46 | 
47 | The structure of the base directory is as follows. Keep in mind that we have one single SMB and S3 container each, so the
48 | directory is shared among different backups. That means we need to distinguish among targets that we pass to the
49 | containers. Further, they might run in parallel, so it is important that the different activities do not clobber each other.
50 | 
51 | We resolve this by having each backup target get its own directory under `base/backups/`. The name of the directory
52 | cannot be based just on the target, as that might be reused. We also try to avoid sequence numbers, as they are not very
53 | helpful. Instead, each target gets a random directory name. This is then appended to the target.
54 | 
55 | Here are some examples, assuming that the base is `/tmp/mysql-backup-test-abcd123` and the randomly generated number
56 | is `115647`:
57 | 
58 | * `file://dir` -> `/tmp/mysql-backup-test-abcd123/backups/dir/115647`
59 | * `s3://s3/bucket1` -> `s3://s3/bucket1/115647` ; which, since `/tmp/mysql-backup-test-abcd123/` is mounted to the
60 | container, becomes `/tmp/mysql-backup-test-abcd123/backups/s3/bucket1/115647`
61 | * `smb://smb/path2` -> `smb://smb/path2/115647` ; which, since `/tmp/mysql-backup-test-abcd123/` is mounted to the
62 | container, becomes `/tmp/mysql-backup-test-abcd123/backups/smb/path2/115647`
63 | 
64 | In order to keep it simple, we have the test target be the basic form, e.g. 
`smb://smb/noauth` or `/backups`, and then we 65 | add the rest of the path to the caller before passing it on to `mysql-backup`. 66 | 67 | Structure of base is: 68 | 69 | base/ - base of the backup area 70 | backup.sql - the backup we take manually at the beginning, for comparison 71 | backups/ - the directory where backups are stored 72 | 15674832/ - one target's backup 73 | 88725436/ - another target's backup 74 | -------------------------------------------------------------------------------- /test/backup_log_containers.go: -------------------------------------------------------------------------------- 1 | //go:build integration && !logs 2 | 3 | package test 4 | 5 | func logContainers(dc *dockerContext, cids ...string) error { 6 | return nil 7 | } 8 | -------------------------------------------------------------------------------- /test/backup_nolog_containers.go: -------------------------------------------------------------------------------- 1 | //go:build integration && logs 2 | 3 | package test 4 | 5 | func logContainers(dc *dockerContext, cids ...string) error { 6 | return dc.logContainers(cids...) 7 | } 8 | -------------------------------------------------------------------------------- /test/backup_teardown_test.go: -------------------------------------------------------------------------------- 1 | //go:build integration 2 | 3 | package test 4 | 5 | import "fmt" 6 | 7 | func teardown(dc *dockerContext, cids ...string) error { 8 | if err := dc.rmContainers(cids...); err != nil { 9 | return fmt.Errorf("failed to remove containers: %v", err) 10 | } 11 | return nil 12 | } 13 | -------------------------------------------------------------------------------- /test/ctr/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.19 2 | 3 | # smb port 4 | EXPOSE 445 5 | 6 | # install the necessary client 7 | RUN apk add --update bash samba-server && rm -rf /var/cache/apk/* && touch /etc/samba/smb.conf 8 | 9 | # enter smb.conf 10 | COPY smb.conf /etc/samba/ 11 | COPY smbusers /etc/samba/ 12 | COPY *.tdb /var/lib/samba/private/ 13 | # create a user with no home directory but the right password 14 | RUN adduser user -D -H 15 | RUN echo user:pass | chpasswd 16 | 17 | # ensure that the directory where we will mount it exists, so that nobody user can write there 18 | RUN mkdir -p /share/backups && chmod 0777 /share/backups 19 | 20 | # run samba in the foreground 21 | CMD /usr/sbin/smbd -F --debug-stdout -d 4 --no-process-group 22 | -------------------------------------------------------------------------------- /test/ctr/passdb.tdb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/databacker/mysql-backup/e39440fbedbf0b0aacac29e03d036902472a716b/test/ctr/passdb.tdb -------------------------------------------------------------------------------- /test/ctr/secrets.tdb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/databacker/mysql-backup/e39440fbedbf0b0aacac29e03d036902472a716b/test/ctr/secrets.tdb -------------------------------------------------------------------------------- /test/ctr/smb.conf: -------------------------------------------------------------------------------- 1 | [global] 2 | netbios name = conf 3 | 4 | workgroup = CONF 5 | 6 | security = user 7 | encrypt passwords = yes 8 | 9 | # Run a WINS server 10 | wins support = yes 11 | 12 | # The following three lines ensure that the Samba 13 | # server 
will maintain the role of master browser.
14 | # Make sure no other Samba server has its OS level
15 | # set higher than it is here.
16 | local master = yes
17 | preferred master = yes
18 | os level = 65
19 | 
20 | 
21 | [noauth]
22 | path = /share/backups
23 | create mask = 0755
24 | read only = no
25 | guest ok = yes
26 | 
27 | [auth]
28 | path = /share/backups
29 | create mask = 0755
30 | read only = no
31 | valid users = user
32 | 
33 | [nopath]
34 | path = /share/backups/nopath
35 | create mask = 0755
36 | read only = no
37 | guest ok = yes
38 | 
39 | 
40 | 
41 | 
-------------------------------------------------------------------------------- /test/ctr/smbusers: --------------------------------------------------------------------------------
1 | # Unix_name = SMB_name1 SMB_name2 ...
2 | root = administrator admin
3 | nobody = guest pcguest smbguest
4 | user = user
-------------------------------------------------------------------------------- /test/package_noteardown_test.go: --------------------------------------------------------------------------------
1 | //go:build integration && keepcontainers
2 | 
3 | package test
4 | 
5 | // With the keepcontainers tag set, teardown is deliberately a no-op so that the
6 | // containers survive the run for inspection.
7 | func teardown(dc *dockerContext, cids ...string) error {
8 | 	return nil
9 | }
10 | 
--------------------------------------------------------------------------------
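The tag-gated pairs above (`backup_teardown_test.go` / `package_noteardown_test.go`, and the two `logContainers` files) all rely on the same mechanism: mutually exclusive `//go:build` constraints, so that exactly one definition of a function exists in any given build. A stripped-down sketch of the pattern, using an invented `keep` tag and hypothetical file names:

```go
// cleanup_default.go - compiled when the "keep" tag is absent
//go:build !keep

package demo

// cleanup performs the real teardown in the default build.
func cleanup() error {
	// ... release containers, temp dirs, etc. ...
	return nil
}
```

```go
// cleanup_keep.go - compiled only with `-tags keep`
//go:build keep

package demo

// cleanup deliberately does nothing, mirroring package_noteardown_test.go.
func cleanup() error { return nil }
```

Callers invoke `cleanup` unconditionally; the build tags, not runtime flags, decide which body is compiled in. This is why the integration tests can always call `teardown` and `logContainers` without consulting any configuration.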