├── .dockerignore ├── .editorconfig ├── .github └── workflows │ ├── docker.yml │ └── go.yml ├── .gitignore ├── .goreleaser.yml ├── CHANGELOG.md ├── CONTRIBUTORS ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── config.go ├── config_test.go ├── connstring.go ├── connstring_test.go ├── contrib ├── kubernetes │ └── cronjob.yaml └── systemd │ ├── pg_back@.service │ └── pg_back@.timer ├── crypto.go ├── crypto_test.go ├── docker └── compose.test.yml ├── go.mod ├── go.sum ├── hash.go ├── hash_test.go ├── hook.go ├── hook_test.go ├── legacy.go ├── legacy_test.go ├── lock.go ├── lock_test.go ├── lock_win.go ├── log.go ├── log_test.go ├── main.go ├── main_test.go ├── pg_back.conf ├── purge.go ├── purge_test.go ├── sql.go ├── sql_test.go ├── testdata └── fixture.sql ├── upload.go └── upload_test.go /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | .gitignore 3 | CHANGELOG.md 4 | CONTRIBUTORS 5 | LICENSE 6 | Dockerfile 7 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | [*.go] 2 | indent_style = tab 3 | indent_size = 4 4 | 5 | -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: Docker 2 | 3 | on: 4 | push: 5 | branches: [ "master" ] 6 | tags: 7 | - v* 8 | pull_request: 9 | branches: [ "master" ] 10 | release: 11 | types: [ "published" ] 12 | workflow_dispatch: 13 | 14 | env: 15 | IMAGE_NAME: pg_back 16 | 17 | jobs: 18 | test: 19 | runs-on: ubuntu-latest 20 | strategy: 21 | matrix: 22 | pg_version: [13, 14, 15, 16, 17] 23 | 24 | steps: 25 | - name: Checkout 26 | uses: actions/checkout@v4 27 | 28 | - name: Test Docker container 29 | uses: adambirds/docker-compose-action@v1.4.0 30 | env: 31 | PG_VERSION: ${{ matrix.pg_version }} 32 | 
with: 33 | compose-file: ./docker/compose.test.yml 34 | up-flags: --abort-on-container-exit --exit-code-from pg_back 35 | down-flags: --volumes 36 | 37 | buildx: 38 | runs-on: ubuntu-latest 39 | permissions: 40 | contents: read 41 | packages: write 42 | 43 | steps: 44 | - name: Checkout 45 | uses: actions/checkout@v4 46 | 47 | - id: prep 48 | if: "startsWith(github.ref, 'refs/tags/v')" 49 | run: | 50 | echo ::set-output name=tag::${GITHUB_REF#refs/tags/v} 51 | 52 | - name: Set up QEMU 53 | uses: docker/setup-qemu-action@v2.2.0 54 | 55 | - name: Set up Docker Buildx 56 | uses: docker/setup-buildx-action@v2.9.1 57 | 58 | - name: Cache Docker layers 59 | uses: actions/cache@v4 60 | with: 61 | path: /tmp/.buildx-cache 62 | key: ${{ runner.os }}-buildx-${{ github.sha }} 63 | restore-keys: | 64 | ${{ runner.os }}-buildx- 65 | 66 | - name: Login to GHCR 67 | uses: docker/login-action@v2.2.0 68 | if: github.event_name != 'pull_request' 69 | with: 70 | registry: ghcr.io 71 | username: ${{ github.actor }} 72 | password: ${{ secrets.GITHUB_TOKEN }} 73 | 74 | - name: Push to GitHub Packages 75 | uses: docker/build-push-action@v4.1.1 76 | with: 77 | context: . 
78 | tags: ghcr.io/${{ github.repository_owner }}/${{ env.IMAGE_NAME }}:${{ steps.prep.outputs.tag || 'latest' }} 79 | push: ${{ github.event_name != 'pull_request' }} 80 | cache-from: type=local,src=/tmp/.buildx-cache 81 | cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max 82 | 83 | - name: Move cache 84 | run: | 85 | rm -rf /tmp/.buildx-cache 86 | mv /tmp/.buildx-cache-new /tmp/.buildx-cache 87 | 88 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | 11 | unit: 12 | runs-on: ${{ matrix.os }}-latest 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | os: [ubuntu, windows, macos] 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - name: Set up Go 21 | uses: actions/setup-go@v5 22 | with: 23 | go-version: ">=1.24" 24 | 25 | - name: Build 26 | run: go build -v . 27 | 28 | - name: Test 29 | run: go test . 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | pg_back 2 | dist/ 3 | cover.out 4 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # .goreleaser.yml created for goreleaser version v1.16.1 2 | # Documentation at http://goreleaser.com 3 | before: 4 | hooks: 5 | # You may remove this if you don't use go modules. 
6 | - go mod download 7 | builds: 8 | - env: 9 | - CGO_ENABLED=0 10 | goos: 11 | - linux 12 | - windows 13 | - darwin 14 | goarch: 15 | - amd64 16 | - arm64 17 | archives: 18 | - format_overrides: 19 | - goos: windows 20 | format: zip 21 | checksum: 22 | name_template: 'checksums.txt' 23 | snapshot: 24 | name_template: "dev-{{.Commit}}" 25 | changelog: 26 | sort: asc 27 | filters: 28 | exclude: 29 | - '^docs:' 30 | - '^test:' 31 | source: 32 | enabled: true 33 | nfpms: 34 | - package_name: pg-back 35 | homepage: https://github.com/orgrim/pg_back/ 36 | maintainer: Nicolas Thauvin 37 | description: | 38 | pg_back uses pg_dumpall to dump roles and tablespaces, pg_dump to dump 39 | each selected database to a separate file. The custom format of pg_dump 40 | is used by default. 41 | license: PostgreSQL 42 | formats: 43 | - deb 44 | - rpm 45 | bindir: /usr/bin 46 | contents: 47 | - src: pg_back.conf 48 | dst: /etc/pg_back/pg_back.conf 49 | type: "config|noreplace" 50 | 51 | - src: contrib/systemd/pg_back@.service 52 | dst: /lib/systemd/system/pg_back@.service 53 | packager: deb 54 | 55 | - src: contrib/systemd/pg_back@.timer 56 | dst: /lib/systemd/system/pg_back@.timer 57 | packager: deb 58 | replaces: 59 | - pg_back 60 | overrides: 61 | deb: 62 | dependencies: 63 | - postgresql-client 64 | rpm: 65 | file_name_template: >- 66 | {{ .PackageName }}-{{ .Version }}- 67 | {{- if eq .Arch "amd64" }}x86_64 68 | {{- else }}{{ .Arch }}{{ end }} 69 | dependencies: 70 | - postgresql 71 | rpm: 72 | summary: pg_back dumps databases from PostgreSQL 73 | group: Applications/Databases 74 | signs: 75 | - signature: "${artifact}.asc" 76 | args: ["-u", "nico@orgrim.net", "-o", "${signature}", "-a", "-b", "${artifact}"] 77 | artifacts: checksum 78 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## pg_back 2.6.0 4 | 5 | * Allow to override 
the connection user per database 6 | 7 | ## pg_back 2.5.0 8 | 9 | * Fix Azure URL handling 10 | * Add support to upload files to Backblaze B2 11 | 12 | ## pg_back 2.4.0 13 | 14 | * Add the --upload-prefix option to put files under this remote directory 15 | * Do not try to purge global files when --dump-only is enabled 16 | 17 | ## pg_back 2.3.1 18 | 19 | * Fix cipher_public_key and cipher_private_key not allowed in config file 20 | 21 | ## pg_back 2.3.0 22 | 23 | * Add options to avoid dumping hash passwords with pg_dumpall 24 | * Ensure usability without superuser privileges 25 | * Update Go dependencies 26 | * Add the --dump-only option to only dump databases 27 | * Fix mode of the directory when the format is dir 28 | * Add an option to download from remote locations 29 | * Add an option to list files from remote locations 30 | 31 | ## pg_back 2.2.0 32 | 33 | * Support compression in plain format 34 | * Add option to skip loading config file 35 | * Harden file permissions of output files 36 | * Add Dockerfile and an example docker compose config 37 | * Add an example configuration for Kubernetes 38 | * Support AGE public keys for encryption 39 | * Fix inclusion and excusion lists parsing in per db configs 40 | 41 | ## pg_back 2.1.1 42 | 43 | * Fix exec path expansion when binDir is set 44 | * Validate if the path given to -B is an existing directory 45 | * Replace _ with - in the Debian package name, to make it a valid name 46 | * Build with go 1.20 and update dependencies for security fixes 47 | 48 | ## pg_back 2.1.0 49 | 50 | * Add quiet mode with the commnad line option -q/--quiet. It takes precedence 51 | over verbose mode. 
52 | * Compute checksum of global and ACL files 53 | * Add a dump of hba_file and ident_file 54 | * Encrypt and decrypt produced files 55 | * Ensure jobs option is greater than or equal to 1 56 | * Better check and adapt to versions of pg_dump and pg_dumpall 57 | * Upload files to AWS S3 58 | * Upload files to a remote host with SFTP 59 | * Upload files to Google Cloud Storage (GCS) 60 | * Upload files to Azure Blob Storage 61 | * Add a systemd timer for Debian in the package generated by goreleaser 62 | * Check the syntax of the configuration file 63 | * Always create a createdb.sql file for plain format instead of dumping with --create 64 | * Fix the purge based on a number of files to keep 65 | 66 | ## pg_back 2.0.1 67 | 68 | * Use /var/run/postgresql as default host for connections 69 | * Support Windows 70 | * Force legacy timestamp format on Windows 71 | * Allow postgresql URIs as connection strings 72 | * Tell pg_dump and pg_dumpall never to prompt for a password 73 | 74 | 75 | ## pg_back 2.0.0 76 | 77 | * Full rewrite in Go 78 | * Better handling of configuration dump 79 | * No need for pg_dumpacl anymore 80 | * Long option names on the commandline 81 | * New command line options: 82 | - --bin-directory - path to the binaries of PostgreSQL 83 | - --format - dump format 84 | - --parallel-backup-jobs - jobs for directory format 85 | - --compress - compression level for format that support it 86 | - --pre-backup-hook - command to run before backups 87 | - --post-backup-hook - command to run after backups 88 | * keyword=value connection string support with the -d option 89 | * Purge interval can be less than 1 day 90 | * Allow concurrent pg_dump jobs 91 | * Per database output directories using the {dbname} keyword in the path 92 | * Per database configuration with schema and table inclusion/exclusion 93 | * New configuration file format (ini) with an option to convert from the v1 94 | format 95 | * RFC 3339 time format in file name by default 96 | * Use semver 
for version numbers 97 | * Add a set of unit tests 98 | 99 | Incompatible changes from v1: 100 | 101 | * Configuration file format: use --convert-legacy-config to convert a v1 102 | configuration file 103 | * Fixed filename format with timestamp: either RFC 3339 or the default from v1 104 | (YYYY-mm-dd_HH-MM-SS) 105 | * Hook commands are parsed and split respecting shell quotes, and passed to 106 | fork/exec, not a to shell 107 | 108 | 109 | ## pg_back 1.10 110 | 111 | * Add signature in Directory format 112 | * Allow negative integer for PGBK_PURGE 113 | 114 | 115 | ## pg_back 1.9 116 | 117 | * Fix dumping settings not using connection parameters 118 | 119 | 120 | ## pg_back 1.8 121 | 122 | * Add a timeout when trying to pause replication on standby clusters 123 | * Add pre/post-backup hooks 124 | * Save to output of SHOW ALL to a file, to backup parameters 125 | * Add optionnal checksum of dump files 126 | * New default configuration file path (/etc/pg_back/pg_back.conf) with 127 | backward compatibility 128 | * Some bugfixes and improvements 129 | 130 | 131 | ## pg_back 1.7 132 | 133 | * Fix the purge not handling pg_dumpacl SQL file properly 134 | * Improve documentation of the configuration file 135 | 136 | 137 | ## pg_back 1.6 138 | 139 | * Improvements on support for pg_dumpacl (0.1 and 0.2) 140 | 141 | 142 | ## pg_back 1.5 143 | 144 | * Support for pg_dumpacl (https://github.com/dalibo/pg_dumpacl) 145 | * RPM Packaging 146 | 147 | 148 | ## pg_back 1.4 149 | 150 | * Support PostgreSQL 10 151 | 152 | 153 | ## pg_back 1.3 154 | 155 | * Ensure replication is not paused while an exclusive lock is taken 156 | * Allow a retention policy based on time and number of backups 157 | * Allow to disable purge 158 | 159 | 160 | ## pg_back 1.2 161 | 162 | * Support pausing replication on standby servers 163 | * Add timestamped information messages 164 | 165 | 166 | ## pg_back 1.1 167 | 168 | * Support the directory format of pg_dump 169 | 170 | 171 | ## pg_back 1.0 172 | 
173 | * First release 174 | 175 | -------------------------------------------------------------------------------- /CONTRIBUTORS: -------------------------------------------------------------------------------- 1 | Guillaume Lelarge 2 | Stephen Ingram 3 | Pete Deffendol 4 | Marc Cousin 5 | Thibaut Madelaine 6 | Nicolas Gollet 7 | Etienne Bersac 8 | Christophe Courtois 9 | Victor Leclere 10 | Jean-Marie Renouard 11 | Florent Jardin 12 | Stefan Fercot 13 | Thibaud Walkowiak 14 | Gounick 15 | Massimo Lusetti 16 | Kenny Root 17 | Pierrick Chovelon 18 | Dennis Urban 19 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | # Build the application from source 4 | FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:latest AS build-stage 5 | 6 | ARG TARGETPLATFORM 7 | ARG BUILDPLATFORM 8 | ARG TARGETOS 9 | ARG TARGETARCH 10 | 11 | WORKDIR /app 12 | 13 | COPY go.mod go.sum ./ 14 | RUN go mod download 15 | 16 | COPY . . 17 | 18 | RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -ldflags="-w -s" -o /go/bin/pg_back 19 | 20 | # Deploy the application binary into a lean image 21 | FROM --platform=${TARGETPLATFORM:-linux/amd64} alpine:latest AS build-release-stage 22 | 23 | WORKDIR /app 24 | 25 | RUN apk add --no-cache postgresql-client 26 | 27 | COPY --from=build-stage /go/bin/pg_back /app/pg_back 28 | 29 | ENTRYPOINT ["/app/pg_back"] 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions 5 | are met: 6 | 7 | 1. 
Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | 2. Redistributions in binary form must reproduce the above copyright 10 | notice, this list of conditions and the following disclaimer in the 11 | documentation and/or other materials provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 14 | IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 15 | OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 16 | IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 17 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 18 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 19 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 20 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 21 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 22 | THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | all: pg_back 3 | 4 | pg_back: *.go 5 | go build -ldflags="-s -w" . 6 | 7 | test: 8 | go test -coverprofile=cover.out -v 9 | 10 | coverage: test 11 | go tool cover -func=cover.out 12 | 13 | coverage-html: 14 | go tool cover -html=cover.out 15 | 16 | install: 17 | go install . 18 | 19 | clean: 20 | rm -rf test 21 | -rm cover.out pg_back 22 | 23 | .PHONY: all pg_back test coverage coverage-html 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pg_back dumps databases from PostgreSQL 2 | 3 | ## Description 4 | 5 | pg_back is a dump tool for PostgreSQL. 
The goal is to dump all or some 6 | databases with globals at once in the format you want, because a simple call to 7 | pg_dumpall only dumps databases in the plain SQL format. 8 | 9 | Behind the scene, pg_back uses `pg_dumpall` to dump roles and tablespaces 10 | definitions, `pg_dump` to dump all or each selected database to a separate file 11 | in the custom format. It also extract database level ACL and configuration that 12 | is not dumped by pg_dump older than 11. Finally, it dumps all configuration 13 | options of the PostgreSQL instance. 14 | 15 | ## Features 16 | 17 | * Dump all or a list of databases 18 | * Dump all but a list of excluded databases 19 | * Include database templates 20 | * Choose the format of the dump for each database 21 | * Limit dumped schemas and tables 22 | * Dump databases concurrently 23 | * Compute a SHA checksum of each dump 24 | * Pre-backup and post-backup hooks 25 | * Purge based on age and number of dumps to keep 26 | * Dump from a hot standby by pausing replication replay 27 | * Encrypt and decrypt dumps and other files 28 | * Upload and download dumps to S3, GCS, Azure, B2 or a remote host with SFTP 29 | 30 | ## Install 31 | 32 | A compiled binary is available from the [Github repository](https://github.com/orgrim/pg_back/releases). 33 | 34 | The binary only needs `pg_dumpall` and `pg_dump`. 35 | 36 | ## Install from source 37 | 38 | ``` 39 | go install github.com/orgrim/pg_back@latest 40 | ``` 41 | 42 | Use `make` to build and install from source (you need go 1.20 or above). 43 | 44 | As an alternative, the following *docker* command downloads, compiles and puts `pg_back` 45 | in the current directory: 46 | 47 | ``` 48 | docker run -u $(id -u) --rm -v "$PWD":/go/bin golang:1.20 -v "$PWD/.cache":/.cache \ 49 | go install github.com/orgrim/pg_back@latest 50 | ``` 51 | 52 | ## Minimum versions 53 | 54 | The minimum version of `pg_dump` et `pg_dumpall` required to dump is 8.4. 
The 55 | oldest tested server version of PostgreSQL is 8.2. 56 | 57 | ## Usage 58 | 59 | ### Basic usage 60 | 61 | Use the `--help` or `-?` to print the list of available options. To dump all 62 | databases, you only need to give the proper connection options to the PostgreSQL 63 | instance and the path to a writable directory to store the dump files. 64 | 65 | If default and command line options are not enough, a configuration file 66 | may be provided with `-c ` (see [pg_back.conf](pg_back.conf)). 67 | (Note: see below to convert configuration files from version 1.) 68 | 69 | If the default output directory `/var/backups/postgresql` does not exist or has 70 | improper ownership for your user, use `-b` to give the path where to store the 71 | files. The path may contain the `{dbname}` keyword, that would be replaced by 72 | the name of the database being dumped, this permits to dump each database in 73 | its own directory. 74 | 75 | To connect to PostgreSQL, use the `-h`, `-p`, `-U` and `-d` options. If you 76 | need less known connection options such as `sslcert` and `sslkey`, you can give 77 | a `keyword=value` libpq connection string like `pg_dump` and `pg_dumpall` 78 | accept with their `-d` option. When using connection strings, backslashes must 79 | be escaped (doubled), as well as literal single quotes (used as string 80 | delimiters). 81 | 82 | The other command line options let you tweak what is dumped, purged, and how 83 | it is done. These options can be put in a configuration file. The command line 84 | options override configuration options. 85 | 86 | ### Per-database configuration 87 | 88 | Per-database configuration can only be done with a configuration file. The 89 | configuration file uses the `ini` format, global options are in a unspecified 90 | section at the top of the file, and database specific options are in a section 91 | named after the database. Per database options override global options of the 92 | configuration file. 
93 | 94 | In database sections of the configuration file, a list of schemas or tables can 95 | be excluded from or selected in the dump. When using these options, the rules 96 | of the `-t`, `-T`, `-n` and `-N` of `pg_dump` and pattern rules apply. See the 97 | [documentation of `pg_dump`][pg_dump]. 98 | 99 | When no databases names are given on the command line, all databases except 100 | templates are dumped. To include templates, use `--with-templates` (`-T`), if 101 | templates are includes from the configuration file, `--without-templates` force 102 | exclude them. 103 | 104 | Databases can be excluded with `--exclude-dbs` (`-D`), which is a comma separated list 105 | of database names. If a database is listed on the command line and part of 106 | exclusion list, exclusion wins. 107 | 108 | Multiple databases can be dumped at the same time, by using a number of 109 | concurrent `pg_dump` jobs greater than 1 with `--jobs` (`-j`) option. It is different 110 | than `--parallel-backup-jobs` (`-J`) that controls the number of sessions used by 111 | `pg_dump` with the directory format. 112 | 113 | ### Checksums 114 | 115 | A checksum of all output files is computed in a separate file when 116 | `--checksum-algo` (`-S`) is different than `none`. The possible algorithms are: 117 | `sha1`, `sha224`, `sha256`, `sha384` and `sha512`. The checksum file is in the 118 | format required by _shaXsum_ (`sha1sum`, `sha256sum`, etc.) tools for checking 119 | with their `-c` option. 120 | 121 | ### Purge 122 | 123 | Older dumps can be removed based on their age with `--purge-older-than` (`-P`) 124 | in days, if no unit is given. Allowed units are the ones understood by the 125 | `time.ParseDuration` Go function: "s" (seconds), "m" (minutes), "h" (hours) and 126 | so on. 127 | 128 | A number of dump files to keep when purging can also be specified with 129 | `--purge-min-keep` (`-K`) with the special value `all` to keep everything, thus 130 | avoiding file removal completly. 
When both `--purge-older-than` and 131 | `--purge-min-keep` are used, the minimum number of dumps to keep is enforced 132 | before old dumps are removed. This avoids removing all dumps when the time 133 | interval is too small. 134 | 135 | ### Hooks 136 | 137 | A command can be run before taking dumps with `--pre-backup-hook`, and after 138 | with `--post-backup-hook`. The commands are executed directly, not by a shell, 139 | respecting single and double quoted values. Even if some operation fails, the 140 | post backup hook is executed when present. 141 | 142 | ### Encryption 143 | 144 | All the files produced by a run of pg_back can be encrypted using age 145 | ( an easy to use tool that does authenticated 146 | encryption of files). Encryption can be done with a passphrase or a key pair. 147 | 148 | To encrypt files with a passphrase, use the `--encrypt` option along with the 149 | `--cipher-pass` option or `PGBK_CIPHER_PASS` environment variable to specify 150 | the passphrase. When `encrypt` is set to true in the configuration file, the 151 | `--no-encrypt` option allows to disable encryption on the command line. By 152 | default, unencrypted source files are removed when they are successfully 153 | encrypted. Use the `--encrypt-keep-src` option to keep them or 154 | `--no-encrypt-keep-src` to force remove them and override the configuration 155 | file. If required, checksum of encrypted files are computed. 156 | 157 | When using keys, use `--cipher-public-key` to encrypt and 158 | `--cipher-private-key` to decrypt. The values are passed as strings in Bech32 159 | encoding. The easiest way to create them is to use the `age` tool. 160 | 161 | Encrypted files can be decrypted with the correct passphrase or the private key 162 | and the `--decrypt` option. When `--decrypt` is present on the command line, 163 | dumps are not performed, instead files are decrypted. Files can also be 164 | decrypted with the `age` tool, independently. 
Decryption of multiple files can 165 | be parallelized with the `-j` option. Arguments on the commandline (database 166 | names when dumping) are used as shell globs to choose which files to decrypt. 167 | 168 | **Please note** that files are written on disk unencrypted in the backup directory, 169 | before encryption and deleted after the encryption operation is complete. This 170 | means that the host running `pg_back` must secure enough to ensure privacy of the 171 | backup directory and connections to PostgreSQL. 172 | 173 | ### Upload to remote locations 174 | 175 | All files produced by a run can be uploaded to a remote location by setting the 176 | `--upload` option to a value different than `none`. The possible values are 177 | `s3`, `sftp`, `gcs`, `azure`, `b2` or `none`. 178 | 179 | When set to `s3`, files are uploaded to AWS S3. The `--s3-*` family of options 180 | can be used to tweak the access to the bucket. The `--s3-profile` option only 181 | reads credentials and basic configuration, s3 specific options are not used. 182 | 183 | When set to `sftp`, files are uploaded to a remote host using SFTP. The 184 | `--sftp-*` family of options can be used to setup the access to the host. The 185 | `PGBK_SSH_PASS` sets the password or decrypts the private key (identity file), 186 | it is used only when `--sftp-password` is not set (either in the configuration 187 | file or on the command line). When an identity file is provided, the password 188 | is used to decrypt it and the password authentication method is not tried with 189 | the server. The only SSH authentication methods used are password and 190 | publickey. If an SSH agent is available, it is always used. 191 | 192 | When set to `gcs`, files are uploaded to Google Cloud Storage. The `--gcs-*` 193 | family of options can be used to setup access to the bucket. When `--gcs-keyfile` 194 | is empty, `GOOGLE_APPLICATION_CREDENTIALS` environment is used. 
195 | 196 | When set to `azure`, files are uploaded to Azure Blob Storage. The `--azure-*` 197 | family of options can be used to setup access to the container. The name of the 198 | container is mandatory. If the account name is left empty, an anonymous 199 | connection is used and the endpoint is used directly: this allows the use of a 200 | full URL to the container with a SAS token. When an account is provided, the 201 | URL is built by prepending the container name to the endpoint and scheme is 202 | always `https`. The default endpoint is `blob.core.windows.net`. The 203 | `AZURE_STORAGE_ACCOUNT` and `AZURE_STORAGE_KEY` are used when `--azure-account` 204 | and `--azure-key` are not set (on the command line or corresponding options in 205 | the configuration file). 206 | 207 | WARNING: Azure support is not guaranted because there are no free solutions for 208 | testing on it 209 | 210 | When set to `b2`, files are uploaded to Backblaze B2. The `--b2-*` family of 211 | options can be used to tweak the access to the 212 | bucket. `--b2-concurrent-connections` can be used to upload the file through 213 | parallel HTTP connections. 214 | 215 | The `--upload-prefix` option can be used to place the files in a remote 216 | directory, as most cloud storage treat prefix as directories. The filename and 217 | the prefix is separated by a / in the remote location. 218 | 219 | The `--purge-remote` option can be set to `yes` to apply the same purge policy 220 | on the remote location as the local directory. 221 | 222 | When files are encrypted and their unencrypted source is kept, only encrypted 223 | files are uploaded. 224 | 225 | ### Downloading from remote locations 226 | 227 | Previously uploaded files can be downloaded using the `--download` option with 228 | a value different than `none`, similarly to `--upload`. The options to setup 229 | the remote access are the same as `--upload`. 
230 | 231 | It is possible to only list remote files with `--list-remote` with a value 232 | different than `none`, similarly to `--upload` and `--download`. 233 | 234 | When listing or downloading files, dumps are not performed. Arguments on the 235 | commandline (database names when dumping) are used as shell globs to 236 | select/filter files. 237 | 238 | If `--download` is used at the same time as `--decrypt`, files are downloaded 239 | first, then files matching globs are decrypted. 240 | 241 | ## Restoring files 242 | 243 | The following files are created: 244 | 245 | * `pg_globals_{date}.sql`: definition of roles and tablespaces, dumped with 246 | `pg_dumpall -g`. This file is restored with `psql`. 247 | * `pg_settings_{date}.out`: the list of server parameters found in the 248 | configuration files (9.5+) or in the `pg_settings` view. They shall be put 249 | back by hand. 250 | * `ident_file_{date}.out`: the full contents of the `pg_ident.conf` file, 251 | usually located in the data directory. 252 | * `hba_file_{date}.out`: the full contents of the `pg_hba.conf` file, usually 253 | located in the data directory. 254 | * `{dbname}_{date}.createdb.sql`: an SQL file containing the definition of the 255 | database and parameters set at the database or "role in database" level. It 256 | is mostly useful when using a version of `pg_dump` older than 11. It is 257 | restored with `psql`. 258 | * `{dbname}_{date}.{d,sql,dump,tar}`: the dump of the database, with a suffix 259 | depending of its format. If the format is plain, the dump is suffixed with 260 | `sql` and must be restored with `psql`. Otherwise, it must be restored with 261 | `pg_restore`. 262 | 263 | When checksum are computed, for each file described above, a text file of the 264 | same name with a suffix naming the checksum algorithm is produced. 265 | 266 | When files are encrypted, they are suffixed with `age` and must be decrypted 267 | first, see the [Encryption] section above. 
When checksums are computed and 268 | encryption is required, checksum files are encrypted and encrypted files are 269 | checksummed. 270 | 271 | To sum up, when restoring: 272 | 273 | 1. Create the roles and tablespaces by executing `pg_globals_{date}.sql` with `psql`. 274 | 2. Create the database with `{dbname}_{date}.createdb.sql` if necessary. 275 | 3. Restore the database(s) with `pg_restore` (use `-C` to create the database) or `psql` 276 | 277 | ## Managing the configuration file 278 | 279 | The previous v1 configuration files are not compatible with pg_back v2. 280 | 281 | Give the path of the v1 configuration file to the `--convert-legacy-config` 282 | command line option, and pg_back will try its best to convert it to the v2 283 | format. Redirect the output to the new configuration file: 284 | 285 | ``` 286 | pg_back --convert-legacy-config pg_back1.conf > pg_back2.conf 287 | ``` 288 | 289 | The default configuration file can be printed with the `--print-default-config` 290 | command line option. 291 | 292 | On some environments (especially Debian), you may have to add `host = /var/run/postgresql` 293 | to override the default `/tmp` host. 294 | 295 | ## Testing 296 | 297 | Use the Makefile or regular `go test`. 298 | 299 | To run SQL tests requiring a PostgreSQL instance: 300 | 301 | 1. run `initdb` in some directory 302 | 2. start `postgres` 303 | 3. load `testdata/fixture.sql` with `psql` 304 | 4. use `go test` or `make test` with the `PGBK_TEST_CONNINFO` environment 305 | variable set to a libpq connection string pointing to the instance. For 306 | example : 307 | 308 | ``` 309 | PGBK_TEST_CONNINFO="host=/tmp port=14651" make test 310 | ``` 311 | 312 | ## Contributing 313 | 314 | Please use the issues and pull requests features from Github. 
315 | 316 | ## License 317 | 318 | PostgreSQL - See [LICENSE][license] file 319 | 320 | [license]: https://github.com/orgrim/pg_back/blob/master/LICENSE 321 | 322 | [pg_dump]: https://www.postgresql.org/docs/current/app-pgdump.html 323 | -------------------------------------------------------------------------------- /connstring.go: -------------------------------------------------------------------------------- 1 | // pg_back 2 | // 3 | // Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved. 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions 7 | // are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // 2. Redistributions in binary form must reproduce the above copyright 12 | // notice, this list of conditions and the following disclaimer in the 13 | // documentation and/or other materials provided with the distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 16 | // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 | // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 | 26 | package main 27 | 28 | import ( 29 | "fmt" 30 | "maps" 31 | "net/url" 32 | "sort" 33 | "strings" 34 | "unicode" 35 | ) 36 | 37 | const ( 38 | CI_KEYVAL int = iota 39 | CI_URI 40 | ) 41 | 42 | type ConnInfo struct { 43 | Kind int // See CI_* constants 44 | Infos map[string]string 45 | } 46 | 47 | func parseConnInfo(connstring string) (*ConnInfo, error) { 48 | c := ConnInfo{} 49 | 50 | if strings.HasPrefix(connstring, "postgresql://") { 51 | c.Kind = CI_URI 52 | i, err := parseUrlConnInfo(connstring) 53 | if err != nil { 54 | return nil, err 55 | } 56 | c.Infos = i 57 | return &c, nil 58 | } 59 | 60 | if strings.Contains(connstring, "=") { 61 | c.Kind = CI_KEYVAL 62 | i, err := parseKeywordConnInfo(connstring) 63 | if err != nil { 64 | return nil, err 65 | } 66 | c.Infos = i 67 | return &c, nil 68 | } 69 | 70 | return nil, fmt.Errorf("parseConnInfo: invalid input connection string") 71 | } 72 | 73 | func (c *ConnInfo) String() string { 74 | switch c.Kind { 75 | case CI_KEYVAL: 76 | return makeKeywordConnInfo(c.Infos) 77 | case CI_URI: 78 | return makeUrlConnInfo(c.Infos) 79 | } 80 | 81 | return "" 82 | } 83 | 84 | func (c *ConnInfo) Copy() *ConnInfo { 85 | newC := ConnInfo{ 86 | Kind: c.Kind, 87 | Infos: make(map[string]string, len(c.Infos)), 88 | } 89 | maps.Copy(newC.Infos, c.Infos) 90 | 91 | return &newC 92 | } 93 | 94 | // Set returns a pointer to a full copy of the conninfo with the key added or 95 | // the value updated 96 | func (c *ConnInfo) Set(keyword, value string) *ConnInfo { 97 | newC := c.Copy() 98 | newC.Infos[keyword] = value 99 | 100 | return newC 101 | } 102 | 103 | // Del returns a pointer to a full copy of the conninfo with the key removed 104 | func (c *ConnInfo) Del(keyword string) *ConnInfo { 105 | newC := c.Copy() 106 | delete(newC.Infos, keyword) 107 | 108 | return newC 109 | } 110 | 111 | // MakeEnv return the conninfo as a list of "key=value" environment variables 112 | // that the libpq understands, as stated in the documentation 
of PostgreSQL 14 113 | func (c *ConnInfo) MakeEnv() []string { 114 | env := make([]string, 0, len(c.Infos)) 115 | for k, v := range c.Infos { 116 | switch k { 117 | case "host": 118 | env = append(env, "PGHOST="+v) 119 | case "hostaddr": 120 | env = append(env, "PGHOSTADDR="+v) 121 | case "port": 122 | env = append(env, "PGPORT="+v) 123 | case "dbname": 124 | env = append(env, "PGDATABASE="+v) 125 | case "user": 126 | env = append(env, "PGUSER="+v) 127 | case "password": 128 | env = append(env, "PGPASSWORD="+v) 129 | case "passfile": 130 | env = append(env, "PGPASSFILE="+v) 131 | case "service": 132 | env = append(env, "PGSERVICE="+v) 133 | case "options": 134 | env = append(env, "PGOPTIONS="+v) 135 | case "application_name": 136 | env = append(env, "PGAPPNAME="+v) 137 | case "sslmode": 138 | env = append(env, "PGSSLMODE="+v) 139 | case "requiressl": 140 | env = append(env, "PGREQUIRESSL="+v) 141 | case "sslcert": 142 | env = append(env, "PGSSLCERT="+v) 143 | case "sslkey": 144 | env = append(env, "PGSSLKEY="+v) 145 | case "sslrootcert": 146 | env = append(env, "PGSSLROOTCERT="+v) 147 | case "sslcrl": 148 | env = append(env, "PGSSLCRL="+v) 149 | case "krbsrvname": 150 | env = append(env, "PGKRBSRVNAME="+v) 151 | case "gsslib": 152 | env = append(env, "PGGSSLIB="+v) 153 | case "connect_timeout": 154 | env = append(env, "PGCONNECT_TIMEOUT="+v) 155 | case "channel_binding": 156 | env = append(env, "PGCHANNELBINDING="+v) 157 | case "sslcompression": 158 | env = append(env, "PGSSLCOMPRESSION="+v) 159 | case "sslcrldir": 160 | env = append(env, "PGSSLCRLDIR="+v) 161 | case "sslsni": 162 | env = append(env, "PGSSLSNI="+v) 163 | case "requirepeer": 164 | env = append(env, "PGREQUIREPEER="+v) 165 | case "ssl_min_protocol_version": 166 | env = append(env, "PGSSLMINPROTOCOLVERSION="+v) 167 | case "ssl_max_protocol_version": 168 | env = append(env, "PGSSLMAXPROTOCOLVERSION="+v) 169 | case "gssencmode": 170 | env = append(env, "PGGSSENCMODE="+v) 171 | case "client_encoding": 
172 | env = append(env, "PGCLIENTENCODING="+v) 173 | case "target_session_attrs": 174 | env = append(env, "PGTARGETSESSIONATTRS="+v) 175 | } 176 | } 177 | 178 | return env 179 | } 180 | 181 | func parseUrlConnInfo(connstring string) (map[string]string, error) { 182 | u, err := url.Parse(connstring) 183 | if err != nil { 184 | return nil, fmt.Errorf("parsing of URI conninfo failed: %w", err) 185 | } 186 | 187 | connInfo := make(map[string]string, 0) 188 | if u.Host != "" { 189 | fullHosts := strings.Split(u.Host, ",") 190 | if len(fullHosts) == 1 { 191 | v := u.Hostname() 192 | if v != "" { 193 | connInfo["host"] = v 194 | } 195 | v = u.Port() 196 | if v != "" { 197 | connInfo["port"] = v 198 | } 199 | } else { 200 | // We need to split and group hosts and ports 201 | // ourselves, net/url does not handle multiple hosts 202 | // correctly 203 | hosts := make([]string, 0) 204 | ports := make([]string, 0) 205 | for _, fullHost := range fullHosts { 206 | hostPort := make([]string, 0) 207 | if strings.HasPrefix(fullHost, "[") { 208 | // Handle literal IPv6 addresses 209 | hostPort = strings.Split(strings.TrimPrefix(fullHost, "["), "]:") 210 | } else { 211 | hostPort = strings.Split(fullHost, ":") 212 | } 213 | if len(hostPort) == 1 { 214 | hosts = append(hosts, strings.Trim(hostPort[0], "[]")) 215 | } else { 216 | hosts = append(hosts, strings.Trim(hostPort[0], "[]")) 217 | ports = append(ports, hostPort[1]) 218 | } 219 | } 220 | connInfo["host"] = strings.Join(hosts, ",") 221 | connInfo["port"] = strings.Join(ports, ",") 222 | } 223 | } 224 | 225 | user := u.User.Username() 226 | if user != "" { 227 | connInfo["user"] = user 228 | } 229 | 230 | password, set := u.User.Password() 231 | if password != "" && set { 232 | connInfo["password"] = password 233 | } 234 | 235 | dbname := strings.TrimPrefix(u.Path, "/") 236 | if dbname != "" { 237 | connInfo["dbname"] = dbname 238 | } 239 | 240 | for k, vs := range u.Query() { 241 | if k == "" { 242 | continue 243 | } 244 | 
// parseKeywordConnInfo parses and converts a key=value connection string of
// PostgreSQL into a map
func parseKeywordConnInfo(connstring string) (map[string]string, error) {

	// Structure to hold the state of the parsing
	state := struct {
		expKey   bool // expect the next token is a keyword
		expSep   bool // expect the next token is the = keyword/value separator
		expVal   bool // expect the next token is a value
		inKey    bool // we are inside a keyword
		inVal    bool // we are inside a value
		inEscape bool // we have found a backslash, next rune is escaped
		isQuoted bool // we are in a quoted value, the end of token rune is different
	}{expKey: true} // we start by expecting a keyword

	// We store our key/value pairs in a map. When a keyword is given
	// multiple times, only the value closest to the right is
	// kept. PostgreSQL behaves the same.
	pairs := make(map[string]string)

	var keyword, value string

	for _, r := range connstring {
		if state.expKey {
			if unicode.IsSpace(r) {
				continue
			}

			if r >= 'a' && r <= 'z' {
				keyword += string(r)
				state.expKey = false
				state.inKey = true
				continue
			}

			// Here we are more strict than PostgreSQL by allowing
			// keywords to start only with lowercase ascii letters
			return pairs, fmt.Errorf("illegal keyword character")
		}

		if state.expSep {
			if unicode.IsSpace(r) {
				continue
			}

			if r == '=' {
				state.expSep = false
				state.expVal = true
				continue
			}

			return pairs, fmt.Errorf("missing \"=\" after \"%s\"", keyword)
		}

		if state.expVal {
			if unicode.IsSpace(r) {
				continue
			}

			state.expVal = false
			state.inVal = true

			if r == '\'' {
				state.isQuoted = true
				continue
			}
			// Not a quote: fall through so this rune is processed
			// as the first rune of the value below.
		}

		if state.inKey {
			if (r >= 'a' && r <= 'z') || r == '_' {
				keyword += string(r)
				continue
			}

			if unicode.IsSpace(r) {
				state.inKey = false
				state.expSep = true
				continue
			}

			if r == '=' {
				state.inKey = false
				state.expVal = true
				continue
			}

			return pairs, fmt.Errorf("illegal character in keyword")
		}

		if state.inVal {
			if r == '\\' && !state.inEscape {
				state.inEscape = true
				continue
			}

			if state.inEscape {
				state.inEscape = false
				value += string(r)
				continue
			}

			// A quoted value ends on a single quote, an unquoted
			// one on whitespace
			if (state.isQuoted && r == '\'') || (!state.isQuoted && unicode.IsSpace(r)) {
				state.isQuoted = false
				state.inVal = false
				state.expKey = true
				pairs[keyword] = value
				keyword = ""
				value = ""
				continue
			}

			value += string(r)
		}
	}

	if state.expSep || state.inKey {
		return pairs, fmt.Errorf("missing value")
	}

	if state.inVal && state.isQuoted {
		return pairs, fmt.Errorf("unterminated quoted string")
	}

	if state.expVal || state.inVal {
		pairs[keyword] = value
	}

	return pairs, nil
}
// makeKeywordConnInfo converts a map of libpq keywords into a keyword=value
// connection string, quoting and escaping values when required. Keys are
// output in sorted order so that the result is deterministic for a given
// input, useful for unit tests.
func makeKeywordConnInfo(infos map[string]string) string {
	keywords := make([]string, 0, len(infos))
	for k := range infos {
		keywords = append(keywords, k)
	}
	sort.Strings(keywords)

	parts := make([]string, 0, len(keywords))
	for _, k := range keywords {
		// single quotes and backslashes must be escaped
		value := strings.ReplaceAll(infos[k], "\\", "\\\\")
		value = strings.ReplaceAll(value, "'", "\\'")

		// empty values or values containing space, the equal sign or
		// single quotes must be single quoted
		if len(infos[k]) == 0 || strings.ContainsAny(infos[k], "\t\n\v\f\r ='") {
			value = "'" + value + "'"
		}

		parts = append(parts, k+"="+value)
	}

	return strings.Join(parts, " ")
}

// makeUrlConnInfo converts a map of libpq keywords into a postgresql:// URI
// connection string, handling multiple hosts/ports and unix socket
// directories.
func makeUrlConnInfo(infos map[string]string) string {
	u := &url.URL{
		Scheme: "postgresql",
	}

	// create user info
	username, hasUser := infos["user"]
	pass, hasPass := infos["password"]

	var user *url.Userinfo
	if hasPass {
		user = url.UserPassword(username, pass)
	} else if hasUser {
		user = url.User(username)
	}
	u.User = user

	// Manage host:port list with commas. When the hosts is a unix socket
	// directory, do not set the Host field of the url because it won't be
	// percent encoded, use the query part instead
	if !strings.Contains(infos["host"], "/") {
		hosts := strings.Split(infos["host"], ",")
		ports := strings.Split(infos["port"], ",")

		// Pad the port list to the size of the host list so that
		// ports[i] is always valid in the loop below. When a single
		// non empty port is given, it applies to every host;
		// otherwise missing ports are left empty. The previous code
		// did not pad when the only port was empty, making ports[i]
		// panic with multiple hosts and no port.
		if len(ports) == 1 && ports[0] != "" {
			// same non default port for all hosts, duplicate it
			for len(ports) < len(hosts) {
				ports = append(ports, ports[0])
			}
		} else {
			// fill with empty ports to fix the list
			for len(ports) < len(hosts) {
				ports = append(ports, "")
			}
		}

		hostnames := make([]string, 0, len(hosts))

		for i, host := range hosts {
			// Take care of IPv6 addresses
			if strings.Contains(host, ":") && !strings.HasPrefix(host, "[") {
				host = "[" + host + "]"
			}

			if ports[i] != "" {
				hostnames = append(hostnames, host+":"+ports[i])
			} else {
				hostnames = append(hostnames, host)
			}
		}

		u.Host = strings.Join(hostnames, ",")
	}

	// dbname
	u.Path = "/" + infos["dbname"]
	u.RawPath = "/" + url.PathEscape(infos["dbname"])

	// compute query
	query := url.Values{}
	needPort := false

	// Sort keys so that host comes before port and we can add port to the
	// query when we are forced to add host to the query (unix socket
	// directory) in the next loop
	keys := make([]string, 0, len(infos))
	for k := range infos {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		if k == "host" && strings.Contains(infos[k], "/") || k == "port" && needPort {
			needPort = true
			query.Set(k, infos[k])
			continue
		}

		if k != "host" && k != "port" && k != "user" && k != "password" && k != "dbname" {
			query.Set(k, infos[k])
		}
	}
	u.RawQuery = query.Encode()

	return u.String()
}
"host" && k != "port" && k != "user" && k != "password" && k != "dbname" { 506 | query.Set(k, infos[k]) 507 | } 508 | } 509 | u.RawQuery = query.Encode() 510 | 511 | return u.String() 512 | } 513 | 514 | // prepareConnInfo returns a connexion string computed from the input 515 | // values. When the dbname is already a connection string or a postgresql:// 516 | // URI, it only add the application_name keyword if not set. 517 | func prepareConnInfo(host string, port int, username string, dbname string) (*ConnInfo, error) { 518 | var ( 519 | conninfo *ConnInfo 520 | err error 521 | ) 522 | 523 | // dbname may be a connstring or a URI. The database name option, 524 | // usually -d for PostgreSQL binaires accept a connection string and 525 | // URIs. We do a simple check for a = sign or the postgresql scheme. If 526 | // someone has a database name containing a space, one can still dump 527 | // it by giving us connstring. 528 | if strings.HasPrefix(dbname, "postgresql://") || strings.Contains(dbname, "=") { 529 | conninfo, err = parseConnInfo(dbname) 530 | if err != nil { 531 | return nil, err 532 | } 533 | 534 | } else { 535 | conninfo = &ConnInfo{ 536 | Infos: make(map[string]string), 537 | } 538 | 539 | if host != "" { 540 | conninfo.Infos["host"] = host 541 | } 542 | 543 | if port != 0 { 544 | conninfo.Infos["port"] = fmt.Sprintf("%v", port) 545 | } 546 | 547 | if username != "" { 548 | conninfo.Infos["user"] = username 549 | } 550 | 551 | if dbname != "" { 552 | conninfo.Infos["dbname"] = dbname 553 | } 554 | } 555 | 556 | if _, ok := conninfo.Infos["application_name"]; !ok { 557 | l.Verboseln("using pg_back as application_name") 558 | conninfo.Infos["application_name"] = "pg_back" 559 | } 560 | 561 | return conninfo, nil 562 | } 563 | -------------------------------------------------------------------------------- /connstring_test.go: -------------------------------------------------------------------------------- 1 | // pg_back 2 | // 3 | // Copyright 2011-2021 
Nicolas Thauvin and contributors. All rights reserved. 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions 7 | // are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // 2. Redistributions in binary form must reproduce the above copyright 12 | // notice, this list of conditions and the following disclaimer in the 13 | // documentation and/or other materials provided with the distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 16 | // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 | // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 | 26 | package main 27 | 28 | import ( 29 | "fmt" 30 | "github.com/google/go-cmp/cmp" 31 | "github.com/google/go-cmp/cmp/cmpopts" 32 | "testing" 33 | ) 34 | 35 | func TestParseKeywordConnInfo(t *testing.T) { 36 | var tests = []struct { 37 | input string 38 | fail string 39 | want map[string]string 40 | }{ 41 | { 42 | "host=/tmp port=5432", 43 | "", 44 | map[string]string{"host": "/tmp", "port": "5432"}, 45 | }, 46 | { 47 | "host='/tmp'", 48 | "", 49 | map[string]string{"host": "/tmp"}, 50 | }, 51 | { 52 | "host = pg.local port\n = 5433 user=u1 dbname= 'silly name' ", 53 | "", 54 | map[string]string{"dbname": "silly name", "host": "pg.local", "port": "5433", "user": "u1"}, 55 | }, 56 | { 57 | "bad keyword = value", 58 | "missing \"=\" after \"bad\"", 59 | map[string]string{}, 60 | }, 61 | { 62 | "bad-keyword=value", 63 | "illegal character in keyword", 64 | map[string]string{}, 65 | }, 66 | { 67 | "%bad_keyword=value", 68 | "illegal keyword character", 69 | map[string]string{}, 70 | }, 71 | { 72 | `key='\' \\ \p' other_key=ab\ cd`, 73 | "", 74 | map[string]string{"key": `' \ p`, "other_key": "ab cd"}, 75 | }, 76 | { 77 | "novalue ", 78 | "missing value", 79 | map[string]string{}, 80 | }, 81 | { 82 | "novalue", 83 | "missing value", 84 | map[string]string{}, 85 | }, 86 | { 87 | "key = 'no end quote", 88 | "unterminated quoted string", 89 | map[string]string{}, 90 | }, 91 | } 92 | 93 | for i, st := range tests { 94 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 95 | got, err := parseKeywordConnInfo(st.input) 96 | if err != nil { 97 | if len(st.fail) == 0 { 98 | t.Errorf("unexpected error: %v", err) 99 | } else { 100 | if err.Error() != st.fail { 101 | t.Errorf("unexpected error, got: %v, want: %v", err.Error(), st.fail) 102 | } 103 | } 104 | } 105 | 106 | if diff := cmp.Diff(st.want, got, cmpopts.EquateEmpty()); diff != "" { 107 | t.Errorf("parseKeywordConnInfo() mismatch (-want +got):\n%s", diff) 108 | } 109 | }) 110 | } 111 | } 112 | 113 | func 
TestParseUrlConnInfo(t *testing.T) { 114 | var tests = []struct { 115 | input string 116 | want map[string]string 117 | }{ 118 | { 119 | "postgresql://", 120 | map[string]string{}, 121 | }, 122 | { 123 | "postgresql://localhost", 124 | map[string]string{"host": "localhost"}, 125 | }, 126 | { 127 | "postgresql://localhost:5433", 128 | map[string]string{"host": "localhost", "port": "5433"}, 129 | }, 130 | { 131 | "postgresql://localhost/mydb", 132 | map[string]string{"host": "localhost", "dbname": "mydb"}, 133 | }, 134 | { 135 | "postgresql://user@localhost", 136 | map[string]string{"host": "localhost", "user": "user"}, 137 | }, 138 | { 139 | "postgresql://user:secret@localhost", 140 | map[string]string{"host": "localhost", "user": "user", "password": "secret"}, 141 | }, 142 | { 143 | "postgresql://other@localhost/otherdb?connect_timeout=10&application_name=myapp", 144 | map[string]string{"host": "localhost", "user": "other", "dbname": "otherdb", "connect_timeout": "10", "application_name": "myapp"}, 145 | }, 146 | { 147 | "postgresql://host1:123,host2:456/somedb?target_session_attrs=any&application_name=myapp", 148 | map[string]string{"host": "host1,host2", "port": "123,456", "dbname": "somedb", "target_session_attrs": "any", "application_name": "myapp"}, 149 | }, 150 | { 151 | "postgresql://[::1]:5433,[::1]:", 152 | map[string]string{"host": "::1,::1", "port": "5433,"}, 153 | }, 154 | } 155 | 156 | for i, st := range tests { 157 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 158 | got, err := parseUrlConnInfo(st.input) 159 | if err != nil { 160 | t.Errorf("unexpected error: %v", err) 161 | } 162 | 163 | if diff := cmp.Diff(st.want, got, cmpopts.EquateEmpty()); diff != "" { 164 | t.Errorf("parseUrlConnInfo() mismatch (-want +got):\n%s", diff) 165 | } 166 | }) 167 | } 168 | } 169 | 170 | func TestMakeKeywordConnInfo(t *testing.T) { 171 | var tests = []struct { 172 | infos map[string]string 173 | want string 174 | }{ 175 | { 176 | map[string]string{"host": "/tmp", 
"port": "5432", "dbname": "ab c'd", "password": "jE'r\\m"}, 177 | `dbname='ab c\'d' host=/tmp password='jE\'r\\m' port=5432`, 178 | }, 179 | } 180 | 181 | for i, st := range tests { 182 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 183 | got := makeKeywordConnInfo(st.infos) 184 | if got != st.want { 185 | t.Errorf("got: %v, want: %v", got, st.want) 186 | } 187 | }) 188 | } 189 | } 190 | 191 | func TestMakeUrlConnInfo(t *testing.T) { 192 | var tests = []struct { 193 | infos map[string]string 194 | want string 195 | }{ 196 | { 197 | map[string]string{"host": "localhost", "port": "5432", "dbname": "db", "password": "secret"}, 198 | "postgresql://:secret@localhost:5432/db", 199 | }, 200 | { 201 | map[string]string{"host": "::1", "port": "5432", "dbname": "db", "password": "secret"}, 202 | "postgresql://:secret@[::1]:5432/db", 203 | }, 204 | { 205 | map[string]string{"host": "/tmp", "port": "5432", "dbname": "db"}, 206 | "postgresql:///db?host=%2Ftmp&port=5432", 207 | }, 208 | { 209 | map[string]string{"host": "h1,h2", "port": "5432", "dbname": "db", "user": "u1"}, 210 | "postgresql://u1@h1:5432,h2:5432/db", 211 | }, 212 | { 213 | map[string]string{"host": "h1,h2,h3", "port": "5432,5433", "dbname": "db", "user": "u1"}, 214 | "postgresql://u1@h1:5432,h2:5433,h3/db", 215 | }, 216 | { 217 | map[string]string{"host": "h1,h2,h3", "port": "5432,5433,", "dbname": "db", "user": "u1"}, 218 | "postgresql://u1@h1:5432,h2:5433,h3/db", 219 | }, 220 | { 221 | map[string]string{"host": "localhost", "port": "", "user": "u1", "password": "p"}, 222 | "postgresql://u1:p@localhost/", 223 | }, 224 | { 225 | map[string]string{}, 226 | "postgresql:///", 227 | }, 228 | { 229 | map[string]string{"user": "other", "host": "localhost", "dbname": "otherdb", "connect_timeout": "10", "application_name": "myapp"}, 230 | "postgresql://other@localhost/otherdb?application_name=myapp&connect_timeout=10", 231 | }, 232 | } 233 | 234 | for i, st := range tests { 235 | t.Run(fmt.Sprintf("%v", i), func(t 
*testing.T) { 236 | got := makeUrlConnInfo(st.infos) 237 | if got != st.want { 238 | t.Errorf("got: %v, want: %v", got, st.want) 239 | } 240 | }) 241 | } 242 | } 243 | 244 | func TestPrepareConnInfo(t *testing.T) { 245 | var tests = []struct { 246 | host string 247 | port int 248 | username string 249 | dbname string 250 | want string 251 | }{ 252 | {"/tmp", 0, "", "", "application_name=pg_back host=/tmp"}, 253 | {"localhost", 5432, "postgres", "postgres", "application_name=pg_back dbname=postgres host=localhost port=5432 user=postgres"}, 254 | {"localhost", 0, "postgres", "postgres", "application_name=pg_back dbname=postgres host=localhost user=postgres"}, 255 | {"localhost", 5432, "", "postgres", "application_name=pg_back dbname=postgres host=localhost port=5432"}, 256 | {"localhost", 5432, "postgres", "", "application_name=pg_back host=localhost port=5432 user=postgres"}, 257 | {"localhost", 0, "postgres", "", "application_name=pg_back host=localhost user=postgres"}, 258 | {"", 0, "postgres", "", "application_name=pg_back user=postgres"}, 259 | {"localhost", 0, "postgres", "host=/tmp port=5432", "application_name=pg_back host=/tmp port=5432"}, 260 | {"", 0, "", "host=/tmp port=5433 application_name=other", "application_name=other host=/tmp port=5433"}, 261 | {"", 0, "", "postgresql:///db?host=/tmp", "postgresql:///db?application_name=pg_back&host=%2Ftmp"}, 262 | } 263 | 264 | for i, subt := range tests { 265 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 266 | res, _ := prepareConnInfo(subt.host, subt.port, subt.username, subt.dbname) 267 | if res.String() != subt.want { 268 | t.Errorf("got '%s', want '%s'", res, subt.want) 269 | } 270 | }) 271 | } 272 | } 273 | 274 | func TestConnInfoCopy(t *testing.T) { 275 | want := &ConnInfo{ 276 | Kind: CI_KEYVAL, 277 | Infos: map[string]string{"host": "localhost", "port": "5432", "dbname": "db"}, 278 | } 279 | 280 | got := want.Copy() 281 | 282 | if diff := cmp.Diff(want, got, cmpopts.EquateEmpty()); diff != "" { 283 | 
t.Errorf("*ConnInfo.Copy() mismatch (-want +got):\n%s", diff) 284 | } 285 | 286 | if want == got { 287 | t.Errorf("*ConnInfo.Copy() output is the same") 288 | } 289 | } 290 | 291 | func TestConnInfoSet(t *testing.T) { 292 | var tests = []struct { 293 | input *ConnInfo 294 | key string 295 | val string 296 | want *ConnInfo 297 | }{ 298 | { 299 | &ConnInfo{ 300 | Kind: CI_KEYVAL, 301 | Infos: map[string]string{"host": "localhost", "port": "5432", "dbname": "db"}, 302 | }, 303 | "dbname", 304 | "other", 305 | &ConnInfo{ 306 | Kind: CI_KEYVAL, 307 | Infos: map[string]string{"host": "localhost", "port": "5432", "dbname": "other"}, 308 | }, 309 | }, 310 | { 311 | &ConnInfo{ 312 | Kind: CI_KEYVAL, 313 | Infos: map[string]string{"host": "localhost", "port": "5432", "dbname": "db"}, 314 | }, 315 | "user", 316 | "somebody", 317 | &ConnInfo{ 318 | Kind: CI_KEYVAL, 319 | Infos: map[string]string{"host": "localhost", "port": "5432", "dbname": "db", "user": "somebody"}, 320 | }, 321 | }, 322 | } 323 | 324 | for i, subt := range tests { 325 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 326 | got := subt.input.Set(subt.key, subt.val) 327 | if diff := cmp.Diff(subt.want, got, cmpopts.EquateEmpty()); diff != "" { 328 | t.Errorf("*ConnInfo.Set() mismatch (-want +got):\n%s", diff) 329 | } 330 | }) 331 | } 332 | } 333 | 334 | func TestConnInfoDel(t *testing.T) { 335 | var tests = []struct { 336 | input *ConnInfo 337 | key string 338 | want *ConnInfo 339 | }{ 340 | { 341 | &ConnInfo{ 342 | Kind: CI_KEYVAL, 343 | Infos: map[string]string{"host": "localhost", "port": "5432", "dbname": "db"}, 344 | }, 345 | "dbname", 346 | &ConnInfo{ 347 | Kind: CI_KEYVAL, 348 | Infos: map[string]string{"host": "localhost", "port": "5432"}, 349 | }, 350 | }, 351 | { 352 | &ConnInfo{ 353 | Kind: CI_KEYVAL, 354 | Infos: map[string]string{"host": "localhost", "port": "5432", "dbname": "db"}, 355 | }, 356 | "user", 357 | &ConnInfo{ 358 | Kind: CI_KEYVAL, 359 | Infos: map[string]string{"host": "localhost", 
"port": "5432", "dbname": "db"}, 360 | }, 361 | }, 362 | } 363 | 364 | for i, subt := range tests { 365 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 366 | got := subt.input.Del(subt.key) 367 | if diff := cmp.Diff(subt.want, got, cmpopts.EquateEmpty()); diff != "" { 368 | t.Errorf("*ConnInfo.Del() mismatch (-want +got):\n%s", diff) 369 | } 370 | }) 371 | } 372 | } 373 | -------------------------------------------------------------------------------- /contrib/kubernetes/cronjob.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This assumes you have created Kubernetes secrets. Below is an example for 3 | # MinIO endpoint. It is using the https://age-encryption.org/ public key 4 | # encryption which may be generated using the "age-keygen" command. 5 | # 6 | # kubectl create secret generic backup-secret \ 7 | # --from-literal=BACKUP_PUBLIC_KEY=age1foobar 8 | # 9 | # kubectl create secret generic postgres-secret \ 10 | # --from-literal=PGHOST=postgres \ 11 | # --from-literal=PGUSER=myapp \ 12 | # --from-literal=PGPASSWORD=mysecret 13 | # 14 | # kubectl create secret generic s3-secret \ 15 | # --from-literal=S3_ENDPOINT=https://minio:9000/ \ 16 | # --from-literal=S3_REGION=us-east-1 \ 17 | # --from-literal=S3_BUCKET=my-backup-bucket \ 18 | # --from-literal=S3_KEYID=nizcifWuAmKaidJivCaf \ 19 | # --from-literal=S3_SECRET=irmOajchefyikIajHebvocguWodShuchvibAtLev 20 | 21 | apiVersion: batch/v1 22 | kind: CronJob 23 | metadata: 24 | name: backup 25 | spec: 26 | schedule: "0 2 * * *" 27 | jobTemplate: 28 | spec: 29 | template: 30 | spec: 31 | containers: 32 | - name: backup 33 | image: ghcr.io/orgrim/pg_back:latest 34 | imagePullPolicy: Always 35 | envFrom: 36 | - secretRef: 37 | name: backup-secret 38 | - secretRef: 39 | name: postgres-secret 40 | - secretRef: 41 | name: s3-secret 42 | args: 43 | - "--encrypt" 44 | - "--cipher-public-key" 45 | - "$(BACKUP_PUBLIC_KEY)" 46 | - "--s3-bucket" 47 | - "$(S3_BUCKET)" 48 | - 
"--s3-endpoint" 49 | - "$(S3_ENDPOINT)" 50 | - "--s3-key-id" 51 | - "$(S3_KEYID)" 52 | - "--s3-secret" 53 | - "$(S3_SECRET)" 54 | - "--s3-region" 55 | - "$(S3_REGION)" 56 | -------------------------------------------------------------------------------- /contrib/systemd/pg_back@.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Wants=postgresql@%i.service 3 | AssertPathExists=/etc/pg_back/%I/pg_back.conf 4 | Description=Simple backup for PostgreSQL 5 | Documentation=https://github.com/orgrim/pg_back 6 | After=postgresql@%i.service 7 | RequiresMountsFor=/etc/pg_back/%I /var/lib/postgresql/%I 8 | 9 | [Service] 10 | # Execute pre and post scripts as root, otherwise it does it as User= 11 | PermissionsStartOnly=true 12 | ExecStartPre=/usr/bin/install -m 0750 -o postgres -g postgres -d /var/backups/postgresql/%I 13 | 14 | ExecStart=/usr/bin/pg_back -c /etc/pg_back/%I/pg_back.conf 15 | User=postgres 16 | Group=postgres 17 | 18 | IOSchedulingClass=best-effort 19 | IOSchedulingPriority=7 20 | SyslogIdentifier=pg_back@%i 21 | Type=oneshot 22 | 23 | [Install] 24 | WantedBy=multi-user.target 25 | -------------------------------------------------------------------------------- /contrib/systemd/pg_back@.timer: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Daily PostgreSQL backup 3 | AssertPathExists=/etc/pg_back/%I/pg_back.conf 4 | 5 | [Timer] 6 | OnCalendar=*-*-* 4:00:00 7 | AccuracySec=30m 8 | Persistent=true 9 | 10 | [Install] 11 | WantedBy=postgresql@%i.service 12 | -------------------------------------------------------------------------------- /crypto.go: -------------------------------------------------------------------------------- 1 | // pg_back 2 | // 3 | // Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved. 
4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions 7 | // are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // 2. Redistributions in binary form must reproduce the above copyright 12 | // notice, this list of conditions and the following disclaimer in the 13 | // documentation and/or other materials provided with the distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 16 | // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 | // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 | 26 | package main 27 | 28 | import ( 29 | "errors" 30 | "fmt" 31 | "io" 32 | "os" 33 | "path/filepath" 34 | "strings" 35 | 36 | "filippo.io/age" 37 | ) 38 | 39 | func ageEncrypt(src io.Reader, dst io.Writer, params encryptParams) error { 40 | if params.PublicKey != "" { 41 | return ageEncryptPublicKey(src, dst, params.PublicKey) 42 | } 43 | 44 | if params.Passphrase != "" { 45 | return ageEncryptPassphrase(src, dst, params.Passphrase) 46 | } 47 | 48 | return fmt.Errorf("Unexpected condition: no public key or passphrase") 49 | } 50 | 51 | func ageEncryptPassphrase(src io.Reader, dst io.Writer, passphrase string) error { 52 | // Age encrypt to a recipient, Scrypt allow to create a key from a passphrase 53 | recipient, err := age.NewScryptRecipient(passphrase) 54 | if err != nil { 55 | return fmt.Errorf("failed to create recipient from passphrase: %w", err) 56 | } 57 | 58 | return ageEncryptInternal(src, dst, recipient) 59 | } 60 | 61 | func ageEncryptPublicKey(src io.Reader, dst io.Writer, publicKey string) error { 62 | recipient, err := age.ParseX25519Recipient(publicKey) 63 | if err != nil { 64 | return fmt.Errorf("failed to create recipient from public key: %w", err) 65 | } 66 | 67 | return ageEncryptInternal(src, dst, recipient) 68 | } 69 | 70 | func ageEncryptInternal(src io.Reader, dst io.Writer, recipient age.Recipient) error { 71 | w, err := age.Encrypt(dst, recipient) 72 | if err != nil { 73 | return fmt.Errorf("failed to create encrypted file: %w", err) 74 | } 75 | 76 | if _, err := io.Copy(w, src); err != nil { 77 | return fmt.Errorf("failed to write to encrypted file: %w", err) 78 | } 79 | 80 | // It is mandatory to Close the writer from age so that it flushes its data 81 | w.Close() 82 | 83 | return nil 84 | } 85 | 86 | func ageDecrypt(src io.Reader, dst io.Writer, params decryptParams) error { 87 | if params.PrivateKey != "" { 88 | return ageDecryptPrivateKey(src, dst, params.PrivateKey) 89 | } 90 | 91 | if params.Passphrase != "" { 92 | return 
ageDecryptPassphrase(src, dst, params.Passphrase) 93 | } 94 | 95 | return fmt.Errorf("No private key or passphrase specified") 96 | } 97 | 98 | func ageDecryptPrivateKey(src io.Reader, dst io.Writer, privateKey string) error { 99 | identity, err := age.ParseX25519Identity(privateKey) 100 | if err != nil { 101 | return fmt.Errorf("failed to parse AGE private key: %w", err) 102 | } 103 | 104 | return ageDecryptInternal(src, dst, identity) 105 | } 106 | 107 | func ageDecryptPassphrase(src io.Reader, dst io.Writer, passphrase string) error { 108 | identity, err := age.NewScryptIdentity(passphrase) 109 | if err != nil { 110 | return fmt.Errorf("failed to create identity from passphrase: %w", err) 111 | } 112 | 113 | return ageDecryptInternal(src, dst, identity) 114 | } 115 | 116 | func ageDecryptInternal(src io.Reader, dst io.Writer, identity age.Identity) error { 117 | r, err := age.Decrypt(src, identity) 118 | if err != nil { 119 | var badpass *age.NoIdentityMatchError 120 | if errors.As(err, &badpass) { 121 | return fmt.Errorf("invalid key or passphrase") 122 | } 123 | return fmt.Errorf("failed to initiate decryption: %w", err) 124 | } 125 | 126 | if _, err := io.Copy(dst, r); err != nil { 127 | return fmt.Errorf("failed to read encrypted data: %w", err) 128 | } 129 | 130 | return nil 131 | } 132 | 133 | func encryptFile(path string, mode int, params encryptParams, keep bool) ([]string, error) { 134 | encrypted := make([]string, 0) 135 | 136 | i, err := os.Stat(path) 137 | if err != nil { 138 | return encrypted, err 139 | } 140 | 141 | if i.IsDir() { 142 | l.Verboseln("dump is a directory, encrypting all files inside") 143 | err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error { 144 | if err != nil { 145 | return err 146 | } 147 | if info.Mode().IsRegular() { 148 | l.Verboseln("encrypting:", path) 149 | 150 | src, err := os.Open(path) 151 | if err != nil { 152 | l.Errorln(err) 153 | return err 154 | } 155 | defer src.Close() 156 | 157 | 
dstFile := fmt.Sprintf("%s.age", path) 158 | dst, err := os.Create(dstFile) 159 | if err != nil { 160 | l.Errorln(err) 161 | return err 162 | } 163 | defer dst.Close() 164 | 165 | if err := ageEncrypt(src, dst, params); err != nil { 166 | dst.Close() 167 | os.Remove(dstFile) 168 | return fmt.Errorf("could not encrypt %s: %s", path, err) 169 | } 170 | 171 | encrypted = append(encrypted, dstFile) 172 | if mode > 0 { 173 | if err := os.Chmod(dstFile, os.FileMode(mode)); err != nil { 174 | return fmt.Errorf("could not chmod to more secure permission for encrypted file: %w", err) 175 | } 176 | } 177 | 178 | if !keep { 179 | l.Verboseln("removing source file:", path) 180 | src.Close() 181 | if err := os.Remove(path); err != nil { 182 | return fmt.Errorf("could not remove %s: %w", path, err) 183 | } 184 | } 185 | } 186 | return nil 187 | }) 188 | 189 | if err != nil { 190 | return encrypted, fmt.Errorf("error walking the path %q: %v", path, err) 191 | } 192 | } else { 193 | l.Verboseln("encrypting:", path) 194 | src, err := os.Open(path) 195 | if err != nil { 196 | l.Errorln(err) 197 | return encrypted, err 198 | } 199 | 200 | defer src.Close() 201 | 202 | dstFile := fmt.Sprintf("%s.age", path) 203 | dst, err := os.Create(dstFile) 204 | if err != nil { 205 | l.Errorln(err) 206 | return encrypted, err 207 | } 208 | 209 | defer dst.Close() 210 | 211 | if err := ageEncrypt(src, dst, params); err != nil { 212 | dst.Close() 213 | os.Remove(dstFile) 214 | return encrypted, fmt.Errorf("could not encrypt %s: %s", path, err) 215 | } 216 | 217 | encrypted = append(encrypted, dstFile) 218 | if mode > 0 { 219 | if err := os.Chmod(dstFile, os.FileMode(mode)); err != nil { 220 | return encrypted, fmt.Errorf("could not chmod to more secure permission for encrypted file: %w", err) 221 | } 222 | } 223 | if !keep { 224 | l.Verboseln("removing source file:", path) 225 | src.Close() 226 | if err := os.Remove(path); err != nil { 227 | return encrypted, fmt.Errorf("could not remove %s: %w", 
path, err) 228 | } 229 | } 230 | } 231 | 232 | return encrypted, nil 233 | } 234 | 235 | func decryptFile(path string, params decryptParams) error { 236 | l.Infoln("decrypting", path) 237 | 238 | src, err := os.Open(path) 239 | if err != nil { 240 | return err 241 | } 242 | 243 | defer src.Close() 244 | 245 | dstFile := strings.TrimSuffix(path, ".age") 246 | dst, err := os.Create(dstFile) 247 | if err != nil { 248 | return err 249 | } 250 | 251 | defer dst.Close() 252 | 253 | if err := ageDecrypt(src, dst, params); err != nil { 254 | dst.Close() 255 | os.Remove(dstFile) 256 | return fmt.Errorf("could not decrypt %s: %s", path, err) 257 | } 258 | 259 | return nil 260 | } 261 | -------------------------------------------------------------------------------- /crypto_test.go: -------------------------------------------------------------------------------- 1 | // pg_back 2 | // 3 | // Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved. 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions 7 | // are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // 2. Redistributions in binary form must reproduce the above copyright 12 | // notice, this list of conditions and the following disclaimer in the 13 | // documentation and/or other materials provided with the distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 16 | // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 | // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | 26 | package main 27 | 28 | import ( 29 | "bytes" 30 | b64 "encoding/base64" 31 | "strings" 32 | "testing" 33 | 34 | "filippo.io/age" 35 | ) 36 | 37 | const TEST_PRIVATE_KEY = "AGE-SECRET-KEY-1XLVVN6PHUZNFFFRA0AGLEJ22GWDGN6WG8KFDV56FH5DC9P2Y8F2SPH8W44" 38 | const TEST_PUBLIC_KEY = "age1702xupy5u4d6a5z2dwcn9e2th4mqpth5kvl3nmhq063gf70d9awsl37jn6" 39 | const TEST_ENCRYPTED_FILE_BASE64 = "YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBzanBtS3hmUGQwekNER0hwWjNod1Z2Y1FCVEZGMkExdmRlcS9vUy8vTEZBClgvOXB3QjRLN2E3aERGUmFMSXdiM3h4R0JFTytwb0hsSEpJV0NTVk1mME0KLS0tIHpqSnRhc0F6NEZ6b0R6aEl5U0V3cnNmL2pKRWVwNHd3dU9wdExjeWx0Tk0KL9m6JZXXAeEZBA7w7nuyrl4ztjY2+Ypu1GNrL6bjv7aw+ACqVGZZwLDI6Q==" 40 | const TEST_PLAINTEXT_FILE = "test string" 41 | 42 | func TestAgeEncrypt_NilParams_Failure(t *testing.T) { 43 | content := "to be encrypted" 44 | reader := strings.NewReader(content) 45 | writer := &bytes.Buffer{} 46 | params := encryptParams{} 47 | 48 | err := ageEncrypt(reader, writer, params) 49 | if err == nil { 50 | t.Errorf("Expected empty encryption params to fail") 51 | } 52 | } 53 | 54 | func TestAgeDecrypt_NilParams_Failure(t *testing.T) { 55 | content := "to be encrypted" 56 | reader := strings.NewReader(content) 57 | writer := &bytes.Buffer{} 58 | params := decryptParams{} 59 | 60 | err := ageDecrypt(reader, writer, params) 61 | if err == nil { 62 | t.Errorf("Expected empty encryption params to fail") 63 | } 64 | } 65 | 66 | func 
TestAgeDecrypt_InvalidPrivateKey_Failure(t *testing.T) { 67 | encrypted, err := b64.StdEncoding.DecodeString(TEST_ENCRYPTED_FILE_BASE64) 68 | if err != nil { 69 | t.Fatalf("could not decode golden string") 70 | } 71 | reader := bytes.NewReader(encrypted) 72 | writer := &bytes.Buffer{} 73 | params := decryptParams{PrivateKey: TEST_PUBLIC_KEY} 74 | 75 | err = ageDecrypt(reader, writer, params) 76 | if err == nil { 77 | t.Errorf("Expected invalid private key to fail") 78 | } 79 | } 80 | 81 | func TestAgeDecrypt_InvalidPublicKey_Failure(t *testing.T) { 82 | content := "to be encrypted" 83 | reader := strings.NewReader(content) 84 | writer := &bytes.Buffer{} 85 | params := decryptParams{PrivateKey: TEST_PRIVATE_KEY} 86 | 87 | err := ageDecrypt(reader, writer, params) 88 | if err == nil { 89 | t.Errorf("Expected invalid public key to fail") 90 | } 91 | } 92 | 93 | func TestAgeEncryptPassphrase_EmptyPassphrase_Failure(t *testing.T) { 94 | content := "to be encrypted" 95 | reader := strings.NewReader(content) 96 | writer := &bytes.Buffer{} 97 | 98 | err := ageEncryptPassphrase(reader, writer, "") 99 | if err == nil { 100 | t.Errorf("Expected empty passphrase to fail") 101 | } 102 | } 103 | 104 | func TestAgeDecryptPassphrase_EmptyPassphrase_Failure(t *testing.T) { 105 | encrypted, err := b64.StdEncoding.DecodeString(TEST_ENCRYPTED_FILE_BASE64) 106 | if err != nil { 107 | t.Fatalf("could not decode golden string") 108 | } 109 | reader := bytes.NewReader(encrypted) 110 | writer := &bytes.Buffer{} 111 | 112 | err = ageDecryptPassphrase(reader, writer, "") 113 | if err == nil { 114 | t.Errorf("Expected empty passphrase to fail") 115 | } 116 | } 117 | 118 | func TestAgeDecrypt_Golden_Success(t *testing.T) { 119 | encrypted, err := b64.StdEncoding.DecodeString(TEST_ENCRYPTED_FILE_BASE64) 120 | if err != nil { 121 | t.Fatalf("could not decode golden string") 122 | } 123 | reader := bytes.NewReader(encrypted) 124 | writer := &bytes.Buffer{} 125 | params := decryptParams{ 126 | 
PrivateKey: TEST_PRIVATE_KEY, 127 | } 128 | 129 | err = ageDecrypt(reader, writer, params) 130 | if err != nil { 131 | t.Fatalf("could not decrypt golden message: %v", err) 132 | } 133 | 134 | if writer.String() != TEST_PLAINTEXT_FILE { 135 | t.Errorf("got %v want %v", writer.String(), TEST_PLAINTEXT_FILE) 136 | } 137 | } 138 | 139 | func TestAgeEncrypt_PublicKey_Loopback_Success(t *testing.T) { 140 | identity, err := age.GenerateX25519Identity() 141 | if err != nil { 142 | t.Fatalf("Failed to generate key pair: %v", err) 143 | } 144 | 145 | content := "to be encrypted" 146 | reader := strings.NewReader(content) 147 | writer := &bytes.Buffer{} 148 | params := encryptParams{PublicKey: identity.Recipient().String()} 149 | 150 | err = ageEncrypt(reader, writer, params) 151 | if err != nil { 152 | t.Errorf("Unexpected error when encrypting") 153 | } 154 | 155 | ciphertext := writer.String() 156 | if ciphertext == "" { 157 | t.Errorf("encrypted output is empty") 158 | } 159 | 160 | reader = strings.NewReader(ciphertext) 161 | writer = &bytes.Buffer{} 162 | decryptParams := decryptParams{PrivateKey: identity.String()} 163 | err = ageDecrypt(reader, writer, decryptParams) 164 | if err != nil { 165 | t.Errorf("Unexpected error when decrypting") 166 | } 167 | 168 | if writer.String() != content { 169 | t.Errorf("Did not decrypt to same plaintext") 170 | } 171 | } 172 | 173 | func TestAgeEncrypt_Passphrase_Loopback_Success(t *testing.T) { 174 | content := "to be encrypted" 175 | reader := strings.NewReader(content) 176 | writer := &bytes.Buffer{} 177 | params := encryptParams{Passphrase: "supersecret"} 178 | 179 | err := ageEncrypt(reader, writer, params) 180 | if err != nil { 181 | t.Errorf("Unexpected error when encrypting") 182 | } 183 | 184 | ciphertext := writer.String() 185 | if ciphertext == "" { 186 | t.Errorf("encrypted output is empty") 187 | } 188 | 189 | reader = strings.NewReader(ciphertext) 190 | writer = &bytes.Buffer{} 191 | decryptParams := 
decryptParams{Passphrase: "supersecret"} 192 | err = ageDecrypt(reader, writer, decryptParams) 193 | if err != nil { 194 | t.Errorf("Unexpected error when decrypting") 195 | } 196 | 197 | if writer.String() != content { 198 | t.Errorf("Did not decrypt to same plaintext") 199 | } 200 | } 201 | 202 | func TestAgeEncrypt_WrongPrivateKey_Loopback_Failure(t *testing.T) { 203 | identity, err := age.GenerateX25519Identity() 204 | if err != nil { 205 | t.Fatalf("Failed to generate key pair: %v", err) 206 | } 207 | 208 | content := "to be encrypted" 209 | reader := strings.NewReader(content) 210 | writer := &bytes.Buffer{} 211 | params := encryptParams{PublicKey: identity.Recipient().String()} 212 | 213 | err = ageEncrypt(reader, writer, params) 214 | if err != nil { 215 | t.Errorf("Unexpected error when encrypting") 216 | } 217 | 218 | ciphertext := writer.String() 219 | if ciphertext == "" { 220 | t.Errorf("encrypted output is empty") 221 | } 222 | 223 | wrongIdentity, err := age.GenerateX25519Identity() 224 | if err != nil { 225 | t.Fatalf("Failed to generate key pair: %v", err) 226 | } 227 | 228 | reader = strings.NewReader(ciphertext) 229 | writer = &bytes.Buffer{} 230 | decryptParams := decryptParams{PrivateKey: wrongIdentity.String()} 231 | err = ageDecrypt(reader, writer, decryptParams) 232 | if err == nil { 233 | t.Errorf("Decryption should have failed") 234 | } 235 | } 236 | 237 | func TestAgeEncrypt_WrongPassphrase_Loopback_Failure(t *testing.T) { 238 | content := "to be encrypted" 239 | reader := strings.NewReader(content) 240 | writer := &bytes.Buffer{} 241 | params := encryptParams{Passphrase: "supersecret"} 242 | 243 | err := ageEncrypt(reader, writer, params) 244 | if err != nil { 245 | t.Errorf("Unexpected error when encrypting") 246 | } 247 | 248 | ciphertext := writer.String() 249 | if ciphertext == "" { 250 | t.Errorf("encrypted output is empty") 251 | } 252 | 253 | reader = strings.NewReader(ciphertext) 254 | writer = &bytes.Buffer{} 255 | decryptParams 
:= decryptParams{Passphrase: "wrong"} 256 | err = ageDecrypt(reader, writer, decryptParams) 257 | if err == nil { 258 | t.Fatalf("Decryption should have failed") 259 | } 260 | } 261 | -------------------------------------------------------------------------------- /docker/compose.test.yml: -------------------------------------------------------------------------------- 1 | services: 2 | 3 | postgres: 4 | image: postgres:${PG_VERSION} 5 | environment: 6 | - PG_VERSION=latest 7 | - POSTGRES_PASSWORD=secret 8 | healthcheck: 9 | test: ["CMD-SHELL", "pg_isready"] 10 | interval: 10s 11 | timeout: 5s 12 | retries: 5 13 | 14 | pg_back: 15 | build: .. 16 | environment: 17 | - PGPASSWORD=secret 18 | command: -h postgres -p 5432 -U postgres 19 | depends_on: 20 | postgres: 21 | condition: service_healthy 22 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/orgrim/pg_back 2 | 3 | go 1.24.0 4 | 5 | toolchain go1.24.1 6 | 7 | require ( 8 | cloud.google.com/go/storage v1.51.0 9 | filippo.io/age v1.2.1 10 | github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 11 | github.com/Backblaze/blazer v0.7.2 12 | github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be 13 | github.com/aws/aws-sdk-go v1.55.6 14 | github.com/google/go-cmp v0.7.0 15 | github.com/jackc/pgtype v1.14.4 16 | github.com/jackc/pgx/v4 v4.18.3 17 | github.com/pkg/sftp v1.13.9 18 | github.com/spf13/pflag v1.0.6 19 | golang.org/x/crypto v0.37.0 20 | google.golang.org/api v0.229.0 21 | gopkg.in/ini.v1 v1.67.0 22 | ) 23 | 24 | require ( 25 | cel.dev/expr v0.23.1 // indirect 26 | cloud.google.com/go v0.120.1 // indirect 27 | cloud.google.com/go/auth v0.16.0 // indirect 28 | cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect 29 | cloud.google.com/go/compute/metadata v0.6.0 // indirect 30 | cloud.google.com/go/iam v1.5.2 // indirect 31 | 
cloud.google.com/go/monitoring v1.24.2 // indirect 32 | github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect 33 | github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect 34 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect 35 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect 36 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect 37 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 38 | github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect 39 | github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect 40 | github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect 41 | github.com/felixge/httpsnoop v1.0.4 // indirect 42 | github.com/go-logr/logr v1.4.2 // indirect 43 | github.com/go-logr/stdr v1.2.2 // indirect 44 | github.com/google/s2a-go v0.1.9 // indirect 45 | github.com/google/uuid v1.6.0 // indirect 46 | github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect 47 | github.com/googleapis/gax-go/v2 v2.14.1 // indirect 48 | github.com/jackc/chunkreader/v2 v2.0.1 // indirect 49 | github.com/jackc/pgconn v1.14.3 // indirect 50 | github.com/jackc/pgio v1.0.0 // indirect 51 | github.com/jackc/pgpassfile v1.0.0 // indirect 52 | github.com/jackc/pgproto3/v2 v2.3.3 // indirect 53 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect 54 | github.com/jmespath/go-jmespath v0.4.0 // indirect 55 | github.com/kr/fs v0.1.0 // indirect 56 | github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect 57 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 58 | go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect 59 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect 60 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect 61 | 
go.opentelemetry.io/otel v1.35.0 // indirect 62 | go.opentelemetry.io/otel/metric v1.35.0 // indirect 63 | go.opentelemetry.io/otel/sdk v1.35.0 // indirect 64 | go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect 65 | go.opentelemetry.io/otel/trace v1.35.0 // indirect 66 | golang.org/x/net v0.39.0 // indirect 67 | golang.org/x/oauth2 v0.29.0 // indirect 68 | golang.org/x/sync v0.13.0 // indirect 69 | golang.org/x/sys v0.32.0 // indirect 70 | golang.org/x/text v0.24.0 // indirect 71 | golang.org/x/time v0.11.0 // indirect 72 | google.golang.org/genproto v0.0.0-20250414145226-207652e42e2e // indirect 73 | google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e // indirect 74 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e // indirect 75 | google.golang.org/grpc v1.71.1 // indirect 76 | google.golang.org/protobuf v1.36.6 // indirect 77 | ) 78 | -------------------------------------------------------------------------------- /hash.go: -------------------------------------------------------------------------------- 1 | // pg_back 2 | // 3 | // Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved. 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions 7 | // are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // 2. Redistributions in binary form must reproduce the above copyright 12 | // notice, this list of conditions and the following disclaimer in the 13 | // documentation and/or other materials provided with the distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 16 | // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 | // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | 26 | package main 27 | 28 | import ( 29 | "crypto/sha1" 30 | "crypto/sha256" 31 | "crypto/sha512" 32 | "fmt" 33 | "hash" 34 | "io" 35 | "os" 36 | "path/filepath" 37 | ) 38 | 39 | func computeChecksum(path string, h hash.Hash) (string, error) { 40 | h.Reset() 41 | 42 | f, err := os.Open(path) 43 | if err != nil { 44 | return "", err 45 | } 46 | defer f.Close() 47 | 48 | if _, err := io.Copy(h, f); err != nil { 49 | return "", err 50 | } 51 | return string(h.Sum(nil)), nil 52 | } 53 | 54 | func checksumFile(path string, mode int, algo string) (string, error) { 55 | var h hash.Hash 56 | 57 | switch algo { 58 | case "none": 59 | return "", nil 60 | case "sha1": 61 | h = sha1.New() 62 | case "sha224": 63 | h = sha256.New224() 64 | case "sha256": 65 | h = sha256.New() 66 | case "sha384": 67 | h = sha512.New384() 68 | case "sha512": 69 | h = sha512.New() 70 | default: 71 | return "", fmt.Errorf("unsupported hash algorithm: %s", algo) 72 | } 73 | 74 | i, err := os.Stat(path) 75 | if err != nil { 76 | return "", err 77 | } 78 | 79 | sumFile := fmt.Sprintf("%s.%s", path, algo) 80 | l.Verbosef("create checksum file: %s", sumFile) 81 | o, err := os.Create(sumFile) 82 | if err != nil { 83 | l.Errorln(err) 84 | return "", err 85 | } 86 | defer o.Close() 87 | 88 | if i.IsDir() { 89 | l.Verboseln("dump is a directory, checksumming all file inside") 90 | err = filepath.Walk(path, func(path string, info os.FileInfo, err error) 
error { 91 | if err != nil { 92 | return err 93 | } 94 | if info.Mode().IsRegular() { 95 | l.Verboseln("computing checksum of:", path) 96 | r, cerr := computeChecksum(path, h) 97 | if cerr != nil { 98 | return fmt.Errorf("could not checksum %s: %s", path, cerr) 99 | } 100 | fmt.Fprintf(o, "%x *%s\n", r, path) 101 | } 102 | return nil 103 | }) 104 | 105 | if err != nil { 106 | return "", fmt.Errorf("error walking the path %q: %v\n", path, err) 107 | } 108 | } else { 109 | 110 | // Open the file and use io.Copy to feed the data to the hash, 111 | // like in the example of the doc, then write the result to a 112 | // file that the standard shaXXXsum tools can understand 113 | l.Verboseln("computing checksum of:", path) 114 | r, _ := computeChecksum(path, h) 115 | fmt.Fprintf(o, "%x %s\n", r, path) 116 | } 117 | l.Verboseln("computing checksum with MODE", mode, path) 118 | if mode > 0 { 119 | if err := os.Chmod(o.Name(), os.FileMode(mode)); err != nil { 120 | return "", fmt.Errorf("could not chmod checksum file %s: %s", path, err) 121 | } 122 | } 123 | return sumFile, nil 124 | } 125 | 126 | func checksumFileList(paths []string, mode int, algo string, sumFilePrefix string) (string, error) { 127 | var h hash.Hash 128 | 129 | switch algo { 130 | case "none": 131 | return "", nil 132 | case "sha1": 133 | h = sha1.New() 134 | case "sha224": 135 | h = sha256.New224() 136 | case "sha256": 137 | h = sha256.New() 138 | case "sha384": 139 | h = sha512.New384() 140 | case "sha512": 141 | h = sha512.New() 142 | default: 143 | return "", fmt.Errorf("unsupported hash algorithm: %s", algo) 144 | } 145 | 146 | sumPath := fmt.Sprintf("%s.%s", sumFilePrefix, algo) 147 | l.Verbosef("create or use checksum file: %s", sumPath) 148 | o, err := os.OpenFile(sumPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) 149 | if err != nil { 150 | return "", fmt.Errorf("could not open %s: %w", sumPath, err) 151 | } 152 | 153 | defer o.Close() 154 | 155 | failed := false 156 | for _, path := range paths 
{ 157 | l.Verboseln("computing checksum of:", path) 158 | r, err := computeChecksum(path, h) 159 | if err != nil { 160 | l.Errorf("could not checksum %s: %s", path, err) 161 | failed = true 162 | continue 163 | } 164 | 165 | fmt.Fprintf(o, "%x *%s\n", r, path) 166 | 167 | if mode > 0 { 168 | if err := os.Chmod(o.Name(), os.FileMode(mode)); err != nil { 169 | return "", fmt.Errorf("could not chmod checksum file %s: %s", path, err) 170 | } 171 | } 172 | } 173 | 174 | if failed { 175 | return "", fmt.Errorf("computing of checksum failed. Please examine output") 176 | } 177 | 178 | return sumPath, nil 179 | } 180 | -------------------------------------------------------------------------------- /hash_test.go: -------------------------------------------------------------------------------- 1 | // pg_back 2 | // 3 | // Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved. 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions 7 | // are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // 2. Redistributions in binary form must reproduce the above copyright 12 | // notice, this list of conditions and the following disclaimer in the 13 | // documentation and/or other materials provided with the distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 16 | // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 | // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
// IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package main

import (
	"errors"
	"fmt"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"testing"
)

// TestChecksumFile exercises checksumFile for every supported algorithm, on
// both a single file and a directory, and cross-checks the produced checksum
// files with the matching coreutils shaXXXsum tool when it is available in
// the PATH. It also covers the "none"/unknown algorithm cases and error paths
// on unreadable or unwritable files. The test chdirs into a temporary
// directory so that the paths written in the checksum files stay relative.
func TestChecksumFile(t *testing.T) {
	// Pairs of algorithm name accepted by checksumFile and the external
	// tool used to verify its output.
	var tests = []struct {
		algo string
		tool string
	}{
		{"sha1", "sha1sum"},
		{"sha224", "sha224sum"},
		{"sha256", "sha256sum"},
		{"sha384", "sha384sum"},
		{"sha512", "sha512sum"},
	}

	// create a temporary directory to store a test file to
	// checksum with the different algorithm relatively
	dir, err := os.MkdirTemp("", "test_checksum_file")
	if err != nil {
		t.Fatal("could not create tempdir:", err)
	}
	defer os.RemoveAll(dir)

	var cwd string
	cwd, err = os.Getwd()
	if err != nil {
		t.Fatal("could not get current dir:", err)
	}

	err = os.Chdir(dir)
	if err != nil {
		t.Fatal("could not change to tempdir:", err)
	}
	defer os.Chdir(cwd)

	// create a test file
	if f, err := os.Create("test"); err != nil {
		t.Fatal("could not create test file")
	} else {
		fmt.Fprintf(f, "abdc\n")
		f.Close()
	}

	// bad algo: "none" disables checksumming and must not error, while an
	// unknown algorithm must be rejected.
	if _, err := checksumFile("", 0o700, "none"); err != nil {
		t.Errorf("expected , got %q\n", err)
	}

	if _, err := checksumFile("", 0o700, "other"); err == nil {
		t.Errorf("expected err, got \n")
	}

	// test each algo with the file, verifying the output with the
	// corresponding external tool when present.
	for i, st := range tests {
		t.Run(fmt.Sprintf("f%v", i), func(t *testing.T) {
			if _, err := checksumFile("test", 0o700, st.algo); err != nil {
				t.Errorf("checksumFile returned: %v", err)
			}

			_, err := exec.LookPath(st.tool)
			if err != nil {
				t.Skip("check command not in the PATH:", st.tool)
			}

			c := exec.Command(st.tool, "-c", "test."+st.algo)
			out, err := c.CombinedOutput()
			if err != nil {
				t.Errorf("check command failed: %s\n", out)
			}
			if string(out) != "test: OK\n" {
				t.Errorf("expected OK, got %q\n", out)
			}
		})
	}

	// bad files: errors from checksumFile are logged, so silence the
	// global logger while triggering the expected failures.
	var e *os.PathError
	l.logger.SetOutput(io.Discard)
	// An empty path cannot be stat'ed.
	if _, err := checksumFile("", 0o700, "sha1"); !errors.As(err, &e) {
		t.Errorf("expected an *os.PathError, got %q\n", err)
	}

	// Making the output file read-only forces os.Create to fail.
	os.Chmod("test.sha1", 0444)
	if _, err := checksumFile("test", 0o700, "sha1"); !errors.As(err, &e) {
		t.Errorf("expected an *os.PathError, got %q\n", err)
	}
	os.Chmod("test.sha1", 0644)
	l.logger.SetOutput(os.Stderr)

	// create a directory and some files
	if err := os.Mkdir("test.d", 0755); err != nil {
		t.Fatal("could not create test dir")
	}
	for i := range 3 {
		f, err := os.Create(filepath.Join("test.d", fmt.Sprintf("test%d", i)))
		if err != nil {
			t.Fatal("could not create test file")
		}
		fmt.Fprintf(f, "abdc%d", i)
		f.Close()
	}

	// test each algo with the directory: all three files must be listed
	// and verify cleanly.
	for i, st := range tests {
		t.Run(fmt.Sprintf("d%v", i), func(t *testing.T) {
			if _, err := checksumFile("test.d", 0o700, st.algo); err != nil {
				t.Errorf("checksumFile returned: %v", err)
			}

			_, err := exec.LookPath(st.tool)
			if err != nil {
				t.Skip("check command not in the PATH:", st.tool)
			}

			c := exec.Command(st.tool, "-c", fmt.Sprintf("test.d.%s", st.algo))
			out, err := c.CombinedOutput()
			if err != nil {
				t.Errorf("check command failed: %s\n", out)
			}

			// The path separator in the tool output differs per OS.
			res := string(out)
			if runtime.GOOS == "windows" {
				if res != "test.d\\test0: OK\ntest.d\\test1: OK\ntest.d\\test2: OK\n" {
					t.Errorf("expected OK, got %q\n", out)
				}
			} else {
				if res != "test.d/test0: OK\ntest.d/test1: OK\ntest.d/test2: OK\n" {
					t.Errorf("expected OK, got %q\n", out)
				}
			}
		})
	}
}
-------------------------------------------------------------------------------- /hook.go: --------------------------------------------------------------------------------
// pg_back
//
// Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | 26 | package main 27 | 28 | import ( 29 | "fmt" 30 | "os" 31 | "os/exec" 32 | "strings" 33 | 34 | "github.com/anmitsu/go-shlex" 35 | ) 36 | 37 | func hookCommand(cmd string, logPrefix string) error { 38 | if cmd == "" { 39 | return fmt.Errorf("unable to run an empty command") 40 | } 41 | 42 | l.Verboseln("parsing hook command") 43 | words, err := shlex.Split(cmd, true) 44 | if err != nil { 45 | return fmt.Errorf("unable to parse hook command: %s", err) 46 | } 47 | 48 | prog := words[0] 49 | args := words[1:] 50 | 51 | l.Verboseln("running:", prog, args) 52 | c := exec.Command(prog, args...) 
53 | stdoutStderr, err := c.CombinedOutput() 54 | if err != nil { 55 | for line := range strings.SplitSeq(string(stdoutStderr), "\n") { 56 | if line != "" { 57 | l.Errorln(logPrefix, line) 58 | } 59 | } 60 | return err 61 | } 62 | if len(stdoutStderr) > 0 { 63 | for line := range strings.SplitSeq(string(stdoutStderr), "\n") { 64 | if line != "" { 65 | l.Infoln(logPrefix, line) 66 | } 67 | } 68 | } 69 | return nil 70 | } 71 | 72 | func preBackupHook(cmd string) error { 73 | if cmd != "" { 74 | l.Infoln("running pre-backup command:", cmd) 75 | if err := hookCommand(cmd, "pre-backup:"); err != nil { 76 | l.Fatalln("hook command failed:", err) 77 | return err 78 | } 79 | } 80 | return nil 81 | } 82 | 83 | func postBackupHook(cmd string) { 84 | if cmd != "" { 85 | l.Infoln("running post-backup command:", cmd) 86 | if err := hookCommand(cmd, "post-backup:"); err != nil { 87 | l.Fatalln("hook command failed:", err) 88 | os.Exit(1) 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /hook_test.go: -------------------------------------------------------------------------------- 1 | // pg_back 2 | // 3 | // Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved. 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions 7 | // are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // 2. Redistributions in binary form must reproduce the above copyright 12 | // notice, this list of conditions and the following disclaimer in the 13 | // documentation and/or other materials provided with the distribution. 
// TestHookCommand checks that hookCommand logs command output at the
// expected level and reports parse and execution errors. Each case
// compares the captured log output against a regular expression.
func TestHookCommand(t *testing.T) {
	var tests = []struct {
		cmd string
		re  string
	}{
		{"echo 'a'", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} INFO: test: a\n$`},
		{"echo a'", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} ERROR: unable to parse hook command: No closing quotation\n$`},
		{"echo 'a\r\nb'", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} INFO: test: a\n\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} INFO: test: b\n$`},
		{"", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} ERROR: unable to run an empty command\n$`},
		{"/nothingBLA a", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} ERROR: .*/nothingBLA.*\n$`},
		{"sh -c 'echo test; exit 1'", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} ERROR: test: test\n\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} ERROR: exit status 1\n$`},
	}

	for i, subt := range tests {
		t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
			// Redirect the shared logger into a buffer so the
			// log lines can be matched against the pattern
			buf := new(bytes.Buffer)
			l.logger.SetOutput(buf)

			if err := hookCommand(subt.cmd, "test:"); err != nil {
				l.Errorln(err)
			}

			// Drop carriage returns so the same patterns
			// match output produced on windows
			lines := strings.ReplaceAll(buf.String(), "\r", "")
			matched, err := regexp.MatchString(subt.re, lines)
			if err != nil {
				t.Fatal("pattern did not compile:", err)
			}
			if !matched {
				t.Errorf("expected a match of %q, got %q\n", subt.re, lines)
			}
			// Restore the logger output for the other tests
			l.logger.SetOutput(os.Stderr)
		})
	}
}

// TestPreBackupHook checks that preBackupHook runs the command, logs
// it, and returns an error only when the command fails. An empty
// command must produce no output at all.
func TestPreBackupHook(t *testing.T) {
	var tests = []struct {
		cmd   string
		re    string
		fails bool
	}{
		{"echo 'a'", `\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} INFO: running pre-backup command: echo 'a'\n\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} INFO: pre-backup: a\n$`, false},
		{"", "", false},
		{"/nothingBLA a", `\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} INFO: running pre-backup command: /nothingBLA a\n\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} FATAL: .*/nothingBLA.*\n$`, true},
	}
	for i, subt := range tests {
		t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
			// Capture the log output of the hook
			buf := new(bytes.Buffer)
			l.logger.SetOutput(buf)

			if err := preBackupHook(subt.cmd); err != nil {
				if !subt.fails {
					t.Errorf("function test must not fail, got error: %q\n", err)
				}
			} else {
				if subt.fails {
					t.Errorf("function test must fail, it did not\n")
				}
			}

			// Drop carriage returns so the same patterns
			// match output produced on windows
			lines := strings.ReplaceAll(buf.String(), "\r", "")
			matched, err := regexp.MatchString(subt.re, lines)
			if err != nil {
				t.Fatal("pattern did not compile:", err)
			}
			if !matched {
				t.Errorf("expected a match of %q, got %q\n", subt.re, lines)
			}
			l.logger.SetOutput(os.Stderr)
		})
	}
}

// TestPostBackupHook checks that postBackupHook exits the process
// when the command fails and stays silent on an empty command.
func TestPostBackupHook(t *testing.T) {
	t.Run("0", func(t *testing.T) {
		// postBackupHook calls os.Exit(1) on failure, so the
		// test re-executes itself as a subprocess (guarded by
		// _TEST_HOOK) and checks the exit status from the
		// parent
		if os.Getenv("_TEST_HOOK") == "1" {
			postBackupHook("false")
			return
		}
		cmd := exec.Command(os.Args[0], "-test.run=TestPostBackupHook")
		cmd.Env = append(os.Environ(), "_TEST_HOOK=1")
		err := cmd.Run()
		if e, ok := err.(*exec.ExitError); ok && !e.Success() {
			return
		}
		t.Fatalf("process ran with err %v, want exit status 1", err)
	})

	t.Run("1", func(t *testing.T) {
		// An empty command must not log anything
		buf := new(bytes.Buffer)
		l.logger.SetOutput(buf)
		postBackupHook("")
		lines := buf.String()
		if len(lines) != 0 {
			t.Errorf("did not expect any output, got %q\n", lines)
		}
	})
}
18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | 26 | package main 27 | 28 | import ( 29 | "fmt" 30 | "io" 31 | "os" 32 | "strings" 33 | 34 | "github.com/anmitsu/go-shlex" 35 | ) 36 | 37 | // Read the input file and return all lines that look like legacy configuration 38 | // options 39 | func readLegacyConf(f io.Reader) ([]string, error) { 40 | var lines []string 41 | 42 | data, err := io.ReadAll(f) 43 | if err != nil { 44 | return lines, fmt.Errorf("could not read file: %w", err) 45 | } 46 | 47 | lines = make([]string, 0) 48 | buf := make([]byte, 0) 49 | for _, b := range data { 50 | if b == '\n' { 51 | if len(buf) > 0 { 52 | line := strings.Trim(string(buf), " \t\r\v") 53 | if strings.HasPrefix(line, "PGBK_") || strings.HasPrefix(line, "SIGNATURE_ALGO=") { 54 | lines = append(lines, line) 55 | } 56 | } 57 | buf = make([]byte, 0) 58 | continue 59 | } 60 | buf = append(buf, b) 61 | } 62 | 63 | return lines, nil 64 | } 65 | 66 | // Remove the end comment of a line while taking single and double quoted 67 | // strings into account 68 | func stripEndComment(in string) string { 69 | buf := make([]byte, 0, len(in)) 70 | 71 | s := struct { 72 | inSQuote bool 73 | inDQuote bool 74 | inEscape bool 75 | }{} 76 | 77 | out: 78 | for _, b := range []byte(in) { 79 | switch b { 80 | case '"': 81 | if !s.inSQuote { 82 | if s.inDQuote { 83 | if s.inEscape { 84 | s.inEscape = false 85 | } else { 86 | s.inDQuote = false 87 | } 88 | } else { 89 | s.inDQuote = true 90 | } 91 
| } else { 92 | if s.inEscape { 93 | s.inEscape = false 94 | } 95 | } 96 | 97 | case '\'': 98 | if !s.inDQuote { 99 | if s.inSQuote { 100 | if s.inEscape { 101 | s.inEscape = false 102 | } else { 103 | s.inSQuote = false 104 | } 105 | } else { 106 | s.inSQuote = true 107 | } 108 | } else { 109 | if s.inEscape { 110 | s.inEscape = false 111 | } 112 | } 113 | 114 | case '\\': 115 | if s.inEscape { 116 | s.inEscape = false 117 | } else { 118 | s.inEscape = true 119 | } 120 | 121 | case '#': 122 | if !s.inDQuote && !s.inSQuote { 123 | break out 124 | } 125 | } 126 | 127 | buf = append(buf, b) 128 | } 129 | 130 | return strings.Trim(string(buf), " \t\v") 131 | } 132 | 133 | func convertLegacyConf(oldConf []string) string { 134 | var result string 135 | 136 | table := map[string]string{ 137 | "PGBK_BIN": "bin_directory", 138 | "PGBK_BACKUP_DIR": "backup_directory", 139 | "PGBK_PURGE": "purge_older_than", 140 | "PGBK_PURGE_MIN_KEEP": "purge_min_keep", 141 | "PGBK_DBLIST": "include_dbs", 142 | "PGBK_EXCLUDE": "exclude_dbs", 143 | "PGBK_STANDBY_PAUSE_TIMEOUT": "pause_timeout", 144 | "PGBK_HOSTNAME": "host", 145 | "PGBK_PORT": "port", 146 | "PGBK_USERNAME": "user", 147 | "PGBK_CONNDB": "dbname", 148 | "PGBK_PRE_BACKUP_COMMAND": "pre_backup_hook", 149 | "PGBK_POST_BACKUP_COMMAND": "post_backup_hook", 150 | "SIGNATURE_ALGO": "checksum_algorithm", 151 | } 152 | 153 | for _, line := range oldConf { 154 | 155 | tokens := strings.SplitN(line, "=", 2) 156 | value := stripEndComment(tokens[1]) 157 | 158 | switch tokens[0] { 159 | case "PGBK_TIMESTAMP": 160 | // Detect to legacy format, otherwise discard the value 161 | // for the new rfc3339 162 | if strings.Trim(value, "'\"") == "%Y-%m-%d_%H-%M-%S" { 163 | result += fmt.Sprintln("timestamp_format", "=", "legacy") 164 | } else { 165 | result += fmt.Sprintln("timestamp_format", "=", "rfc3339") 166 | } 167 | 168 | case "PGBK_OPTS": 169 | // Parse the elements with shlex to keeps spaces when 170 | // it is a shell array. 
When the value is not a shell 171 | // array we need to trim quotes to ensure shlex splits 172 | // the options. 173 | v := value 174 | if strings.HasPrefix(value, "(") { 175 | v = strings.Trim(value, "()") 176 | } else { 177 | v = strings.Trim(value, "'\"") 178 | } 179 | words, err := shlex.Split(v, true) 180 | if err != nil { 181 | l.Warnf("could not parse value of PGBK_OPTS \"%s\": %s", value, err) 182 | continue 183 | } 184 | 185 | // Extract the format into a distinct option, otherwise 186 | // a format option from pg_dump_options could interfere 187 | // with the computed pg_dump command which use the last 188 | // format option it finds 189 | qWords := make([]string, 0, len(words)) 190 | expectFormat := false 191 | for _, w := range words { 192 | 193 | switch w { 194 | case "-Fp", "-Fplain", "--format=p", "--format=plain": 195 | result += fmt.Sprintln("format", "=", "plain") 196 | continue 197 | case "-Fc", "-Fcustom", "--format=c", "--format=custom": 198 | result += fmt.Sprintln("format", "=", "custom") 199 | continue 200 | case "-Ft", "-Ftar", "--format=t", "--format=tar": 201 | result += fmt.Sprintln("format", "=", "tar") 202 | continue 203 | case "-Fd", "-Fdirectory", "--format=d", "--format=directory": 204 | result += fmt.Sprintln("format", "=", "directory") 205 | continue 206 | case "-F", "--format": 207 | expectFormat = true 208 | continue 209 | } 210 | 211 | if expectFormat { 212 | expectFormat = false 213 | switch []byte(w)[0] { 214 | case 'p': 215 | result += fmt.Sprintln("format", "=", "plain") 216 | case 'c': 217 | result += fmt.Sprintln("format", "=", "custom") 218 | case 't': 219 | result += fmt.Sprintln("format", "=", "tar") 220 | case 'd': 221 | result += fmt.Sprintln("format", "=", "directory") 222 | } 223 | continue 224 | } 225 | 226 | // Quote tokens back so that we do not lose 227 | // spaces used in shell array elements 228 | if strings.Contains(w, " ") { 229 | quote := strings.ReplaceAll(w, "\\", "\\\\") 230 | quote = 
strings.ReplaceAll(quote, "\"", "\\\"") 231 | qWords = append(qWords, fmt.Sprintf("\"%s\"", quote)) 232 | } else { 233 | qWords = append(qWords, w) 234 | } 235 | } 236 | 237 | result += fmt.Sprintln("pg_dump_options", "=", strings.Join(qWords, " ")) 238 | 239 | case "PGBK_DBLIST", "PGBK_EXCLUDE": 240 | // The separator for lists of databases now the comma 241 | dbs := make([]string, 0) 242 | for d := range strings.SplitSeq(strings.Trim(value, "'\""), " ") { 243 | if len(d) > 0 { 244 | dbs = append(dbs, d) 245 | } 246 | } 247 | result += fmt.Sprintln(table[tokens[0]], "=", strings.Join(dbs, ", ")) 248 | 249 | case "PGBK_WITH_TEMPLATES": 250 | // with_templates is now a boolean, the shell script 251 | // used the "yes" value to include templates databases 252 | if strings.Trim(value, "'\"") == "yes" { 253 | result += fmt.Sprintln("with_templates", "=", "true") 254 | } else { 255 | result += fmt.Sprintln("with_templates", "=", "false") 256 | } 257 | 258 | default: 259 | result += fmt.Sprintln(table[tokens[0]], "=", value) 260 | } 261 | } 262 | 263 | return result 264 | } 265 | 266 | func convertLegacyConfFile(path string) error { 267 | f, err := os.Open(path) 268 | if err != nil { 269 | return fmt.Errorf("could not convert configuration: %w", err) 270 | } 271 | defer f.Close() 272 | 273 | contents, err := readLegacyConf(f) 274 | if err != nil { 275 | return fmt.Errorf("could not convert configuration: %w", err) 276 | } 277 | 278 | fmt.Printf("%s", convertLegacyConf(contents)) 279 | 280 | return nil 281 | } 282 | -------------------------------------------------------------------------------- /legacy_test.go: -------------------------------------------------------------------------------- 1 | // pg_back 2 | // 3 | // Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved. 
4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions 7 | // are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // 2. Redistributions in binary form must reproduce the above copyright 12 | // notice, this list of conditions and the following disclaimer in the 13 | // documentation and/or other materials provided with the distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 16 | // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 | // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// TestReadLegacyConf checks that only lines starting with PGBK_ or
// SIGNATURE_ALGO= are kept, with surrounding whitespace trimmed and
// commented-out options discarded.
func TestReadLegacyConf(t *testing.T) {
	var tests = []struct {
		conf string
		want []string
	}{
		{"# comment\n" +
			"var=thing\n",
			[]string{},
		},
		{"# comment\n" +
			"# Example: PGBK_OPTS=(\"-Fc\" \"-T\" \"tmp*\")\n" +
			"PGBK_OPTS=(\"-Fc\")\n",
			[]string{
				"PGBK_OPTS=(\"-Fc\")",
			},
		},
		{"# comment\n" +
			"PGBK_PURGE=30 # 30 days\n" +
			"#PGBK_HOSTNAME=\n" +
			"PGBK_PORT=5433\n",
			[]string{
				"PGBK_PURGE=30 # 30 days",
				"PGBK_PORT=5433",
			},
		},
		{" PGBK_PURGE=30\n" +
			"SIGNATURE_ALGO=\"sha256\"\n",
			[]string{
				"PGBK_PURGE=30",
				"SIGNATURE_ALGO=\"sha256\"",
			},
		},
	}

	for i, st := range tests {
		t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
			f := bytes.NewBufferString(st.conf)

			got, err := readLegacyConf(f)
			if err != nil {
				t.Errorf("got an error: %s", err)
			} else {
				// EquateEmpty makes a nil result equal to
				// the expected empty slice
				if diff := cmp.Diff(st.want, got, cmpopts.EquateEmpty()); diff != "" {
					t.Errorf("got %v, want %v", got, st.want)
				}
			}
		})
	}
}

// TestStripEndComment checks comment removal against quoting and
// escaping corner cases: unquoted '#', '#' inside single and double
// quotes, and escaped quotes.
func TestStripEndComment(t *testing.T) {
	var tests = []struct {
		line string
		want string
	}{
		{"value", "value"},
		{"value#comment", "value"},
		{"value #comment", "value"},
		{"value\\\\ #comment", "value\\\\"},

		{"'value' # comment", "'value'"},
		{"'value \\'quoted\\'' # comment", "'value \\'quoted\\''"},
		{"'value \\'quoted # com'me#nt", "'value \\'quoted # com'me"},
		{"'value \"\\\"\\'quoted #\" com'me#nt", "'value \"\\\"\\'quoted #\" com'me"},

		{"\"value\" # comment", "\"value\""},
		{"\"value \\\"quoted\\\"\" # comment", "\"value \\\"quoted\\\"\""},
		{"\"value \\\"quoted # com\"me#nt", "\"value \\\"quoted # com\"me"},
		{"\"value \\'\\\"quoted #' com\"me#nt", "\"value \\'\\\"quoted #' com\"me"},
	}

	for i, st := range tests {
		t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
			got := stripEndComment(st.line)
			if got != st.want {
				t.Errorf("got %v, want %v", got, st.want)
			}
		})
	}
}

// TestConvertLegacyConf checks the conversion of whole legacy
// configurations, including timestamp detection, format extraction
// from PGBK_OPTS and database list separators.
func TestConvertLegacyConf(t *testing.T) {
	var tests = []struct {
		input []string
		want  string
	}{
		{[]string{
			"PGBK_BIN=",
			"PGBK_BACKUP_DIR=/var/backups/postgresql",
			"PGBK_TIMESTAMP='%Y-%m-%d_%H-%M-%S'",
			"PGBK_PURGE=30",
			"PGBK_PURGE_MIN_KEEP=0",
			"PGBK_OPTS=(\"-F\" \"c\")",
			"PGBK_DBLIST=\"db1 db2\"",
			"PGBK_EXCLUDE=\"sampledb1 testdb2\"",
			"PGBK_WITH_TEMPLATES=\"no\"",
			"PGBK_STANDBY_PAUSE_TIMEOUT=3600",
			"PGBK_HOSTNAME=/tmp",
			"PGBK_PORT=5432",
			"PGBK_USERNAME=",
			"PGBK_CONNDB=postgres",
			"PGBK_PRE_BACKUP_COMMAND=/bin/true",
			"PGBK_POST_BACKUP_COMMAND=/bin/false",
		}, "bin_directory = \n" +
			"backup_directory = /var/backups/postgresql\n" +
			"timestamp_format = legacy\n" +
			"purge_older_than = 30\n" +
			"purge_min_keep = 0\n" +
			"format = custom\n" +
			"pg_dump_options = \n" +
			"include_dbs = db1, db2\n" +
			"exclude_dbs = sampledb1, testdb2\n" +
			"with_templates = false\n" +
			"pause_timeout = 3600\n" +
			"host = /tmp\n" +
			"port = 5432\n" +
			"user = \n" +
			"dbname = postgres\n" +
			"pre_backup_hook = /bin/true\n" +
			"post_backup_hook = /bin/false\n"},
		{[]string{
			"PGBK_TIMESTAMP=\"%Y-%m-%d\"",
			"PGBK_OPTS=(\"--format=c\" \"-T\" \"tmp*\")",
			"PGBK_WITH_TEMPLATES=\"yes\"",
		}, "timestamp_format = rfc3339\n" +
			"format = custom\n" +
			"pg_dump_options = -T tmp*\n" +
			"with_templates = true\n"},
		{[]string{
			"PGBK_OPTS=(\"--format\" \"c\" \"-T\" \"tmp spaced\")",
		}, "format = custom\n" +
			"pg_dump_options = -T \"tmp spaced\"\n"},
		{[]string{
			"PGBK_OPTS=\"-Fplain --create\"",
		}, "format = plain\n" +
			"pg_dump_options = --create\n"},
	}

	for i, st := range tests {
		t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
			got := convertLegacyConf(st.input)
			if got != st.want {
				t.Errorf("got %v, want %v", got, st.want)
			}
		})
	}
}
18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | 26 | //go:build !windows 27 | // +build !windows 28 | 29 | package main 30 | 31 | import ( 32 | "os" 33 | "path/filepath" 34 | "syscall" 35 | ) 36 | 37 | // lockPath open and try to lock a file with a non-blocking exclusive 38 | // lock. return the open file, which must be held open to keep the 39 | // lock, wether it could be locked and a potentiel error. 40 | func lockPath(path string) (*os.File, bool, error) { 41 | if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { 42 | return nil, false, err 43 | } 44 | 45 | f, err := os.Create(path) 46 | if err != nil { 47 | return nil, false, err 48 | } 49 | 50 | l.Verboseln("locking", path, "with flock()") 51 | if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { 52 | switch err { 53 | case syscall.EWOULDBLOCK: 54 | return f, false, nil 55 | default: 56 | f.Close() 57 | return nil, false, err 58 | } 59 | } 60 | 61 | return f, true, nil 62 | } 63 | 64 | // unlockPath releases the lock from the open file and removes the 65 | // underlying path 66 | func unlockPath(f *os.File) error { 67 | path := f.Name() 68 | l.Verboseln("unlocking", path, "with flock()") 69 | if err := syscall.Flock(int(f.Fd()), syscall.LOCK_UN); err != nil { 70 | return err 71 | } 72 | 73 | f.Close() 74 | return os.Remove(path) 75 | } 76 | -------------------------------------------------------------------------------- /lock_test.go: 
-------------------------------------------------------------------------------- 1 | // pg_back 2 | // 3 | // Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved. 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions 7 | // are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // 2. Redistributions in binary form must reproduce the above copyright 12 | // notice, this list of conditions and the following disclaimer in the 13 | // documentation and/or other materials provided with the distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 16 | // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 | // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 | 26 | package main 27 | 28 | import ( 29 | "errors" 30 | "os" 31 | "path/filepath" 32 | "runtime" 33 | "testing" 34 | ) 35 | 36 | func TestLockPath(t *testing.T) { 37 | // Work from a tempdir 38 | dir, err := os.MkdirTemp("", "test_lockpath") 39 | if err != nil { 40 | t.Fatal("could not create tempdir:", err) 41 | } 42 | defer os.RemoveAll(dir) 43 | 44 | // tempdir with perms for mkdirall failure 45 | if err := os.MkdirAll(filepath.Join(dir, "subfail"), 0444); err != nil { 46 | t.Fatal("could not create temp subdir:", err) 47 | } 48 | 49 | var e *os.PathError 50 | // On windows the directory is created even with a mode of the tempdir that should make it fail 51 | if runtime.GOOS != "windows" { 52 | _, _, err = lockPath(filepath.Join(dir, "subfail", "subfail", "lockfail")) 53 | if !errors.As(err, &e) { 54 | t.Errorf("expected a *os.PathError, got %q\n", err) 55 | } 56 | } 57 | // path is subdir of tempdir to make os.create fail 58 | _, _, err = lockPath(filepath.Join(dir, "subfail")) 59 | if !errors.As(err, &e) { 60 | t.Errorf("expected a *os.PathError, got %q\n", err) 61 | } 62 | 63 | // lock a path with success 64 | f, l, err := lockPath(filepath.Join(dir, "lock")) 65 | if err != nil { 66 | t.Errorf("expected got error %q\n", err) 67 | } 68 | defer f.Close() 69 | if !l { 70 | t.Errorf("expected a true for locked, got false") 71 | } 72 | 73 | // fail to lock it again 74 | f1, l1, err := lockPath(filepath.Join(dir, "lock")) 75 | if err != nil { 76 | t.Errorf("expected got error %q\n", err) 77 | } 78 | if l1 { 79 | t.Errorf("expected a false for failed locked, got true") 80 | } 81 | f1.Close() 82 | } 83 | 84 | func TestUnlockPath(t *testing.T) { 85 | f, err := os.CreateTemp("", "test_unlockpath") 86 | if err != nil { 87 | t.Fatal("could not create tempfile") 88 | } 89 | defer os.Remove(f.Name()) 90 | 91 | // unlock shall always work even if the file is not locked 92 | err = unlockPath(f) 93 | if err != nil { 94 | t.Errorf("got error %q on non locked file\n", err) 
95 | } 96 | 97 | // error when the locked file as already been removed 98 | os.Remove(f.Name()) 99 | err = unlockPath(f) 100 | if err == nil { 101 | t.Errorf("got instead of \"bad file descriptor\" error") 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /lock_win.go: -------------------------------------------------------------------------------- 1 | // pg_back 2 | // 3 | // Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved. 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions 7 | // are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // 2. Redistributions in binary form must reproduce the above copyright 12 | // notice, this list of conditions and the following disclaimer in the 13 | // documentation and/or other materials provided with the distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 16 | // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 | // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// lockPath on windows just creates a file without locking, it only tests if
// the file exist to consider it locked. It returns the created file, a
// boolean telling whether the "lock" could be taken, and a potential
// error.
func lockPath(path string) (*os.File, bool, error) {
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return nil, false, err
	}

	info, err := os.Stat(path)
	if err == nil {
		if info.IsDir() {
			return nil, false, &os.PathError{Op: "stat", Path: path, Err: fmt.Errorf("unexpected directory")}
		}
		// The lock file already exists: report not locked. Note
		// that err is nil here, so no error is returned, matching
		// the unix behaviour on EWOULDBLOCK
		return nil, false, err
	}

	l.Verboseln("creating lock file", path)
	f, err := os.Create(path)
	if err != nil {
		return nil, false, err
	}
	return f, true, nil
}

// unlockPath releases the lock from the open file and removes the
// underlying path. On windows this only closes and removes the file.
func unlockPath(f *os.File) error {
	path := f.Name()
	l.Verboseln("removing lock file", path)
	f.Close()
	return os.Remove(path)
}
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package main

import (
	"log"
	"os"
)

// LevelLog is a logger that prefixes each message with its severity
// level (DEBUG, INFO, WARN, ERROR, FATAL) and supports a verbose
// (debug) mode and a quiet mode.
type LevelLog struct {
	logger  *log.Logger // underlying standard library logger
	verbose bool        // when true, Verbosef/Verboseln produce output
	quiet   bool        // when true, debug and info output is suppressed
}

// l is the shared logger used by the whole program.
var l = NewLevelLog()

// NewLevelLog sets up a logger writing to stderr with the standard
// date/time flags and the level prefix placed just before the message.
func NewLevelLog() *LevelLog {
	return &LevelLog{
		logger:  log.New(os.Stderr, "", log.LstdFlags|log.Lmsgprefix),
		verbose: false,
		quiet:   false,
	}
}

// SetVerbosity toggles verbose and quiet modes. Quiet mode takes over
// verbose mode when both are requested. Verbose mode adds microsecond
// resolution to the timestamps; the default flags are restored when
// verbose mode is switched off.
func (l *LevelLog) SetVerbosity(verbose bool, quiet bool) {
	l.quiet = quiet
	if quiet {
		// Quiet mode takes over verbose mode
		l.verbose = false
		return
	}

	l.verbose = verbose
	if verbose {
		l.logger.SetFlags(log.LstdFlags | log.Lmsgprefix | log.Lmicroseconds)
	} else {
		// Restore the default flags in case verbose mode was
		// previously enabled
		l.logger.SetFlags(log.LstdFlags | log.Lmsgprefix)
	}
}

// Verbosef prints a message with the DEBUG: prefix using log.Printf,
// only when verbose mode is enabled.
func (l *LevelLog) Verbosef(format string, v ...interface{}) {
	if l.verbose {
		l.logger.SetPrefix("DEBUG: ")
		l.logger.Printf(format, v...)
	}
}

// Verboseln prints a message with the DEBUG: prefix using log.Println,
// only when verbose mode is enabled.
func (l *LevelLog) Verboseln(v ...interface{}) {
	if l.verbose {
		l.logger.SetPrefix("DEBUG: ")
		l.logger.Println(v...)
	}
}

// Infof prints a message with the INFO: prefix using log.Printf,
// unless quiet mode is enabled.
func (l *LevelLog) Infof(format string, v ...interface{}) {
	if !l.quiet {
		l.logger.SetPrefix("INFO: ")
		l.logger.Printf(format, v...)
	}
}

// Infoln prints a message with the INFO: prefix using log.Println,
// unless quiet mode is enabled.
func (l *LevelLog) Infoln(v ...interface{}) {
	if !l.quiet {
		l.logger.SetPrefix("INFO: ")
		l.logger.Println(v...)
	}
}

// Warnf prints a message with the WARN: prefix using log.Printf.
// Warnings are printed even in quiet mode.
func (l *LevelLog) Warnf(format string, v ...interface{}) {
	l.logger.SetPrefix("WARN: ")
	l.logger.Printf(format, v...)
}

// Warnln prints a message with the WARN: prefix using log.Println.
func (l *LevelLog) Warnln(v ...interface{}) {
	l.logger.SetPrefix("WARN: ")
	l.logger.Println(v...)
}

// Errorf prints a message with the ERROR: prefix using log.Printf.
func (l *LevelLog) Errorf(format string, v ...interface{}) {
	l.logger.SetPrefix("ERROR: ")
	l.logger.Printf(format, v...)
}

// Errorln prints a message with the ERROR: prefix using log.Println.
func (l *LevelLog) Errorln(v ...interface{}) {
	l.logger.SetPrefix("ERROR: ")
	l.logger.Println(v...)
}

// Fatalf prints a message with the FATAL: prefix using log.Printf.
// Unlike log.Fatalf it does not exit the process; the caller decides
// when to stop.
func (l *LevelLog) Fatalf(format string, v ...interface{}) {
	l.logger.SetPrefix("FATAL: ")
	l.logger.Printf(format, v...)
}

// Fatalln prints a message with the FATAL: prefix using log.Println.
// Unlike log.Fatalln it does not exit the process.
func (l *LevelLog) Fatalln(v ...interface{}) {
	l.logger.SetPrefix("FATAL: ")
	l.logger.Println(v...)
}
25 | 26 | package main 27 | 28 | import ( 29 | "bytes" 30 | "fmt" 31 | "os" 32 | "regexp" 33 | "testing" 34 | ) 35 | 36 | func TestLevelLogSetVerbose(t *testing.T) { 37 | var tests = []bool{true, false} 38 | l := NewLevelLog() 39 | for i, subt := range tests { 40 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 41 | l.SetVerbosity(subt, false) 42 | if l.verbose != subt { 43 | t.Errorf("got %v, want %v", l.verbose, subt) 44 | } 45 | }) 46 | } 47 | } 48 | 49 | func TestLevelLogVerbose(t *testing.T) { 50 | var tests = []struct { 51 | verbose bool 52 | message string 53 | re string 54 | fOrln bool 55 | }{ 56 | {true, "test", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}.\d{6} DEBUG: test$`, true}, 57 | {true, "test", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}.\d{6} DEBUG: test$`, false}, 58 | {false, "test", `^$`, true}, 59 | {false, "test", `^$`, false}, 60 | } 61 | 62 | l := NewLevelLog() 63 | 64 | for i, subt := range tests { 65 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 66 | buf := new(bytes.Buffer) 67 | l.logger.SetOutput(buf) 68 | l.SetVerbosity(subt.verbose, false) 69 | if subt.fOrln { 70 | l.Verbosef("%s", subt.message) 71 | } else { 72 | l.Verboseln(subt.message) 73 | } 74 | line := buf.String() 75 | if len(line) > 0 { 76 | line = line[0 : len(line)-1] 77 | } 78 | matched, err := regexp.MatchString(subt.re, line) 79 | if err != nil { 80 | t.Fatal("pattern did not compile:", err) 81 | } 82 | if !matched { 83 | t.Errorf("log output should match %q is %q", subt.re, line) 84 | } 85 | l.logger.SetOutput(os.Stderr) 86 | }) 87 | } 88 | } 89 | 90 | func TestLevelLogInfo(t *testing.T) { 91 | var tests = []struct { 92 | message string 93 | re string 94 | fOrln bool 95 | }{ 96 | {"test", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} INFO: test$`, true}, 97 | {"test", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} INFO: test$`, false}, 98 | } 99 | 100 | l := NewLevelLog() 101 | 102 | for i, subt := range tests { 103 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 104 | buf := 
new(bytes.Buffer) 105 | l.logger.SetOutput(buf) 106 | if subt.fOrln { 107 | l.Infof("%s", subt.message) 108 | } else { 109 | l.Infoln(subt.message) 110 | } 111 | line := buf.String() 112 | line = line[0 : len(line)-1] 113 | 114 | matched, err := regexp.MatchString(subt.re, line) 115 | if err != nil { 116 | t.Fatal("pattern did not compile:", err) 117 | } 118 | if !matched { 119 | t.Errorf("log output should match %q is %q", subt.re, line) 120 | } 121 | l.logger.SetOutput(os.Stderr) 122 | }) 123 | } 124 | } 125 | 126 | func TestLevelLogWarn(t *testing.T) { 127 | var tests = []struct { 128 | message string 129 | re string 130 | fOrln bool 131 | }{ 132 | {"test", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} WARN: test$`, true}, 133 | {"test", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} WARN: test$`, false}, 134 | } 135 | 136 | l := NewLevelLog() 137 | 138 | for i, subt := range tests { 139 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 140 | buf := new(bytes.Buffer) 141 | l.logger.SetOutput(buf) 142 | if subt.fOrln { 143 | l.Warnf("%s", subt.message) 144 | } else { 145 | l.Warnln(subt.message) 146 | } 147 | line := buf.String() 148 | line = line[0 : len(line)-1] 149 | 150 | matched, err := regexp.MatchString(subt.re, line) 151 | if err != nil { 152 | t.Fatal("pattern did not compile:", err) 153 | } 154 | if !matched { 155 | t.Errorf("log output should match %q is %q", subt.re, line) 156 | } 157 | l.logger.SetOutput(os.Stderr) 158 | }) 159 | } 160 | } 161 | 162 | func TestLevelLogError(t *testing.T) { 163 | var tests = []struct { 164 | message string 165 | re string 166 | fOrln bool 167 | }{ 168 | {"test", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} ERROR: test$`, true}, 169 | {"test", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} ERROR: test$`, false}, 170 | } 171 | 172 | l := NewLevelLog() 173 | 174 | for i, subt := range tests { 175 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 176 | buf := new(bytes.Buffer) 177 | l.logger.SetOutput(buf) 178 | if subt.fOrln { 179 | l.Errorf("%s", 
subt.message) 180 | } else { 181 | l.Errorln(subt.message) 182 | } 183 | line := buf.String() 184 | line = line[0 : len(line)-1] 185 | 186 | matched, err := regexp.MatchString(subt.re, line) 187 | if err != nil { 188 | t.Fatal("pattern did not compile:", err) 189 | } 190 | if !matched { 191 | t.Errorf("log output should match %q is %q", subt.re, line) 192 | } 193 | l.logger.SetOutput(os.Stderr) 194 | }) 195 | } 196 | } 197 | 198 | func TestLevelLogFatal(t *testing.T) { 199 | var tests = []struct { 200 | message string 201 | re string 202 | fOrln bool 203 | }{ 204 | {"test", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} FATAL: test$`, true}, 205 | {"test", `^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} FATAL: test$`, false}, 206 | } 207 | 208 | l := NewLevelLog() 209 | 210 | for i, subt := range tests { 211 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 212 | buf := new(bytes.Buffer) 213 | l.logger.SetOutput(buf) 214 | if subt.fOrln { 215 | l.Fatalf("%s", subt.message) 216 | } else { 217 | l.Fatalln(subt.message) 218 | } 219 | line := buf.String() 220 | line = line[0 : len(line)-1] 221 | 222 | matched, err := regexp.MatchString(subt.re, line) 223 | if err != nil { 224 | t.Fatal("pattern did not compile:", err) 225 | } 226 | if !matched { 227 | t.Errorf("log output should match %q is %q", subt.re, line) 228 | } 229 | l.logger.SetOutput(os.Stderr) 230 | }) 231 | } 232 | } 233 | 234 | func TestLevelLogQuiet(t *testing.T) { 235 | l := NewLevelLog() 236 | 237 | // Set verbose and quiet to ensure quiet takes over verbose when both are true 238 | l.SetVerbosity(true, true) 239 | 240 | buf := new(bytes.Buffer) 241 | l.logger.SetOutput(buf) 242 | 243 | l.Verbosef("test") 244 | if buf.Len() > 0 { 245 | t.Errorf("log function Verbosef has printed data when it should not") 246 | } 247 | 248 | buf.Reset() 249 | l.Verboseln("test") 250 | if buf.Len() > 0 { 251 | t.Errorf("log function Verboseln has printed data when it should not") 252 | } 253 | 254 | buf.Reset() 255 | l.Infof("test") 256 | 
if buf.Len() > 0 { 257 | t.Errorf("log function Infof has printed data when it should not") 258 | } 259 | 260 | buf.Reset() 261 | l.Infoln("test") 262 | if buf.Len() > 0 { 263 | t.Errorf("log function Infoln has printed data when it should not") 264 | } 265 | } 266 | -------------------------------------------------------------------------------- /main_test.go: -------------------------------------------------------------------------------- 1 | // pg_back 2 | // 3 | // Copyright 2011-2022 Nicolas Thauvin and contributors. All rights reserved. 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions 7 | // are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // 2. Redistributions in binary form must reproduce the above copyright 12 | // notice, this list of conditions and the following disclaimer in the 13 | // documentation and/or other materials provided with the distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 16 | // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 | // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 | 26 | package main 27 | 28 | import ( 29 | "fmt" 30 | "runtime" 31 | "testing" 32 | ) 33 | 34 | func TestExecPath(t *testing.T) { 35 | var tests []struct { 36 | dir string 37 | prog string 38 | want string 39 | } 40 | 41 | if runtime.GOOS != "windows" { 42 | tests = []struct { 43 | dir string 44 | prog string 45 | want string 46 | }{ 47 | {"", "pg_dump", "pg_dump"}, 48 | {"/path/to/bin", "prog", "/path/to/bin/prog"}, 49 | } 50 | } else { 51 | tests = []struct { 52 | dir string 53 | prog string 54 | want string 55 | }{ 56 | {"", "pg_dump", "pg_dump.exe"}, 57 | {"C:\\path\\to\\bin", "prog", "C:\\path\\to\\bin\\prog.exe"}, 58 | } 59 | } 60 | 61 | for i, st := range tests { 62 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 63 | binDir = st.dir 64 | got := execPath(st.prog) 65 | if got != st.want { 66 | t.Errorf("expected %q, got %q\n", st.want, got) 67 | } 68 | }) 69 | } 70 | } 71 | 72 | func TestEnsureCipherParamsPresent_NoEncryptNoDecrypt_NoParams_ReturnsNil(t *testing.T) { 73 | opts := options{} 74 | 75 | err := ensureCipherParamsPresent(&opts) 76 | if err != nil { 77 | t.Errorf("should not return error") 78 | } 79 | } 80 | 81 | func TestEnsureCipherParamsPresent_NoEncryptNoDecrypt_HasParams_ReturnsNil(t *testing.T) { 82 | opts := options{ 83 | CipherPublicKey: "foo1", 84 | CipherPrivateKey: "bar99", 85 | CipherPassphrase: "secretwords", 86 | } 87 | 88 | err := ensureCipherParamsPresent(&opts) 89 | if err != nil { 90 | t.Errorf("should not return error") 91 | } 92 | } 93 | 94 | func TestEnsureCipherParamsPresent_Encrypt_NoParams_Failure(t *testing.T) { 95 | opts := options{ 96 | Encrypt: true, 97 | CipherPrivateKey: "bar99", 98 | } 99 | 100 | err := ensureCipherParamsPresent(&opts) 101 | if err == nil { 102 | t.Errorf("should have error about not finding passphrase") 103 | } 104 | } 105 | 106 | func TestEnsureCipherParamsPresent_Encrypt_NoParamsButEnv_Success(t *testing.T) { 107 | opts := options{ 108 | Encrypt: true, 109 | } 110 | 
t.Setenv("PGBK_CIPHER_PASS", "works") 111 | 112 | err := ensureCipherParamsPresent(&opts) 113 | if err != nil { 114 | t.Errorf("should have read environment variable") 115 | } 116 | 117 | if opts.CipherPassphrase != "works" { 118 | t.Errorf("passphrase was not read correctly from environment") 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /pg_back.conf: -------------------------------------------------------------------------------- 1 | # pg_back configuration file 2 | 3 | # PostgreSQL binaries path. Leave empty to search $PATH 4 | bin_directory = 5 | 6 | # Where to store the dumps and other files. It can include the 7 | # {dbname} keyword that will be replaced by the name of the database 8 | # being dumped. 9 | backup_directory = /var/backups/postgresql 10 | 11 | # Mode permissions to apply to database dumps after the dump is completed. 12 | # This parameter uses Unix file permission chmod / mode with an octal 13 | # representation (Example: 0700 or 0600). A negative value can be used to 14 | # disable modifying permission and let the system handle that (example when 15 | # umask is defined). When the format is set to directory, pg_back ensures 16 | # the top-level directory is traversable by adding execute (+x) permission 17 | # if read (r) or write (w) permission is set and it set the configured 18 | # permissions of files inside the directory. 19 | backup_file_mode = 0600 20 | 21 | # Timestamp format to use in filenames of output files. Two values are 22 | # possible: legacy and rfc3339. For example legacy is 2006-01-02_15-04-05, and 23 | # rfc3339 is 2006-01-02T15:04:05-07:00. rfc3339 is the default, except on 24 | # Windows where it is not possible to use the rfs3339 format in filename. Thus 25 | # the only format on Windows is legacy: the option has no effect on Windows. 26 | # timestamp_format = rfc3339 27 | 28 | # PostgreSQL connection options. This are the usual libpq 29 | # variables. 
dbname is the database used to dump globals, acl, 30 | # configuration and pause replication. password is better set in 31 | # ~/.pgpass 32 | host = 33 | port = 34 | user = 35 | dbname = 36 | 37 | # Whether to dump role passwords when running pg_dump 38 | dump_role_passwords = true 39 | 40 | # List of database names to dump. When left empty, dump all 41 | # databases. See with_templates to dump templates too. Separator is 42 | # comma. 43 | include_dbs = 44 | 45 | # List of database names not to dump. Separator is comma. 46 | exclude_dbs = 47 | 48 | # When set to true, database templates are also dumped, either 49 | # explicitly if listed in the include_dbs list or implicitly if 50 | # include_dbs is empty. 51 | with_templates = false 52 | 53 | # Dump only databases, excluding configuration and globals 54 | dump_only = false 55 | 56 | # Format of the dump, understood by pg_dump. Possible values are 57 | # plain, custom, tar or directory. 58 | format = custom 59 | 60 | # When the format is directory, number of parallel jobs to dump (-j 61 | # option of pg_dump). 62 | parallel_backup_jobs = 1 63 | 64 | # When using a compressed binary format, e.g. custom or directory, adjust the 65 | # compression level between 0 and 9. Use -1 to keep the default level of pg_dump. 66 | compress_level = -1 67 | 68 | # Compute a checksum for each file in the dumps. It can be checked 69 | # by the corresponding shaXsum -c command. Possible values are: none to 70 | # disable checksums, sha1, sha224, sha256, sha384, and sha512. 71 | checksum_algorithm = none 72 | 73 | # Encrypt the files produced, including globals and configuration. 74 | encrypt = false 75 | 76 | # Passphrase to use for encryption and decryption. The PGBK_CIPHER_PASS 77 | # environment variable can be used alternatively.
78 | cipher_pass = 79 | 80 | # AGE public key for encryption; in Bech32 encoding starting with 'age1' 81 | cipher_public_key = 82 | 83 | # AGE private key for decryption; in Bech32 encoding starting with 'AGE-SECRET-KEY-1' 84 | cipher_private_key = 85 | 86 | # Keep original files after encrypting them. 87 | encrypt_keep_source = false 88 | 89 | # Purge dumps older than this number of days. If the interval has to 90 | # be shorter than one day, use a duration with units, h for hours, m 91 | # for minutes, s for seconds, us for microseconds or ns for 92 | # nanoseconds, e.g. 1h30m24s. 93 | purge_older_than = 30 94 | 95 | # When purging older dumps, always keep this minimum number of 96 | # dumps. The default is 0. Even if purge_older_than is 0 the dumps of 97 | # the current run are kept. To remove all dumps and not 98 | # keep anything, for example to just test for data corruption, then 99 | # purge_older_than shall be a negative duration. 100 | purge_min_keep = 0 101 | 102 | # Number of pg_dump commands to run concurrently. 103 | jobs = 1 104 | 105 | # inject these options to pg_dump 106 | pg_dump_options = 107 | 108 | # When dumping from a hot standby server, wait for exclusive locks to 109 | # be released within this number of seconds. Abort if exclusive locks 110 | # are still held. If a exclusive lock is granted and replication is 111 | # paused, the lock is held until the replication is resumed, causing 112 | # pg_dump to wait forever. 113 | pause_timeout = 3600 114 | 115 | # Commands to execute before and after dumping. The post-backup 116 | # command is always executed even in case of failure. 117 | pre_backup_hook = 118 | post_backup_hook = 119 | 120 | # Upload resulting files to a remote location. Possible values are: none, 121 | # s3, sftp, gcs. The default is none, meaning no file will be uploaded. 
122 | upload = none 123 | 124 | # The upload_prefix option can be used to place the files in a remote 125 | # directory, as most cloud storage treat prefix as directories. The filename and 126 | # the prefix is separated by a / in the remote location. 127 | upload_prefix = "" 128 | 129 | # delete local file after upload 130 | delete_uploaded = "no" 131 | 132 | # Purge remote files. When uploading to a remote location, purge the remote 133 | # files with the same rules as the local directory. 134 | # purge_remote = false 135 | 136 | # AWS S3 Access information. Region and Bucket are mandatory. If no credential 137 | # or profile is provided, defaults from aws sdk are used. 138 | # s3_region = 139 | # s3_bucket = 140 | # s3_profile = 141 | # s3_key_id = 142 | # s3_secret = 143 | # s3_endpoint = 144 | # s3_force_path = false 145 | # s3_tls = true 146 | 147 | # SFTP Access information. If the user is empty, the current system user is 148 | # used. Port defaults to 22. The password is also used as passphrase for any 149 | # identity file given, it can be provided with the PGBK_SSH_PASS environment 150 | # variable. PGBK_SSH_PASS is overridden by a value set here or on the command 151 | # line. Use the directory to inform where to store files, it can be relative to 152 | # the working directory of the SSH connection, the home directory of the remote 153 | # user in most cases. 154 | # sftp_host = 155 | # sftp_port = 156 | # sftp_user = 157 | # sftp_password = 158 | # sftp_directory = 159 | # sftp_identity = 160 | # sftp_ignore_hostkey = false 161 | 162 | # Google Cloud Storage (GCS) Access information. Bucket is mandatory. If the 163 | # path to the key file is empty, the GOOGLE_APPLICATION_CREDENTIALS environment 164 | # variable is used. 165 | # gcs_bucket = 166 | # gcs_endpoint = 167 | # gcs_keyfile = 168 | 169 | # Azure Blob Storage access information. The container is mandatory. 
If the 170 | # account name is left empty, an anonymous connection is used and the endpoint 171 | # is used directly: this allows the use of a full URL to the container with a 172 | # SAS token. When an account is provided, the URL is built by prepending the 173 | # container name to the endpoint. The default endpoint is 174 | # blob.core.windows.net. The AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY are 175 | # used when azure_account and azure_key are not set. 176 | # azure_container = 177 | # azure_endpoint = 178 | # azure_account = 179 | # azure_key = 180 | 181 | 182 | # Backblaze B2 Access information. Region, Endpoint, Bucket, Key-ID and App-Key are mandatory. 183 | # b2_bucket = 184 | # b2_key_id = 185 | # b2_app_key = 186 | # b2_force_path = false 187 | # b2_concurrent_connections = 5 188 | 189 | 190 | # # Per database options. Use an ini section named the same as the 191 | # # database. These options take precedence over the global values. 192 | # [dbname] 193 | # user = 194 | # format = 195 | # parallel_backup_jobs = 196 | # compress_level = 197 | # checksum_algorithm = 198 | # purge_older_than = 199 | # purge_min_keep = 200 | 201 | # # List of schemas and tables to dump or exclude from the dump. 202 | # # Inclusion and exclusion rules of pg_dump apply, as well as 203 | # # pattern rules. Separate schema/table names with a comma. 204 | # schemas = 205 | # exclude_schemas = 206 | 207 | # tables = 208 | # exclude_tables = 209 | 210 | # Include or exclude large objects in the dump. Leave the option commented to 211 | # keep the default behaviour, see pg_dump -b. 212 | # with_blobs = true 213 | 214 | # # inject these options to pg_dump. Use an empty value to override the 215 | # # global value of pg_dump_options.
216 | # pg_dump_options = 217 | 218 | -------------------------------------------------------------------------------- /purge.go: -------------------------------------------------------------------------------- 1 | // pg_back 2 | // 3 | // Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved. 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions 7 | // are met: 8 | // 9 | // 1. Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // 2. Redistributions in binary form must reproduce the above copyright 12 | // notice, this list of conditions and the following disclaimer in the 13 | // documentation and/or other materials provided with the distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 16 | // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 | // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 | 26 | package main 27 | 28 | import ( 29 | "errors" 30 | "fmt" 31 | "io" 32 | "os" 33 | "path/filepath" 34 | "regexp" 35 | "sort" 36 | "strings" 37 | "time" 38 | ) 39 | 40 | type purgeJob struct { 41 | datetime time.Time 42 | dirs []string 43 | files []string 44 | } 45 | 46 | func genPurgeJobs(items []Item, dbname string) []purgeJob { 47 | jobs := make(map[string]purgeJob) 48 | 49 | // The files to purge must be grouped by date. depending on the options 50 | // there can be up to 6 files for a database or output 51 | reExt := regexp.MustCompile(`^(sql|d|dump|tar|out|createdb\.sql)(?:\.(sha\d{1,3}|age))?(?:\.(sha\d{1,3}|age))?(?:\.(sha\d{1,3}))?`) 52 | 53 | for _, item := range items { 54 | if strings.HasPrefix(item.key, cleanDBName(dbname)+"_") { 55 | dateNExt := strings.TrimPrefix(item.key, cleanDBName(dbname)+"_") 56 | parts := strings.SplitN(dateNExt, ".", 2) 57 | 58 | var ( 59 | date time.Time 60 | parsed bool 61 | ) 62 | 63 | // We match the file using every timestamp format 64 | // possible so that the format can be changed without 65 | // breaking the purge 66 | for _, layout := range []string{"2006-01-02_15-04-05", time.RFC3339} { 67 | 68 | // Parse the format to a time in the local 69 | // timezone when the timezone is not part of 70 | // the string, otherwise it uses to timezone 71 | // written in the string. We do this because 72 | // the limit is in the local timezone. 
73 | date, _ = time.ParseInLocation(layout, parts[0], time.Local) 74 | if !date.IsZero() { 75 | parsed = true 76 | break 77 | } 78 | } 79 | 80 | if !parsed { 81 | // the file does not match the time format, skip it 82 | continue 83 | } 84 | 85 | // Identify the kind of file based on the dot separated 86 | // strings at the end of its name 87 | matches := reExt.FindStringSubmatch(parts[1]) 88 | if len(matches) == 5 { 89 | job := jobs[parts[0]] 90 | 91 | if job.datetime.IsZero() { 92 | job.datetime = date 93 | } 94 | 95 | if date.Before(job.datetime) { 96 | job.datetime = date 97 | } 98 | 99 | if item.isDir { 100 | job.dirs = append(job.dirs, item.key) 101 | } else { 102 | job.files = append(job.files, item.key) 103 | } 104 | 105 | jobs[parts[0]] = job 106 | continue 107 | } 108 | } 109 | } 110 | 111 | // The output is a list of jobs, sorted by date, youngest first 112 | jobList := make([]purgeJob, 0) 113 | for _, j := range jobs { 114 | jobList = append(jobList, j) 115 | } 116 | 117 | sort.Slice(jobList, func(i, j int) bool { 118 | return jobList[i].datetime.After(jobList[j].datetime) 119 | }) 120 | 121 | return jobList 122 | } 123 | 124 | func purgeDumps(directory string, dbname string, keep int, limit time.Time) error { 125 | l.Verboseln("purge:", dbname, "limit:", limit, "keep:", keep) 126 | 127 | // The dbname can be put in the path of the backup directory, so we 128 | // have to compute it first. 
This is why a dbname is required to purge 129 | // old dumps 130 | dirpath := filepath.Dir(formatDumpPath(directory, "", "", dbname, time.Time{}, 0)) 131 | dir, err := os.Open(dirpath) 132 | if err != nil { 133 | return fmt.Errorf("could not purge %s: %s", dirpath, err) 134 | } 135 | defer dir.Close() 136 | 137 | files := make([]Item, 0) 138 | for { 139 | var f []os.FileInfo 140 | f, err = dir.Readdir(1) 141 | if err != nil { 142 | if errors.Is(err, io.EOF) { 143 | // reset to avoid returning is.EOF at the end 144 | err = nil 145 | break 146 | } 147 | return fmt.Errorf("could not purge %s: %s", dirpath, err) 148 | } 149 | 150 | files = append(files, Item{key: f[0].Name(), modtime: f[0].ModTime(), isDir: f[0].IsDir()}) 151 | } 152 | 153 | // Parse and group by date. We remove groups of files produced by 154 | // the same run (including checksums, encrypted files, etc) 155 | jobs := genPurgeJobs(files, dbname) 156 | 157 | if keep < len(jobs) && keep >= 0 { 158 | // Show the files kept in verbose mode 159 | for _, j := range jobs[:keep] { 160 | for _, f := range j.files { 161 | l.Verboseln("keeping (count)", filepath.Join(dirpath, f)) 162 | } 163 | 164 | for _, d := range j.dirs { 165 | l.Verboseln("keeping (count)", filepath.Join(dirpath, d)) 166 | } 167 | } 168 | 169 | // Purge the older files that after excluding the one we need 170 | // to keep 171 | for _, j := range jobs[keep:] { 172 | if j.datetime.Before(limit) { 173 | for _, f := range j.files { 174 | path := filepath.Join(dirpath, f) 175 | l.Infoln("removing", path) 176 | if err = os.Remove(path); err != nil { 177 | l.Errorln(err) 178 | } 179 | } 180 | 181 | for _, d := range j.dirs { 182 | path := filepath.Join(dirpath, d) 183 | l.Infoln("removing", path) 184 | if err = os.RemoveAll(path); err != nil { 185 | l.Errorln(err) 186 | } 187 | } 188 | } else { 189 | for _, f := range j.files { 190 | l.Verboseln("keeping (age)", filepath.Join(dirpath, f)) 191 | } 192 | 193 | for _, d := range j.dirs { 194 | 
l.Verboseln("keeping (age)", filepath.Join(dirpath, d)) 195 | } 196 | } 197 | } 198 | } 199 | 200 | if err != nil { 201 | return fmt.Errorf("could not purge %s: %s", dirpath, err) 202 | } 203 | 204 | return nil 205 | } 206 | 207 | func purgeRemoteDumps(repo Repo, uploadPrefix string, directory string, dbname string, keep int, limit time.Time) error { 208 | l.Verboseln("remote purge:", dbname, "limit:", limit, "keep:", keep) 209 | 210 | // The dbname can be put in the directory tree of the dump, in this 211 | // case the directory containing {dbname} in its name is kept on the 212 | // remote path along with any subdirectory. So we have to include it in 213 | // the filter when listing remote files 214 | dirpath := filepath.Dir(formatDumpPath(directory, "", "", dbname, time.Time{}, 0)) 215 | prefix := filepath.Join(uploadPrefix, relPath(directory, filepath.Join(dirpath, cleanDBName(dbname)))) 216 | 217 | l.Verboseln("remote file prefix:", prefix) 218 | 219 | // Get the list of files from the repository, this includes the 220 | // contents of dumps in the directory format. 221 | remoteFiles, err := repo.List(prefix) 222 | if err != nil { 223 | return fmt.Errorf("could not purge: %w", err) 224 | } 225 | 226 | // We are going to parse the filename, we need to remove any posible 227 | // parent dir before the name of the dump 228 | parentDir := filepath.Dir(prefix) 229 | if parentDir == "." || parentDir == "/" { 230 | parentDir = "" 231 | } 232 | 233 | files := make([]Item, 0) 234 | for _, i := range remoteFiles { 235 | f, err := filepath.Rel(parentDir, i.key) 236 | if err != nil { 237 | l.Warnf("could not process remote file %s: %s", i.key, err) 238 | continue 239 | } 240 | 241 | files = append(files, Item{key: f, modtime: i.modtime, isDir: i.isDir}) 242 | } 243 | 244 | // Parse and group by date. 
We remove groups of files produced by 245 | // the same run (including checksums, encrypted files, etc) 246 | jobs := genPurgeJobs(files, dbname) 247 | 248 | if keep < len(jobs) && keep >= 0 { 249 | // Show the files kept in verbose mode 250 | for _, j := range jobs[:keep] { 251 | for _, f := range j.files { 252 | l.Verboseln("keeping remote (count)", filepath.Join(parentDir, f)) 253 | } 254 | 255 | for _, d := range j.dirs { 256 | l.Verboseln("keeping remote (count)", filepath.Join(parentDir, d)) 257 | } 258 | } 259 | 260 | // Purge the older files that after excluding the one we need 261 | // to keep 262 | for _, j := range jobs[keep:] { 263 | if j.datetime.Before(limit) { 264 | for _, f := range j.files { 265 | path := filepath.Join(parentDir, f) 266 | l.Infoln("removing remote", path) 267 | if err = repo.Remove(path); err != nil { 268 | l.Errorln(err) 269 | } 270 | } 271 | 272 | for _, d := range j.dirs { 273 | path := filepath.Join(parentDir, d) 274 | l.Infoln("removing remote", path) 275 | if err = repo.Remove(path); err != nil { 276 | l.Errorln(err) 277 | } 278 | } 279 | 280 | } else { 281 | for _, f := range j.files { 282 | l.Verboseln("keeping remote (age)", filepath.Join(parentDir, f)) 283 | } 284 | 285 | for _, d := range j.dirs { 286 | l.Verboseln("keeping remote (age)", filepath.Join(parentDir, d)) 287 | } 288 | } 289 | } 290 | } 291 | 292 | if err != nil { 293 | return fmt.Errorf("could not purge: %w", err) 294 | } 295 | 296 | return nil 297 | } 298 | -------------------------------------------------------------------------------- /purge_test.go: -------------------------------------------------------------------------------- 1 | // pg_back 2 | // 3 | // Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved. 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions 7 | // are met: 8 | // 9 | // 1. 
Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // 2. Redistributions in binary form must reproduce the above copyright 12 | // notice, this list of conditions and the following disclaimer in the 13 | // documentation and/or other materials provided with the distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 16 | // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 | // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"testing"
	"time"
)

// func purgeDumps(directory string, dbname string, keep int, limit time.Time) error
func TestPurgeDumps(t *testing.T) {
	// work in a tempdir
	dir, err := os.MkdirTemp("", "test_purge_dumps")
	if err != nil {
		t.Fatal("could not create tempdir:", err)
	}
	defer os.RemoveAll(dir)

	// empty path - on windows chmod does not work as expected
	wd := filepath.Join(dir, "real", "bad")
	if err := os.MkdirAll(wd, 0755); err != nil {
		t.Fatal("could not create test dir")
	}

	// removing the write bit from the parent dir must make the purge fail
	if runtime.GOOS != "windows" {
		os.Chmod(filepath.Dir(wd), 0444)
		err = purgeDumps(wd, "", 0, time.Time{})
		if err == nil {
			// NOTE(review): message is misleading, it fires when NO error
			// was returned
			t.Errorf("empty path gave error \n")
		}
		os.Chmod(filepath.Dir(wd), 0755)
	}

	// empty dbname: a dump without a database name must still be purged
	when := time.Now().Add(-time.Hour)
	tf := formatDumpPath(wd, "2006-01-02_15-04-05", "dump", "", when, 0)
	f, err := os.Create(tf)
	if err != nil {
		t.Errorf("could not create temp file %s: %s", tf, err)
	}

	f.Close()
	// backdate the file so it is older than the limit passed below
	os.Chtimes(tf, when, when)

	err = purgeDumps(wd, "", 0, time.Now())
	if err != nil {
		t.Errorf("empty dbname (file: %s) gave error %s", tf, err)
	}
	if _, err := os.Stat(tf); err == nil {
		t.Errorf("file still exists")
	}

	// file without write perms
	if runtime.GOOS != "windows" {
		tf = formatDumpPath(wd, time.RFC3339, "dump", "db", time.Now().Add(-time.Hour), 0)
		os.WriteFile(tf, []byte("truc\n"), 0644)
		os.Chmod(filepath.Dir(tf), 0555)

		err = purgeDumps(wd, "db", 0, time.Now())
		if err == nil {
			t.Errorf("bad perms on file did not gave an error")
		}
		os.Chmod(filepath.Dir(tf), 0755)

		// dir without write perms
		tf = formatDumpPath(wd, time.RFC3339, "d", "db", time.Now().Add(-time.Hour), 0)
		os.MkdirAll(tf, 0755)
		os.Chmod(filepath.Dir(tf), 0555)

		err = purgeDumps(wd, "db", 0, time.Now())
		if err == nil {
			t.Errorf("bad perms on dir did not gave an error")
		}
		os.Chmod(filepath.Dir(tf), 0755)
	}

	// time and keep limits: 3 files are created, one per hour in the
	// past; want is the number of files expected to survive the purge
	var tests = []struct {
		keep   int
		limit  time.Time
		format string
		want   int
	}{
		{0, time.Time{}, "2006-01-02_15-04-05", 3},
		{1, time.Time{}, "2006-01-02_15-04-05", 3},
		{0, time.Now().Add(-time.Minute * time.Duration(90)), "2006-01-02_15-04-05", 1},
		{1, time.Now().Add(-time.Minute * time.Duration(90)), "2006-01-02_15-04-05", 1},
		{2, time.Now().Add(-time.Minute * time.Duration(90)), "2006-01-02_15-04-05", 2},
		{3, time.Now().Add(-time.Minute * time.Duration(90)), "2006-01-02_15-04-05", 3},
		{-1, time.Now(), "2006-01-02_15-04-05", 3},
		{0, time.Time{}, time.RFC3339, 3},
		{1, time.Time{}, time.RFC3339, 3},
		{0, time.Now().Add(-time.Minute * time.Duration(90)), time.RFC3339, 1},
		{1, time.Now().Add(-time.Minute * time.Duration(90)), time.RFC3339, 1},
		{2, time.Now().Add(-time.Minute * time.Duration(90)), time.RFC3339, 2},
		{3, time.Now().Add(-time.Minute * time.Duration(90)), time.RFC3339, 3},
		{-1, time.Now(), time.RFC3339, 3},
	}

	for i, st := range tests {
		t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
			// RFC3339 timestamps contain colons, which are invalid in
			// windows file names
			if runtime.GOOS == "windows" && st.format == time.RFC3339 {
				t.Skip("testing on windows")
			}

			// create 3 files, 1 per hour
			wd = filepath.Join(dir, "wd")
			if err := os.MkdirAll(wd, 0755); err != nil {
				t.Fatal("could not create test dir")
			}
			for i := 1; i <= 3; i++ {
				when := time.Now().Add(-time.Hour * time.Duration(i))
				tf = formatDumpPath(wd, st.format, "dump", "db", when, 0)
				os.WriteFile(tf, []byte("truc\n"), 0644)
				os.Chtimes(tf, when, when)
			}

			if err := purgeDumps(wd, "db", st.keep, st.limit); err != nil {
				t.Errorf("purgeDumps returned: %v", err)
			}

			dir, err := os.Open(wd)
			if err != nil {
				t.Fatal("could not open workdir:", err)
			}
			defer dir.Close()

			fi, err := dir.Readdir(-1)
			if err != nil {
				t.Fatal("could not read workdir:", err)
			}
			if len(fi) != st.want {
				var info string
				for _, f := range fi {
					info += fmt.Sprintf("%s %v\n", f.Name(), f.ModTime())
				}
				t.Errorf("expected %d files in dir, found %d\n%slimit: %v, keep: %v", st.want, len(fi), info, st.limit, st.keep)
			}

			os.RemoveAll(wd)
		})
	}
}
--------------------------------------------------------------------------------
/sql.go:
--------------------------------------------------------------------------------
// pg_back
//
// Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package main

import (
	"database/sql"
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/jackc/pgtype"
	_ "github.com/jackc/pgx/v4/stdlib"
)

// pg holds an open connection to a PostgreSQL instance along with the
// server facts the rest of the code needs to adapt its behaviour.
type pg struct {
	conn      *sql.DB
	version   int    // numeric server version (server_version_num)
	xlogOrWal string // "wal" from version 10 on, "xlog" before
	superuser bool   // whether the connected role is a superuser
}

// pgGetVersionNum returns the numeric version of the server, e.g. 140002.
func pgGetVersionNum(db *sql.DB) (int, error) {
	var version int

	query := "select setting from pg_settings where name = 'server_version_num'"
	l.Verboseln("executing SQL query:", query)
	err := db.QueryRow(query).Scan(&version)
	if err != nil {
		return 0, fmt.Errorf("could not get PostgreSQL server version: %s", err)
	}

	return version, nil
}

// pgAmISuperuser tells whether the current user is a superuser.
func pgAmISuperuser(db *sql.DB) (bool, error) {
	var isSuper bool

	query := "select rolsuper from pg_roles where rolname = current_user"
	l.Verboseln("executing SQL query:", query)
	err := db.QueryRow(query).Scan(&isSuper)
	if err != nil {
		return false, fmt.Errorf("could not check if db user is superuser: %s", err)
	}

	return isSuper, nil
}

// dbOpen connects to PostgreSQL with the given conninfo and gathers the
// server version and superuser status. The caller must Close() the result.
func dbOpen(conninfo *ConnInfo) (*pg, error) {
	connstr := conninfo.String()
	l.Verbosef("connecting to PostgreSQL with: \"%s\"", connstr)
	db, err := sql.Open("pgx", connstr)
	if err != nil {
		return nil, fmt.Errorf("could not open database: %s", err)
	}

	// sql.Open does not connect, force a round-trip to validate conninfo
	if err := db.Ping(); err != nil {
		db.Close()
		return nil, fmt.Errorf("could not connect to database: %s", err)
	}

	newDB := new(pg)
	newDB.conn = db
	newDB.version, err = pgGetVersionNum(db)
	if err != nil {
		db.Close()
		return nil, err
	}

	l.Verboseln("server num version is:", newDB.version)
	// Keyword xlog has been replaced by wal as of PostgreSQL 10
	if newDB.version >= 100000 {
		newDB.xlogOrWal = "wal"
	} else {
		newDB.xlogOrWal = "xlog"
	}

	newDB.superuser, err = pgAmISuperuser(db)
	if err != nil {
		db.Close()
		return nil, err
	}

	return newDB, nil
}

// Close closes the underlying connection to PostgreSQL.
func (db *pg) Close() error {
	l.Verboseln("closing connection to PostgreSQL")
	return db.conn.Close()
}

// sqlQuoteLiteral quotes s as a SQL string literal, including the
// surrounding single quotes, switching to the E'' form when a backslash
// must be escaped.
func sqlQuoteLiteral(s string) string {
	var o string
	// Make standard_conforming_strings happy if the input
	// contains some backslash
	if strings.ContainsAny(s, "\\") {
		o = "E"
	}
	o += "'"

	// double single quotes and backslashes; the prefix built so far
	// contains neither, so rewriting o in place is safe
	o += strings.ReplaceAll(s, "'", "''")
	o = strings.ReplaceAll(o, "\\", "\\\\")

	o += "'"

	return o
}

// sqlQuoteIdent escapes double quotes inside an identifier. The caller is
// responsible for adding the surrounding double quotes.
func sqlQuoteIdent(s string) string {
	// just double the quote quotes, there are no escape for identifiers
	return strings.ReplaceAll(s, "\"", "\"\"")
}

// listAllDatabases returns the names of all connectable databases,
// optionally including templates.
func listAllDatabases(db *pg, withTemplates bool) ([]string, error) {
	var (
		query  string
		dbname string
	)

	if withTemplates {
		query = "select datname from pg_database where datallowconn;"
	} else {
		query = "select datname from pg_database where datallowconn and not datistemplate;"
	}

	dbs := make([]string, 0)
	l.Verboseln("executing SQL query:", query)
	rows, err := db.conn.Query(query)
	if err != nil {
		return dbs, fmt.Errorf("could not list databases: %s", err)
	}
	defer rows.Close()

	for rows.Next() {
		err := rows.Scan(&dbname)
		if err != nil {
			// NOTE(review): scan failures are silently skipped here;
			// rows.Err() below does not report them
			continue
		}
		dbs = append(dbs, dbname)
	}
	if err := rows.Err(); err != nil {
		return dbs, fmt.Errorf("could not retrieve rows: %s", err)
	}
	return dbs, nil
}

// listDatabases computes the list of databases to dump from the inclusion
// and exclusion lists. Exclusions win over explicit inclusions.
func listDatabases(db *pg, withTemplates bool, excludedDbs []string, includedDbs []string) ([]string, error) {
	var (
		databases []string
		err       error
	)

	// When an explicit list of database is given, allow to select
	// templates
	if len(includedDbs) > 0 {
		databases, err = listAllDatabases(db, true)
		if err != nil {
			return databases, err
		}
		realDbs := make([]string, 0, len(includedDbs))

	nextidb:
		for _, d := range includedDbs {

			for _, e := range databases {
				if d == e {
					realDbs = append(realDbs, d)
					continue nextidb
				}
			}
			l.Warnf("database \"%s\" does not exists, excluded", d)
		}
		databases = realDbs
	} else {
		databases, err = listAllDatabases(db, withTemplates)
		if err != nil {
			return databases, err
		}
	}

	// Exclude databases even if they are explicitly included
	if len(excludedDbs) > 0 {
		filtered := make([]string, 0, len(databases))

	nextfdb:
		for _, d := range databases {
			for _, e := range excludedDbs {
				if d == e {
					continue nextfdb
				}
			}
			filtered = append(filtered, d)
		}
		databases = filtered
	}
	return databases, nil
}

// pgVersionError is returned when the server or tool version does not
// support an operation.
type pgVersionError struct {
	s string
}

func (e *pgVersionError) Error() string {
	return e.s
}

// pgPrivError is returned when the connected role lacks the privileges
// required by an operation.
type pgPrivError struct {
	s string
}

func (e *pgPrivError) Error() string {
	return e.s
}

// pg_dumpacl stuff
func dumpCreateDBAndACL(db *pg, dbname string, 
force bool) (string, error) {
	// dumpCreateDBAndACL (signature starts on the previous line) returns
	// the SQL commands recreating the database dbname, its options and
	// its ACL, for pg_dump versions older than 11 (or always when force
	// is true).
	var s string

	if dbname == "" {
		return "", fmt.Errorf("empty input dbname")
	}

	// this query only work from 9.0, where datcollate and datctype were
	// added to pg_database
	if db.version < 90000 {
		return "", &pgVersionError{s: "cluster version is older than 9.0, not dumping ACL"}
	}

	// this is no longer necessary after 11. Dumping ACL is the
	// job of pg_dump so we have to check its version, not the
	// server
	if pgToolVersion("pg_dump") >= 110000 && !force {
		l.Verboseln("no need to dump create database query and database ACL with pg_dump from >=11")
		return "", nil
	}

	l.Infoln("dumping database creation and ACL commands of", dbname)

	// template0's owner is used as the fallback owner when rolname is null
	query := "SELECT coalesce(rolname, (select rolname from pg_roles where oid=(select datdba from pg_database where datname='template0'))), " +
		" pg_encoding_to_char(d.encoding), " +
		" datcollate, datctype, datistemplate, datacl, datconnlimit, " +
		" (SELECT spcname FROM pg_tablespace t WHERE t.oid = d.dattablespace) AS dattablespace " +
		"FROM pg_database d" +
		" LEFT JOIN pg_roles u ON (datdba = u.oid) " +
		"WHERE datallowconn AND datname = $1"
	l.Verboseln("executing SQL query:", query)
	rows, err := db.conn.Query(query, dbname)
	if err != nil {
		return "", fmt.Errorf("could not query database information for %s: %s", dbname, err)
	}
	defer rows.Close()

	for rows.Next() {
		var (
			owner      string
			encoding   string
			collate    string
			ctype      string
			istemplate bool
			acl        pgtype.TextArray
			connlimit  int
			tablespace string
		)
		err := rows.Scan(&owner, &encoding, &collate, &ctype, &istemplate, &acl, &connlimit, &tablespace)
		if err != nil {
			return "", fmt.Errorf("could not get row: %s", err)
		}

		// template0 must not be recreated, only its ACL is dumped
		if dbname != "template0" {
			s += fmt.Sprintf("--\n-- Database creation\n--\n\n")
			s += fmt.Sprintf("CREATE DATABASE \"%s\" WITH TEMPLATE = template0 OWNER = \"%s\"", sqlQuoteIdent(dbname), sqlQuoteIdent(owner))
			s += fmt.Sprintf(" ENCODING = %s", sqlQuoteLiteral(encoding))
			s += fmt.Sprintf(" LC_COLLATE = %s", sqlQuoteLiteral(collate))

			s += fmt.Sprintf(" LC_CTYPE = %s", sqlQuoteLiteral(ctype))

			if tablespace != "pg_default" {
				s += fmt.Sprintf(" TABLESPACE = \"%s\"", sqlQuoteIdent(tablespace))
			}
			if connlimit != -1 {
				s += fmt.Sprintf(" CONNECTION LIMIT = %d", connlimit)
			}
			s += fmt.Sprintf(";\n\n")

			if istemplate {
				s += fmt.Sprintf("UPDATE pg_catalog.pg_database SET datistemplate = 't' WHERE datname = %s;\n", sqlQuoteLiteral(dbname))
			}
		}

		// When all privileges are revoked from public, there
		// isn't any acl entry in the list showing this. So
		// when the list is not empty and no acl are granted
		// to public, we have to output a revoke statement for
		// public, before any grant.
		if len(acl.Elements) > 0 {
			var (
				t         string
				revokeAll bool = true
			)

			s += fmt.Sprintf("--\n-- Database privileges \n--\n\n")

			for _, e := range acl.Elements {
				if e.Status == pgtype.Null {
					continue
				}

				// an aclitem for public has an empty grantee
				if strings.HasPrefix(e.String, "=") {
					revokeAll = false
				}

				t += makeACLCommands(e.String, dbname, owner)
			}

			if revokeAll {
				s += fmt.Sprintf("REVOKE CONNECT, TEMPORARY ON DATABASE \"%s\" FROM PUBLIC;\n", sqlQuoteIdent(dbname))
			}
			s += t
		}
	}
	err = rows.Err()
	if err != nil {
		// NOTE(review): "retrive" typo in the message; other functions of
		// this file spell it "retrieve"
		return s, fmt.Errorf("could not retrive rows: %s", err)
	}

	return s, nil
}

// makeACLCommands translates a single aclitem of dbname into REVOKE/GRANT
// statements, wrapped in SET SESSION AUTHORIZATION when the grantor is not
// the owner. Malformed aclitems yield an empty string.
func makeACLCommands(aclitem string, dbname string, owner string) string {
	var s string
	// the aclitem format is "grantee=privs/grantor" where privs
	// is a list of letters, one for each privilege followed by *
	// when grantee as WITH GRANT OPTION for it
	t := strings.Split(aclitem, "=")
	grantee := t[0]
	if len(t) != 2 {
		return ""
	}
	t = strings.Split(t[1], "/")
	privs := t[0]
	if len(t) != 2 {
		return ""
	}
	grantor := t[1]

	// public role: when the privs differ from the default, issue grants
	if grantee == "" {
		grantee = "PUBLIC"
		// "Tc" (TEMPORARY, CONNECT) is the default for public
		if privs != "Tc" {
			s += fmt.Sprintf("REVOKE ALL ON DATABASE \"%s\" FROM PUBLIC;\n", sqlQuoteIdent(dbname))
		} else {
			return s
		}
	}
	// owner: when other roles have been given privileges, all
	// privileges are shown for the owner
	if grantee == owner {
		// "CTc" is the full set of privileges of the owner
		if privs != "CTc" {
			s += fmt.Sprintf("REVOKE ALL ON DATABASE \"%s\" FROM \"%s\";\n", sqlQuoteIdent(dbname), sqlQuoteIdent(grantee))
		} else {
			return s
		}
	}

	if grantor != owner {
		s += fmt.Sprintf("SET SESSION AUTHORIZATION \"%s\";\n", sqlQuoteIdent(grantor))
	}
	// one GRANT per privilege letter, a following '*' adds WITH GRANT
	// OPTION to the statement just emitted
	for i, b := range privs {
		switch b {
		case 'C':
			s += fmt.Sprintf("GRANT CREATE ON DATABASE \"%s\" TO \"%s\"", sqlQuoteIdent(dbname), sqlQuoteIdent(grantee))
		case 'T':
			s += fmt.Sprintf("GRANT TEMPORARY ON DATABASE \"%s\" TO \"%s\"", sqlQuoteIdent(dbname), sqlQuoteIdent(grantee))
		case 'c':
			s += fmt.Sprintf("GRANT CONNECT ON DATABASE \"%s\" TO \"%s\"", sqlQuoteIdent(dbname), sqlQuoteIdent(grantee))
		}

		if i+1 < len(privs) {
			if privs[i+1] == '*' {
				s += fmt.Sprintf(" WITH GRANT OPTION;\n")
			} else if privs[i] != '*' {
				s += fmt.Sprintf(";\n")
			}
		} else if privs[i] != '*' {
			s += fmt.Sprintf(";\n")
		}
	}
	if grantor != owner {
		s += fmt.Sprintf("RESET SESSION AUTHORIZATION;\n")
	}
	return s
}

// dumpDBConfig returns the ALTER DATABASE / ALTER ROLE IN DATABASE
// statements restoring the per-database settings of dbname, for pg_dump
// versions older than 11.
func dumpDBConfig(db *pg, dbname string) (string, error) {
	var s string

	if dbname == "" {
		return "", fmt.Errorf("empty input dbname")
	}

	// this query only work from 9.0, where pg_db_role_setting was introduced
	if db.version < 90000 {
		return "", &pgVersionError{s: "cluster version is older than 9.0, not dumping database configuration"}
	}

	// this is no longer necessary after 11. 
Dumping ACL is the
	// job of pg_dump so we have to check its version, not the
	// server
	if pgToolVersion("pg_dump") >= 110000 {
		l.Verboseln("no need to dump database configuration with pg_dump from >=11")
		return "", nil
	}

	l.Infoln("dumping database configuration commands of", dbname)
	// dump per database config
	query := "SELECT CASE setrole WHEN 0 THEN NULL ELSE pg_get_userbyid(setrole) END, unnest(setconfig) FROM pg_db_role_setting WHERE setdatabase = (SELECT oid FROM pg_database WHERE datname = $1) ORDER BY 1, 2"
	l.Verboseln("executing SQL query:", query)
	rows, err := db.conn.Query(query, dbname)
	if err != nil {
		return "", fmt.Errorf("could not query database configuration for %s: %s", dbname, err)
	}
	defer rows.Close()

	for rows.Next() {
		var (
			role   pgtype.Text
			keyVal string
		)

		err := rows.Scan(&role, &keyVal)
		if err != nil {
			return "", fmt.Errorf("could not get row: %s", err)
		}

		// split on the first = only: the value itself may contain equal
		// signs (e.g. in search_path or options)
		tokens := strings.SplitN(keyVal, "=", 2)
		if len(tokens) != 2 {
			// a setconfig entry is always key=value, skip anything else
			// instead of panicking on a malformed entry
			l.Warnln("skipping malformed database setting:", keyVal)
			continue
		}

		// do not quote the value for those two parameters
		if tokens[0] != "DateStyle" && tokens[0] != "search_path" {
			tokens[1] = fmt.Sprintf("'%s'", tokens[1])
		}

		if role.Status != pgtype.Null {
			s += fmt.Sprintf("ALTER ROLE \"%s\" IN DATABASE \"%s\" SET \"%s\" TO %s;\n", role.String, dbname, tokens[0], tokens[1])
		} else {
			s += fmt.Sprintf("ALTER DATABASE \"%s\" SET \"%s\" TO %s;\n", dbname, tokens[0], tokens[1])
		}
	}
	err = rows.Err()
	if err != nil {
		return "", fmt.Errorf("could not retrieve rows: %s", err)
	}

	return s, nil
}

// showSettings returns the non-default server settings as a
// postgresql.conf style string. It requires a superuser because the
// settings are read from the configuration files.
func showSettings(db *pg) (string, error) {
	var s, query string

	if db.version < 80400 {
		return "", &pgVersionError{s: "cluster version is older than 8.4, not dumping configuration"}
	}

	if !db.superuser {
		return "", &pgPrivError{s: "current user is not superuser, not dumping configuration"}
	}

	if db.version >= 90500 {
		// use pg_show_all_file_settings() from 9.5+ to get
		// the non default values set in the files and
		// applied, this avoid duplicates when multiple files
		// define parameters.
		query = "SELECT name, setting FROM pg_show_all_file_settings() WHERE applied ORDER BY name"
	} else {
		query = "SELECT name, setting FROM pg_settings WHERE sourcefile IS NOT NULL"
	}

	l.Verboseln("executing SQL query:", query)
	rows, err := db.conn.Query(query)
	if err != nil {
		return "", fmt.Errorf("could not query instance configuration: %s", err)
	}
	defer rows.Close()

	for rows.Next() {
		var (
			name  string
			value string
		)

		err := rows.Scan(&name, &value)
		if err != nil {
			l.Errorln(err)
			continue
		}

		// do not quote the value for those two parameters
		if name != "DateStyle" && name != "search_path" {
			value = fmt.Sprintf("'%s'", value)
		}

		s += fmt.Sprintf("%s = %s\n", name, value)
	}

	err = rows.Err()
	if err != nil {
		return "", fmt.Errorf("could not retrieve rows: %s", err)
	}

	if db.version >= 90500 {
		return s, nil
	} else {
		// when dumping settings from pg_settings, some
		// settings may not be found because their value can
		// set a higher levels than configuration files
		return s, &pgVersionError{s: "cluster version is older than 9.5, settings from configuration files could be missing if the SET command was used"}
	}
}

// extractFileFromSettings returns the contents of the file whose path is
// stored in the given setting (e.g. hba_file), prefixed with a comment
// giving its path on the server.
func extractFileFromSettings(db *pg, name string) (string, error) {
	query := "SELECT setting, pg_read_file(setting, 0, (pg_stat_file(setting)).size) FROM pg_settings WHERE name = $1"

	l.Verboseln("executing SQL query:", query)
	rows, err := db.conn.Query(query, name)
	if err != nil {
		return "", fmt.Errorf("could not query file contents from settings: %s", err)
	}
	defer rows.Close()

	var result string

	for rows.Next() {
		var (
			path     string
			contents string
		)

		err := rows.Scan(&path, &contents)
		if err != nil {
			l.Errorln(err)
			continue
		}

		result = fmt.Sprintf("# path: %s\n%s\n", path, contents)
	}

	err = rows.Err()
	if err != nil {
		return "", fmt.Errorf("could not retrieve rows: %s", err)
	}

	return result, nil
}

// pgReplicaHasLocks is returned when replay cannot be paused because an
// AccessExclusiveLock is currently granted.
type pgReplicaHasLocks struct{}

func (*pgReplicaHasLocks) Error() string {
	return "replication not paused because of AccessExclusiveLock"
}

// pauseReplication pauses WAL replay on a hot standby, unless an
// AccessExclusiveLock is held, in which case it returns a
// *pgReplicaHasLocks error.
func pauseReplication(db *pg) error {
	// If an AccessExclusiveLock is granted when the replay is
	// paused, it will remain and pg_dump would be stuck forever
	query := fmt.Sprintf("SELECT pg_%s_replay_pause() "+
		"WHERE NOT EXISTS (SELECT 1 FROM pg_locks WHERE mode = 'AccessExclusiveLock') "+
		"AND pg_is_in_recovery();", db.xlogOrWal)
	l.Verboseln("executing SQL query:", query)
	rows, err := db.conn.Query(query)
	if err != nil {
		return fmt.Errorf("could not pause replication: %s", err)
	}
	defer rows.Close()

	// The query returns a single row with one column of type void,
	// which is an empty string, on success. It does not return
	// any row on failure
	void := "failed"
	for rows.Next() {
		err := rows.Scan(&void)
		if err != nil {
			return fmt.Errorf("could not get row: %s", err)
		}
	}
	if void == "failed" {
		return &pgReplicaHasLocks{}
	}
	return nil
}

// canPauseReplication tells whether the server is a hot standby exposing
// the replay pause functions.
func canPauseReplication(db *pg) (bool, error) {
	// hot standby exists from 9.0
	if db.version < 90000 {
		return false, nil
	}

	query := fmt.Sprintf("SELECT 1 FROM pg_proc "+
		"WHERE proname='pg_%s_replay_pause' AND pg_is_in_recovery()", db.xlogOrWal)
	l.Verboseln("executing SQL query:", query)
	rows, err := db.conn.Query(query)
	if err != nil {
		return false, fmt.Errorf("could not check if replication is pausable: %s", err)
	}
	defer rows.Close()

	// The query returns 1 on success, no row on failure
	var one int
	for rows.Next() {
		err := rows.Scan(&one)
		if err != nil {
			return false, fmt.Errorf("could not get row: %s", err)
		}
	}
	if one == 0 {
		return false, nil
	}

	return true, nil
}

// pauseReplicationWithTimeout retries pausing replication every 10 seconds
// until it succeeds, fails with a hard error or timeOut seconds have
// elapsed.
func pauseReplicationWithTimeout(db *pg, timeOut int) error {

	if ok, err := canPauseReplication(db); !ok {
		return err
	}

	ticker := time.NewTicker(10 * time.Second)
	// All channels are buffered so that neither side can block forever:
	// with unbuffered channels the goroutine could be stuck sending on
	// done/fail after the timeout fired, and the stop <- true below
	// would then deadlock too
	done := make(chan bool, 1)
	stop := make(chan bool, 1)
	fail := make(chan error, 1)

	l.Infoln("pausing replication")

	// We want to retry pausing replication at a defined interval
	// but not forever. We cannot put the timeout in the same
	// select as the ticker since the ticker would always win
	go func() {
		var rerr *pgReplicaHasLocks
		defer ticker.Stop()

		for {
			if err := pauseReplication(db); err != nil {
				if errors.As(err, &rerr) {
					// locks may go away, keep retrying
					l.Warnln(err)
				} else {
					fail <- err
					return
				}
			} else {
				done <- true
				return
			}

			select {
			case <-stop:
				return
			case <-ticker.C:
			}
		}
	}()

	// Return as soon as the replication is paused or stop the
	// goroutine if we hit the timeout
	select {
	case <-done:
		l.Infoln("replication paused")
	case <-time.After(time.Duration(timeOut) * time.Second):
		stop <- true
		return fmt.Errorf("replication not paused after %v", time.Duration(timeOut)*time.Second)
	case err := <-fail:
		return fmt.Errorf("%s", err)
	}

	return nil
}

// resumeReplication resumes WAL replay on a hot standby.
func resumeReplication(db *pg) error {
	if ok, err := canPauseReplication(db); !ok {
		return err
	}

	l.Infoln("resuming replication")
	query := fmt.Sprintf("SELECT pg_%s_replay_resume() WHERE pg_is_in_recovery();", db.xlogOrWal)
	l.Verboseln("executing SQL query:", query)
	_, err := db.conn.Exec(query)
	if err != nil {
		return fmt.Errorf("could not resume replication: %s", err)
	}

	return nil
}
--------------------------------------------------------------------------------
/sql_test.go:
--------------------------------------------------------------------------------
// pg_back
//
// Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. 
Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // 2. Redistributions in binary form must reproduce the above copyright 12 | // notice, this list of conditions and the following disclaimer in the 13 | // documentation and/or other materials provided with the distribution. 14 | // 15 | // THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 16 | // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 | // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 | // IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 19 | // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | // THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 | 26 | package main 27 | 28 | import ( 29 | "fmt" 30 | "os" 31 | "regexp" 32 | "strings" 33 | "testing" 34 | 35 | "github.com/google/go-cmp/cmp" 36 | "github.com/google/go-cmp/cmp/cmpopts" 37 | ) 38 | 39 | var ( 40 | testdb *pg 41 | ) 42 | 43 | func needPgConn(t *testing.T) { 44 | if os.Getenv("PGBK_TEST_CONNINFO") == "" { 45 | t.Skip("testing with PostgreSQL disabled") 46 | } 47 | 48 | if testdb == nil { 49 | conninfo, err := parseConnInfo(os.Getenv("PGBK_TEST_CONNINFO")) 50 | if err != nil { 51 | t.Fatalf("unable to parse PGBK_TEST_CONNINFO: %s", err) 52 | } 53 | testdb, err = dbOpen(conninfo) 54 | if err != nil { 55 | t.Fatalf("expected an ok on dbOpen(), got %s", err) 56 | } 57 | } 58 | } 59 | 60 | func needPgDump(t *testing.T) { 61 | if pgToolVersion("pg_dump") >= 110000 { 62 | t.Skip("testing with a pg_dump version > 11") 63 | } 64 | } 65 | 66 | func TestSqlQuoteLiteral(t *testing.T) { 67 | var tests = []struct { 68 | input string 69 | want string 70 | }{ 71 | {"", "''"}, 72 | {"'", "''''"}, 73 | {"'; select 1 --", "'''; select 1 --'"}, 74 | {"\\", "E'\\\\'"}, 75 | {"'\\n", "E'''\\\\n'"}, 76 | } 77 | 78 | for i, st := range tests { 79 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 80 | got := sqlQuoteLiteral(st.input) 81 | if got != st.want { 82 | t.Errorf("got '%s', want '%s'", got, st.want) 83 | } 84 | }) 85 | } 86 | } 87 | 88 | func TestSqlQuoteIdent(t *testing.T) { 89 | var tests = []struct { 90 | input string 91 | want string 92 | }{ 93 | {"\"", "\"\""}, 94 | {"", ""}, 95 | {"\"; select 1 --", "\"\"; select 1 --"}, 96 | } 97 | 98 | for i, st := range tests { 99 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 100 | got := sqlQuoteIdent(st.input) 101 | if got != st.want { 102 | t.Errorf("got '%s', want '%s'", got, st.want) 103 | } 104 | }) 105 | } 106 | } 107 | 108 | func TestMakeACLCommands(t *testing.T) { 109 | var tests = []struct { 110 | input string 111 | want string 112 | }{ 113 | {"", ""}, 114 | {"invalid", ""}, 115 | {"=", ""}, 116 | 
{"/", ""}, 117 | {"=c/postgres", "REVOKE ALL ON DATABASE \"testdb\" FROM PUBLIC;\nSET SESSION AUTHORIZATION \"postgres\";\nGRANT CONNECT ON DATABASE \"testdb\" TO \"PUBLIC\";\nRESET SESSION AUTHORIZATION;\n"}, 118 | {"=Tc/postgres", ""}, 119 | {"testrole=CTc/testrole", ""}, 120 | {"testrole=Cc/testrole", "REVOKE ALL ON DATABASE \"testdb\" FROM \"testrole\";\nGRANT CREATE ON DATABASE \"testdb\" TO \"testrole\";\nGRANT CONNECT ON DATABASE \"testdb\" TO \"testrole\";\n"}, 121 | {"other=CT*c/testrole", "GRANT CREATE ON DATABASE \"testdb\" TO \"other\";\nGRANT TEMPORARY ON DATABASE \"testdb\" TO \"other\" WITH GRANT OPTION;\nGRANT CONNECT ON DATABASE \"testdb\" TO \"other\";\n"}, 122 | {"other=T*/testrole", "GRANT TEMPORARY ON DATABASE \"testdb\" TO \"other\" WITH GRANT OPTION;\n"}, 123 | } 124 | 125 | dbname := "testdb" 126 | owner := "testrole" 127 | 128 | for i, st := range tests { 129 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 130 | got := makeACLCommands(st.input, dbname, owner) 131 | if got != st.want { 132 | t.Errorf("got '%s', want '%s'", got, st.want) 133 | } 134 | }) 135 | } 136 | } 137 | 138 | func TestDbOpen(t *testing.T) { 139 | if os.Getenv("PGBK_TEST_CONNINFO") == "" { 140 | t.Skip("testing with PostgreSQL disabled") 141 | } 142 | 143 | conninfo, err := parseConnInfo(os.Getenv("PGBK_TEST_CONNINFO")) 144 | if err != nil { 145 | t.Fatalf("unable to parse PGBK_TEST_CONNINFO: %s", err) 146 | } 147 | db, err := dbOpen(conninfo) 148 | if err != nil { 149 | t.Fatalf("expected an ok on dbOpen(), got %s", err) 150 | } 151 | if err := db.Close(); err != nil { 152 | t.Errorf("expected an okon db.Close(), got %s", err) 153 | } 154 | 155 | testdb, err = dbOpen(conninfo) 156 | if err != nil { 157 | t.Fatalf("expected an ok on dbOpen(), got %s", err) 158 | } 159 | } 160 | 161 | func TestListAllDatabases(t *testing.T) { 162 | var tests = []struct { 163 | templates bool 164 | want []string 165 | }{ 166 | {false, []string{"b1", "b2", "postgres"}}, 167 | {true, 
[]string{"b1", "b2", "postgres", "template1"}}, 168 | } 169 | 170 | needPgConn(t) 171 | 172 | for i, st := range tests { 173 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 174 | got, err := listAllDatabases(testdb, st.templates) 175 | if err != nil { 176 | t.Errorf("expected non nil error, got %q", err) 177 | } 178 | 179 | // sort result before comparing because we do not use order by in the queries 180 | if diff := cmp.Diff(st.want, got, cmpopts.EquateEmpty(), cmpopts.SortSlices(func(x, y string) bool { return x < y })); diff != "" { 181 | t.Errorf("listAllDatabases() mismatch (-want +got):\n%s", diff) 182 | } 183 | }) 184 | } 185 | } 186 | 187 | func TestListDatabases(t *testing.T) { 188 | var tests = []struct { 189 | withTemplates bool 190 | excludedDbs []string 191 | includedDbs []string 192 | want []string 193 | }{ 194 | {false, []string{}, []string{}, []string{"b1", "b2", "postgres"}}, 195 | {true, []string{}, []string{}, []string{"b1", "b2", "postgres", "template1"}}, 196 | {true, []string{}, []string{"b1", "postgres"}, []string{"b1", "postgres"}}, 197 | {false, []string{}, []string{"b2", "template1"}, []string{"b2", "template1"}}, 198 | {false, []string{}, []string{"b2", "b3"}, []string{"b2"}}, 199 | {true, []string{"b1", "b3"}, []string{}, []string{"b2", "postgres", "template1"}}, 200 | {false, []string{"b1", "b3"}, []string{}, []string{"b2", "postgres"}}, 201 | {false, []string{"b1", "b3"}, []string{"b1", "b2", "template1"}, []string{"b2", "template1"}}, 202 | } 203 | 204 | needPgConn(t) 205 | 206 | for i, st := range tests { 207 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 208 | got, err := listDatabases(testdb, st.withTemplates, st.excludedDbs, st.includedDbs) 209 | if err != nil { 210 | t.Errorf("expected non nil error, got %q", err) 211 | } 212 | 213 | if diff := cmp.Diff(st.want, got, cmpopts.EquateEmpty(), cmpopts.SortSlices(func(x, y string) bool { return x < y })); diff != "" { 214 | t.Errorf("listDatabases() mismatch (-want +got):\n%s", 
diff) 215 | } 216 | }) 217 | } 218 | } 219 | 220 | func TestDumpDBConfig(t *testing.T) { 221 | var tests = []struct { 222 | want string 223 | }{ 224 | {"ALTER ROLE \"u1\" IN DATABASE \"b1\" SET \"work_mem\" TO '1MB';\nALTER DATABASE \"b1\" SET \"log_min_duration_statement\" TO '10s';\nALTER DATABASE \"b1\" SET \"work_mem\" TO '5MB';\n"}, 225 | } 226 | 227 | needPgConn(t) 228 | needPgDump(t) 229 | 230 | for i, st := range tests { 231 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 232 | got, err := dumpDBConfig(testdb, "b1") 233 | if err != nil { 234 | t.Errorf("expected non nil error, got %q", err) 235 | } 236 | 237 | if diff := cmp.Diff(st.want, got, cmpopts.EquateEmpty()); diff != "" { 238 | t.Errorf("dumpDBConfig() mismatch (-want +got):\n%s", diff) 239 | } 240 | }) 241 | } 242 | } 243 | 244 | func TestShowSettings(t *testing.T) { 245 | needPgConn(t) 246 | 247 | got, err := showSettings(testdb) 248 | if err != nil { 249 | t.Errorf("expected non nil error, got %q", err) 250 | } 251 | // we cannot exactly test the content, it depends on the version of PostgreSQL 252 | if got == "" { 253 | t.Errorf("expected some data, got nothing") 254 | } else { 255 | p := strings.Split(got, "\n") 256 | re := regexp.MustCompile(`^([\.\w]+) = '(.*)'$`) 257 | for _, v := range p { 258 | if v == "" { 259 | continue 260 | } 261 | if !re.MatchString(v) { 262 | if !strings.HasPrefix(v, "DateStyle") && !strings.HasPrefix(v, "search_path") { 263 | t.Errorf("got misformed parameter: %s", v) 264 | } 265 | } 266 | } 267 | } 268 | } 269 | 270 | func TestDumpCreateDBAndACL(t *testing.T) { 271 | needPgConn(t) 272 | needPgDump(t) 273 | 274 | var tests = []struct { 275 | db string 276 | want string 277 | }{ 278 | {"b1", "--\n-- Database creation\n--\n\nCREATE DATABASE \"b1\" WITH TEMPLATE = template0 OWNER = \"u1\" ENCODING = 'UTF8' LC_COLLATE = 'en_US.UTF-8' LC_CTYPE = 'en_US.UTF-8';\n\n"}, 279 | {"b2", "--\n-- Database creation\n--\n\nCREATE DATABASE \"b2\" WITH TEMPLATE = template0 OWNER = 
\"u1\" ENCODING = 'UTF8' LC_COLLATE = 'en_US.UTF-8' LC_CTYPE = 'en_US.UTF-8';\n\n--\n-- Database privileges \n--\n\nREVOKE CONNECT, TEMPORARY ON DATABASE \"b2\" FROM PUBLIC;\nGRANT CONNECT ON DATABASE \"b2\" TO \"u2\";\n"}, 280 | } 281 | 282 | for _, st := range tests { 283 | t.Run(fmt.Sprintf("%s", st.db), func(t *testing.T) { 284 | got, err := dumpCreateDBAndACL(testdb, st.db, false) 285 | if err != nil { 286 | t.Errorf("expected non nil error, got %q", err) 287 | } 288 | 289 | if diff := cmp.Diff(st.want, got, cmpopts.EquateEmpty()); diff != "" { 290 | t.Errorf("dumpCreateDBAndACL() mismatch (-want +got):\n%s", diff) 291 | } 292 | }) 293 | } 294 | } 295 | 296 | func TestExtractFileFromSettings(t *testing.T) { 297 | needPgConn(t) 298 | 299 | got, err := extractFileFromSettings(testdb, "hba_file") 300 | if err != nil { 301 | t.Errorf("expected non nil error, got %q", err) 302 | } 303 | 304 | if got == "" { 305 | t.Errorf("expected some data, got nothing") 306 | } else { 307 | c := strings.Split(got, "\n") 308 | re := regexp.MustCompile(`^# path: \S+`) 309 | if !re.MatchString(c[0]) { 310 | t.Errorf("excepted string matching \"^# path: \\S+\", got %s", c[0]) 311 | } 312 | } 313 | } 314 | 315 | // Testing replication management fonctions needs a more complex setup 316 | // so we skip it. 
317 | -------------------------------------------------------------------------------- /testdata/fixture.sql: -------------------------------------------------------------------------------- 1 | CREATE ROLE u1 LOGIN PASSWORD 'u1'; 2 | CREATE DATABASE b1 OWNER u1; 3 | ALTER ROLE u1 IN DATABASE b1 SET work_mem TO '1MB'; 4 | ALTER ROLE u1 SET temp_buffers TO '32MB'; 5 | ALTER DATABASE b1 SET work_mem TO '5MB'; 6 | ALTER DATABASE b1 SET log_min_duration_statement TO '10s'; 7 | 8 | CREATE ROLE u2 LOGIN PASSWORD 'u2'; 9 | CREATE DATABASE b2 OWNER u1; 10 | REVOKE ALL ON DATABASE b2 FROM PUBLIC; 11 | GRANT CONNECT ON DATABASE b2 TO u2; 12 | 13 | \c b1 14 | 15 | SET ROLE u1; 16 | CREATE TABLE t1 AS SELECT generate_series(0, 9) i; 17 | CREATE TABLE t2 AS SELECT generate_series(10, 19) j; 18 | CREATE TABLE t3 AS SELECT generate_series(0, 9) i; 19 | CREATE TABLE t4 AS SELECT generate_series(10, 19) j; 20 | 21 | \c b2 22 | 23 | SET ROLE u2; 24 | CREATE TABLE t1 AS SELECT generate_series(0, 9) i; 25 | CREATE TABLE t2 AS SELECT generate_series(10, 19) j; 26 | CREATE TABLE t3 AS SELECT generate_series(0, 9) i; 27 | CREATE TABLE t4 AS SELECT generate_series(10, 19) j; 28 | -------------------------------------------------------------------------------- /upload_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os/user" 6 | "path/filepath" 7 | "runtime" 8 | "testing" 9 | ) 10 | 11 | func TestExpandHomeDir(t *testing.T) { 12 | u, err := user.Current() 13 | if err != nil { 14 | t.Errorf("could not get current user: %s", err) 15 | } 16 | 17 | var tests = []struct { 18 | input string 19 | want string 20 | }{ 21 | {"", "."}, 22 | {"/truc/truc/../muche", "/truc/muche"}, 23 | {"./truc/muche", "truc/muche"}, 24 | {"~/truc/muche/dir", filepath.Clean(filepath.Join(u.HomeDir, "/truc/muche/dir"))}, 25 | {fmt.Sprintf("~%s/truc/muche", u.Username), filepath.Clean(filepath.Join(u.HomeDir, "/truc/muche"))}, 26 | 
} 27 | 28 | if runtime.GOOS == "windows" { 29 | tests = []struct { 30 | input string 31 | want string 32 | }{ 33 | {"", "."}, 34 | {"/truc/truc/../muche", "\\truc\\muche"}, 35 | {"./truc/muche", "truc\\muche"}, 36 | {"~/truc/muche/dir", filepath.Clean(filepath.Join(u.HomeDir, "/truc/muche/dir"))}, 37 | {fmt.Sprintf("~%s/truc/muche", u.Username), filepath.Clean(filepath.Join(u.HomeDir, "/truc/muche"))}, 38 | } 39 | } 40 | 41 | for i, st := range tests { 42 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 43 | got, err := expandHomeDir(st.input) 44 | if err != nil { 45 | t.Errorf("unexpected error: %v", err) 46 | } 47 | 48 | if got != st.want { 49 | t.Errorf("got: %v, want %v", got, st.want) 50 | } 51 | }) 52 | } 53 | } 54 | 55 | func TestRelPath(t *testing.T) { 56 | var tests = []struct { 57 | basedir string 58 | path string 59 | want string 60 | }{ 61 | {"/var/truc/dir", "/var/truc/dir/dump.d/file", "dump.d/file"}, 62 | {"/var/{dbname}/dir", "/var/b1/dir/b1.dump", "b1/dir/b1.dump"}, 63 | } 64 | 65 | if runtime.GOOS == "windows" { 66 | tests = []struct { 67 | basedir string 68 | path string 69 | want string 70 | }{ 71 | {"C:\\var\\truc\\dir", "C:\\var\\truc\\dir\\dump.d\\file", "dump.d\\file"}, 72 | {"C:\\var\\{dbname}\\dir", "C:\\var\\b1\\dir\\b1.dump", "b1\\dir\\b1.dump"}, 73 | } 74 | } 75 | 76 | for i, st := range tests { 77 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 78 | got := relPath(st.basedir, st.path) 79 | if got != st.want { 80 | t.Errorf("got: %v, want %v", got, st.want) 81 | } 82 | }) 83 | } 84 | } 85 | 86 | func TestForwardSlashes(t *testing.T) { 87 | var tests = []struct { 88 | path string 89 | want string 90 | }{ 91 | {"/var/truc/dir", "/var/truc/dir"}, 92 | } 93 | 94 | if runtime.GOOS == "windows" { 95 | tests = []struct { 96 | path string 97 | want string 98 | }{ 99 | {"b1\\dir\\b1.dump", "b1/dir/b1.dump"}, 100 | } 101 | } 102 | 103 | for i, st := range tests { 104 | t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { 105 | got := 
forwardSlashes(st.path) 106 | if got != st.want { 107 | t.Errorf("got: %v, want %v", got, st.want) 108 | } 109 | }) 110 | } 111 | } 112 | --------------------------------------------------------------------------------