├── markdown.template ├── .gitmodules ├── terraform ├── instance │ ├── output.tf │ ├── variables.tf │ ├── data.tf │ ├── main.tf │ └── user_data.sh └── storage │ ├── variables.tf │ └── main.tf ├── hetzner-rds-postgres.png ├── .gitignore ├── bin ├── recovery_complete.sh └── run.sh ├── templates ├── config │ ├── pgbackrest.conf │ ├── postgresql.conf │ └── pg_hba.conf └── bin │ └── backup.sh ├── test ├── Gemfile ├── runner.rb ├── rds │ ├── spec_helper.rb │ ├── docker-compose.yml │ └── rds_spec.rb └── Gemfile.lock ├── hugo.template ├── Dockerfile ├── do ├── README.md └── hetzner-rds-postgres.json /markdown.template: -------------------------------------------------------------------------------- 1 | ``` 2 | {{.Content}} 3 | ``` 4 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "ctuhl"] 2 | path = ctuhl 3 | url = git@github.com:pellepelster/ctuhl.git 4 | -------------------------------------------------------------------------------- /terraform/instance/output.tf: -------------------------------------------------------------------------------- 1 | output "public_ip" { 2 | value = hcloud_floating_ip.floating_ip.ip_address 3 | } -------------------------------------------------------------------------------- /hetzner-rds-postgres.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pellepelster/hetzner-rds-postgres/HEAD/hetzner-rds-postgres.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .terraform 3 | .terraform.lock.hcl 4 | terraform.tfstate 5 | terraform.tfstate.backup 6 | .bundle 7 | .vendor 8 | reports 9 | -------------------------------------------------------------------------------- /bin/recovery_complete.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail -o errexit -o nounset 4 | 5 | echo "complete" > /rds/run/recovery_complete 6 | -------------------------------------------------------------------------------- /terraform/storage/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cloud_api_token" {} 2 | 3 | variable "rds_instance_id" {} 4 | 5 | variable "location" { 6 | default = "fsn1" 7 | } -------------------------------------------------------------------------------- /templates/config/pgbackrest.conf: -------------------------------------------------------------------------------- 1 | [global] 2 | repo-path={{ .Env.BACKUP_DIR }}/{{ .Env.DB_INSTANCE_ID }} 3 | 4 | [{{ .Env.DB_INSTANCE_ID }}] 5 | pg1-path={{ .Env.INSTANCE_DATA_DIR }} 6 | pg1-socket-path=/rds/socket 7 | -------------------------------------------------------------------------------- /templates/bin/backup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail -o errexit -o nounset 4 | 5 | pgbackrest --config /rds/config/pgbackrest.conf --log-level-console=info --log-path=/rds/log --stanza={{ .Env.DB_INSTANCE_ID }} backup --type=full -------------------------------------------------------------------------------- /test/Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | 3 | gem 
'docker-api' 4 | gem 'docker-compose' 5 | gem 'faraday' 6 | gem 'rake' 7 | gem 'minitest' 8 | gem 'minitest-hooks' 9 | gem 'minitest-reporters' 10 | gem 'pg' 11 | gem 'rubocop' 12 | -------------------------------------------------------------------------------- /test/runner.rb: -------------------------------------------------------------------------------- 1 | scope = ARGV.shift 2 | 3 | $LOAD_PATH << File.join(__dir__, '../', 'ctuhl/lib/ruby') 4 | $LOAD_PATH << File.join(__dir__, scope) 5 | 6 | Dir[File.join(__dir__, scope, '**', '*_spec.rb')].each do |test| 7 | require_relative test 8 | end -------------------------------------------------------------------------------- /templates/config/postgresql.conf: -------------------------------------------------------------------------------- 1 | archive_command = 'pgbackrest --config /rds/config/pgbackrest.conf --log-level-console=info --log-path=/rds/log --stanza={{ .Env.DB_INSTANCE_ID }} archive-push %p' 2 | archive_mode = on 3 | max_wal_senders = 3 4 | wal_level = replica 5 | listen_addresses = '*' 6 | unix_socket_directories = '/rds/socket' 7 | -------------------------------------------------------------------------------- /test/rds/spec_helper.rb: -------------------------------------------------------------------------------- 1 | require 'minitest/autorun' 2 | require 'minitest/reporters' 3 | require 'minitest/hooks/default' 4 | require 'backticks' 5 | require 'docker' 6 | require 'compose_wrapper' 7 | require 'retry_until' 8 | 9 | Minitest::Reporters.use! [Minitest::Reporters::DefaultReporter.new, Minitest::Reporters::JUnitReporter.new(reports_dir = 'reports')] -------------------------------------------------------------------------------- /hugo.template: -------------------------------------------------------------------------------- 1 | {{ if .IsFullFile }} 2 | {{`{{< github repository="pellepelster/hetzner-rds-postgres"`}} file="{{.Filename}}" >}}{{.Filename}}{{`{{< /github >}}`}} 3 | {{ else }} 4 | {{`{{< github repository="pellepelster/hetzner-rds-postgres"`}} file="{{.Filename}}#L{{.Start}}-L{{.End}}" >}}{{.Filename}}{{`{{< /github >}}`}} 5 | {{ end }} 6 | {{`{{< highlight go "" >}}`}} 7 | {{.Content}} 8 | {{`{{< / highlight >}}`}} -------------------------------------------------------------------------------- /templates/config/pg_hba.conf: -------------------------------------------------------------------------------- 1 | # TYPE DATABASE USER ADDRESS METHOD 2 | local all all trust 3 | host all all 127.0.0.1/32 trust 4 | host all all ::1/128 trust 5 | host all rds 0.0.0.0/0 md5 6 | host all {{ .Env.DB_USERNAME }} 0.0.0.0/0 md5 7 | -------------------------------------------------------------------------------- /terraform/instance/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cloud_api_token" {} 2 | 3 | variable "location" { 4 | default = "fsn1" 5 | } 6 | 7 | variable "github_token" {} 8 | variable "github_owner" {} 9 | 10 | variable "rds_instance_id" {} 11 | 12 | variable "ssh_identity_ecdsa_key" {} 13 | variable "ssh_identity_ecdsa_pub" {} 14 | 15 | variable "ssh_identity_rsa_key" {} 16 | variable "ssh_identity_rsa_pub" {} 17 | 18 | variable "ssh_identity_ed25519_key" {} 19 | variable "ssh_identity_ed25519_pub" {} -------------------------------------------------------------------------------- /terraform/storage/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hcloud = { 4 | 
source = "terraform-providers/hcloud" 5 | version = "1.23.0" 6 | } 7 | } 8 | 9 | required_version = ">= 0.13" 10 | } 11 | 12 | provider "hcloud" { 13 | token = var.cloud_api_token 14 | } 15 | 16 | # snippet:terraform_data_volumes 17 | resource "hcloud_volume" "data" { 18 | name = "${var.rds_instance_id}-data" 19 | size = 64 20 | format = "ext4" 21 | location = var.location 22 | } 23 | 24 | resource "hcloud_volume" "backup" { 25 | name = "${var.rds_instance_id}-backup" 26 | size = 64 27 | format = "ext4" 28 | location = var.location 29 | } 30 | # /snippet:terraform_data_volumes 31 | -------------------------------------------------------------------------------- /test/rds/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | rds-test1: 4 | image: hetzner-rds-postgres 5 | environment: 6 | - "DB_INSTANCE_ID=test1" 7 | - "DB_PASSWORD=password1" 8 | ports: 9 | - "5432" 10 | volumes: 11 | - "rds-data:/storage/data" 12 | - "rds-backup:/storage/backup" 13 | 14 | rds-test1-no-password: 15 | image: hetzner-rds-postgres 16 | environment: 17 | - "DB_INSTANCE_ID=test1" 18 | ports: 19 | - "5432" 20 | 21 | rds-test1-no-instance-id: 22 | image: hetzner-rds-postgres 23 | environment: 24 | - "DB_PASSWORD=password1" 25 | ports: 26 | - "5432" 27 | 28 | volumes: 29 | rds-data: 30 | rds-backup: 31 | -------------------------------------------------------------------------------- /terraform/instance/data.tf: -------------------------------------------------------------------------------- 1 | data "template_file" "user_data" { 2 | 3 | template = file("user_data.sh") 4 | 5 | vars = { 6 | public_ip = hcloud_floating_ip.floating_ip.ip_address 7 | 8 | github_owner = var.github_owner 9 | github_token = var.github_token 10 | 11 | # snippet:terraform_user_data_template 12 | storage_device_data = data.hcloud_volume.data.linux_device 13 | storage_device_backup = data.hcloud_volume.backup.linux_device 14 | # /snippet:terraform_user_data_template 15 | 16 | rds_instance_id = var.rds_instance_id 17 | 18 | ssh_identity_ecdsa_key = var.ssh_identity_ecdsa_key 19 | ssh_identity_ecdsa_pub = var.ssh_identity_ecdsa_pub 20 | 21 | ssh_identity_rsa_key = var.ssh_identity_rsa_key 22 | ssh_identity_rsa_pub = var.ssh_identity_rsa_pub 23 | 24 | ssh_identity_ed25519_key = var.ssh_identity_ed25519_key 25 | ssh_identity_ed25519_pub = var.ssh_identity_ed25519_pub 26 | } 27 | } 28 | 29 | # snippet:terraform_data_volumes_loookup 30 | data "hcloud_volume" "data" { 31 | name = "${var.rds_instance_id}-data" 32 | } 33 | 34 | data "hcloud_volume" "backup" { 35 | name = "${var.rds_instance_id}-backup" 36 | } 37 | # /snippet:terraform_data_volumes_loookup 38 | 39 | -------------------------------------------------------------------------------- /terraform/instance/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | 4 | hcloud = { 5 | source = "hetznercloud/hcloud" 6 | version = "1.23.0" 7 | } 8 | 9 | template = { 10 | source = "hashicorp/template" 11 | } 12 | } 13 | 14 | required_version = ">= 0.13" 15 | } 16 | 17 | provider "hcloud" { 18 | token = var.cloud_api_token 19 | } 20 | 21 | resource "hcloud_server" "instance" { 22 | name = var.rds_instance_id 23 | image = "debian-10" 24 | server_type = "cx11" 25 | location = var.location 26 | user_data = data.template_file.user_data.rendered 27 | ssh_keys = [ 28 | hcloud_ssh_key.id_rsa.id] 29 | } 30 | 31 | resource "hcloud_floating_ip_assignment" 
"ip_assignment" { 32 | floating_ip_id = hcloud_floating_ip.floating_ip.id 33 | server_id = hcloud_server.instance.id 34 | } 35 | 36 | resource "hcloud_floating_ip" "floating_ip" { 37 | name = "${var.rds_instance_id}" 38 | type = "ipv4" 39 | home_location = var.location 40 | } 41 | 42 | resource "hcloud_ssh_key" "id_rsa" { 43 | name = "id_rsa" 44 | public_key = file("~/.ssh/id_rsa.pub") 45 | } 46 | 47 | resource "hcloud_volume_attachment" "data" { 48 | volume_id = data.hcloud_volume.data.id 49 | server_id = hcloud_server.instance.id 50 | } 51 | 52 | resource "hcloud_volume_attachment" "backup" { 53 | volume_id = data.hcloud_volume.backup.id 54 | server_id = hcloud_server.instance.id 55 | } 56 | -------------------------------------------------------------------------------- /test/Gemfile.lock: -------------------------------------------------------------------------------- 1 | GEM 2 | remote: https://rubygems.org/ 3 | specs: 4 | ansi (1.5.0) 5 | ast (2.4.1) 6 | backticks (1.0.2) 7 | builder (3.2.4) 8 | docker-api (2.0.0) 9 | excon (>= 0.47.0) 10 | multi_json 11 | docker-compose (1.1.12) 12 | backticks (~> 1.0) 13 | excon (0.78.1) 14 | faraday (1.1.0) 15 | multipart-post (>= 1.2, < 3) 16 | ruby2_keywords 17 | minitest (5.14.2) 18 | minitest-hooks (1.5.0) 19 | minitest (> 5.3) 20 | minitest-reporters (1.4.2) 21 | ansi 22 | builder 23 | minitest (>= 5.0) 24 | ruby-progressbar 25 | multi_json (1.15.0) 26 | multipart-post (2.1.1) 27 | parallel (1.20.1) 28 | parser (2.7.2.0) 29 | ast (~> 2.4.1) 30 | pg (1.2.3) 31 | rainbow (3.0.0) 32 | rake (13.0.1) 33 | regexp_parser (2.0.0) 34 | rexml (3.2.4) 35 | rubocop (1.6.1) 36 | parallel (~> 1.10) 37 | parser (>= 2.7.1.5) 38 | rainbow (>= 2.2.2, < 4.0) 39 | regexp_parser (>= 1.8, < 3.0) 40 | rexml 41 | rubocop-ast (>= 1.2.0, < 2.0) 42 | ruby-progressbar (~> 1.7) 43 | unicode-display_width (>= 1.4.0, < 2.0) 44 | rubocop-ast (1.3.0) 45 | parser (>= 2.7.1.5) 46 | ruby-progressbar (1.10.1) 47 | ruby2_keywords (0.0.2) 48 | unicode-display_width (1.7.0) 49 | 50 | PLATFORMS 51 | ruby 52 | 53 | DEPENDENCIES 54 | docker-api 55 | docker-compose 56 | faraday 57 | minitest 58 | minitest-hooks 59 | minitest-reporters 60 | pg 61 | rake 62 | rubocop 63 | 64 | BUNDLED WITH 65 | 2.1.4 66 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:buster-slim 2 | 3 | ENV DB_INSTANCE_ID="" 4 | ENV DB_PASSWORD="" 5 | 6 | ENV USER=rds 7 | ENV USER_ID=4000 8 | ENV USER_GID=4000 9 | ENV DATA_DIR=/storage/data 10 | ENV BACKUP_DIR=/storage/backup 11 | 12 | ENV GOMPLATE_VERION="v3.8.0" 13 | ENV GOMPLATE_CHECKSUM="847f7d9fc0dc74c33188c2b0d0e9e4ed9204f67c36da5aacbab324f8bfbf29c9" 14 | 15 | # snippet:docker_install_packages 16 | ENV DEBIAN_FRONTEND=noninteractive 17 | 18 | RUN apt-get update && \ 19 | apt-get dist-upgrade --assume-yes --quiet && \ 20 | apt-get --assume-yes --quiet --no-install-recommends install \ 21 | postgresql-11 \ 22 | curl \ 23 | ca-certificates \ 24 | jq \ 25 | uuid-runtime \ 26 | pgbackrest \ 27 | libdbd-pg-perl \ 28 | libpq-dev 29 | # /snippet:docker_install_packages 30 | 31 | # snippet:docker_install_gomplate 32 | RUN curl -L -o /usr/local/bin/gomplate https://github.com/hairyhenderson/gomplate/releases/download/${GOMPLATE_VERION}/gomplate_linux-amd64-slim && \ 33 | echo "${GOMPLATE_CHECKSUM}" /usr/local/bin/gomplate | sha256sum -c && \ 34 | chmod +x /usr/local/bin/gomplate 35 | # /snippet:docker_install_gomplate 36 | 37 | # 
snippet:docker_user 38 | RUN groupadd --gid "${USER_GID}" "${USER}" && \ 39 | useradd \ 40 | --uid ${USER_ID} \ 41 | --gid ${USER_GID} \ 42 | --create-home \ 43 | --home-dir /${USER} \ 44 | --shell /bin/bash \ 45 | ${USER} 46 | # /snippet:docker_user 47 | 48 | RUN mkdir -p ${DATA_DIR} && chown -R ${USER}:${USER} ${DATA_DIR} && chmod -R 700 ${DATA_DIR} 49 | RUN mkdir -p ${BACKUP_DIR} && chown -R ${USER}:${USER} ${BACKUP_DIR} && chmod -R 700 ${BACKUP_DIR} 50 | 51 | COPY bin /rds/bin 52 | RUN chmod -R 700 /rds && chown -R ${USER}:${USER} /rds 53 | 54 | USER ${USER} 55 | WORKDIR /rds 56 | 57 | EXPOSE 5432 58 | 59 | COPY templates /rds/templates 60 | 61 | CMD /rds/bin/run.sh -------------------------------------------------------------------------------- /test/rds/rds_spec.rb: -------------------------------------------------------------------------------- 1 | require 'spec_helper' 2 | require 'securerandom' 3 | require 'ipaddr' 4 | require 'faraday' 5 | require 'timeout' 6 | require 'net/http' 7 | require 'pg' 8 | require 'rubygems' 9 | require 'net/http' 10 | require 'test_utils' 11 | 12 | describe 'hetzner-rds-postgres' do 13 | 14 | # snippet:test_volume_helper 15 | def remove_data_volume 16 | `docker volume rm -f rds_rds-data` 17 | end 18 | 19 | def remove_backup_volume 20 | `docker volume rm -f rds_rds-backup` 21 | end 22 | # /snippet:test_volume_helper 23 | 24 | def wait_for_server_start(service) 25 | wait_while { 26 | !@compose.logs(service).include? 'database system is ready to accept connections' 27 | } 28 | 29 | host, port = @compose.address(service, 5432) 30 | wait_while { 31 | !is_port_open?(host, port) 32 | } 33 | 34 | sleep 5 35 | end 36 | 37 | def clean_start(service) 38 | @compose.force_shutdown 39 | 40 | remove_data_volume 41 | remove_backup_volume 42 | 43 | @compose.up(service, detached: true) 44 | host, port = @compose.address(service, 5432) 45 | 46 | wait_while { 47 | !@compose.logs(service).include? 'backup command end: completed successfully' 48 | } 49 | 50 | wait_while { 51 | !is_port_open?(host, port) 52 | } 53 | 54 | return host, port 55 | end 56 | 57 | before(:all) do 58 | @compose ||= ComposeWrapper.new('rds/docker-compose.yml') 59 | end 60 | 61 | after(:all) do 62 | #@compose.dump_logs 63 | @compose.force_shutdown 64 | end 65 | 66 | it 'can connect with user test1 to database test1' do 67 | @compose.up('rds-test1-no-instance-id', detached: true) 68 | 69 | wait_while { 70 | !@compose.logs('rds-test1-no-instance-id').include? 
'DB_INSTANCE_ID not set or empty, exiting' 71 | } 72 | end 73 | 74 | it 'can connect with user test1 to database test1' do 75 | host, port = clean_start('rds-test1') 76 | conn = PG::Connection.new(host, port, '', '', 'test1', 'test1', 'password1') 77 | conn.exec('SELECT version();') 78 | conn.close 79 | end 80 | 81 | it "does not allow empty passwords" do 82 | err = assert_raises PG::ConnectionBad do 83 | 84 | host, port = clean_start('rds-test1-no-password') 85 | conn = PG::Connection.new(host, port, '', '', 'test1', 'test1', ' ') 86 | conn.exec('SELECT version();') 87 | conn.close 88 | end 89 | end 90 | 91 | it 'does not allow empty passwords' do 92 | end 93 | 94 | it 'keeps data after restart' do 95 | host, port = clean_start('rds-test1') 96 | conn = PG::Connection.new(host, port, '', '', 'test1', 'test1', 'password1') 97 | 98 | conn.exec('CREATE TABLE pets (id SERIAL PRIMARY KEY, name VARCHAR(64) NOT NULL);') 99 | 100 | petname = SecureRandom.uuid 101 | conn.exec("INSERT INTO pets (name) VALUES ('#{petname}');") 102 | 103 | pets = conn.exec("SELECT * FROM pets;").map { |row| row['name'] } 104 | assert_includes(pets, petname) 105 | conn.close 106 | 107 | @compose.kill('rds-test1') 108 | wait_while { 109 | !@compose.logs('rds-test1').include? 'database system is shut down' 110 | } 111 | @compose.rm('rds-test1', force: true) 112 | @compose.up('rds-test1', detached: true) 113 | 114 | host, port = @compose.address('rds-test1', 5432) 115 | wait_for_server_start('rds-test1') 116 | 117 | conn = PG::Connection.new(host, port, '', '', 'test1', 'test1', 'password1') 118 | pets = conn.exec("SELECT * FROM pets;").map { |row| row['name'] } 119 | assert_includes(pets, petname) 120 | end 121 | 122 | it 'restores latest data from backup' do 123 | # snippet:test_restore_setup 124 | host, port = clean_start('rds-test1') 125 | conn = PG::Connection.new(host, port, '', '', 'test1', 'test1', 'password1') 126 | 127 | conn.exec('CREATE TABLE pets (id SERIAL PRIMARY KEY, name VARCHAR(64) NOT NULL);') 128 | 129 | petname = SecureRandom.uuid 130 | conn.exec("INSERT INTO pets (name) VALUES ('#{petname}');") 131 | 132 | pets = conn.exec("SELECT * FROM pets;").map { |row| row['name'] } 133 | assert_includes(pets, petname) 134 | conn.close 135 | # /snippet:test_restore_setup 136 | 137 | # snippet:test_restore_destroy 138 | @compose.exec('rds-test1', '/rds/bin/backup.sh') 139 | 140 | # stopping instance and remove data volume 141 | @compose.kill('rds-test1') 142 | wait_while { 143 | !@compose.logs('rds-test1').include? 
'database system is shut down' 144 | } 145 | @compose.rm('rds-test1', force: true) 146 | remove_data_volume 147 | # /snippet:test_restore_destroy 148 | 149 | # snippet:test_restore_verify 150 | @compose.up('rds-test1', detached: true) 151 | 152 | 153 | host, port = @compose.address('rds-test1', 5432) 154 | wait_for_server_start('rds-test1') 155 | 156 | conn = PG::Connection.new(host, port, '', '', 'test1', 'test1', 'password1') 157 | pets = conn.exec("SELECT * FROM pets;").map { |row| row['name'] } 158 | assert_includes(pets, petname) 159 | # /snippet:test_restore_verify 160 | 161 | end 162 | end -------------------------------------------------------------------------------- /terraform/instance/user_data.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # ${ uuid()} 4 | 5 | set -o pipefail -o errexit -o nounset 6 | 7 | export DEBIAN_FRONTEND=noninteractive 8 | 9 | echo "${ssh_identity_ecdsa_key}" | base64 -d > /etc/ssh/ssh_host_ecdsa_key 10 | chmod 600 /etc/ssh/ssh_host_ecdsa_key 11 | echo "${ssh_identity_ecdsa_pub}" | base64 -d > /etc/ssh/ssh_host_ecdsa_key.pub 12 | 13 | echo "${ssh_identity_rsa_key}" | base64 -d > /etc/ssh/ssh_host_rsa_key 14 | chmod 600 /etc/ssh/ssh_host_rsa_key.pub 15 | echo "${ssh_identity_rsa_pub}" | base64 -d > /etc/ssh/ssh_host_rsa_key.pub 16 | 17 | echo "${ssh_identity_ed25519_key}" | base64 -d > /etc/ssh/ssh_host_ed25519_key 18 | chmod 600 /etc/ssh/ssh_host_ed25519_key.pub 19 | echo "${ssh_identity_ed25519_pub}" | base64 -d > /etc/ssh/ssh_host_ed25519_key.pub 20 | 21 | function docker_login { 22 | echo "${github_token}" | docker login https://docker.pkg.github.com -u ${github_owner} --password-stdin 23 | } 24 | 25 | # snippet:terraform_data_volumes_mount 26 | function mount_storage_backup { 27 | echo "${storage_device_backup} /storage/backup ext4 defaults 0 0" >> /etc/fstab 28 | mkdir -p "/storage/backup" 29 | mount "/storage/backup" 30 | 31 | chown 4000:4000 "/storage/backup" 32 | } 33 | 34 | function mount_storage_data { 35 | echo "${storage_device_data} /storage/data ext4 defaults 0 0" >> /etc/fstab 36 | mkdir -p "/storage/data" 37 | mount "/storage/data" 38 | 39 | chown 4000:4000 "/storage/data" 40 | } 41 | # /snippet:terraform_data_volumes_mount 42 | 43 | function configure_public_ip { 44 | ip addr add ${public_ip} dev eth0 45 | } 46 | 47 | function update_system { 48 | apt-get update 49 | 50 | apt-get \ 51 | -o Dpkg::Options::="--force-confnew" \ 52 | --force-yes \ 53 | -fuy \ 54 | dist-upgrade 55 | } 56 | 57 | function install_prerequisites { 58 | apt-get install --no-install-recommends -qq -y \ 59 | docker.io \ 60 | docker-compose \ 61 | gnupg2 \ 62 | pass \ 63 | ufw \ 64 | uuid 65 | } 66 | 67 | function configure_ufw { 68 | ufw enable 69 | ufw allow ssh 70 | ufw allow 5432 71 | } 72 | 73 | # snippet:rds_service_backup_systemd_config 74 | function rds_service_backup_systemd_config { 75 | cat <<-EOF 76 | [Unit] 77 | Description=rds instance %i backup 78 | Requires=docker.service 79 | After=docker.service 80 | 81 | [Service] 82 | WorkingDirectory=/opt/dockerfiles/%i 83 | ExecStart=/usr/bin/docker-compose exec -T %i /rds/bin/backup.sh 84 | 85 | [Install] 86 | WantedBy=multi-user.target 87 | EOF 88 | } 89 | # /snippet:rds_service_backup_systemd_config 90 | 91 | # snippet:rds_service_backup_timer_systemd_config 92 | function rds_service_backup_timer_systemd_config { 93 | cat <<-EOF 94 | 95 | [Unit] 96 | Description=rds instance %i backup timer 97 | 98 | [Timer] 99 | OnCalendar=hourly 100 | 101 
| [Install] 102 | WantedBy=basic.target 103 | EOF 104 | } 105 | # /snippet:rds_service_backup_timer_systemd_config 106 | 107 | # snippet:rds_service_systemd_config 108 | function rds_service_systemd_config { 109 | cat <<-EOF 110 | [Unit] 111 | Description=rds instance %i 112 | Requires=docker.service 113 | After=docker.service 114 | 115 | [Service] 116 | Restart=always 117 | TimeoutStartSec=1200 118 | 119 | WorkingDirectory=/opt/dockerfiles/%i 120 | 121 | ExecStartPre=/usr/bin/docker-compose down -v 122 | ExecStartPre=/usr/bin/docker-compose rm -fv 123 | ExecStartPre=/usr/bin/docker-compose pull 124 | 125 | # Compose up 126 | ExecStart=/usr/bin/docker-compose up 127 | 128 | # Compose down, remove containers and volumes 129 | ExecStop=/usr/bin/docker-compose down -v 130 | 131 | [Install] 132 | WantedBy=multi-user.target 133 | EOF 134 | } 135 | # /snippet:rds_service_systemd_config 136 | 137 | # snippet:docker_compose_config 138 | function docker_compose_config { 139 | cat <<-EOF 140 | version: "3" 141 | services: 142 | ${rds_instance_id}: 143 | image: docker.pkg.github.com/pellepelster/hetzner-rds-postgres/hetzner-rds-postgres:latest 144 | environment: 145 | - "DB_DATABASE=${rds_instance_id}" 146 | - "DB_PASSWORD=very-secret" 147 | ports: 148 | - "5432:5432" 149 | volumes: 150 | - "/storage/data:/storage/data" 151 | - "/storage/backup:/storage/backup" 152 | EOF 153 | } 154 | # /snippet:docker_compose_config 155 | 156 | mount_storage_backup 157 | mount_storage_data 158 | configure_public_ip 159 | update_system 160 | install_prerequisites 161 | configure_ufw 162 | docker_login 163 | 164 | 165 | mkdir -p "/opt/dockerfiles/${rds_instance_id}" 166 | docker_compose_config > "/opt/dockerfiles/${rds_instance_id}/docker-compose.yml" 167 | 168 | rds_service_backup_systemd_config > /etc/systemd/system/rds-backup@.service 169 | rds_service_backup_timer_systemd_config > /etc/systemd/system/rds-backup@.timer 170 | 171 | rds_service_systemd_config > /etc/systemd/system/rds@.service 172 | 173 | systemctl daemon-reload 174 | 175 | systemctl enable rds@${rds_instance_id} 176 | systemctl start rds@${rds_instance_id} 177 | 178 | systemctl enable rds-backup@${rds_instance_id}.timer 179 | systemctl start rds-backup@${rds_instance_id}.timer 180 | -------------------------------------------------------------------------------- /bin/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail -o errexit -o nounset 4 | 5 | if [ -z "${DB_INSTANCE_ID}" ]; then 6 | echo "DB_INSTANCE_ID not set or empty, exiting" 7 | exit 1 8 | fi 9 | 10 | export DB_USERNAME=${DB_USERNAME:-$DB_INSTANCE_ID} 11 | export DB_DATABASE=${DB_DATABASE:-$DB_INSTANCE_ID} 12 | 13 | export INSTANCE_DATA_DIR="${DATA_DIR}/${DB_INSTANCE_ID}" 14 | export DB_BACKUP_DIR="${BACKUP_DIR}/${DB_INSTANCE_ID}" 15 | 16 | if [ -z "${DB_PASSWORD}" ]; then 17 | DB_PASSWORD=$(uuidgen) 18 | echo "no password set, setting random password '${DB_PASSWORD}" 19 | fi 20 | 21 | mkdir -p "${INSTANCE_DATA_DIR}" 22 | mkdir -p "${DB_BACKUP_DIR}" 23 | 24 | chown ${USER_ID}:${USER_GID} "${INSTANCE_DATA_DIR}" 25 | chmod 700 "${INSTANCE_DATA_DIR}" 26 | 27 | chown ${USER_ID}:${USER_GID} "${DB_BACKUP_DIR}" 28 | chmod 700 "${DB_BACKUP_DIR}" 29 | 30 | mkdir -p /rds/{socket,log,run,bin} 31 | 32 | # snippet:run_gomplate 33 | gomplate --input-dir /rds/templates/config --output-dir /rds/config 34 | gomplate --input-dir /rds/templates/bin --output-dir /rds/bin 35 | # /snippet:run_gomplate 36 | 37 | 
POSTGRES_BASE_DIR="/usr/lib/postgresql/11" 38 | POSTGRES_BIN_DIR="${POSTGRES_BASE_DIR}/bin" 39 | 40 | function psql_execute() { 41 | local query=${1:-} 42 | psql -h /rds/socket postgres --field-separator-zero --record-separator-zero --tuples-only --quiet -c "${query}" 43 | } 44 | 45 | function pgbackrest_execute() { 46 | pgbackrest --config /rds/config/pgbackrest.conf --log-path=/rds/log --stanza=${DB_INSTANCE_ID} "$@" 47 | } 48 | 49 | function psql_count() { 50 | psql_execute "$@" | tr -d '[:space:]' 51 | } 52 | 53 | function init_db() { 54 | 55 | # snippet:run_initdb 56 | ${POSTGRES_BIN_DIR}/initdb --username="rds" --encoding=UTF8 --pwfile=<(echo "${DB_PASSWORD}") -D "${INSTANCE_DATA_DIR}" || true 57 | # /snippet:run_initdb 58 | 59 | cp /rds/config/postgresql.conf "${INSTANCE_DATA_DIR}/postgresql.conf" 60 | cp /rds/config/pg_hba.conf "${INSTANCE_DATA_DIR}/pg_hba.conf" 61 | 62 | # make sure we only listen public when DB is ready to go 63 | # snippet:run_init_start 64 | ${POSTGRES_BIN_DIR}/pg_ctl -D "${DATA_DIR}/${DB_INSTANCE_ID}" start --options="-c listen_addresses=''" 65 | # /snippet:run_init_start 66 | 67 | if [[ $(pgbackrest_status_code) -gt 0 ]]; then 68 | 69 | if [[ "$(ls -A ${BACKUP_DIR}/${DB_INSTANCE_ID})" ]]; then 70 | local invalid_backups_dir="${BACKUP_DIR}/${DB_INSTANCE_ID}/.invalid_backups_$(date +%Y%m%d%H%M%S)" 71 | mkdir -p "${invalid_backups_dir}" 72 | mv ${BACKUP_DIR}/${DB_INSTANCE_ID}/* "${invalid_backups_dir}" 73 | fi 74 | fi 75 | 76 | pgbackrest --config /rds/config/pgbackrest.conf --log-path=/rds/log --stanza=${DB_INSTANCE_ID} stanza-create 77 | 78 | # snippet:run_create_db 79 | if [[ $(psql_count "SELECT count(datname) FROM pg_database WHERE datname = '${DB_DATABASE}';") == "0" ]]; then 80 | psql_execute "CREATE DATABASE ${DB_DATABASE}" 81 | fi 82 | # /snippet:run_create_db 83 | 84 | # snippet:run_create_user 85 | if [[ $(psql_count "SELECT count(u.usename) FROM pg_catalog.pg_user u WHERE u.usename = '${DB_USERNAME}';") == "0" ]]; then 86 | psql_execute "CREATE USER ${DB_USERNAME} WITH ENCRYPTED PASSWORD '${DB_PASSWORD}'" 87 | fi 88 | 89 | psql_execute "GRANT ALL PRIVILEGES ON DATABASE ${DB_DATABASE} TO ${DB_USERNAME}" 90 | # /snippet:run_create_user 91 | 92 | echo "executing initial backup" 93 | # snippet:run_init_finalize 94 | pgbackrest_execute --log-level-console=info backup 95 | 96 | ${POSTGRES_BIN_DIR}/pg_ctl -D "${INSTANCE_DATA_DIR}" stop 97 | # /snippet:run_init_finalize 98 | } 99 | 100 | function pgbackrest_status_code() { 101 | PGBACKREST_INFO="$(pgbackrest_execute --output=json info)" 102 | 103 | if [[ $(echo ${PGBACKREST_INFO} | jq length) -gt 0 ]]; then 104 | BACKUP_INFO=$(echo ${PGBACKREST_INFO} | jq ".[] | select(.name == \"${DB_DATABASE}\")") 105 | echo ${BACKUP_INFO} | jq -r '.status.code' 106 | else 107 | echo "99" 108 | fi 109 | } 110 | 111 | if [[ "$(ls -A "${INSTANCE_DATA_DIR}")" ]]; then 112 | echo "data dir is not empty" 113 | rm -f /rds/socket/* 114 | rm -f "${INSTANCE_DATA_DIR}/postmaster.pid" 115 | 116 | # snippet:run_start_with_data 117 | ${POSTGRES_BIN_DIR}/pg_ctl -D "${INSTANCE_DATA_DIR}" start --options="-c listen_addresses=''" 118 | psql_execute "ALTER USER ${DB_USERNAME} WITH ENCRYPTED PASSWORD '${DB_PASSWORD}'" 119 | ${POSTGRES_BIN_DIR}/pg_ctl -D "${INSTANCE_DATA_DIR}" stop 120 | # /snippet:run_start_with_data 121 | else 122 | echo "data dir is empty" 123 | 124 | if [[ $(pgbackrest_status_code) -eq 0 ]]; then 125 | 126 | echo "restoring database from backup" 127 | # snippet:run_restore_restore 128 | pgbackrest_execute 
--db-path="${INSTANCE_DATA_DIR}" restore --recovery-option="recovery_end_command=/rds/bin/recovery_complete.sh" 129 | # /snippet:run_restore_restore 130 | 131 | sleep 5 132 | 133 | echo "starting db for recovery" 134 | # snippet:run_recovery 135 | ${POSTGRES_BIN_DIR}/pg_ctl -D "${INSTANCE_DATA_DIR}" start --options="-c listen_addresses=''" 136 | 137 | while [[ -f /rds/run/recovery_complete ]]; do 138 | echo "waiting for recovery completion" 139 | sleep 5 140 | done 141 | # /snippet:run_recovery 142 | 143 | # snippet:run_recovery_finish 144 | until [[ "$(psql_execute 'SELECT pg_is_in_recovery();' | tr -d '[:space:]')" == "f" ]]; do 145 | echo "waiting for server to be ready" 146 | sleep 5 147 | done 148 | psql_execute "ALTER USER ${DB_USERNAME} WITH ENCRYPTED PASSWORD '${DB_PASSWORD}'" 149 | 150 | ${POSTGRES_BIN_DIR}/pg_ctl -D "${INSTANCE_DATA_DIR}" stop 151 | # /snippet:run_recovery_finish 152 | else 153 | init_db 154 | fi 155 | fi 156 | 157 | echo "starting postgres db" 158 | # snippet:run_start 159 | exec ${POSTGRES_BIN_DIR}/postgres -D "${INSTANCE_DATA_DIR}" 160 | # /snippet:run_start 161 | -------------------------------------------------------------------------------- /do: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail -o errexit -o nounset 4 | 5 | DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd -P)" 6 | 7 | RDS_INSTANCE_ID="instance1" 8 | 9 | GITHUB_OWNER="pellepelster" 10 | GITHUB_REPOSITORY="hetzner-rds-postgres" 11 | 12 | DOCKER_REGISTRY="docker.pkg.github.com" 13 | DOCKER_REPOSITORY="${GITHUB_OWNER}/${GITHUB_REPOSITORY}" 14 | DOCKER_IMAGE_NAME="hetzner-rds-postgres" 15 | 16 | source "${DIR}/ctuhl/lib/shell/log.sh" 17 | source "${DIR}/ctuhl/lib/shell/ruby.sh" 18 | 19 | PASS_CLOUD_API_TOKEN="infrastructure/rds/${RDS_INSTANCE_ID}/cloud_api_token" 20 | PASS_GITHUB_RW_TOKEN="github/${GITHUB_OWNER}/personal_access_token_rw" 21 | PASS_GITHUB_RO_TOKEN="github/${GITHUB_OWNER}/personal_access_token_ro" 22 | 23 | PASS_INSTANCE_ECDSA_KEY="infrastructure/rds/${RDS_INSTANCE_ID}/ssh_host_ecdsa_key" 24 | PASS_INSTANCE_ECDSA_PUB="infrastructure/rds/${RDS_INSTANCE_ID}/ssh_host_ecdsa_public_key" 25 | PASS_INSTANCE_RSA_KEY="infrastructure/rds/${RDS_INSTANCE_ID}/ssh_host_rsa_key" 26 | PASS_INSTANCE_RSA_PUB="infrastructure/rds/${RDS_INSTANCE_ID}/ssh_host_rsa_public_key" 27 | PASS_INSTANCE_ED25519_KEY="infrastructure/rds/${RDS_INSTANCE_ID}/ssh_host_ed25519_key" 28 | PASS_INSTANCE_ED25519_PUB="infrastructure/rds/${RDS_INSTANCE_ID}/ssh_host_ed25519_public_key" 29 | 30 | TEMP_DIR="${DIR}/.tmp" 31 | mkdir -p "${TEMP_DIR}" 32 | 33 | trap task_clean SIGINT SIGTERM ERR EXIT 34 | 35 | function task_docker_login { 36 | ensure_docker_login 37 | } 38 | 39 | function ensure_docker_login { 40 | pass "${PASS_GITHUB_RW_TOKEN}" | docker login https://docker.pkg.github.com -u ${GITHUB_OWNER} --password-stdin 41 | } 42 | 43 | function generate_ssh_identity { 44 | local type="${1:-}" 45 | local pass_key_path="${2:-}" 46 | local pass_pub_path="${3:-}" 47 | ssh-keygen -q -N "" -t "${type}" -f "${TEMP_DIR}/ssh_host_${type}_key" 48 | pass insert -m "${pass_key_path}" < "${TEMP_DIR}/ssh_host_${type}_key" 49 | pass insert -m "${pass_pub_path}" < "${TEMP_DIR}/ssh_host_${type}_key.pub" 50 | } 51 | 52 | 53 | function task_generate_ssh_identities { 54 | generate_ssh_identity "ed25519" "${PASS_INSTANCE_ECDSA_KEY}" "${PASS_INSTANCE_ECDSA_PUB}" 55 | generate_ssh_identity "ecdsa" "${PASS_INSTANCE_RSA_KEY}" "${PASS_INSTANCE_RSA_PUB}" 56 | 
generate_ssh_identity "rsa" "${PASS_INSTANCE_ED25519_KEY}" "${PASS_INSTANCE_ED25519_PUB}" 57 | } 58 | 59 | function task_build { 60 | docker build -t ${DOCKER_IMAGE_NAME} -f Dockerfile . 61 | docker tag "${DOCKER_IMAGE_NAME}" "${DOCKER_REGISTRY}/${DOCKER_REPOSITORY}/${DOCKER_IMAGE_NAME}:latest" 62 | } 63 | 64 | function task_usage { 65 | echo "Usage: $0 build | test | deploy" 66 | exit 1 67 | } 68 | 69 | function task_clean { 70 | log_divider_header "cleaning up..." 71 | 72 | rm -rf "${TEMP_DIR}" 73 | 74 | cd "${DIR}/test/rds" 75 | docker-compose rm --force --stop -v 76 | 77 | docker volume rm -f rds_rds-data 78 | docker volume rm -f rds_rds-backup 79 | 80 | log_divider_footer 81 | } 82 | 83 | function terraform_wrapper_do() { 84 | 85 | log_divider_header "executing terraform..." 86 | 87 | local directory=${1:-} 88 | local command=${2:-apply} 89 | shift || true 90 | shift || true 91 | 92 | if [ ! -d "${directory}/.terraform" ]; then 93 | terraform_wrapper "${directory}" init -lock=false 94 | fi 95 | 96 | terraform_wrapper "${directory}" "${command}" -lock=false "$@" 97 | log_divider_footer 98 | } 99 | 100 | function terraform_wrapper() { 101 | local directory=${1:-} 102 | shift || true 103 | ( 104 | cd "${DIR}/${directory}" 105 | terraform "$@" 106 | ) 107 | } 108 | 109 | function task_infra_instance { 110 | export TF_VAR_github_token="$(pass ${PASS_GITHUB_RO_TOKEN})" 111 | export TF_VAR_github_owner="${GITHUB_OWNER}" 112 | export TF_VAR_rds_instance_id="${RDS_INSTANCE_ID}" 113 | export TF_VAR_cloud_api_token="$(pass ${PASS_CLOUD_API_TOKEN})" 114 | export TF_VAR_ssh_identity_ecdsa_key="$(pass "${PASS_INSTANCE_ECDSA_KEY}" | base64 -w 0)" 115 | export TF_VAR_ssh_identity_ecdsa_pub="$(pass "${PASS_INSTANCE_ECDSA_PUB}" | base64 -w 0)" 116 | export TF_VAR_ssh_identity_rsa_key="$(pass "${PASS_INSTANCE_RSA_KEY}" | base64 -w 0)" 117 | export TF_VAR_ssh_identity_rsa_pub="$(pass "${PASS_INSTANCE_RSA_PUB}" | base64 -w 0)" 118 | export TF_VAR_ssh_identity_ed25519_key="$(pass "${PASS_INSTANCE_ED25519_KEY}" | base64 -w 0)" 119 | export TF_VAR_ssh_identity_ed25519_pub="$(pass "${PASS_INSTANCE_ED25519_PUB}" | base64 -w 0)" 120 | 121 | terraform_wrapper_do "terraform/instance" "$@" 122 | } 123 | 124 | 125 | function ensure_environment { 126 | 127 | if [[ ! -f ~/.ssh/id_rsa.pub ]]; then 128 | echo "expected an ssh public key at ~/.ssh/id_rsa.pub for instance provisioning" 129 | exit 1 130 | fi 131 | 132 | if ! pass ${PASS_CLOUD_API_TOKEN} &> /dev/null; then 133 | echo "no cloud api token found at pass path '${PASS_CLOUD_API_TOKEN}'" 134 | exit 1 135 | fi 136 | 137 | if ! pass ${PASS_GITHUB_RW_TOKEN} &> /dev/null; then 138 | log_error "no personal github r/w token found at pass path '${PASS_GITHUB_RW_TOKEN}', can be set via './do set-github-access-token-rw'" 139 | exit 1 140 | fi 141 | 142 | if ! pass ${PASS_GITHUB_RO_TOKEN} &> /dev/null; then 143 | log_error "no personal github r/o token found at pass path '${PASS_GITHUB_RO_TOKEN}', can be set via './do set-github-access-token-ro'" 144 | exit 1 145 | fi 146 | 147 | local ssh_pass_missing=0 148 | 149 | for ssh_pass_path in PASS_INSTANCE_ECDSA_KEY \ 150 | PASS_INSTANCE_ECDSA_PUB \ 151 | PASS_INSTANCE_RSA_KEY \ 152 | PASS_INSTANCE_RSA_PUB \ 153 | PASS_INSTANCE_ED25519_KEY \ 154 | PASS_INSTANCE_ED25519_PUB; do 155 | 156 | if ! 
pass ${!ssh_pass_path} &> /dev/null; then 157 | ssh_pass_missing=1 158 | fi 159 | done 160 | 161 | if [[ ${ssh_pass_missing} == 1 ]]; then 162 | log_error "no ssh identity information found at pass path '${!ssh_pass_path}', can be generated via './do generate-ssh-identities'" 163 | exit 1 164 | fi 165 | } 166 | 167 | function task_infra_storage { 168 | ensure_environment 169 | 170 | export TF_VAR_rds_instance_id="${RDS_INSTANCE_ID}" 171 | export TF_VAR_cloud_api_token="$(pass ${PASS_CLOUD_API_TOKEN})" 172 | 173 | terraform_wrapper_do "terraform/storage" "$@" 174 | } 175 | 176 | function task_ssh_instance { 177 | local public_ip="$(terraform_wrapper "terraform/instance" "output" "-json" | jq -r '.public_ip.value')" 178 | ssh root@${public_ip} "$@" 179 | } 180 | 181 | function task_test() { 182 | ( 183 | cd ${DIR}/test 184 | cthul_ruby_ensure_bundle 185 | bundle exec ruby runner.rb rds "$@" 186 | ) 187 | } 188 | 189 | function task_run() { 190 | ( 191 | cd ${DIR}/test/rds 192 | docker-compose up -d rds-test1 193 | local psql_port="$(docker inspect rds_rds-test1_1 | jq -r '.[0].NetworkSettings.Ports["5432/tcp"][0].HostPort')" 194 | echo "rds postgres is running at psql://localhost:${psql_port}" 195 | echo "press any key to shutdown" 196 | read 197 | ) 198 | } 199 | 200 | function task_deploy { 201 | ensure_environment 202 | ensure_docker_login 203 | docker push "${DOCKER_REGISTRY}/${DOCKER_REPOSITORY}/${DOCKER_IMAGE_NAME}:latest" 204 | } 205 | 206 | function task_set_github_access_token_rw { 207 | echo "Enter the Github personal read/write access token, followed by [ENTER]:" 208 | read -r github_access_token 209 | echo ${github_access_token} | pass insert -m "${GITHUB_RW_TOKEN}" 210 | } 211 | 212 | function task_set_github_access_token_ro { 213 | echo "Enter the Github personal readonly access token, followed by [ENTER]:" 214 | read -r github_access_token 215 | echo ${github_access_token} | pass insert -m "${GITHUB_RO_TOKEN}" 216 | } 217 | 218 | function task_set_cloud_api_token { 219 | echo "Enter the Hetzner Cloud API token, followed by [ENTER]:" 220 | read -r hetzner_cloud_api_token 221 | echo ${hetzner_cloud_api_token} | pass insert -m "infrastructure/${DOMAIN}/cloud_api_token" 222 | } 223 | 224 | ARG=${1:-} 225 | shift || true 226 | case ${ARG} in 227 | build) task_build "$@" ;; 228 | run) task_run "$@" ;; 229 | test) task_test "$@" ;; 230 | deploy) task_deploy "$@" ;; 231 | infra-instance) task_infra_instance "$@" ;; 232 | infra-storage) task_infra_storage "$@" ;; 233 | ssh-instance) task_ssh_instance "$@" ;; 234 | generate-ssh-identities) task_generate_ssh_identities ;; 235 | set-github-access-token-rw) task_set_github_access_token_rw ;; 236 | set-github-access-token-ro) task_set_github_access_token_ro ;; 237 | set-cloud-api-token) task_set_cloud_api_token ;; 238 | docker-login) task_docker_login ;; 239 | *) task_usage ;; 240 | esac 241 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | ### This project was created as a code example for [this blog post](https://pelle.io/posts/hetzner-rds-postgres/) 3 | 4 | ### The code is now part of the [Solidblocks Infrastructure](https://pellepelster.github.io/solidblocks/hetzner/) library with more features and proper integration tests 5 | 6 | --- 7 | # Hetzner RDS Postgres 8 | 9 | An often overlooked alternative to the big players in the cloud market is the [Hetzner Cloud](https://www.hetzner.com/de/cloud), developed 
by the well-known German hosting company Hetzner.

Of course, it can't compete with the 2 gazillion services from AWS, but it doesn't need to, because it already contains the basic building blocks for deploying scalable applications: namely, equivalents for EC2, VPC, ALB, and EBS. What's missing, though, is a replacement for RDS, which makes it difficult to run any application that stores state in an SQL database.

In this blog post we will try to replicate this service on our own, based on the building blocks provided by the Hetzner Cloud. In keeping with the general hubris and madness of the IT sector, we will call this service RDS (please AWS, don't sue me) because the acronym fits the purpose of this service best.

We can't compete with dozens of AWS engineers and replicate all variants of RDS. Instead we will focus on PostgreSQL as the database backend and also skip the multi-zone/multi-master implementation. To optimize for MTTR instead of MTBF we aim for a solid backup and recovery strategy.

# Architecture

Here is a quick overview of what we are about to build:

![Architecture Overview](hetzner-rds-postgres.png)

## Storage

Two data volumes will serve as our storage backend: one will host the actual database files from PostgreSQL, the other the backup archives. This not only helps with performance, but also gives us some flexibility in storage provisioning, because at a later stage we might want to migrate the data partition to local NVMe SSD storage for performance reasons.

The storage is mounted on the server and then mounted into the docker container via a docker bind mount.

## Docker

The PostgreSQL server will be packaged in docker, which makes the deployment easier and more predictable, and also helps us to test our solution locally. All configuration is done by setting environment variables, which can be used in configuration files that are rendered by [gomplate](https://gomplate.ca/) where needed.

## Backup

The backup and restore duty will be fulfilled by [pgBackRest](https://pgbackrest.org/), offering full, differential and incremental backups for PostgreSQL databases. It also includes WAL archiving, which enables us to implement point-in-time recovery (PITR) if needed; a rough sketch of such a restore follows at the end of this overview.

## Provisioning

As the Hetzner Cloud offers an extensive [API](https://docs.hetzner.cloud/) for all their services, we can use terraform to provision all the cloud resources we need.

# Implementation

For the implementation part we will focus on the RDS-specific parts of our setup and not on all the nitty-gritty details of the glue code holding everything together (this will hopefully be part of a separate blog post in the future). A running example of everything explained in this post can be cloned from [https://github.com/pellepelster/hetzner-rds-postgres](https://github.com/pellepelster/hetzner-rds-postgres).
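As a side note before we dive into the individual building blocks: the WAL archiving mentioned in the backup section is what makes point-in-time recovery possible. The scripts in this repository only ever restore the latest backup, so the following is just a rough, untested sketch of what a manual point-in-time restore could look like; the stanza name `instance1` and the timestamp are placeholders, and the option names are taken from the pgBackRest documentation rather than from this repository:

```
# sketch only: stop the database first, then restore the cluster to a specific point in time
pgbackrest --config /rds/config/pgbackrest.conf \
  --log-level-console=info \
  --stanza=instance1 \
  --type=time --target="2021-01-01 12:00:00+00" \
  restore
```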
45 | 46 | ## Docker 47 | 48 | First step, of course, is to install all required packages that we need to implement our envisioned functionality: 49 | 50 | 51 | ``` 52 | ENV DEBIAN_FRONTEND=noninteractive 53 | 54 | RUN apt-get update && \ 55 | apt-get dist-upgrade --assume-yes --quiet && \ 56 | apt-get --assume-yes --quiet --no-install-recommends install \ 57 | postgresql-11 \ 58 | curl \ 59 | ca-certificates \ 60 | jq \ 61 | uuid-runtime \ 62 | pgbackrest \ 63 | libdbd-pg-perl \ 64 | libpq-dev 65 | ``` 66 | 67 | 68 | 69 | Being responsible developers, we always verify the checksums of the static binaries used to distribute golang based software, in this case the template renderer gomplate: 70 | 71 | 72 | ``` 73 | RUN curl -L -o /usr/local/bin/gomplate https://github.com/hairyhenderson/gomplate/releases/download/${GOMPLATE_VERION}/gomplate_linux-amd64-slim && \ 74 | echo "${GOMPLATE_CHECKSUM}" /usr/local/bin/gomplate | sha256sum -c && \ 75 | chmod +x /usr/local/bin/gomplate 76 | ``` 77 | 78 | 79 | 80 | Of course we won't run everything as root inside docker container, as this may lead to security issues in the future. Instead, we create a dedicated user. The important part here is we use a static predefined user and group id (the number 4000 is arbitrarily picked) to circumvent permission issues that we may run into when mounting directories from the host system. 81 | 82 | 83 | ``` 84 | RUN groupadd --gid "${USER_GID}" "${USER}" && \ 85 | useradd \ 86 | --uid ${USER_ID} \ 87 | --gid ${USER_GID} \ 88 | --create-home \ 89 | --home-dir /${USER} \ 90 | --shell /bin/bash \ 91 | ${USER} 92 | ``` 93 | 94 | 95 | 96 | The main configuration of the PostgreSQL instance is done via environment variables, namely `${DB_INSTANCE_ID}` which is a unique identifier for the database instance, and of course a password, provided by `${DB_PASSWORD}`. The name of the database schema and username defaults to the value of `${DB_INSTANCE_ID}` and can be overridden by `${DB_DATABASE}` and `${DB_USERNAME}` respectively. 97 | 98 | ## run.sh 99 | 100 | Finally, the Dockerfile hands off control to the `/rds/bin/run.sh` where we will configure and start the PostgreSQL server. 101 | 102 | ### Templates 103 | 104 | First order of business is to render the template files for the PostgreSQL and pgBackRest configuration and some helper scripts. During the template run all environment variable placeholders will be replaced with the actual runtime content of the variables: 105 | 106 | 107 | ``` 108 | gomplate --input-dir /rds/templates/config --output-dir /rds/config 109 | gomplate --input-dir /rds/templates/bin --output-dir /rds/bin 110 | ``` 111 | 112 | 113 | 114 | The `pgbackrest.conf` is a straightforward adaption of the example from the [pgBackRest User Guide](https://pgbackrest.org/user-guide.html) where we configure the directory that holds the backup files, and our db instance that should be backed up (later referred to as stanza, as it is called in the pgBackRest world). 
```
[global]
repo-path={{ .Env.BACKUP_DIR }}/{{ .Env.DB_INSTANCE_ID }}

[{{ .Env.DB_INSTANCE_ID }}]
pg1-path={{ .Env.INSTANCE_DATA_DIR }}
pg1-socket-path=/rds/socket
```

The `postgresql.conf` itself is also pretty basic. The most important aspect is that we use pgBackRest as the archive command for storing the database's [WAL files](https://en.wikipedia.org/wiki/Write-ahead_logging), which will enable pgBackRest to restore the database up to the point of the last archived WAL file:

```
archive_command = 'pgbackrest --config /rds/config/pgbackrest.conf --log-level-console=info --log-path=/rds/log --stanza={{ .Env.DB_INSTANCE_ID }} archive-push %p'
archive_mode = on
max_wal_senders = 3
wal_level = replica
listen_addresses = '*'
unix_socket_directories = '/rds/socket'
```

A small backup script that triggers the pgBackRest backup is generated into `/rds/bin/backup.sh`. This file is intended to be called from the host system in order to trigger the database backup.

```
#!/usr/bin/env bash

set -o pipefail -o errexit -o nounset

pgbackrest --config /rds/config/pgbackrest.conf --log-level-console=info --log-path=/rds/log --stanza={{ .Env.DB_INSTANCE_ID }} backup --type=full
```

### Server Start

Before we can start the server we have to think about some special cases concerning the data the server is supposed to work with. Depending on the lifecycle of our instance we have to deal with the following scenarios:

* Scenario 1: A freshly provisioned instance, data and backup directories are empty
* Scenario 2: The data directory is not empty and the backup directory may or may not be filled with backups
* Scenario 3: An empty data dir (maybe due to data loss) and a backup directory containing backups

#### Scenario 1

This scenario is relatively straightforward: we just have to initialize a fresh instance, create a database according to the environment variable configuration and create an initial backup.

The database initialization is done the same way as in any other PostgreSQL instance by calling `initdb`:

```
${POSTGRES_BIN_DIR}/initdb --username="rds" --encoding=UTF8 --pwfile=<(echo "${DB_PASSWORD}") -D "${INSTANCE_DATA_DIR}" || true
```

To be able to create a database we have to start the instance. To avoid any unwanted connections to the database we override the `listen_addresses` configuration, setting it to an empty string. By doing this, the server will not bind to any interfaces and, per the configuration from above, only allow socket-based communication.
We will only allow non-local communication when the instance is finally configured and ready to accept external traffic:

```
${POSTGRES_BIN_DIR}/pg_ctl -D "${DATA_DIR}/${DB_INSTANCE_ID}" start --options="-c listen_addresses=''"
```

Now that the server is running we can create the database:

```
if [[ $(psql_count "SELECT count(datname) FROM pg_database WHERE datname = '${DB_DATABASE}';") == "0" ]]; then
  psql_execute "CREATE DATABASE ${DB_DATABASE}"
fi
```

and a user with permissions to access the just created database:

```
if [[ $(psql_count "SELECT count(u.usename) FROM pg_catalog.pg_user u WHERE u.usename = '${DB_USERNAME}';") == "0" ]]; then
  psql_execute "CREATE USER ${DB_USERNAME} WITH ENCRYPTED PASSWORD '${DB_PASSWORD}'"
fi

psql_execute "GRANT ALL PRIVILEGES ON DATABASE ${DB_DATABASE} TO ${DB_USERNAME}"
```

The last step in the initialization process is to create an initial backup and stop the server, so it can be started with external connectivity later on:

```
pgbackrest_execute --log-level-console=info backup

${POSTGRES_BIN_DIR}/pg_ctl -D "${INSTANCE_DATA_DIR}" stop
```

#### Scenario 2

The case where we already have a valid data directory is a little bit easier to handle. We just have to start the server (again with the socket-only trick from scenario 1) and make sure to set the password for the database user, as a new value may have been provided via the `${DB_PASSWORD}` environment variable since the last start.

```
${POSTGRES_BIN_DIR}/pg_ctl -D "${INSTANCE_DATA_DIR}" start --options="-c listen_addresses=''"
psql_execute "ALTER USER ${DB_USERNAME} WITH ENCRYPTED PASSWORD '${DB_PASSWORD}'"
${POSTGRES_BIN_DIR}/pg_ctl -D "${INSTANCE_DATA_DIR}" stop
```

#### Scenario 3

Now the restore case is a little more delicate, as we have to take some extra steps to make sure the restore is complete. First, we have to actually restore the data from pgBackRest:

```
pgbackrest_execute --db-path="${INSTANCE_DATA_DIR}" restore --recovery-option="recovery_end_command=/rds/bin/recovery_complete.sh"
```

What the restore command also does, besides restoring the actual data, is to generate a `recovery.conf` for PostgreSQL with instructions on how to restore the instance from the restored data (see [here](https://www.postgresql.org/docs/11/recovery-config.html) for more in-depth information about this process). We add a little customization to the process in the form of a command that is triggered when the recovery is done.
This command will just write a marker file so we know the recovery is complete, and looks like this: 249 | 250 | 251 | ``` 252 | #!/usr/bin/env bash 253 | 254 | set -o pipefail -o errexit -o nounset 255 | 256 | echo "complete" > /rds/run/recovery_complete 257 | 258 | ``` 259 | 260 | 261 | 262 | Now when we start the server (again only listening on sockets) it will pick up the `recovery.conf` and we just wait for the recovery finish marker to appear: 263 | 264 | 265 | ``` 266 | ${POSTGRES_BIN_DIR}/pg_ctl -D "${INSTANCE_DATA_DIR}" start --options="-c listen_addresses=''" 267 | 268 | while [[ -f /rds/run/recovery_complete ]]; do 269 | echo "waiting for recovery completion" 270 | sleep 5 271 | done 272 | ``` 273 | 274 | 275 | 276 | Now what's left is to make sure that, like in scenario 2 we update the password if needed. A little edge case we have to cover here is that the server may still be in recovery state, despite the marker file saying otherwise. Therefore we have to wait for this state to finish, update the user and finally, again stop the server: 277 | 278 | 279 | ``` 280 | until [[ "$(psql_execute 'SELECT pg_is_in_recovery();' | tr -d '[:space:]')" == "f" ]]; do 281 | echo "waiting for server to be ready" 282 | sleep 5 283 | done 284 | psql_execute "ALTER USER ${DB_USERNAME} WITH ENCRYPTED PASSWORD '${DB_PASSWORD}'" 285 | 286 | ${POSTGRES_BIN_DIR}/pg_ctl -D "${INSTANCE_DATA_DIR}" stop 287 | ``` 288 | 289 | 290 | 291 | Now that we have got all scenarios covered we can start the server and begin serving requests: 292 | 293 | 294 | ``` 295 | exec ${POSTGRES_BIN_DIR}/postgres -D "${INSTANCE_DATA_DIR}" 296 | ``` 297 | 298 | 299 | 300 | # Test 301 | 302 | Now that we have a dockerized implementation of our shiny new service, we are in the lucky position that we can start the container locally and verify that everything works as expected. 303 | 304 | [minitest](https://github.com/seattlerb/minitest) will serve as testing framework, taking care of starting and stopping the test containers and orchestrating the tests. Base for the test is a docker-compose environment where we configure the username and password for our test instance. Docker volumes will be used as stand-in doubles for the bind mounts in the real world: 305 | 306 | 307 | ``` 308 | version: "3" 309 | services: 310 | rds-test1: 311 | image: hetzner-rds-postgres 312 | environment: 313 | - "DB_INSTANCE_ID=test1" 314 | - "DB_PASSWORD=password1" 315 | ports: 316 | - "5432" 317 | volumes: 318 | - "rds-data:/storage/data" 319 | - "rds-backup:/storage/backup" 320 | 321 | rds-test1-no-password: 322 | image: hetzner-rds-postgres 323 | environment: 324 | - "DB_INSTANCE_ID=test1" 325 | ports: 326 | - "5432" 327 | 328 | rds-test1-no-instance-id: 329 | image: hetzner-rds-postgres 330 | environment: 331 | - "DB_PASSWORD=password1" 332 | ports: 333 | - "5432" 334 | 335 | volumes: 336 | rds-data: 337 | rds-backup: 338 | 339 | ``` 340 | 341 | 342 | 343 | Two helper methods let us destroy the volumes on request, so we can simulate different failure modes and ensure the backup and restore strategy work as intended: 344 | 345 | 346 | ``` 347 | def remove_data_volume 348 | `docker volume rm -f rds_rds-data` 349 | end 350 | 351 | def remove_backup_volume 352 | `docker volume rm -f rds_rds-backup` 353 | end 354 | ``` 355 | 356 | 357 | 358 | Let's have an exemplary look at the test ensuring the restore works. 
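The test relies on the `clean_start` helper from `rds_spec.rb` (shown in the repository listing above), which shuts everything down, wipes both volumes, starts the requested service and waits until the initial backup has completed and the port is reachable; it is quoted here for reference:

```
def clean_start(service)
  @compose.force_shutdown

  remove_data_volume
  remove_backup_volume

  @compose.up(service, detached: true)
  host, port = @compose.address(service, 5432)

  wait_while {
    !@compose.logs(service).include? 'backup command end: completed successfully'
  }

  wait_while {
    !is_port_open?(host, port)
  }

  return host, port
end
```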
In the test setup we start the server, create a new table, insert some data and ensure the data is in the table: 359 | 360 | 361 | ``` 362 | host, port = clean_start('rds-test1') 363 | conn = PG::Connection.new(host, port, '', '', 'test1', 'test1', 'password1') 364 | 365 | conn.exec('CREATE TABLE pets (id SERIAL PRIMARY KEY, name VARCHAR(64) NOT NULL);') 366 | 367 | petname = SecureRandom.uuid 368 | conn.exec("INSERT INTO pets (name) VALUES ('#{petname}');") 369 | 370 | pets = conn.exec("SELECT * FROM pets;").map { |row| row['name'] } 371 | assert_includes(pets, petname) 372 | conn.close 373 | ``` 374 | 375 | 376 | 377 | Now we can trigger the backup using the generated backup script inside the container, and then throw away the docker volume holding the database data: 378 | 379 | 380 | ``` 381 | @compose.exec('rds-test1', '/rds/bin/backup.sh') 382 | 383 | # stopping instance and remove data volume 384 | @compose.kill('rds-test1') 385 | wait_while { 386 | !@compose.logs('rds-test1').include? 'database system is shut down' 387 | } 388 | @compose.rm('rds-test1', force: true) 389 | remove_data_volume 390 | ``` 391 | 392 | 393 | 394 | On the subsequent start we expect scenario 3 to kick in and restore all of our data, which we can verify by trying to read the initially generated data again: 395 | 396 | 397 | ``` 398 | @compose.up('rds-test1', detached: true) 399 | 400 | 401 | host, port = @compose.address('rds-test1', 5432) 402 | wait_for_server_start('rds-test1') 403 | 404 | conn = PG::Connection.new(host, port, '', '', 'test1', 'test1', 'password1') 405 | pets = conn.exec("SELECT * FROM pets;").map { |row| row['name'] } 406 | assert_includes(pets, petname) 407 | ``` 408 | 409 | 410 | 411 | # Deployment 412 | 413 | For the sake of simplicity we will only focus on the [cloud init](https://cloudinit.readthedocs.io/en/latest/) configuration used to spin up the docker container. The supplied terraform configuration is only intended for showcasing this blog post, in the real world, we would not deploy a database with a publicly available port, but rather resort to a private network where only the application server instances are allowed to access the database. 414 | 415 | ## Storage 416 | 417 | To ease daily operations, the terraform files for the server instance and the storage volumes are divided into two different terraform modules. This allows us to destroy and rebuild the whole instance without having to worry about our precious data volumes getting deleted by accident. 
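Since the two modules keep separate terraform state, the instance can be torn down and recreated without touching the volumes. The `do` wrapper script in the repository simply passes the sub-command through to terraform, so a rebuild could look like the following sketch (assuming nothing else is running against the instance):

```
# recreate the server; the storage module (and with it the volumes) is left untouched
./do infra-instance destroy
./do infra-instance apply
```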
The definition of the data volumes follows the already explained architecture, with one volume for data and another volume for backup storage:

```
resource "hcloud_volume" "data" {
  name     = "${var.rds_instance_id}-data"
  size     = 64
  format   = "ext4"
  location = var.location
}

resource "hcloud_volume" "backup" {
  name     = "${var.rds_instance_id}-backup"
  size     = 64
  format   = "ext4"
  location = var.location
}
```

## Instance

Now that the data volumes are set up, we can reference them in the instance definition like this:

```
data "hcloud_volume" "data" {
  name = "${var.rds_instance_id}-data"
}

data "hcloud_volume" "backup" {
  name = "${var.rds_instance_id}-backup"
}
```

and pass the linux devices to the cloud init template:

```
storage_device_data   = data.hcloud_volume.data.linux_device
storage_device_backup = data.hcloud_volume.backup.linux_device
```

where we then proceed to mount the two volumes and ensure that the folders' user and group ids match the ones used inside our docker container:

```
```

To simplify the instance provisioning, we use bash-function-based templating to render the configuration files needed to start the docker container on the instance. The container is started via docker-compose, just like in the test environment. But contrary to the tests, we do not use docker volumes for data storage, but the cloud volumes we mounted in the previous step:

```
function docker_compose_config {
cat <<-EOF
version: "3"
services:
  ${rds_instance_id}:
    image: docker.pkg.github.com/pellepelster/hetzner-rds-postgres/hetzner-rds-postgres:latest
    environment:
      - "DB_DATABASE=${rds_instance_id}"
      - "DB_PASSWORD=very-secret"
    ports:
      - "5432:5432"
    volumes:
      - "/storage/data:/storage/data"
      - "/storage/backup:/storage/backup"
EOF
}
```

The docker-compose start itself is triggered by a systemd unit (yes, I know, booo systemd). A little trick we use in this systemd unit is to pass the db instance id using systemd's specifier logic. This way we can generate an instance-id-agnostic systemd configuration like this:

```
function rds_service_systemd_config {
cat <<-EOF
[Unit]
Description=rds instance %i
Requires=docker.service
After=docker.service

[Service]
Restart=always
TimeoutStartSec=1200

WorkingDirectory=/opt/dockerfiles/%i

ExecStartPre=/usr/bin/docker-compose down -v
ExecStartPre=/usr/bin/docker-compose rm -fv
ExecStartPre=/usr/bin/docker-compose pull

# Compose up
ExecStart=/usr/bin/docker-compose up

# Compose down, remove containers and volumes
ExecStop=/usr/bin/docker-compose down -v

[Install]
WantedBy=multi-user.target
EOF
}
```

We write this file to `/etc/systemd/system/rds@.service`.
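In the user data script, writing the rendered templates and making systemd aware of them boils down to something like this sketch (not the literal provisioning code; the `/opt/dockerfiles/${rds_instance_id}` path is inferred from the unit's `WorkingDirectory`):

```
# render the docker-compose file and the systemd unit from the bash template functions
mkdir -p "/opt/dockerfiles/${rds_instance_id}"
docker_compose_config > "/opt/dockerfiles/${rds_instance_id}/docker-compose.yml"
rds_service_systemd_config > /etc/systemd/system/rds@.service

# let systemd pick up the new unit
systemctl daemon-reload
```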
Now, when we enable and start this instance via systemctl:

```
systemctl enable rds@instance1
systemctl start rds@instance1
```

systemd replaces the `%i` in the unit definition with the part after the `@`, in this case `instance1`. We follow the same pattern for the backup service defined in `/etc/systemd/system/rds-backup@.service`, only that this unit is much simpler, because it just calls the `/rds/bin/backup.sh` backup script inside the container:

```
function rds_service_backup_systemd_config {
cat <<-EOF
[Unit]
Description=rds instance %i backup
Requires=docker.service
After=docker.service

[Service]
WorkingDirectory=/opt/dockerfiles/%i
ExecStart=/usr/bin/docker-compose exec -T %i /rds/bin/backup.sh

[Install]
WantedBy=multi-user.target
EOF
}
```

The final touch is a systemd timer definition that triggers a backup every hour. The convention for systemd timers is that the timer definition lives next to the service file, with the extension `.timer` instead of `.service`. So we write the following file to `/etc/systemd/system/rds-backup@.timer`:

```
function rds_service_backup_timer_systemd_config {
cat <<-EOF
[Unit]
Description=rds instance %i backup timer

[Timer]
OnCalendar=hourly

[Install]
WantedBy=basic.target
EOF
}
```

# Try it out

If you want to try out what we built, [https://github.com/pellepelster/hetzner-rds-postgres](https://github.com/pellepelster/hetzner-rds-postgres) not only contains all sources, but also a bash script with some tasks to build and test everything:

```
git clone --recurse-submodules https://github.com/pellepelster/hetzner-rds-postgres.git
```

To build the docker image run:

```
./do build
```

and to run the tests (unsurprisingly):

```
./do test
```

The deploy part is a little bit trickier, as you have to upload the docker image to a registry. The easiest way to do this is probably to fork the project on GitHub and use the docker registry provided by GitHub. Don't forget to change the configuration in the `do` file to point to your fork:

```
GITHUB_OWNER="pellepelster"
GITHUB_REPOSITORY="hetzner-rds-postgres"
```

As soon as this is done you can provision the storage and the service instance by calling:

```
./do infra-storage apply
```

and

```
./do infra-instance apply
```

When the apply is finished you will be greeted with an ip address:

```
hcloud_server.instance: Destruction complete after 1s
hcloud_server.instance: Creating...
hcloud_server.instance: Creation complete after 8s [id=9891886]
hcloud_floating_ip_assignment.ip_assignment: Creating...
hcloud_volume_attachment.data: Creating...
hcloud_volume_attachment.backup: Creating...
hcloud_floating_ip_assignment.ip_assignment: Creation complete after 1s [id=408320]
hcloud_volume_attachment.backup: Creation complete after 3s [id=9275768]
hcloud_volume_attachment.data: Creation complete after 6s [id=9275769]

Apply complete! Resources: 4 added, 0 changed, 4 destroyed.
Outputs:

public_ip = "78.46.253.164"
```

Behind this address our shiny new RDS service is listening for requests, and can be contacted with:

```
psql --host 78.46.253.164 --user instance1
Password for user instance1:
psql (12.5 (Ubuntu 12.5-0ubuntu0.20.10.1), server 11.9 (Debian 11.9-0+deb10u1))
Type "help" for help.

instance1=>
```

Have Fun!

# Where to go from here

Now that we have a working solution, there are tons of possible improvements and features we might want to tackle.

* The configuration of the service is rather static, so we have to redeploy every time we want to change a setting via an environment variable. A better solution would be to switch from gomplate to, for example, [consul-template](https://github.com/hashicorp/consul-template), and have the configuration managed dynamically by a key/value store like [consul](https://www.consul.io/) and/or [vault](https://www.vaultproject.io/).
* We currently have not set an archive retention policy, which leads to backups piling up until all space on the backup volume is exhausted.

-------------------------------------------------------------------------------- /hetzner-rds-postgres.json: --------------------------------------------------------------------------------
1 | { 2 | "type": "excalidraw", 3 | "version": 2, 4 | "source": "https://excalidraw.com", 5 | "elements": [ 6 | { 7 | "type": "rectangle", 8 | "version": 554, 9 | "versionNonce": 1259953134, 10 | "isDeleted": false, 11 | "id": "LO7-tKXLmvI_QaSzBfz57", 12 | "fillStyle": "hachure", 13 | "strokeWidth": 1, 14 | "strokeStyle": "solid", 15 | "roughness": 1, 16 | "opacity": 100, 17 | "angle": 0, 18 | "x": 530, 19 | "y": 497, 20 | "strokeColor": "#000000", 21 | "backgroundColor": "#fab005", 22 | "width": 235.99999999999991, 23 | "height": 110, 24 | "seed": 1636029383, 25 | "groupIds": [], 26 | "strokeSharpness": "sharp", 27 | "boundElementIds": [ 28 | "YfqcsXlkJ-7UlU2DYumHi", 29 | "XDQ0imlsHqMLNYlLo2yvm", 30 | "n0x653nTC4ltKvltx0kw6", 31 | "0E9EK7Cji_Yi73G2m6-HC" 32 | ] 33 | }, 34 | { 35 | "type": "rectangle", 36 | "version": 891, 37 | "versionNonce": 1522403314, 38 | "isDeleted": false, 39 | "id": "bzT5ByOBFej382C45OQGe", 40 | "fillStyle": "hachure", 41 | "strokeWidth": 1, 42 | "strokeStyle": "solid", 43 | "roughness": 1, 44 | "opacity": 100, 45 | "angle": 0, 46 | "x": 987, 47 | "y": 363, 48 | "strokeColor": "#000000", 49 | "backgroundColor": "#82c91e", 50 | "width": 1015.3333333333331, 51 | "height": 563.2222222222223, 52 | "seed": 1946508807, 53 | "groupIds": [], 54 | "strokeSharpness": "sharp", 55 | "boundElementIds": [ 56 | "AyVW93-OsaTeT85Dld_P2", 57 | "XDQ0imlsHqMLNYlLo2yvm", 58 | "CBYFH_RmNvREQEYIx5DB3" 59 | ] 60 | }, 61 | { 62 | "type": "text", 63 | "version": 400, 64 | "versionNonce": 564072494, 65 | "isDeleted": false, 66 | "id": "acDYsVAK_Yyn014BXxBOy", 67 | "fillStyle": "hachure", 68 | "strokeWidth": 1, 69 | "strokeStyle": "solid", 70 | "roughness": 1, 71 | "opacity": 100, 72 | "angle": 0, 73 | "x": 1391, 74 | "y": 381, 75 | "strokeColor": "#000000", 76 | "backgroundColor": "transparent", 77 | "width": 208, 78 | "height": 26, 79 | "seed": 960036551, 80 | "groupIds": [], 81 | "strokeSharpness": "round", 82 | "boundElementIds": [], 83 | "fontSize": 20, 84 | "fontFamily": 1, 85 | "text": "Hetzner Cloud Server", 86 | "baseline": 18, 87 | "textAlign": "center", 88 | "verticalAlign": "top" 89 | }, 90 | { 91 | "type": "rectangle", 92 |
"version": 1409, 93 | "versionNonce": 1307259250, 94 | "isDeleted": false, 95 | "id": "1TQ3GvB5dSJeC-kdCo6dY", 96 | "fillStyle": "hachure", 97 | "strokeWidth": 1, 98 | "strokeStyle": "solid", 99 | "roughness": 1, 100 | "opacity": 100, 101 | "angle": 0, 102 | "x": 1209.5, 103 | "y": 447.5, 104 | "strokeColor": "#000000", 105 | "backgroundColor": "#e64980", 106 | "width": 670.9999999999999, 107 | "height": 426, 108 | "seed": 1046003143, 109 | "groupIds": [], 110 | "strokeSharpness": "round", 111 | "boundElementIds": [ 112 | "XDQ0imlsHqMLNYlLo2yvm", 113 | "F4ufLptr56V5u7FZjXFJO", 114 | "ksibu1ZGuYf98vgD0qgZY", 115 | "CBYFH_RmNvREQEYIx5DB3" 116 | ] 117 | }, 118 | { 119 | "type": "text", 120 | "version": 829, 121 | "versionNonce": 1976934002, 122 | "isDeleted": false, 123 | "id": "sUAq45Xrr3KU6k9n_dYuS", 124 | "fillStyle": "hachure", 125 | "strokeWidth": 1, 126 | "strokeStyle": "solid", 127 | "roughness": 1, 128 | "opacity": 100, 129 | "angle": 0, 130 | "x": 1346, 131 | "y": 470, 132 | "strokeColor": "#000000", 133 | "backgroundColor": "transparent", 134 | "width": 127, 135 | "height": 26, 136 | "seed": 603375689, 137 | "groupIds": [], 138 | "strokeSharpness": "round", 139 | "boundElementIds": [], 140 | "fontSize": 20, 141 | "fontFamily": 1, 142 | "text": "Docker RDS", 143 | "baseline": 18, 144 | "textAlign": "left", 145 | "verticalAlign": "top" 146 | }, 147 | { 148 | "type": "arrow", 149 | "version": 1797, 150 | "versionNonce": 589162738, 151 | "isDeleted": false, 152 | "id": "XDQ0imlsHqMLNYlLo2yvm", 153 | "fillStyle": "hachure", 154 | "strokeWidth": 1, 155 | "strokeStyle": "solid", 156 | "roughness": 1, 157 | "opacity": 100, 158 | "angle": 0, 159 | "x": 782, 160 | "y": 553.4468848723893, 161 | "strokeColor": "#000000", 162 | "backgroundColor": "#7950f2", 163 | "width": 98.92701938709536, 164 | "height": 0.0741495001518615, 165 | "seed": 1261449161, 166 | "groupIds": [], 167 | "strokeSharpness": "round", 168 | "boundElementIds": [], 169 | "startBinding": { 170 | "elementId": "LO7-tKXLmvI_QaSzBfz57", 171 | "focus": 0.041713974578673874, 172 | "gap": 16.000000000000057 173 | }, 174 | "endBinding": { 175 | "elementId": "jqjYmZ22XOHK_Y1ETe032", 176 | "focus": 0.266467345931433, 177 | "gap": 13.572980612904757 178 | }, 179 | "points": [ 180 | [ 181 | 0, 182 | 0 183 | ], 184 | [ 185 | 98.92701938709536, 186 | -0.0741495001518615 187 | ] 188 | ], 189 | "lastCommittedPoint": null, 190 | "startArrowhead": null, 191 | "endArrowhead": "arrow" 192 | }, 193 | { 194 | "type": "rectangle", 195 | "version": 1073, 196 | "versionNonce": 1853813422, 197 | "isDeleted": false, 198 | "id": "jqjYmZ22XOHK_Y1ETe032", 199 | "fillStyle": "hachure", 200 | "strokeWidth": 1, 201 | "strokeStyle": "solid", 202 | "roughness": 1, 203 | "opacity": 100, 204 | "angle": 0, 205 | "x": 894.5, 206 | "y": 533.5, 207 | "strokeColor": "#000000", 208 | "backgroundColor": "#fab005", 209 | "width": 208.00000000000003, 210 | "height": 54, 211 | "seed": 1646282537, 212 | "groupIds": [], 213 | "strokeSharpness": "round", 214 | "boundElementIds": [ 215 | "YfqcsXlkJ-7UlU2DYumHi", 216 | "AyVW93-OsaTeT85Dld_P2", 217 | "e2tOGSTIjupE3PLN3lzwY", 218 | "ksibu1ZGuYf98vgD0qgZY", 219 | "XDQ0imlsHqMLNYlLo2yvm", 220 | "n0x653nTC4ltKvltx0kw6", 221 | "0E9EK7Cji_Yi73G2m6-HC" 222 | ] 223 | }, 224 | { 225 | "type": "arrow", 226 | "version": 399, 227 | "versionNonce": 751834866, 228 | "isDeleted": false, 229 | "id": "ksibu1ZGuYf98vgD0qgZY", 230 | "fillStyle": "hachure", 231 | "strokeWidth": 1, 232 | "strokeStyle": "solid", 233 | "roughness": 1, 234 | "opacity": 
100, 235 | "angle": 0, 236 | "x": 1167.9597099517662, 237 | "y": 977.7350203180341, 238 | "strokeColor": "#000000", 239 | "backgroundColor": "#15aabf", 240 | "width": 5.70614644770626, 241 | "height": 4.735020318034117, 242 | "seed": 719602601, 243 | "groupIds": [], 244 | "strokeSharpness": "round", 245 | "boundElementIds": [], 246 | "startBinding": null, 247 | "endBinding": null, 248 | "points": [ 249 | [ 250 | 0, 251 | 0 252 | ], 253 | [ 254 | 5.70614644770626, 255 | -4.735020318034117 256 | ] 257 | ], 258 | "lastCommittedPoint": null, 259 | "startArrowhead": null, 260 | "endArrowhead": "arrow" 261 | }, 262 | { 263 | "type": "text", 264 | "version": 384, 265 | "versionNonce": 743969714, 266 | "isDeleted": false, 267 | "id": "vF1Xp8P7CupUnPSxG9IkO", 268 | "fillStyle": "hachure", 269 | "strokeWidth": 1, 270 | "strokeStyle": "solid", 271 | "roughness": 1, 272 | "opacity": 100, 273 | "angle": 0, 274 | "x": 540, 275 | "y": 507.5, 276 | "strokeColor": "#000000", 277 | "backgroundColor": "transparent", 278 | "width": 205, 279 | "height": 26, 280 | "seed": 966077678, 281 | "groupIds": [], 282 | "strokeSharpness": "round", 283 | "boundElementIds": [], 284 | "fontSize": 20, 285 | "fontFamily": 1, 286 | "text": "Hetzner Cloud Volume", 287 | "baseline": 18, 288 | "textAlign": "center", 289 | "verticalAlign": "top" 290 | }, 291 | { 292 | "type": "text", 293 | "version": 686, 294 | "versionNonce": 1940502638, 295 | "isDeleted": false, 296 | "id": "Ha4a4uSKrbEAtVIDKy8WM", 297 | "fillStyle": "hachure", 298 | "strokeWidth": 1, 299 | "strokeStyle": "solid", 300 | "roughness": 1, 301 | "opacity": 100, 302 | "angle": 0, 303 | "x": 618.5, 304 | "y": 564, 305 | "strokeColor": "#000000", 306 | "backgroundColor": "transparent", 307 | "width": 47, 308 | "height": 25, 309 | "seed": 1721651442, 310 | "groupIds": [], 311 | "strokeSharpness": "round", 312 | "boundElementIds": [], 313 | "fontSize": 20, 314 | "fontFamily": 3, 315 | "text": "data", 316 | "baseline": 20, 317 | "textAlign": "left", 318 | "verticalAlign": "top" 319 | }, 320 | { 321 | "type": "rectangle", 322 | "version": 579, 323 | "versionNonce": 434302514, 324 | "isDeleted": false, 325 | "id": "xKjF8K-rBmMS2KVWRd1yh", 326 | "fillStyle": "hachure", 327 | "strokeWidth": 1, 328 | "strokeStyle": "solid", 329 | "roughness": 1, 330 | "opacity": 100, 331 | "angle": 0, 332 | "x": 537.5, 333 | "y": 722.5, 334 | "strokeColor": "#000000", 335 | "backgroundColor": "#fab005", 336 | "width": 235.99999999999991, 337 | "height": 110, 338 | "seed": 114175278, 339 | "groupIds": [], 340 | "strokeSharpness": "sharp", 341 | "boundElementIds": [ 342 | "YfqcsXlkJ-7UlU2DYumHi", 343 | "XDQ0imlsHqMLNYlLo2yvm", 344 | "CoYJvIz8BotMzt2wHcDkF" 345 | ] 346 | }, 347 | { 348 | "type": "text", 349 | "version": 411, 350 | "versionNonce": 934057390, 351 | "isDeleted": false, 352 | "id": "FHGm56Y_rNrd9TfR8SKFI", 353 | "fillStyle": "hachure", 354 | "strokeWidth": 1, 355 | "strokeStyle": "solid", 356 | "roughness": 1, 357 | "opacity": 100, 358 | "angle": 0, 359 | "x": 547.5, 360 | "y": 733, 361 | "strokeColor": "#000000", 362 | "backgroundColor": "transparent", 363 | "width": 205, 364 | "height": 26, 365 | "seed": 911803570, 366 | "groupIds": [], 367 | "strokeSharpness": "round", 368 | "boundElementIds": [], 369 | "fontSize": 20, 370 | "fontFamily": 1, 371 | "text": "Hetzner Cloud Volume", 372 | "baseline": 18, 373 | "textAlign": "center", 374 | "verticalAlign": "top" 375 | }, 376 | { 377 | "type": "text", 378 | "version": 728, 379 | "versionNonce": 1465082930, 380 | "isDeleted": false, 381 
| "id": "aN3XJzcfEE3Q8oD2G7K4b", 382 | "fillStyle": "hachure", 383 | "strokeWidth": 1, 384 | "strokeStyle": "solid", 385 | "roughness": 1, 386 | "opacity": 100, 387 | "angle": 0, 388 | "x": 621, 389 | "y": 785.5, 390 | "strokeColor": "#000000", 391 | "backgroundColor": "transparent", 392 | "width": 70, 393 | "height": 25, 394 | "seed": 992761710, 395 | "groupIds": [], 396 | "strokeSharpness": "round", 397 | "boundElementIds": [], 398 | "fontSize": 20, 399 | "fontFamily": 3, 400 | "text": "backup", 401 | "baseline": 20, 402 | "textAlign": "left", 403 | "verticalAlign": "top" 404 | }, 405 | { 406 | "type": "text", 407 | "version": 839, 408 | "versionNonce": 411025774, 409 | "isDeleted": false, 410 | "id": "3gPuwmkPtBPmD2BOEGojI", 411 | "fillStyle": "hachure", 412 | "strokeWidth": 1, 413 | "strokeStyle": "solid", 414 | "roughness": 1, 415 | "opacity": 100, 416 | "angle": 0, 417 | "x": 922, 418 | "y": 547, 419 | "strokeColor": "#000000", 420 | "backgroundColor": "transparent", 421 | "width": 152, 422 | "height": 25, 423 | "seed": 724688558, 424 | "groupIds": [], 425 | "strokeSharpness": "round", 426 | "boundElementIds": [], 427 | "fontSize": 20, 428 | "fontFamily": 3, 429 | "text": "/storage/data", 430 | "baseline": 20, 431 | "textAlign": "left", 432 | "verticalAlign": "top" 433 | }, 434 | { 435 | "type": "rectangle", 436 | "version": 1102, 437 | "versionNonce": 1006946994, 438 | "isDeleted": false, 439 | "id": "Xr1j5mRAjACrPOXr7DrcN", 440 | "fillStyle": "hachure", 441 | "strokeWidth": 1, 442 | "strokeStyle": "solid", 443 | "roughness": 1, 444 | "opacity": 100, 445 | "angle": 0, 446 | "x": 885.5, 447 | "y": 751.5, 448 | "strokeColor": "#000000", 449 | "backgroundColor": "#fab005", 450 | "width": 209, 451 | "height": 54, 452 | "seed": 509565678, 453 | "groupIds": [], 454 | "strokeSharpness": "round", 455 | "boundElementIds": [ 456 | "YfqcsXlkJ-7UlU2DYumHi", 457 | "AyVW93-OsaTeT85Dld_P2", 458 | "e2tOGSTIjupE3PLN3lzwY", 459 | "ksibu1ZGuYf98vgD0qgZY", 460 | "XDQ0imlsHqMLNYlLo2yvm", 461 | "CoYJvIz8BotMzt2wHcDkF", 462 | "0E9EK7Cji_Yi73G2m6-HC" 463 | ] 464 | }, 465 | { 466 | "type": "text", 467 | "version": 869, 468 | "versionNonce": 378596846, 469 | "isDeleted": false, 470 | "id": "n9-1FoR6opom0A2GsMyXf", 471 | "fillStyle": "hachure", 472 | "strokeWidth": 1, 473 | "strokeStyle": "solid", 474 | "roughness": 1, 475 | "opacity": 100, 476 | "angle": 0, 477 | "x": 905.5, 478 | "y": 766, 479 | "strokeColor": "#000000", 480 | "backgroundColor": "transparent", 481 | "width": 176, 482 | "height": 25, 483 | "seed": 2062266286, 484 | "groupIds": [], 485 | "strokeSharpness": "round", 486 | "boundElementIds": [], 487 | "fontSize": 20, 488 | "fontFamily": 3, 489 | "text": "/storage/backup", 490 | "baseline": 20, 491 | "textAlign": "left", 492 | "verticalAlign": "top" 493 | }, 494 | { 495 | "type": "arrow", 496 | "version": 1735, 497 | "versionNonce": 1178379378, 498 | "isDeleted": false, 499 | "id": "CoYJvIz8BotMzt2wHcDkF", 500 | "fillStyle": "hachure", 501 | "strokeWidth": 1, 502 | "strokeStyle": "solid", 503 | "roughness": 1, 504 | "opacity": 100, 505 | "angle": 0, 506 | "x": 788.2141678364561, 507 | "y": 776.459820317575, 508 | "strokeColor": "#000000", 509 | "backgroundColor": "#7950f2", 510 | "width": 85.06091858486923, 511 | "height": 4.4522018861699735, 512 | "seed": 1520870322, 513 | "groupIds": [], 514 | "strokeSharpness": "round", 515 | "boundElementIds": [], 516 | "startBinding": { 517 | "elementId": "xKjF8K-rBmMS2KVWRd1yh", 518 | "focus": 0.11241555879210122, 519 | "gap": 14.71416783645617 520 | }, 
521 | "endBinding": { 522 | "elementId": "Xr1j5mRAjACrPOXr7DrcN", 523 | "focus": 0.38811356662655877, 524 | "gap": 12.22491357867466 525 | }, 526 | "points": [ 527 | [ 528 | 0, 529 | 0 530 | ], 531 | [ 532 | 85.06091858486923, 533 | -4.4522018861699735 534 | ] 535 | ], 536 | "lastCommittedPoint": null, 537 | "startArrowhead": null, 538 | "endArrowhead": "arrow" 539 | }, 540 | { 541 | "id": "Z3oK4UEoRQ7glNbXIF47l", 542 | "type": "rectangle", 543 | "x": 1625.5, 544 | "y": 518.5, 545 | "width": 190, 546 | "height": 92, 547 | "angle": 0, 548 | "strokeColor": "#000000", 549 | "backgroundColor": "#228be6", 550 | "fillStyle": "hachure", 551 | "strokeWidth": 1, 552 | "strokeStyle": "solid", 553 | "roughness": 1, 554 | "opacity": 100, 555 | "groupIds": [], 556 | "strokeSharpness": "round", 557 | "seed": 922050862, 558 | "version": 292, 559 | "versionNonce": 1032162286, 560 | "isDeleted": false, 561 | "boundElementIds": [ 562 | "ueNZQxXASOCRFLvtsAO92", 563 | "rYYzRRAimfLidujY-U6s3" 564 | ] 565 | }, 566 | { 567 | "id": "cSYfayATzFIbOrvW_4Zeo", 568 | "type": "text", 569 | "x": 1666.5, 570 | "y": 550.5, 571 | "width": 114, 572 | "height": 26, 573 | "angle": 0, 574 | "strokeColor": "#000000", 575 | "backgroundColor": "#fab005", 576 | "fillStyle": "hachure", 577 | "strokeWidth": 1, 578 | "strokeStyle": "solid", 579 | "roughness": 1, 580 | "opacity": 100, 581 | "groupIds": [], 582 | "strokeSharpness": "round", 583 | "seed": 136845682, 584 | "version": 282, 585 | "versionNonce": 1052078830, 586 | "isDeleted": false, 587 | "boundElementIds": null, 588 | "text": "PostgreSQL", 589 | "fontSize": 20, 590 | "fontFamily": 1, 591 | "textAlign": "center", 592 | "verticalAlign": "middle", 593 | "baseline": 18 594 | }, 595 | { 596 | "type": "rectangle", 597 | "version": 1088, 598 | "versionNonce": 1269329134, 599 | "isDeleted": false, 600 | "id": "ocdJTsV5067Vx9yuA2O0m", 601 | "fillStyle": "hachure", 602 | "strokeWidth": 1, 603 | "strokeStyle": "solid", 604 | "roughness": 1, 605 | "opacity": 100, 606 | "angle": 0, 607 | "x": 1266.5, 608 | "y": 528.382489840983, 609 | "strokeColor": "#000000", 610 | "backgroundColor": "#fab005", 611 | "width": 208.00000000000003, 612 | "height": 54, 613 | "seed": 702953394, 614 | "groupIds": [], 615 | "strokeSharpness": "round", 616 | "boundElementIds": [ 617 | "YfqcsXlkJ-7UlU2DYumHi", 618 | "AyVW93-OsaTeT85Dld_P2", 619 | "e2tOGSTIjupE3PLN3lzwY", 620 | "CBYFH_RmNvREQEYIx5DB3", 621 | "XDQ0imlsHqMLNYlLo2yvm", 622 | "n0x653nTC4ltKvltx0kw6", 623 | "ueNZQxXASOCRFLvtsAO92" 624 | ] 625 | }, 626 | { 627 | "type": "text", 628 | "version": 852, 629 | "versionNonce": 2005554994, 630 | "isDeleted": false, 631 | "id": "3V-i3DQKcFyVf1sAk3L69", 632 | "fillStyle": "hachure", 633 | "strokeWidth": 1, 634 | "strokeStyle": "solid", 635 | "roughness": 1, 636 | "opacity": 100, 637 | "angle": 0, 638 | "x": 1294, 639 | "y": 541.882489840983, 640 | "strokeColor": "#000000", 641 | "backgroundColor": "transparent", 642 | "width": 152, 643 | "height": 25, 644 | "seed": 795373938, 645 | "groupIds": [], 646 | "strokeSharpness": "round", 647 | "boundElementIds": [], 648 | "fontSize": 20, 649 | "fontFamily": 3, 650 | "text": "/storage/data", 651 | "baseline": 20, 652 | "textAlign": "left", 653 | "verticalAlign": "top" 654 | }, 655 | { 656 | "type": "rectangle", 657 | "version": 1218, 658 | "versionNonce": 1336089582, 659 | "isDeleted": false, 660 | "id": "Gb7x0bPpdnrTw9E-OVfxr", 661 | "fillStyle": "hachure", 662 | "strokeWidth": 1, 663 | "strokeStyle": "solid", 664 | "roughness": 1, 665 | "opacity": 100, 666 | 
"angle": 0, 667 | "x": 1292.5, 668 | "y": 738.382489840983, 669 | "strokeColor": "#000000", 670 | "backgroundColor": "#fab005", 671 | "width": 209, 672 | "height": 54, 673 | "seed": 782702254, 674 | "groupIds": [], 675 | "strokeSharpness": "round", 676 | "boundElementIds": [ 677 | "YfqcsXlkJ-7UlU2DYumHi", 678 | "AyVW93-OsaTeT85Dld_P2", 679 | "e2tOGSTIjupE3PLN3lzwY", 680 | "CBYFH_RmNvREQEYIx5DB3", 681 | "XDQ0imlsHqMLNYlLo2yvm", 682 | "CoYJvIz8BotMzt2wHcDkF", 683 | "0E9EK7Cji_Yi73G2m6-HC", 684 | "EvXP-mJWfQCsyXfe8wXnb" 685 | ] 686 | }, 687 | { 688 | "type": "text", 689 | "version": 967, 690 | "versionNonce": 189761902, 691 | "isDeleted": false, 692 | "id": "ktOpf6TbplH5A65A4xjIV", 693 | "fillStyle": "hachure", 694 | "strokeWidth": 1, 695 | "strokeStyle": "solid", 696 | "roughness": 1, 697 | "opacity": 100, 698 | "angle": 0, 699 | "x": 1308.5, 700 | "y": 753.882489840983, 701 | "strokeColor": "#000000", 702 | "backgroundColor": "transparent", 703 | "width": 176, 704 | "height": 25, 705 | "seed": 1140683570, 706 | "groupIds": [], 707 | "strokeSharpness": "round", 708 | "boundElementIds": [], 709 | "fontSize": 20, 710 | "fontFamily": 3, 711 | "text": "/storage/backup", 712 | "baseline": 20, 713 | "textAlign": "left", 714 | "verticalAlign": "top" 715 | }, 716 | { 717 | "type": "arrow", 718 | "version": 2120, 719 | "versionNonce": 2010334770, 720 | "isDeleted": false, 721 | "id": "0E9EK7Cji_Yi73G2m6-HC", 722 | "fillStyle": "hachure", 723 | "strokeWidth": 1, 724 | "strokeStyle": "solid", 725 | "roughness": 1, 726 | "opacity": 100, 727 | "angle": 0, 728 | "x": 1105.7850072848332, 729 | "y": 779.8421318991857, 730 | "strokeColor": "#000000", 731 | "backgroundColor": "#7950f2", 732 | "width": 172.33130239256957, 733 | "height": 0.9922008181752062, 734 | "seed": 263647730, 735 | "groupIds": [], 736 | "strokeSharpness": "round", 737 | "boundElementIds": [], 738 | "startBinding": { 739 | "elementId": "Xr1j5mRAjACrPOXr7DrcN", 740 | "focus": 0.028570484450014384, 741 | "gap": 11.285007284833227 742 | }, 743 | "endBinding": { 744 | "elementId": "Gb7x0bPpdnrTw9E-OVfxr", 745 | "focus": -0.5846140259611315, 746 | "gap": 14.383690322597204 747 | }, 748 | "points": [ 749 | [ 750 | 0, 751 | 0 752 | ], 753 | [ 754 | 172.33130239256957, 755 | 0.9922008181752062 756 | ] 757 | ], 758 | "lastCommittedPoint": null, 759 | "startArrowhead": null, 760 | "endArrowhead": "arrow" 761 | }, 762 | { 763 | "type": "arrow", 764 | "version": 1909, 765 | "versionNonce": 1931564850, 766 | "isDeleted": false, 767 | "id": "n0x653nTC4ltKvltx0kw6", 768 | "fillStyle": "hachure", 769 | "strokeWidth": 1, 770 | "strokeStyle": "solid", 771 | "roughness": 1, 772 | "opacity": 100, 773 | "angle": 0, 774 | "x": 1117.3139928884327, 775 | "y": 559.2556945601754, 776 | "strokeColor": "#000000", 777 | "backgroundColor": "#7950f2", 778 | "width": 140.5411012364948, 779 | "height": 6.8399010617762315, 780 | "seed": 128443314, 781 | "groupIds": [], 782 | "strokeSharpness": "round", 783 | "boundElementIds": [], 784 | "startBinding": { 785 | "elementId": "jqjYmZ22XOHK_Y1ETe032", 786 | "focus": 0.1415458588292677, 787 | "gap": 14.813992888432722 788 | }, 789 | "endBinding": { 790 | "elementId": "ocdJTsV5067Vx9yuA2O0m", 791 | "focus": 0.26352274074521326, 792 | "gap": 8.644905875072482 793 | }, 794 | "points": [ 795 | [ 796 | 0, 797 | 0 798 | ], 799 | [ 800 | 140.5411012364948, 801 | -6.8399010617762315 802 | ] 803 | ], 804 | "lastCommittedPoint": null, 805 | "startArrowhead": null, 806 | "endArrowhead": "arrow" 807 | }, 808 | { 809 | "id": 
"qQteOFUbiRMzGYr_nzU0r", 810 | "type": "rectangle", 811 | "x": 1621.5, 812 | "y": 727.5, 813 | "width": 190, 814 | "height": 92, 815 | "angle": 0, 816 | "strokeColor": "#000000", 817 | "backgroundColor": "#228be6", 818 | "fillStyle": "hachure", 819 | "strokeWidth": 1, 820 | "strokeStyle": "solid", 821 | "roughness": 1, 822 | "opacity": 100, 823 | "groupIds": [], 824 | "strokeSharpness": "round", 825 | "seed": 241568558, 826 | "version": 401, 827 | "versionNonce": 1225383470, 828 | "isDeleted": false, 829 | "boundElementIds": [ 830 | "EvXP-mJWfQCsyXfe8wXnb", 831 | "rYYzRRAimfLidujY-U6s3" 832 | ] 833 | }, 834 | { 835 | "id": "cgaTu4qOkWU0kOuUiZJ3R", 836 | "type": "text", 837 | "x": 1661.5, 838 | "y": 761.5, 839 | "width": 114, 840 | "height": 26, 841 | "angle": 0, 842 | "strokeColor": "#000000", 843 | "backgroundColor": "#228be6", 844 | "fillStyle": "hachure", 845 | "strokeWidth": 1, 846 | "strokeStyle": "solid", 847 | "roughness": 1, 848 | "opacity": 100, 849 | "groupIds": [], 850 | "strokeSharpness": "round", 851 | "seed": 1711096494, 852 | "version": 228, 853 | "versionNonce": 363947634, 854 | "isDeleted": false, 855 | "boundElementIds": null, 856 | "text": "pgBackRest", 857 | "fontSize": 20, 858 | "fontFamily": 1, 859 | "textAlign": "center", 860 | "verticalAlign": "middle", 861 | "baseline": 18 862 | }, 863 | { 864 | "id": "ueNZQxXASOCRFLvtsAO92", 865 | "type": "arrow", 866 | "x": 1610.5, 867 | "y": 555.4695416555675, 868 | "width": 126, 869 | "height": 4.7983879244483205, 870 | "angle": 0, 871 | "strokeColor": "#000000", 872 | "backgroundColor": "#228be6", 873 | "fillStyle": "hachure", 874 | "strokeWidth": 1, 875 | "strokeStyle": "solid", 876 | "roughness": 1, 877 | "opacity": 100, 878 | "groupIds": [], 879 | "strokeSharpness": "round", 880 | "seed": 891128174, 881 | "version": 381, 882 | "versionNonce": 1345632430, 883 | "isDeleted": false, 884 | "boundElementIds": null, 885 | "points": [ 886 | [ 887 | 0, 888 | 0 889 | ], 890 | [ 891 | -126, 892 | 4.7983879244483205 893 | ] 894 | ], 895 | "lastCommittedPoint": null, 896 | "startBinding": { 897 | "elementId": "Z3oK4UEoRQ7glNbXIF47l", 898 | "focus": 0.2625169147496617, 899 | "gap": 15 900 | }, 901 | "endBinding": { 902 | "elementId": "ocdJTsV5067Vx9yuA2O0m", 903 | "focus": 0.2980189672743527, 904 | "gap": 10 905 | }, 906 | "startArrowhead": null, 907 | "endArrowhead": "arrow" 908 | }, 909 | { 910 | "id": "EvXP-mJWfQCsyXfe8wXnb", 911 | "type": "arrow", 912 | "x": 1610.5000000000002, 913 | "y": 765.3043056955917, 914 | "width": 96.00000000000023, 915 | "height": 2.299908363005329, 916 | "angle": 0, 917 | "strokeColor": "#000000", 918 | "backgroundColor": "#228be6", 919 | "fillStyle": "hachure", 920 | "strokeWidth": 1, 921 | "strokeStyle": "solid", 922 | "roughness": 1, 923 | "opacity": 100, 924 | "groupIds": [], 925 | "strokeSharpness": "round", 926 | "seed": 996751342, 927 | "version": 601, 928 | "versionNonce": 2070531250, 929 | "isDeleted": false, 930 | "boundElementIds": null, 931 | "points": [ 932 | [ 933 | 0, 934 | 0 935 | ], 936 | [ 937 | -96.00000000000023, 938 | -2.299908363005329 939 | ] 940 | ], 941 | "lastCommittedPoint": null, 942 | "startBinding": { 943 | "elementId": "qQteOFUbiRMzGYr_nzU0r", 944 | "focus": 0.10894804247081374, 945 | "gap": 10.999999999999773 946 | }, 947 | "endBinding": { 948 | "elementId": "Gb7x0bPpdnrTw9E-OVfxr", 949 | "focus": -0.17601560506160774, 950 | "gap": 13 951 | }, 952 | "startArrowhead": "arrow", 953 | "endArrowhead": "arrow" 954 | }, 955 | { 956 | "id": "rYYzRRAimfLidujY-U6s3", 957 | "type": 
"arrow", 958 | "x": 1711.0409389328706, 959 | "y": 619.5, 960 | "width": 5.342201384554755, 961 | "height": 95, 962 | "angle": 0, 963 | "strokeColor": "#000000", 964 | "backgroundColor": "#228be6", 965 | "fillStyle": "hachure", 966 | "strokeWidth": 1, 967 | "strokeStyle": "solid", 968 | "roughness": 1, 969 | "opacity": 100, 970 | "groupIds": [], 971 | "strokeSharpness": "round", 972 | "seed": 1744038642, 973 | "version": 516, 974 | "versionNonce": 1885073906, 975 | "isDeleted": false, 976 | "boundElementIds": null, 977 | "points": [ 978 | [ 979 | 0, 980 | 0 981 | ], 982 | [ 983 | 5.342201384554755, 984 | 95 985 | ] 986 | ], 987 | "lastCommittedPoint": null, 988 | "startBinding": { 989 | "elementId": "Z3oK4UEoRQ7glNbXIF47l", 990 | "focus": 0.12862314281147827, 991 | "gap": 9 992 | }, 993 | "endBinding": { 994 | "elementId": "qQteOFUbiRMzGYr_nzU0r", 995 | "focus": 0.03280085197018247, 996 | "gap": 13 997 | }, 998 | "startArrowhead": "arrow", 999 | "endArrowhead": "arrow" 1000 | } 1001 | ], 1002 | "appState": { 1003 | "gridSize": null, 1004 | "viewBackgroundColor": "#ffffff" 1005 | } 1006 | } --------------------------------------------------------------------------------