├── .gitignore ├── configure_backup.sh ├── k8s ├── ConfigMap.yaml.template ├── Secrets.yaml.template ├── Job-pg-backup.yaml ├── Deployment-backup.yaml ├── CronDelpoyment-pg-basebackup.yaml └── Deployment-recover.yaml ├── app ├── Makefile ├── entrypoint.sh ├── backup.sh ├── cronjob.sh ├── setup-wale.sh ├── Dockerfile └── recover.sh ├── stop_backup.sh ├── internal-notes.md ├── recovery.sh └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | k8s/Secrets.yaml 2 | k8s/ConfigMap.yaml 3 | -------------------------------------------------------------------------------- /configure_backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kubectl create -f k8s/ConfigMap.yaml 4 | kubectl create -f k8s/Secrets.yaml 5 | kubectl replace -f k8s/Deployment-backup.yaml 6 | kubectl create -f k8s/Job-pg-backup.yaml 7 | kubectl create -f k8s/CronDelpoyment-pg-basebackup.yaml 8 | -------------------------------------------------------------------------------- /k8s/ConfigMap.yaml.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | data: 4 | aws.region: "example: us-east-1" 5 | aws.s3.prefix: "s3://<your-bucket>/pgbackup" 6 | metadata: 7 | labels: 8 | app: hasura-backup 9 | name: hasura-backup 10 | namespace: hasura 11 | -------------------------------------------------------------------------------- /app/Makefile: -------------------------------------------------------------------------------- 1 | registry := hasura 2 | version := 9.6.1-r1 3 | 4 | export PG_MAJOR=9.6 5 | export PG_VERSION=9.6.1-1.pgdg80+1 6 | 7 | image: Dockerfile 8 | docker build -t $(registry)/postgres-wal-e:$(version) . 
9 | 10 | push: Dockerfile 11 | docker push $(registry)/postgres-wal-e:$(version) 12 | -------------------------------------------------------------------------------- /k8s/Secrets.yaml.template: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | data: 4 | aws.access.key: "paste your AWS access key : base64 ENCODED" 5 | aws.secret.access.key: "paste your AWS secret access key : base64 ENCODED" 6 | metadata: 7 | labels: 8 | app: hasura-backup 9 | name: hasura-backup 10 | namespace: hasura 11 | type: Opaque 12 | -------------------------------------------------------------------------------- /stop_backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | pod="$1" 4 | 5 | if [ -z "$pod" ]; then 6 | echo "Usage: $0 <postgres-pod-name>" 7 | exit 1 8 | fi 9 | 10 | kubectl delete -f k8s/Job-pg-backup.yaml 11 | kubectl delete -f k8s/CronDelpoyment-pg-basebackup.yaml 12 | kubectl delete -f k8s/Secrets.yaml 13 | kubectl delete -f k8s/ConfigMap.yaml 14 | 15 | kubectl exec -it "$pod" -n hasura -- sh -c "sed -i '/# Add settings for extensions here/q' /var/lib/postgresql/data/postgresql.conf" 16 | 17 | kubectl rollout undo deploy/postgres -n hasura 18 | 19 | -------------------------------------------------------------------------------- /internal-notes.md: -------------------------------------------------------------------------------- 1 | postgres backup strategies: 2 | https://www.postgresql.org/docs/current/static/backup.html 3 | https://www.hagander.net/talks/Backup%20strategies.pdf 4 | 5 | continuous backup: 6 | https://www.postgresql.org/docs/9.6/static/continuous-archiving.html 7 | 8 | streaming replication: 9 | https://www.postgresql.org/docs/9.6/static/warm-standby.html 10 | 11 | http://dba.stackexchange.com/questions/133420/postgres-how-to-restore-backup-with-wal-eo 12 | 13 | http://dba.stackexchange.com/questions/109444/backup-a-large-postgres-database-for-pitr 14 | 
-------------------------------------------------------------------------------- /app/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | if [ "$1" = 'postgres' ]; then 6 | 7 | if [ ! -s "$PGDATA/PG_VERSION" ]; then 8 | echo "$PGDATA/PG_VERSION does not exist" 9 | else 10 | echo "$PGDATA/PG_VERSION exist, ensuring wal-e is set to run" 11 | . /docker-entrypoint-initdb.d/setup-wale.sh 12 | fi 13 | 14 | echo "Running command $1" 15 | . /docker-entrypoint.sh $1 16 | fi 17 | 18 | if [ "$1" = 'backup' ]; then 19 | . /backup.sh 20 | exit 0 21 | fi 22 | 23 | if [ "$1" = 'recover' ]; then 24 | . /recover.sh 25 | fi 26 | 27 | echo "Executing: $@" 28 | exec "$@" 29 | -------------------------------------------------------------------------------- /app/backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ -z ${PGDATA+x} ]]; then 4 | PGDATA=/var/lib/postgresql/data 5 | fi 6 | 7 | echo "Hasura Backup System: Configuring WAL-E" 8 | 9 | echo "$WALE_S3_PREFIX" > /etc/wal-e.d/env/WALE_S3_PREFIX 10 | echo "$AWS_ACCESS_KEY_ID" > /etc/wal-e.d/env/AWS_ACCESS_KEY_ID 11 | echo "$AWS_SECRET_ACCESS_KEY" > /etc/wal-e.d/env/AWS_SECRET_ACCESS_KEY 12 | echo "$AWS_REGION" > /etc/wal-e.d/env/AWS_REGION 13 | 14 | chown -R postgres:postgres /etc/wal-e.d/* 15 | 16 | echo "Hasura Backup System: Pushing base backup" 17 | 18 | envdir /etc/wal-e.d/env /usr/local/bin/wal-e backup-push $PGDATA 19 | 20 | if [[ $? 
-ne 0 ]]; then 21 | echo "Hasura Backup System: Error pushing base backup" 22 | exit 1 23 | fi 24 | 25 | echo "Hasura Backup System: Done" 26 | -------------------------------------------------------------------------------- /app/cronjob.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ -z ${PGDATA+x} ]]; then 4 | PGDATA=/var/lib/postgresql/data 5 | fi 6 | 7 | if [[ -z $SCHEDULE ]]; then 8 | # daily 4 times 9 | SCHEDULE='0 */6 * * *' 10 | fi 11 | 12 | echo "Hasura Backup System: Configuring WAL-E" 13 | 14 | echo "$WALE_S3_PREFIX" > /etc/wal-e.d/env/WALE_S3_PREFIX 15 | echo "$AWS_ACCESS_KEY_ID" > /etc/wal-e.d/env/AWS_ACCESS_KEY_ID 16 | echo "$AWS_SECRET_ACCESS_KEY" > /etc/wal-e.d/env/AWS_SECRET_ACCESS_KEY 17 | echo "$AWS_REGION" > /etc/wal-e.d/env/AWS_REGION 18 | 19 | chown -R postgres:postgres /etc/wal-e.d/* 20 | 21 | echo "Hasura Backup System: Setting up cron job" 22 | 23 | cat <<EOF >> /etc/cron.d/pg_basebackup 24 | # Run pg base backups 25 | 26 | ${SCHEDULE} postgres envdir /etc/wal-e.d/env /usr/local/bin/wal-e backup-push $PGDATA >> /var/log/cron.log 2>&1 27 | EOF 28 | 29 | cat /etc/cron.d/pg_basebackup 30 | 31 | touch /var/log/cron.log 32 | chown root:postgres /var/log/cron.log 33 | chmod g+w /var/log/cron.log 34 | 35 | cron 36 | 37 | exec tail -F /var/log/cron.log 38 | -------------------------------------------------------------------------------- /app/setup-wale.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Assumption: the group is trusted to read secret information 4 | umask u=rwx,g=rx,o= 5 | #mkdir -p /etc/wal-e.d/env 6 | 7 | echo "$WALE_S3_PREFIX" > /etc/wal-e.d/env/WALE_S3_PREFIX 8 | echo "$AWS_ACCESS_KEY_ID" > /etc/wal-e.d/env/AWS_ACCESS_KEY_ID 9 | echo "$AWS_SECRET_ACCESS_KEY" > /etc/wal-e.d/env/AWS_SECRET_ACCESS_KEY 10 | echo "$AWS_REGION" > /etc/wal-e.d/env/AWS_REGION 11 | 12 | # TODO: check if configuration already exists - 
then exit 13 | 14 | chown -R postgres:postgres /etc/wal-e.d/* 15 | 16 | echo "Hasura Backup System: Configuring Postgres for continous backup" 17 | 18 | cat <<EOF >> /var/lib/postgresql/data/postgresql.conf 19 | 20 | # Hasura wal-e configuration 21 | wal_level = replica 22 | archive_mode = on 23 | archive_command = 'envdir /etc/wal-e.d/env /usr/local/bin/wal-e wal-push %p' 24 | archive_timeout = 60 25 | 26 | EOF 27 | 28 | echo "Hasura Backup System: Scheduling WAL-E backups" 29 | # echo "0 8 * * * postgres envdir /etc/wal-e.d/env wal-e backup-push $PGDATA" > /etc/cron.d/pg_base_backup 30 | 31 | echo "Hasura Postgres: Done" 32 | -------------------------------------------------------------------------------- /app/Dockerfile: -------------------------------------------------------------------------------- 1 | # TODO: remove python-all-dev? , libevent-dev? 2 | 3 | FROM postgres:9.6.1 4 | MAINTAINER anon@hasura.io 5 | 6 | RUN apt-get update \ 7 | && apt-get install -y \ 8 | python-pip python3-pip python3-pkg-resources \ 9 | postgresql-server-dev-$PG_MAJOR=$PG_VERSION \ 10 | libpq-dev \ 11 | daemontools libevent-dev python3-all-dev lzop pv \ 12 | && pip install pgxnclient && pgxn install first_last_agg \ 13 | && pip3 install wal-e[aws] \ 14 | && pip uninstall -y pgxnclient \ 15 | && apt-get purge -y --auto-remove python-pip python3-pip libpq-dev postgresql-server-dev-$PG_MAJOR=$PG_VERSION \ 16 | && rm -rf ~/.cache/pip \ 17 | && rm -rf /var/lib/apt/lists/* 18 | 19 | COPY setup-wale.sh /docker-entrypoint-initdb.d/ 20 | RUN chmod +x /docker-entrypoint-initdb.d/setup-wale.sh 21 | 22 | RUN mkdir -p /etc/wal-e.d/env 23 | RUN chown -R postgres:postgres /etc/wal-e.d 24 | 25 | COPY backup.sh /backup.sh 26 | RUN chmod +x /backup.sh 27 | 28 | COPY recover.sh /recover.sh 29 | RUN chmod +x /recover.sh 30 | 31 | COPY cronjob.sh /cronjob.sh 32 | RUN chmod +x /cronjob.sh 33 | 34 | COPY entrypoint.sh / 35 | RUN chmod +x /entrypoint.sh 36 | ENTRYPOINT ["/entrypoint.sh"] 37 | 38 | CMD 
["postgres"] 39 | -------------------------------------------------------------------------------- /app/recover.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Hasura Recovery System: Configuring WAL-E" 4 | 5 | echo "$WALE_S3_PREFIX" > /etc/wal-e.d/env/WALE_S3_PREFIX 6 | echo "$AWS_ACCESS_KEY_ID" > /etc/wal-e.d/env/AWS_ACCESS_KEY_ID 7 | echo "$AWS_SECRET_ACCESS_KEY" > /etc/wal-e.d/env/AWS_SECRET_ACCESS_KEY 8 | echo "$AWS_REGION" > /etc/wal-e.d/env/AWS_REGION 9 | 10 | chown -R postgres:postgres /etc/wal-e.d/* 11 | 12 | # backup postgresql.conf 13 | cp $PGDATA/postgresql.conf /postgresql.conf 14 | cp $PGDATA/pg_hba.conf /pg_hba.conf 15 | cp $PGDATA/pg_ident.conf /pg_ident.conf 16 | 17 | echo "Hasura Recovery System: Obliterating current data directory ${PGDATA}/*" 18 | gosu postgres rm -r ${PGDATA}/* 19 | 20 | echo "Hasura Recovery System: Fetching latest base backup" 21 | gosu postgres envdir /etc/wal-e.d/env /usr/local/bin/wal-e backup-fetch $PGDATA LATEST 22 | 23 | echo "Hasura Recovery System: Restoring backups" 24 | cat <> $PGDATA/recovery.conf 25 | restore_command = 'envdir /etc/wal-e.d/env /usr/local/bin/wal-e wal-fetch "%f" "%p"' 26 | EOF 27 | 28 | # restore the postgresql.conf 29 | mv /postgresql.conf $PGDATA/postgresql.conf 30 | mv /pg_hba.conf $PGDATA/pg_hba.conf 31 | mv /pg_ident.conf $PGDATA/pg_ident.conf 32 | 33 | chown -R postgres:postgres $PGDATA/* 34 | 35 | echo "Hasura Recovery System: Starting Postgres server" 36 | /docker-entrypoint.sh postgres 37 | -------------------------------------------------------------------------------- /recovery.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Setting up configuration to start recovery" 4 | 5 | kubectl create -f k8s/ConfigMap.yaml 6 | kubectl create -f k8s/Secrets.yaml 7 | 8 | kubectl replace -f k8s/Deployment-recover.yaml 9 | 10 | echo "" 11 | echo "*** STATUS ***" 12 | 
echo "--------------" 13 | echo "The recovery process will take some time depending on your data and" 14 | echo "size and number of backups." 15 | echo "" 16 | echo "You can see the status by tail-ing the log of the postgres pod:" 17 | echo " $ kubectl logs -n hasura" 18 | echo "" 19 | 20 | echo "*** POST RECOVERY STEPS ***" 21 | echo "---------------------------" 22 | echo "More details in the documentation." 23 | echo "" 24 | echo "Make sure kubectl is pointing to old project" 25 | echo " $ kubectl config set current-context " 26 | echo "Then:" 27 | echo " $ kubectl get secret postgres -n hasura -o yaml" 28 | echo "The value in postgres.password is the postgres admin password." 29 | echo "Copy the value in the postgres.password field and keep it." 30 | echo "Again:" 31 | echo " $ kubectl get secret auth -n hasura" 32 | echo "The value in django.sapass is the project admin password." 33 | echo "Copy the value in the django.sapass field and keep it." 34 | echo "" 35 | echo "Now switch to new project:" 36 | echo " $ kubectl config set current-context " 37 | echo "Then:" 38 | echo " $ kubectl edit secret postgres -n hasura" 39 | echo "In the postgres.password field, paste the value from previous step." 40 | echo "And:" 41 | echo " $ kubectl edit secret auth -n hasura" 42 | echo "In the django.sapass field, paste the value from previous step." 
43 | echo "" 44 | echo "Now restart auth and data pods:" 45 | echo " $ kubectl delete pod -n hasura" 46 | echo " $ kubectl delete pod -n hasura" 47 | -------------------------------------------------------------------------------- /k8s/Job-pg-backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | labels: 5 | app: pg-backup 6 | name: pg-backup 7 | namespace: hasura 8 | spec: 9 | template: 10 | metadata: 11 | labels: 12 | app: pg-backup 13 | name: pg-backup 14 | spec: 15 | containers: 16 | - image: hasura/postgres-wal-e:9.6.1-r1 17 | name: pg-backup 18 | env: 19 | - name: PGHOST 20 | value: "postgres.hasura" 21 | - name: PGPORT 22 | value: "5432" 23 | - name: PGUSER 24 | valueFrom: 25 | secretKeyRef: 26 | key: postgres.user 27 | name: postgres 28 | - name: PGPASSWORD 29 | valueFrom: 30 | secretKeyRef: 31 | key: postgres.password 32 | name: postgres 33 | - name: AWS_REGION 34 | valueFrom: 35 | configMapKeyRef: 36 | key: aws.region 37 | name: hasura-backup 38 | - name: WALE_S3_PREFIX 39 | valueFrom: 40 | configMapKeyRef: 41 | key: aws.s3.prefix 42 | name: hasura-backup 43 | - name: AWS_ACCESS_KEY_ID 44 | valueFrom: 45 | secretKeyRef: 46 | name: hasura-backup 47 | key: aws.access.key 48 | - name: AWS_SECRET_ACCESS_KEY 49 | valueFrom: 50 | secretKeyRef: 51 | name: hasura-backup 52 | key: aws.secret.access.key 53 | args: 54 | - backup 55 | volumeMounts: 56 | - mountPath: /var/lib/postgresql/data 57 | name: data 58 | restartPolicy: Never 59 | volumes: 60 | - hostPath: 61 | path: /home/core/persist/hasura/postgres 62 | name: data 63 | -------------------------------------------------------------------------------- /k8s/Deployment-backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: postgres 6 | name: postgres 7 | namespace: hasura 8 | spec: 9 | 
replicas: 1 10 | selector: 11 | matchLabels: 12 | app: postgres 13 | template: 14 | metadata: 15 | labels: 16 | app: postgres 17 | spec: 18 | containers: 19 | - env: 20 | - name: POSTGRES_USER 21 | valueFrom: 22 | secretKeyRef: 23 | key: postgres.user 24 | name: postgres 25 | - name: POSTGRES_PASSWORD 26 | valueFrom: 27 | secretKeyRef: 28 | key: postgres.password 29 | name: postgres 30 | - name: AWS_REGION 31 | valueFrom: 32 | configMapKeyRef: 33 | key: aws.region 34 | name: hasura-backup 35 | - name: WALE_S3_PREFIX 36 | valueFrom: 37 | configMapKeyRef: 38 | key: aws.s3.prefix 39 | name: hasura-backup 40 | - name: AWS_ACCESS_KEY_ID 41 | valueFrom: 42 | secretKeyRef: 43 | name: hasura-backup 44 | key: aws.access.key 45 | - name: AWS_SECRET_ACCESS_KEY 46 | valueFrom: 47 | secretKeyRef: 48 | name: hasura-backup 49 | key: aws.secret.access.key 50 | image: hasura/postgres-wal-e:9.6.1-r1 51 | imagePullPolicy: IfNotPresent 52 | name: postgres 53 | ports: 54 | - containerPort: 5432 55 | protocol: TCP 56 | volumeMounts: 57 | - mountPath: /var/lib/postgresql/data 58 | name: data 59 | dnsPolicy: ClusterFirst 60 | restartPolicy: Always 61 | volumes: 62 | - hostPath: 63 | path: /home/core/persist/hasura/postgres 64 | name: data 65 | -------------------------------------------------------------------------------- /k8s/CronDelpoyment-pg-basebackup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: cron-pg-basebackup 6 | name: cron-pg-basebackup 7 | namespace: hasura 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: cron-pg-basebackup 13 | template: 14 | metadata: 15 | labels: 16 | app: cron-pg-basebackup 17 | name: cron-pg-basebackup 18 | spec: 19 | containers: 20 | - image: hasura/postgres-wal-e:9.6.1-r1 21 | name: cron-pg-basebackup 22 | env: 23 | - name: PGUSER 24 | valueFrom: 25 | secretKeyRef: 26 | key: postgres.user 27 | name: 
postgres 28 | - name: PGPASSWORD 29 | valueFrom: 30 | secretKeyRef: 31 | key: postgres.password 32 | name: postgres 33 | - name: AWS_REGION 34 | valueFrom: 35 | configMapKeyRef: 36 | key: aws.region 37 | name: hasura-backup 38 | - name: WALE_S3_PREFIX 39 | valueFrom: 40 | configMapKeyRef: 41 | key: aws.s3.prefix 42 | name: hasura-backup 43 | - name: AWS_ACCESS_KEY_ID 44 | valueFrom: 45 | secretKeyRef: 46 | name: hasura-backup 47 | key: aws.access.key 48 | - name: AWS_SECRET_ACCESS_KEY 49 | valueFrom: 50 | secretKeyRef: 51 | name: hasura-backup 52 | key: aws.secret.access.key 53 | - name: SCHEDULE 54 | value: "0 0 * * *" 55 | args: 56 | - /cronjob.sh 57 | volumeMounts: 58 | - mountPath: /var/lib/postgresql/data 59 | name: data 60 | volumes: 61 | - hostPath: 62 | path: /home/core/persist/hasura/postgres 63 | name: data 64 | -------------------------------------------------------------------------------- /k8s/Deployment-recover.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: postgres 6 | name: postgres 7 | namespace: hasura 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: postgres 13 | template: 14 | metadata: 15 | labels: 16 | app: postgres 17 | spec: 18 | containers: 19 | - env: 20 | - name: POSTGRES_USER 21 | valueFrom: 22 | secretKeyRef: 23 | key: postgres.user 24 | name: postgres 25 | - name: POSTGRES_PASSWORD 26 | valueFrom: 27 | secretKeyRef: 28 | key: postgres.password 29 | name: postgres 30 | - name: AWS_REGION 31 | valueFrom: 32 | configMapKeyRef: 33 | key: aws.region 34 | name: hasura-backup 35 | - name: WALE_S3_PREFIX 36 | valueFrom: 37 | configMapKeyRef: 38 | key: aws.s3.prefix 39 | name: hasura-backup 40 | - name: AWS_ACCESS_KEY_ID 41 | valueFrom: 42 | secretKeyRef: 43 | name: hasura-backup 44 | key: aws.access.key 45 | - name: AWS_SECRET_ACCESS_KEY 46 | valueFrom: 47 | secretKeyRef: 48 | name: 
hasura-backup 49 | key: aws.secret.access.key 50 | args: 51 | - recover 52 | image: hasura/postgres-wal-e:9.6.1-r1 53 | imagePullPolicy: IfNotPresent 54 | name: postgres 55 | ports: 56 | - containerPort: 5432 57 | protocol: TCP 58 | volumeMounts: 59 | - mountPath: /var/lib/postgresql/data 60 | name: data 61 | dnsPolicy: ClusterFirst 62 | restartPolicy: Always 63 | volumes: 64 | - hostPath: 65 | path: /home/core/persist/hasura/postgres 66 | # path: /data/persist/hasura/postgres 67 | name: data 68 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Continuous backups on Hasura 2 | 3 | We use the continuous archiving method of Postgres to backup your Postgres 4 | database. This gives the flexibility of hot backups and Point-in-time recovery 5 | (PITR). 6 | 7 | We use [wal-e](https://github.com/wal-e/wal-e) to push the Postgres backups to 8 | storage services of popular cloud providers. We **do not** keep your backups. 9 | 10 | Currently we only support S3. 11 | 12 | Support for Azure blob storage, Google container storage and OpenStack Swift is 13 | coming soon. 14 | 15 | ## Overview 16 | 17 | The way Hasura backup works is: 18 | 19 | * You setup a S3 bucket, Azure blob storage, GCP container etc. - where you want 20 | to save your backups. 21 | 22 | * Take the collection of sample Kubernetes resources files (in `k8s` folder), 23 | and edit the two files to put appropriate configuration data. 24 | 25 | * Then run the shell script: `configure_backup.sh`. This will configure your 26 | Postgres instance with continuous backup. 27 | 28 | 29 | What happens under the hood is, Postgres is configured with the archive command 30 | to start continuous archiving and pushing the backups to your configured cloud 31 | storage using wal-e. 
32 | 33 | Further reading: 34 | 35 | * [Postgres continuous archiving](https://www.postgresql.org/docs/current/static/continuous-archiving.html) 36 | 37 | * [Backup strategies on Postgres](https://www.postgresql.org/docs/current/static/backup.html) 38 | 39 | 40 | # Configure backup on a Hasura project 41 | 42 | * Download this repository. 43 | 44 | * `k8s` folder lists all the Kubernetes resource files. 45 | 46 | * You have to edit 2 files: the secret and configmap files: 47 | 48 | * Copy file `k8s/Secrets.yaml.template` into `k8s/Secrets.yaml`. 49 | * Open the `k8s/Secrets.yaml` file in an editor and put **base64 encoded** 50 | string of your AWS Access Key and AWS Secret Key. 51 | * Copy file `k8s/ConfigMap.yaml.template` into `k8s/ConfigMap.yaml`. 52 | * Open the `k8s/ConfigMap.yaml` file in an editor and put the path to your S3 53 | bucket (where you want the backups to be saved), and the AWS region. 54 | 55 | * You can put these Kubernetes files in version control. But remember, **do 56 | not** put the `k8s/Secrets.yaml` and `k8s/ConfigMap.yaml` files in version 57 | control. Or you risk leak of secret data!! 58 | 59 | * Once the secrets and configmap are configured, we can run the script to 60 | configure our cluster. 61 | 62 | * Make sure you have `kubectl` installed and the `current-context` is set to 63 | Hasura project cluster you are configuring the backup for. 64 | 65 | * Then run: 66 | ```shell 67 | $ ./configure_backup.sh 68 | ``` 69 | 70 | ### More options 71 | 72 | If you ever want to stop the backup process altogether, you can run the 73 | following script to do it. 74 | 75 | * First, make sure you have followed the above steps and have configured the 76 | `k8s/Secrets.yaml` and `k8s/ConfigMap.yaml` files and also have installed and 77 | configured `kubectl` correctly. 78 | 79 | * Then run: 80 | ```shell 81 | $ ./stop_backup.sh 82 | ``` 83 | 84 | **NOTE**: This script is not guaranteed to have rolled back the postgres 85 | deployment correctly. 
You might need to manually intervene. 86 | 87 | 88 | # Recover from backup on a Hasura project 89 | 90 | ## Setup for the recovery 91 | 92 | * Download this repository. 93 | 94 | * `k8s` folder lists all the Kubernetes resource files. 95 | 96 | * You have to edit 2 files: the secret and configmap files: 97 | 98 | * Copy file `k8s/Secrets.yaml.template` into `k8s/Secrets.yaml`. 99 | * Open the `k8s/Secrets.yaml` file in an editor and put **base64 encoded** 100 | string of your AWS Access Key and AWS Secret Key. 101 | * Copy file `k8s/ConfigMap.yaml.template` into `k8s/ConfigMap.yaml`. 102 | * Open the `k8s/ConfigMap.yaml` file in an editor and put the path to your S3 103 | bucket (where you want the backups to be saved), and the AWS region. 104 | 105 | * You can put these Kubernetes files in version control. But remember, **do 106 | not** put the `k8s/Secrets.yaml` and `k8s/ConfigMap.yaml` files in version 107 | control. Or you risk leak of secret data!! 108 | 109 | ## Configure the recovery to start 110 | 111 | * Once the secrets and configmap are configured, we can run the script to 112 | configure our cluster. 113 | 114 | * Make sure you have `kubectl` installed and the `current-context` is set to 115 | Hasura project cluster you are configuring the backup for. 116 | 117 | * Then run: 118 | ```shell 119 | $ ./recovery.sh 120 | ``` 121 | 122 | ## Checking status 123 | 124 | The recovery process will take some time depending on your data and 125 | size and number of backups. 126 | 127 | You can see the status by looking at the logs of the postgres pod: 128 | 129 | ```shell 130 | $ kubectl logs -n hasura 131 | ``` 132 | 133 | Alternatively, you can also check for a `recovery.done` file in the `PGDATA` 134 | directory. 135 | 136 | 137 | ## Post recovery steps 138 | 139 | **NOTE**: If this step is not completed, you won't be able to use the project. 140 | 141 | When the database is recovered from a backup, it is an exact replica of the 142 | source database. 
Hence everything, including postgres and project admin 143 | paswords, will be as it is in the old project. 144 | 145 | As a result, after the recovery is complete, we need to change the passwords of 146 | the current project to that of the older project. 147 | 148 | Follow the steps to achieve that: 149 | 150 | * Make sure kubectl is pointing to old project: 151 | ```shell 152 | $ kubectl config set current-context 153 | ``` 154 | 155 | * Then run: 156 | ```shell 157 | $ kubectl get secret postgres -n hasura -o yaml 158 | ``` 159 | The value in postgres.password is the postgres admin password. 160 | Copy the value in the postgres.password field and keep it. 161 | 162 | * Again: 163 | ```shell 164 | $ kubectl get secret auth -n hasura 165 | ``` 166 | The value in django.sapass is the project admin password. 167 | Copy the value in the django.sapass field and keep it. 168 | 169 | * Now switch to new project: 170 | ```shell 171 | $ kubectl config set current-context 172 | ``` 173 | 174 | * Then run: 175 | ```shell 176 | $ kubectl edit secret postgres -n hasura 177 | ``` 178 | In the postgres.password field, paste the value from previous step. 179 | 180 | * And: 181 | ```shell 182 | $ kubectl edit secret auth -n hasura 183 | ``` 184 | In the django.sapass field, paste the value from previous step. 185 | 186 | * Now restart auth and data pods: 187 | 188 | ```shell 189 | $ kubectl delete pod -n hasura 190 | $ kubectl delete pod -n hasura 191 | ``` 192 | 193 | Now you should be able to login to your new project with old project's 194 | credentials. 195 | --------------------------------------------------------------------------------