├── .gitignore
├── CONTRIBUTING.md
├── DCO
├── Dockerfile
├── LICENSE
├── MAINTAINERS.md
├── Makefile
├── README.md
├── charts
└── database
│ ├── Chart.yaml
│ ├── templates
│ ├── database-deployment.yaml
│ ├── database-secret-creds.yaml
│ ├── database-service-account.yaml
│ └── database-service.yaml
│ └── values.yaml
├── contrib
└── ci
│ ├── test-minio.sh
│ ├── test-swift.sh
│ └── test.sh
├── rootfs
├── bin
│ ├── backup
│ ├── create_bucket
│ ├── do_backup
│ └── is_running
├── docker-entrypoint-initdb.d
│ ├── 001_setup_envdir.sh
│ ├── 002_create_bucket.sh
│ ├── 003_restore_from_backup.sh
│ └── 004_run_backups.sh
└── docker-entrypoint.sh
└── versioning.mk
/.gitignore:
--------------------------------------------------------------------------------
1 | contrib/ci/tmp
2 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to Contribute
2 |
3 | This project is part of Deis. You can find the latest contribution
4 | guidelines [at the Deis project](https://github.com/deis/deis/blob/master/CONTRIBUTING.md).
5 |
6 |
--------------------------------------------------------------------------------
/DCO:
--------------------------------------------------------------------------------
1 | Developer Certificate of Origin
2 | Version 1.1
3 |
4 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
5 | 660 York Street, Suite 102,
6 | San Francisco, CA 94110 USA
7 |
8 | Everyone is permitted to copy and distribute verbatim copies of this
9 | license document, but changing it is not allowed.
10 |
11 |
12 | Developer's Certificate of Origin 1.1
13 |
14 | By making a contribution to this project, I certify that:
15 |
16 | (a) The contribution was created in whole or in part by me and I
17 | have the right to submit it under the open source license
18 | indicated in the file; or
19 |
20 | (b) The contribution is based upon previous work that, to the best
21 | of my knowledge, is covered under an appropriate open source
22 | license and I have the right under that license to submit that
23 | work with modifications, whether created in whole or in part
24 | by me, under the same open source license (unless I am
25 | permitted to submit under a different license), as indicated
26 | in the file; or
27 |
28 | (c) The contribution was provided directly to me by some other
29 | person who certified (a), (b) or (c) and I have not modified
30 | it.
31 |
32 | (d) I understand and agree that this project and the contribution
33 | are public and that a record of the contribution (including all
34 | personal information I submit with it, including my sign-off) is
35 | maintained indefinitely and may be redistributed consistent with
36 | this project or the open source license(s) involved.
37 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM quay.io/deis/base:v0.3.6
2 |
3 | ENV LANG=en_US.utf8 \
4 | PG_MAJOR=9.4 \
5 | PG_VERSION=9.4.14-1.pgdg16.04+1 \
6 | PGDATA=/var/lib/postgresql/data
7 |
8 | # Set this separately from those above since it depends on one of them
9 | ENV PATH=/usr/lib/postgresql/$PG_MAJOR/bin:$PATH
10 |
11 | # Add postgres user and group
12 | RUN adduser --system \
13 | --shell /bin/bash \
14 | --disabled-password \
15 | --group \
16 | postgres
17 |
18 | RUN buildDeps='gcc git libffi-dev libssl-dev python3-dev python3-pip python3-wheel' && \
19 | localedef -i en_US -c -f UTF-8 -A /etc/locale.alias en_US.UTF-8 && \
20 | export DEBIAN_FRONTEND=noninteractive && \
21 | apt-key adv --keyserver ha.pool.sks-keyservers.net --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 && \
22 | echo 'deb http://apt.postgresql.org/pub/repos/apt/ xenial-pgdg main' $PG_MAJOR > /etc/apt/sources.list.d/pgdg.list && \
23 | apt-get update && \
24 | apt-get install -y --no-install-recommends \
25 | $buildDeps \
26 | gosu \
27 | lzop \
28 | postgresql-$PG_MAJOR=$PG_VERSION \
29 | postgresql-contrib-$PG_MAJOR=$PG_VERSION \
30 | pv \
31 | python3 \
32 | postgresql-common \
33 | util-linux \
34 | # swift package needs pkg_resources and setuptools
35 | python3-pkg-resources \
36 | python3-setuptools && \
37 | ln -sf /usr/bin/python3 /usr/bin/python && \
38 | ln -sf /usr/bin/pip3 /usr/bin/pip && \
39 | mkdir -p /run/postgresql && \
40 | chown -R postgres /run/postgresql && \
41 | pip install --disable-pip-version-check --no-cache-dir \
42 | envdir==0.7 \
43 | wal-e[aws,azure,google,swift]==v1.0.2 \
44 | # pin azure-storage to version wal-e uses (see docker-entrypoint.sh)
45 | azure-storage==0.20.0 && \
46 | # "upgrade" boto to 2.43.0 + the patch to fix minio connections
47 | pip install --disable-pip-version-check --no-cache-dir --upgrade git+https://github.com/deis/boto@88c980e56d1053892eb940d43a15a68af4ebb5e6 && \
48 | # cleanup
49 | apt-get purge -y --auto-remove $buildDeps && \
50 | apt-get autoremove -y && \
51 | apt-get clean -y && \
52 | # package up license files if any by appending to existing tar
53 | COPYRIGHT_TAR='/usr/share/copyrights.tar' && \
54 | gunzip -f $COPYRIGHT_TAR.gz && \
55 | tar -rf $COPYRIGHT_TAR /usr/share/doc/*/copyright && \
56 | gzip $COPYRIGHT_TAR && \
57 | rm -rf \
58 | /usr/share/doc \
59 | /usr/share/man \
60 | /usr/share/info \
61 | /usr/share/locale \
62 | /var/lib/apt/lists/* \
63 | /var/log/* \
64 | /var/cache/debconf/* \
65 | /etc/systemd \
66 | /lib/lsb \
67 | /lib/udev \
68 | /usr/lib/x86_64-linux-gnu/gconv/IBM* \
69 | /usr/lib/x86_64-linux-gnu/gconv/EBC* && \
70 | bash -c "mkdir -p /usr/share/man/man{1..8}"
71 |
72 | COPY rootfs /
73 | ENV WALE_ENVDIR=/etc/wal-e.d/env
74 | RUN mkdir -p $WALE_ENVDIR
75 |
76 | CMD ["/docker-entrypoint.sh", "postgres"]
77 | EXPOSE 5432
78 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) Microsoft Corporation. All rights reserved.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MAINTAINERS.md:
--------------------------------------------------------------------------------
1 | # Deis Maintainers
2 |
3 | This project is part of Deis. The official maintainers documentation is
4 | located [in the main project](https://github.com/deis/deis/blob/master/MAINTAINERS.md).
5 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Short name: Short name, following [a-zA-Z_], used all over the place.
2 | # Some uses for short name:
3 | # - Docker image name
4 | # - Kubernetes service, rc, pod, secret, volume names
5 | SHORT_NAME := postgres
6 | DEIS_REGISTRY ?= ${DEV_REGISTRY}/
7 | IMAGE_PREFIX ?= deis
8 |
9 | include versioning.mk
10 |
11 | SHELL_SCRIPTS = $(wildcard _scripts/*.sh contrib/ci/*.sh rootfs/bin/*backup) rootfs/bin/is_running
12 |
13 | # The following variables describe the containerized development environment
14 | # and other build options
15 | DEV_ENV_IMAGE := quay.io/deis/go-dev:0.20.0
16 | DEV_ENV_WORK_DIR := /go/src/${REPO_PATH}
17 | DEV_ENV_CMD := docker run --rm -v ${CURDIR}:${DEV_ENV_WORK_DIR} -w ${DEV_ENV_WORK_DIR} ${DEV_ENV_IMAGE}
18 | DEV_ENV_CMD_INT := docker run -it --rm -v ${CURDIR}:${DEV_ENV_WORK_DIR} -w ${DEV_ENV_WORK_DIR} ${DEV_ENV_IMAGE}
19 |
20 | all: docker-build docker-push
21 |
22 | # For cases where we're building from local
23 | # We also alter the RC file to set the image name.
24 | docker-build:
25 | docker build ${DOCKER_BUILD_FLAGS} -t ${IMAGE} .
26 | docker tag ${IMAGE} ${MUTABLE_IMAGE}
27 |
28 | test: test-style test-functional
29 |
30 | test-style:
31 | ${DEV_ENV_CMD} shellcheck $(SHELL_SCRIPTS)
32 |
33 | test-functional: test-functional-swift test-functional-minio
34 |
35 | test-functional-minio:
36 | contrib/ci/test-minio.sh ${IMAGE}
37 |
38 | test-functional-swift:
39 | contrib/ci/test-swift.sh ${IMAGE}
40 |
41 | .PHONY: all docker-build docker-push test test-style test-functional test-functional-minio test-functional-swift
42 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | | | Deis Workflow is no longer maintained. Please [read the announcement](https://deis.com/blog/2017/deis-workflow-final-release/) for more detail. |
3 | |---:|---|
4 | | 09/07/2017 | Deis Workflow [v2.18][] final release before entering maintenance mode |
5 | | 03/01/2018 | End of Workflow maintenance: critical patches no longer merged |
6 | | | [Hephy](https://github.com/teamhephy/workflow) is a fork of Workflow that is actively developed and accepts code contributions. |
7 |
8 | # Deis Postgres
9 |
10 | [](https://ci.deis.io/job/postgres)
11 | [](https://quay.io/repository/deis/postgres)
12 |
13 | Deis (pronounced DAY-iss) Workflow is an open source Platform as a Service (PaaS) that adds a developer-friendly layer to any [Kubernetes](http://kubernetes.io) cluster, making it easy to deploy and manage applications on your own servers.
14 |
15 | For more information about the Deis Workflow, please visit the main project page at https://github.com/deis/workflow.
16 |
17 | We welcome your input! If you have feedback, please submit an [issue][issues]. If you'd like to participate in development, please read the "Development" section below and submit a [pull request][prs].
18 |
19 | # About
20 |
21 | This component is a PostgreSQL database for use in Kubernetes. It builds on the official [postgres](https://registry.hub.docker.com/_/postgres/) Docker image. While it's intended for use inside of the Deis Workflow open source [PaaS](https://en.wikipedia.org/wiki/Platform_as_a_service), it's flexible enough to be used as a standalone pod on any Kubernetes cluster or even as a standalone Docker container.
22 |
23 | # Development
24 |
25 | The Deis project welcomes contributions from all developers. The high level process for development matches many other open source projects. See below for an outline.
26 |
27 | - Fork this repository
28 | - Make your changes
29 | - Submit a [pull request][prs] (PR) to this repository with your changes, and unit tests whenever possible
30 | - If your PR fixes any [issues][issues], make sure you write Fixes #1234 in your PR description (where #1234 is the number of the issue you're closing)
31 | - The Deis core contributors will review your code. After each of them sign off on your code, they'll label your PR with LGTM1 and LGTM2 (respectively). Once that happens, a contributor will merge it
32 |
33 | ## Prerequisites
34 |
35 | In order to develop and test this component in a Deis cluster, you'll need the following:
36 |
37 | * [GNU Make](https://www.gnu.org/software/make/)
38 | * [Docker](https://www.docker.com/) installed, configured and running
39 | * A working Kubernetes cluster and `kubectl` installed and configured to talk to the cluster
40 | * If you don't have this setup, please see [the Kubernetes documentation][k8s-docs]
41 |
42 | ## Testing Your Code
43 |
44 | Once you have all the aforementioned prerequisites, you are ready to start writing code. Once you've finished building a new feature or fixed a bug, please write a unit or integration test for it if possible. See [an existing test](https://github.com/deis/postgres/blob/master/contrib/ci/test.sh) for an example test.
45 |
46 | If your feature or bugfix doesn't easily lend itself to unit/integration testing, you may need to add tests at a higher level. Please consider adding a test to our [end-to-end test suite](https://github.com/deis/workflow-e2e) in that case. If you do, please reference the end-to-end test pull request in your pull request for this repository.
47 |
48 | ### Dogfooding
49 |
50 | Finally, we encourage you to [dogfood](https://en.wikipedia.org/wiki/Eating_your_own_dog_food) this component while you're writing code on it. To do so, you'll need to build and push Docker images with your changes.
51 |
52 | This project has a [Makefile](https://github.com/deis/postgres/blob/master/Makefile) that makes these tasks significantly easier. It requires the following environment variables to be set:
53 |
54 | * `DEIS_REGISTRY` - A Docker registry that you have push access to and your Kubernetes cluster can pull from
55 | * If this is [Docker Hub](https://hub.docker.com/), leave this variable empty
56 | * Otherwise, ensure it has a trailing `/`. For example, if you're using [Quay.io](https://quay.io), use `quay.io/`
57 | * `IMAGE_PREFIX` - The organization in the Docker repository. This defaults to `deis`, but if you don't have access to that organization, set this to one you have push access to.
58 | * `SHORT_NAME` (optional) - The name of the image. This defaults to `postgres`
59 | * `VERSION` (optional) - The tag of the Docker image. This defaults to the current Git SHA (the output of `git rev-parse --short HEAD`)
60 |
61 | Assuming you have these variables set correctly, run `make docker-build` to build the new image, and `make docker-push` to push it. Here is an example command that would push to `quay.io/arschles/postgres:devel`:
62 |
63 | ```console
64 | export DEIS_REGISTRY=quay.io/
65 | export IMAGE_PREFIX=arschles
66 | export VERSION=devel
67 | make docker-build docker-push
68 | ```
69 |
70 | Note that you'll have to push your image to a Docker repository (`make docker-push`) in order for your Kubernetes cluster to pull the image. This is important for testing in your cluster.
71 |
72 |
73 | [issues]: https://github.com/deis/postgres/issues
74 | [k8s-docs]: http://kubernetes.io/docs
75 | [prs]: https://github.com/deis/postgres/pulls
76 | [v2.18]: https://github.com/deis/workflow/releases/tag/v2.18.0
77 |
--------------------------------------------------------------------------------
/charts/database/Chart.yaml:
--------------------------------------------------------------------------------
1 | name: database
2 | home: https://github.com/deis/postgres
3 | version:
4 | description: A PostgreSQL database used by Deis Workflow.
5 | keywords:
6 | - database
7 | - postgres
8 | maintainers:
9 | - name: Deis Team
10 | email: engineering@deis.com
11 |
--------------------------------------------------------------------------------
/charts/database/templates/database-deployment.yaml:
--------------------------------------------------------------------------------
1 | {{- if eq .Values.global.database_location "on-cluster" }}
2 | apiVersion: extensions/v1beta1
3 | kind: Deployment
4 | metadata:
5 | name: deis-database
6 | labels:
7 | heritage: deis
8 | annotations:
9 | component.deis.io/version: {{ .Values.docker_tag }}
10 | spec:
11 | replicas: 1
12 | strategy:
13 | type: Recreate
14 | selector:
15 | matchLabels:
16 | app: deis-database
17 | template:
18 | metadata:
19 | labels:
20 | app: deis-database
21 | spec:
22 | serviceAccount: deis-database
23 | containers:
24 | - name: deis-database
25 | image: quay.io/{{.Values.org}}/postgres:{{.Values.docker_tag}}
26 | imagePullPolicy: {{.Values.pull_policy}}
27 | ports:
28 | - containerPort: 5432
29 | {{- if or (.Values.limits_cpu) (.Values.limits_memory)}}
30 | resources:
31 | limits:
32 | {{- if (.Values.limits_cpu) }}
33 | cpu: {{.Values.limits_cpu}}
34 | {{- end}}
35 | {{- if (.Values.limits_memory) }}
36 | memory: {{.Values.limits_memory}}
37 | {{- end}}
38 | {{- end}}
39 | env:
40 | - name: DATABASE_STORAGE
41 | value: "{{.Values.global.storage}}"
42 | - name: PGCTLTIMEOUT
43 | value: "{{.Values.postgres.timeout}}"
44 | lifecycle:
45 | preStop:
46 | exec:
47 | command:
48 | - gosu
49 | - postgres
50 | - do_backup
51 | readinessProbe:
52 | exec:
53 | command:
54 | - is_running
55 | initialDelaySeconds: 30
56 | timeoutSeconds: 1
57 | volumeMounts:
58 | - name: database-creds
59 | mountPath: /var/run/secrets/deis/database/creds
60 | - name: objectstore-creds
61 | mountPath: /var/run/secrets/deis/objectstore/creds
62 | volumes:
63 | - name: database-creds
64 | secret:
65 | secretName: database-creds
66 | - name: objectstore-creds
67 | secret:
68 | secretName: objectstorage-keyfile
69 | {{- end }}
70 |
--------------------------------------------------------------------------------
/charts/database/templates/database-secret-creds.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: database-creds
5 | labels:
6 | app: deis-database
7 | heritage: deis
8 | annotations:
9 | "helm.sh/hook": pre-install
10 | data: {{ if eq .Values.global.database_location "on-cluster"}}
11 | user: {{ if .Values.username | default "" | ne "" }}{{ .Values.username | b64enc }}{{ else }}{{ randAlphaNum 32 | b64enc }}{{ end }}
12 | password: {{ if .Values.password | default "" | ne "" }}{{ .Values.password | b64enc }}{{ else }}{{ randAlphaNum 32 | b64enc }}{{ end }}{{ else if eq .Values.global.database_location "off-cluster"}}
13 | user: {{ .Values.postgres.username | b64enc }}
14 | password: {{ .Values.postgres.password | b64enc }}
15 | name: {{ .Values.postgres.name | b64enc }}
16 | host: {{ .Values.postgres.host | b64enc }}
17 | port: {{ .Values.postgres.port | b64enc }}{{ end }}
18 |
--------------------------------------------------------------------------------
/charts/database/templates/database-service-account.yaml:
--------------------------------------------------------------------------------
1 | {{- if eq .Values.global.database_location "on-cluster" }}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: deis-database
6 | labels:
7 | heritage: deis
8 | {{- end }}
9 |
--------------------------------------------------------------------------------
/charts/database/templates/database-service.yaml:
--------------------------------------------------------------------------------
1 | {{- if eq .Values.global.database_location "on-cluster" }}
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: deis-database
6 | labels:
7 | heritage: deis
8 | spec:
9 | ports:
10 | - name: postgres
11 | port: 5432
12 | selector:
13 | app: deis-database
14 | {{- end }}
15 |
--------------------------------------------------------------------------------
/charts/database/values.yaml:
--------------------------------------------------------------------------------
1 | org: "deisci"
2 | pull_policy: "Always"
3 | docker_tag: canary
4 | # limits_cpu: "100m"
5 | # limits_memory: "50Mi"
6 | # The username and password to be used by the on-cluster database.
7 | # If left empty they will be generated using randAlphaNum
8 | username: ""
9 | password: ""
10 |
11 | postgres:
12 | name: "database name"
13 | username: "database username"
14 | password: "database password"
15 | host: "database host"
16 | port: "database port"
17 | timeout: "1200"
18 |
--------------------------------------------------------------------------------
/contrib/ci/test-minio.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -eo pipefail
4 |
5 | cleanup() {
6 | kill-containers "${MINIO_JOB}" "${PG_JOB}"
7 | }
8 | trap cleanup EXIT
9 |
10 | TEST_ROOT=$(dirname "${BASH_SOURCE[0]}")/
11 | # shellcheck source=/dev/null
12 | source "${TEST_ROOT}/test.sh"
13 |
14 | # make sure we are in this dir
15 | CURRENT_DIR=$(cd "$(dirname "$0")"; pwd)
16 |
17 | create-postgres-creds
18 |
19 | puts-step "creating fake minio credentials"
20 |
21 | # create fake AWS credentials for minio admin credentials
22 | mkdir -p "${CURRENT_DIR}"/tmp/aws-admin
23 | # needs to be 20 characters long
24 | echo "12345678901234567890" > "${CURRENT_DIR}"/tmp/aws-admin/access-key-id
25 | # needs to be 40 characters long
26 | echo "1234567890123456789012345678901234567890" > "${CURRENT_DIR}"/tmp/aws-admin/access-secret-key
27 |
28 | # create fake AWS credentials for minio user credentials
29 | mkdir -p "${CURRENT_DIR}"/tmp/aws-user
30 | # needs to be 20 characters long
31 | echo "12345678901234567890" > "${CURRENT_DIR}"/tmp/aws-user/accesskey
32 | echo "12345678901234567890" > "${CURRENT_DIR}"/tmp/aws-user/access-key-id
33 | # needs to be 40 characters long
34 | echo "1234567890123456789012345678901234567890" > "${CURRENT_DIR}"/tmp/aws-user/secretkey
35 | echo "1234567890123456789012345678901234567890" > "${CURRENT_DIR}"/tmp/aws-user/access-secret-key
36 |
37 | puts-step "creating fake kubernetes service account token"
38 |
39 | # create fake k8s serviceaccount token for minio to "discover" itself
40 | mkdir -p "${CURRENT_DIR}"/tmp/k8s
41 | echo "token" > "${CURRENT_DIR}"/tmp/k8s/token
42 | echo "cert" > "${CURRENT_DIR}"/tmp/k8s/ca.crt
43 |
44 | # boot minio
45 | MINIO_JOB=$(docker run -d \
46 | -v "${CURRENT_DIR}"/tmp/aws-admin:/var/run/secrets/deis/minio/admin \
47 | -v "${CURRENT_DIR}"/tmp/aws-user:/var/run/secrets/deis/minio/user \
48 | -v "${CURRENT_DIR}"/tmp/k8s:/var/run/secrets/kubernetes.io/serviceaccount \
49 | quay.io/deisci/minio:canary boot server /home/minio/)
50 |
51 | # boot postgres, linking the minio container and setting DEIS_MINIO_SERVICE_HOST and DEIS_MINIO_SERVICE_PORT
52 | PG_CMD="docker run -d --link ${MINIO_JOB}:minio -e PGCTLTIMEOUT=1200 \
53 | -e BACKUP_FREQUENCY=1s -e DATABASE_STORAGE=minio \
54 | -e DEIS_MINIO_SERVICE_HOST=minio -e DEIS_MINIO_SERVICE_PORT=9000 \
55 | -v ${CURRENT_DIR}/tmp/creds:/var/run/secrets/deis/database/creds \
56 | -v ${CURRENT_DIR}/tmp/aws-user:/var/run/secrets/deis/objectstore/creds $1"
57 |
58 | start-postgres "${PG_CMD}"
59 |
60 | # display logs for debugging purposes
61 | puts-step "displaying minio logs"
62 | docker logs "${MINIO_JOB}"
63 |
64 | check-postgres "${PG_JOB}"
65 |
66 | # check if minio has the 5 backups
67 | puts-step "checking if minio has 5 backups"
68 | BACKUPS="$(docker exec "${MINIO_JOB}" ls /home/minio/dbwal/basebackups_005/ | grep json)"
69 | NUM_BACKUPS="$(echo "${BACKUPS}" | wc -w)"
70 | # NOTE (bacongobbler): the BACKUP_FREQUENCY is only 1 second, so we could technically be checking
71 | # in the middle of a backup. Instead of failing, let's consider N+1 backups an acceptable case
72 | if [[ ! "${NUM_BACKUPS}" -eq "5" && ! "${NUM_BACKUPS}" -eq "6" ]]; then
73 | puts-error "did not find 5 or 6 base backups. 5 is the default, but 6 may exist if a backup is currently in progress (found $NUM_BACKUPS)"
74 | puts-error "${BACKUPS}"
75 | exit 1
76 | fi
77 |
78 | # kill off postgres, then reboot and see if it's running after recovering from backups
79 | puts-step "shutting off postgres, then rebooting to test data recovery"
80 | kill-containers "${PG_JOB}"
81 |
82 | start-postgres "${PG_CMD}"
83 |
84 | check-postgres "${PG_JOB}"
85 |
86 | puts-step "tests PASSED!"
87 | exit 0
88 |
--------------------------------------------------------------------------------
/contrib/ci/test-swift.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -eo pipefail
4 |
5 | cleanup() {
6 | kill-containers "${SWIFT_DATA}" "${SWIFT_JOB}" "${PG_JOB}"
7 | }
8 | trap cleanup EXIT
9 |
10 | TEST_ROOT=$(dirname "${BASH_SOURCE[0]}")/
11 | # shellcheck source=/dev/null
12 | source "${TEST_ROOT}/test.sh"
13 |
14 | # make sure we are in this dir
15 | CURRENT_DIR=$(cd "$(dirname "$0")"; pwd)
16 |
17 | create-postgres-creds
18 |
19 | puts-step "fetching openstack credentials"
20 |
21 | # turn creds into something that we can use.
22 | mkdir -p "${CURRENT_DIR}"/tmp/swift
23 |
24 | # guess which value to use for tenant:
25 | TENANT=""
26 |
27 | echo "test:tester" > "${CURRENT_DIR}"/tmp/swift/username
28 | echo "testing" > "${CURRENT_DIR}"/tmp/swift/password
29 | echo "${TENANT}" > "${CURRENT_DIR}"/tmp/swift/tenant
30 | echo "http://swift:8080/auth/v1.0" > "${CURRENT_DIR}"/tmp/swift/authurl
31 | echo "1" > "${CURRENT_DIR}"/tmp/swift/authversion
32 | echo "deis-swift-test" > "${CURRENT_DIR}"/tmp/swift/database-container
33 |
34 | # boot swift
35 | SWIFT_DATA=$(docker run -d -v /srv --name SWIFT_DATA busybox)
36 |
37 | SWIFT_JOB=$(docker run -d --name onlyone --hostname onlyone --volumes-from SWIFT_DATA -t deis/swift-onlyone:git-8516d23)
38 |
39 | # postgres container command
40 | PG_CMD="docker run -d --link ${SWIFT_JOB}:swift -e BACKUP_FREQUENCY=3s \
41 | -e DATABASE_STORAGE=swift \
42 | -e PGCTLTIMEOUT=1200 \
43 | -v ${CURRENT_DIR}/tmp/creds:/var/run/secrets/deis/database/creds \
44 | -v ${CURRENT_DIR}/tmp/swift:/var/run/secrets/deis/objectstore/creds \
45 | $1"
46 |
47 | start-postgres "$PG_CMD"
48 |
49 | # display logs for debugging purposes
50 | puts-step "displaying swift logs"
51 | docker logs "${SWIFT_JOB}"
52 |
53 | check-postgres "${PG_JOB}"
54 |
55 | # check if swift has the 5 retained backups (6 if one is currently in progress)
56 | puts-step "checking if swift has 5 backups"
57 |
58 | BACKUPS="$(docker exec "${SWIFT_JOB}" swift -A http://127.0.0.1:8080/auth/v1.0 \
59 | -U test:tester -K testing list deis-swift-test | grep basebackups_005 | grep json)"
60 | NUM_BACKUPS="$(echo "${BACKUPS}" | wc -w)"
61 | # NOTE (bacongobbler): the BACKUP_FREQUENCY is only 3 seconds, so we could technically be checking
62 | # in the middle of a backup. Instead of failing, let's consider N+1 backups an acceptable case
63 | if [[ ! "${NUM_BACKUPS}" -eq "5" && ! "${NUM_BACKUPS}" -eq "6" ]]; then
64 | puts-error "did not find 5 or 6 base backups. 5 is the default, but 6 may exist if a backup is currently in progress (found $NUM_BACKUPS)"
65 | puts-error "${BACKUPS}"
66 | exit 1
67 | fi
68 |
69 | # kill off postgres, then reboot and see if it's running after recovering from backups
70 | puts-step "shutting off postgres, then rebooting to test data recovery"
71 | kill-containers "${PG_JOB}"
72 |
73 | start-postgres "${PG_CMD}"
74 |
75 | check-postgres "${PG_JOB}"
76 |
77 | puts-step "tests PASSED!"
78 | exit 0
79 |
--------------------------------------------------------------------------------
/contrib/ci/test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -eo pipefail
4 |
5 | puts-step() {
6 | echo "-----> $*"
7 | }
8 |
9 | puts-error() {
10 | echo "!!! $*"
11 | }
12 |
13 | kill-containers() {
14 | puts-step "destroying containers $*"
15 | docker rm -f "$@"
16 | }
17 |
18 | create-postgres-creds() {
19 | puts-step "creating fake postgres credentials"
20 |
21 | # create fake postgres credentials
22 | mkdir -p "${CURRENT_DIR}"/tmp/creds
23 | echo "testuser" > "${CURRENT_DIR}"/tmp/creds/user
24 | echo "icanttellyou" > "${CURRENT_DIR}"/tmp/creds/password
25 | }
26 |
27 | start-postgres() {
28 | export PG_JOB
29 | PG_JOB=$($1)
30 | # wait for postgres to boot
31 | puts-step "sleeping for 90s while postgres is booting..."
32 | sleep 90s
33 | }
34 |
35 | check-postgres() {
36 | # display logs for debugging purposes
37 | puts-step "displaying postgres logs"
38 | docker logs "$1"
39 |
40 | # check if postgres is running
41 | puts-step "checking if postgres is running"
42 | docker exec "$1" is_running
43 | }
44 |
--------------------------------------------------------------------------------
/rootfs/bin/backup:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | export BACKUP_FREQUENCY=${BACKUP_FREQUENCY:-4h}
4 |
5 | while true; do
6 | sleep "$BACKUP_FREQUENCY"
7 | do_backup
8 | done
9 |
--------------------------------------------------------------------------------
/rootfs/bin/create_bucket:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 |
5 | import boto.s3
6 | import json
7 | import swiftclient
8 | from boto import config as botoconfig
9 | from boto.exception import S3CreateError
10 | from boto.s3.connection import S3Connection, OrdinaryCallingFormat
11 | from oauth2client.service_account import ServiceAccountCredentials
12 | from gcloud.storage.client import Client
13 | from gcloud import exceptions
14 | from azure.storage.blob import BlobService
15 |
16 | def bucket_exists(conn, name):
17 | bucket = conn.lookup(name)
18 | if not bucket:
19 | return False
20 | return True
21 |
22 | bucket_name = os.getenv('BUCKET_NAME')
23 | region = os.getenv('AWS_REGION')
24 |
25 | if os.getenv('DATABASE_STORAGE') == "s3":
26 | conn = boto.s3.connect_to_region(region)
27 | if not bucket_exists(conn, bucket_name):
28 | try:
29 | if region == "us-east-1":
30 | # use "US Standard" region. workaround for https://github.com/boto/boto3/issues/125
31 | conn.create_bucket(bucket_name)
32 | else:
33 | conn.create_bucket(bucket_name, location=region)
34 | # NOTE(bacongobbler): for versions prior to v2.9.0, the bucket is created in the default region.
35 | # if we got here, we need to propagate "us-east-1" into WALE_S3_ENDPOINT because the bucket
36 | # exists in a different region and we cannot find it.
37 | # TODO(bacongobbler): deprecate this once we drop support for v2.8.0 and lower
38 | except S3CreateError as err:
39 | if region != 'us-east-1':
40 | print('Failed to create bucket in {}. We are now assuming that the bucket was created in us-east-1.'.format(region))
41 | with open(os.path.join(os.environ['WALE_ENVDIR'], "WALE_S3_ENDPOINT"), "w+") as file:
42 | file.write('https+path://s3.amazonaws.com:443')
43 | else:
44 | raise
45 |
elif os.getenv('DATABASE_STORAGE') == "gcs":
    # Google Cloud Storage: authenticate with the mounted service-account
    # key file, then create the bucket only if it does not already exist.
    scopes = ['https://www.googleapis.com/auth/devstorage.full_control']
    credentials = ServiceAccountCredentials.from_json_keyfile_name(os.getenv('GOOGLE_APPLICATION_CREDENTIALS'), scopes=scopes)
    # the service-account key file also carries the project id the client needs
    with open(os.getenv('GOOGLE_APPLICATION_CREDENTIALS')) as data_file:
        data = json.load(data_file)
    conn = Client(credentials=credentials, project=data['project_id'])
    exists = True
    try:
        conn.get_bucket(bucket_name)
    except exceptions.NotFound:
        exists = False
    except:
        # NOTE(review): this bare except/raise is a no-op -- any error other
        # than NotFound would propagate with or without it
        raise
    if not exists:
        conn.create_bucket(bucket_name)

elif os.getenv('DATABASE_STORAGE') == "azure":
    conn = BlobService(account_name=os.getenv('WABS_ACCOUNT_NAME'), account_key=os.getenv('WABS_ACCESS_KEY'))
    #It doesn't throw an exception if the container exists by default(https://github.com/Azure/azure-storage-python/blob/master/azure/storage/blob/baseblobservice.py#L504).
    conn.create_container(bucket_name)

elif os.getenv('DATABASE_STORAGE') == "swift":
    conn = swiftclient.Connection(
        user=os.getenv('SWIFT_USER'),
        key=os.getenv('SWIFT_PASSWORD'),
        authurl=os.getenv('SWIFT_AUTHURL'),
        auth_version=os.getenv('SWIFT_AUTH_VERSION'),
        tenant_name=os.getenv('SWIFT_TENANT')
    )
    # swift also does not throw exception if container already exists.
    # NOTE(review): every other branch passes the bucket_name variable --
    # confirm os.getenv('BUCKET_NAME') always equals it here
    conn.put_container(os.getenv('BUCKET_NAME'))

else:
    # fallback: an s3-compatible endpoint (minio, per the HACK below) reached
    # via explicit host/port with path-style addressing and sigv4 signing
    botoconfig.add_section('s3')
    botoconfig.set('s3', 'use-sigv4', 'True')
    botoconfig.add_section('Boto')
    botoconfig.set('Boto', 'is_secure', 'False')
    conn = S3Connection(
        host=os.getenv('S3_HOST'),
        port=int(os.getenv('S3_PORT')),
        calling_format=OrdinaryCallingFormat())
    # HACK(bacongobbler): allow boto to connect to minio by changing the region name for s3v4 auth
    conn.auth_region_name = os.getenv('AWS_REGION')
    if not bucket_exists(conn, bucket_name):
        conn.create_bucket(bucket_name)
91 |
--------------------------------------------------------------------------------
/rootfs/bin/do_backup:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#
# Push a wal-e base backup of $PGDATA to the object store, then prune old
# base backups so only the newest $BACKUPS_TO_RETAIN (default 5) remain.
# Skipped while the database is still replaying a restore.

export BACKUPS_TO_RETAIN=${BACKUPS_TO_RETAIN:-5}

echo "Performing a base backup..."
if [[ ! -f "$PGDATA/recovery.conf" ]] ; then
  # push a fresh base backup to the object store
  envdir "$WALE_ENVDIR" wal-e backup-push "$PGDATA"
  # drop everything except the newest $BACKUPS_TO_RETAIN base backups
  envdir "$WALE_ENVDIR" wal-e delete --confirm retain "$BACKUPS_TO_RETAIN"
  echo "Backup has been completed."
else
  # recovery.conf present => postgres is mid-restore; do not back up now
  echo "Database is currently recovering from a backup. Aborting"
fi
15 |
--------------------------------------------------------------------------------
/rootfs/bin/is_running:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Liveness/readiness probe: exit non-zero while postgres is either restoring
# from a backup or not running.

# fail fast
set -e

# recovery.conf exists for the duration of a restore; the server is not ready
# to accept incoming connections until it is gone.
[[ -f "$PGDATA/recovery.conf" ]] && exit 1

gosu postgres pg_ctl status
12 |
--------------------------------------------------------------------------------
/rootfs/docker-entrypoint-initdb.d/001_setup_envdir.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#
# Populate $WALE_ENVDIR with one file per environment variable, as consumed
# by envdir(8) when wal-e is invoked. Which files are written depends on the
# object store selected by $DATABASE_STORAGE (s3, minio, gcs, azure, swift);
# credentials are read from the Kubernetes secret mount under
# /var/run/secrets/deis/objectstore/creds.
#
# NOTE: this file is sourced (not executed) by docker-entrypoint.sh, so it
# must not exit or exec.
#
# Fix: all `echo $VAR > FILE` expansions are now quoted so credential values
# containing whitespace or glob characters are written verbatim, and the
# [ ] / [[ ]] mix is unified to [[ ]].

cd "$WALE_ENVDIR"

if [[ "$DATABASE_STORAGE" == "s3" || "$DATABASE_STORAGE" == "minio" ]]; then
  AWS_ACCESS_KEY_ID=$(cat /var/run/secrets/deis/objectstore/creds/accesskey)
  AWS_SECRET_ACCESS_KEY=$(cat /var/run/secrets/deis/objectstore/creds/secretkey)
  if [[ "$DATABASE_STORAGE" == "s3" ]]; then
    AWS_REGION=$(cat /var/run/secrets/deis/objectstore/creds/region)
    BUCKET_NAME=$(cat /var/run/secrets/deis/objectstore/creds/database-bucket)
    # Convert $AWS_REGION into $WALE_S3_ENDPOINT to avoid "Connection reset by peer" from
    # regions other than us-standard.
    # See https://github.com/wal-e/wal-e/issues/167
    # See https://github.com/boto/boto/issues/2207
    if [[ "$AWS_REGION" == "us-east-1" ]]; then
      echo "https+path://s3.amazonaws.com:443" > WALE_S3_ENDPOINT
    else
      echo "https+path://s3-${AWS_REGION}.amazonaws.com:443" > WALE_S3_ENDPOINT
    fi
  else
    AWS_REGION="us-east-1"
    BUCKET_NAME="dbwal"
    # these only need to be set if we're not accessing S3 (boto will figure this out)
    echo "http+path://$DEIS_MINIO_SERVICE_HOST:$DEIS_MINIO_SERVICE_PORT" > WALE_S3_ENDPOINT
    echo "$DEIS_MINIO_SERVICE_HOST" > S3_HOST
    echo "$DEIS_MINIO_SERVICE_PORT" > S3_PORT
    # enable sigv4 authentication
    echo "true" > S3_USE_SIGV4
  fi
  echo "s3://$BUCKET_NAME" > WALE_S3_PREFIX
  # if these values are empty, then the user is using IAM credentials so we don't want these in the
  # environment
  if [[ "$AWS_ACCESS_KEY_ID" != "" && "$AWS_SECRET_ACCESS_KEY" != "" ]]; then
    echo "$AWS_ACCESS_KEY_ID" > AWS_ACCESS_KEY_ID
    echo "$AWS_SECRET_ACCESS_KEY" > AWS_SECRET_ACCESS_KEY
  else
    echo "1" > AWS_INSTANCE_PROFILE
  fi
  echo "$AWS_REGION" > AWS_REGION
  echo "$BUCKET_NAME" > BUCKET_NAME
elif [[ "$DATABASE_STORAGE" == "gcs" ]]; then
  GOOGLE_APPLICATION_CREDENTIALS="/var/run/secrets/deis/objectstore/creds/key.json"
  BUCKET_NAME=$(cat /var/run/secrets/deis/objectstore/creds/database-bucket)
  echo "gs://$BUCKET_NAME" > WALE_GS_PREFIX
  echo "$GOOGLE_APPLICATION_CREDENTIALS" > GOOGLE_APPLICATION_CREDENTIALS
  echo "$BUCKET_NAME" > BUCKET_NAME
elif [[ "$DATABASE_STORAGE" == "azure" ]]; then
  WABS_ACCOUNT_NAME=$(cat /var/run/secrets/deis/objectstore/creds/accountname)
  WABS_ACCESS_KEY=$(cat /var/run/secrets/deis/objectstore/creds/accountkey)
  BUCKET_NAME=$(cat /var/run/secrets/deis/objectstore/creds/database-container)
  echo "$WABS_ACCOUNT_NAME" > WABS_ACCOUNT_NAME
  echo "$WABS_ACCESS_KEY" > WABS_ACCESS_KEY
  echo "wabs://$BUCKET_NAME" > WALE_WABS_PREFIX
  echo "$BUCKET_NAME" > BUCKET_NAME
elif [[ "$DATABASE_STORAGE" == "swift" ]]; then
  SWIFT_USER=$(cat /var/run/secrets/deis/objectstore/creds/username)
  SWIFT_PASSWORD=$(cat /var/run/secrets/deis/objectstore/creds/password)
  SWIFT_TENANT=$(cat /var/run/secrets/deis/objectstore/creds/tenant)
  SWIFT_AUTHURL=$(cat /var/run/secrets/deis/objectstore/creds/authurl)
  SWIFT_AUTH_VERSION=$(cat /var/run/secrets/deis/objectstore/creds/authversion)
  BUCKET_NAME=$(cat /var/run/secrets/deis/objectstore/creds/database-container)
  # set defaults for variables that we can guess at
  echo "$SWIFT_USER" > SWIFT_USER
  echo "$SWIFT_PASSWORD" > SWIFT_PASSWORD
  echo "$SWIFT_TENANT" > SWIFT_TENANT
  echo "$SWIFT_AUTHURL" > SWIFT_AUTHURL
  echo "$SWIFT_AUTH_VERSION" > SWIFT_AUTH_VERSION
  echo "swift://$BUCKET_NAME" > WALE_SWIFT_PREFIX
  echo "$BUCKET_NAME" > BUCKET_NAME
fi
71 |
--------------------------------------------------------------------------------
/rootfs/docker-entrypoint-initdb.d/002_create_bucket.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#
# Create the WAL bucket/container in the configured object store before the
# first backup is pushed. create_bucket (rootfs/bin) picks up its settings
# from the environment files that envdir(8) loads from $WALE_ENVDIR.

# ensure WAL log bucket exists
envdir "$WALE_ENVDIR" create_bucket
5 |
--------------------------------------------------------------------------------
/rootfs/docker-entrypoint-initdb.d/003_restore_from_backup.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#
# Enable WAL archiving and, when the object store already holds base backups,
# replace the freshly-initialized cluster with the latest one. Sourced by
# docker-entrypoint.sh while a local-only postgres instance is running.

# ship every completed WAL segment (or at the latest every 60s) to the
# object store via wal-e; ${WALE_ENVDIR} expands now, at write time
cat << EOF >> "$PGDATA/postgresql.conf"
wal_level = archive
archive_mode = on
archive_command = 'envdir "${WALE_ENVDIR}" wal-e wal-push %p'
archive_timeout = 60
EOF

# ensure $PGDATA has the right permissions
chown -R postgres:postgres "$PGDATA"
chmod 0700 "$PGDATA"

# reboot the server for wal_level to be set before backing up
echo "Rebooting postgres to enable archive mode"
gosu postgres pg_ctl -D "$PGDATA" -w restart

# check if there are any backups -- if so, let's restore
# we could probably do better than just testing number of lines -- one line is just a heading, meaning no backups
if [[ $(envdir "$WALE_ENVDIR" wal-e --terse backup-list | wc -l) -gt "1" ]]; then
  echo "Found backups. Restoring from backup..."
  gosu postgres pg_ctl -D "$PGDATA" -w stop
  # wipe the fresh cluster and fetch the newest base backup in its place
  rm -rf "$PGDATA"
  envdir "$WALE_ENVDIR" wal-e backup-fetch "$PGDATA" LATEST
  # $PGDATA was just wiped above, so recreate the server config from scratch
  cat << EOF > "$PGDATA/postgresql.conf"
# These settings are initialized by initdb, but they can be changed.
log_timezone = 'UTC'
lc_messages = 'C' # locale for system error message
lc_monetary = 'C' # locale for monetary formatting
lc_numeric = 'C' # locale for number formatting
lc_time = 'C' # locale for time formatting
default_text_search_config = 'pg_catalog.english'
wal_level = archive
archive_mode = on
archive_command = 'envdir "${WALE_ENVDIR}" wal-e wal-push %p'
archive_timeout = 60
listen_addresses = '*'
EOF
  cat << EOF > "$PGDATA/pg_hba.conf"
# "local" is for Unix domain socket connections only
local all all trust
# IPv4 local connections:
host all all 127.0.0.1/32 trust
# IPv6 local connections:
host all all ::1/128 trust
# IPv4 global connections
host all all 0.0.0.0/0 md5
EOF
  touch "$PGDATA/pg_ident.conf"
  # a recovery.conf with a restore_command makes postgres start in recovery
  # mode and replay WAL fetched by wal-e.
  # NOTE(review): this hardcodes /etc/wal-e.d/env instead of "$WALE_ENVDIR"
  # like every other wal-e call in this script -- confirm the two always
  # refer to the same directory
  echo "restore_command = 'envdir /etc/wal-e.d/env wal-e wal-fetch \"%f\" \"%p\"'" >> "$PGDATA/recovery.conf"
  chown -R postgres:postgres "$PGDATA"
  chmod 0700 "$PGDATA"
  # start the restored cluster on local sockets only; docker-entrypoint.sh
  # re-enables remote listening after all init hooks finish
  gosu postgres pg_ctl -D "$PGDATA" \
    -o "-c listen_addresses=''" \
    -w start
fi

echo "Performing an initial backup..."
gosu postgres envdir "$WALE_ENVDIR" wal-e backup-push "$PGDATA"

# ensure $PGDATA has the right permissions
chown -R postgres:postgres "$PGDATA"
chmod 0700 "$PGDATA"
64 |
--------------------------------------------------------------------------------
/rootfs/docker-entrypoint-initdb.d/004_run_backups.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#
# Start the periodic backup process (rootfs/bin/backup) as the postgres user.
# It is backgrounded because this file is sourced by docker-entrypoint.sh,
# which must continue with the remaining init steps.

# Run periodic backups in the background
gosu postgres backup &
5 |
--------------------------------------------------------------------------------
/rootfs/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Originally copied and modified from
4 | # https://github.com/docker-library/postgres/blob/ec5ce80ca914e02c2d5eb9fde424039d4cee032e/9.4/docker-entrypoint.sh
5 | #
6 | set -e
7 |
# Rewrite the (possibly commented-out) listen_addresses setting in
# $PGDATA/postgresql.conf to the supplied value.
set_listen_addresses() {
	# escape sed replacement metacharacters (/ and &) in the value
	escapedValue="$(echo "$1" | sed 's/[\/&]/\\&/g')"
	sed -ri "s/^#?(listen_addresses\s*=\s*)\S+/\1'$escapedValue'/" "$PGDATA/postgresql.conf"
}
12 |
# database superuser credentials come from a mounted Kubernetes secret
POSTGRES_USER="$(cat /var/run/secrets/deis/database/creds/user)"
POSTGRES_PASSWORD="$(cat /var/run/secrets/deis/database/creds/password)"

# only run first-boot initialization when the container command is postgres
# itself; any other command is exec'd verbatim at the bottom of this script
if [ "$1" = 'postgres' ]; then
	mkdir -p "$PGDATA"
	chmod 700 "$PGDATA"
	chown -R postgres "$PGDATA"

	chmod g+s /run/postgresql
	chown -R postgres /run/postgresql

	# look specifically for PG_VERSION, as it is expected in the DB dir
	if [ ! -s "$PGDATA/PG_VERSION" ]; then
		gosu postgres initdb

		# check password first so we can output the warning before postgres
		# messes it up
		if [ "$POSTGRES_PASSWORD" ]; then
			pass="PASSWORD '$POSTGRES_PASSWORD'"
			authMethod=md5
		else
			# The - option suppresses leading tabs but *not* spaces. :)
			cat >&2 <<-'EOWARN'
				****************************************************
				WARNING: No password has been set for the database.
				         This will allow anyone with access to the
				         Postgres port to access your database. In
				         Docker's default configuration, this is
				         effectively any other container on the same
				         system.

				         Use "-e POSTGRES_PASSWORD=password" to set
				         it in "docker run".
				****************************************************
			EOWARN

			pass=
			authMethod=trust
		fi

		# permit remote connections using the auth method chosen above
		{ echo; echo "host all all 0.0.0.0/0 $authMethod"; } >> "$PGDATA/pg_hba.conf"

		# internal start of server in order to allow set-up using psql-client
		# does not listen on TCP/IP and waits until start finishes
		gosu postgres pg_ctl -D "$PGDATA" \
			-o "-c listen_addresses=''" \
			-w start

		: ${POSTGRES_USER:=postgres}
		: ${POSTGRES_DB:=$POSTGRES_USER}
		export POSTGRES_USER POSTGRES_DB

		if [ "$POSTGRES_DB" != 'postgres' ]; then
			psql --username postgres <<-EOSQL
				CREATE DATABASE "$POSTGRES_DB" ;
			EOSQL
			echo
		fi

		# ALTER the built-in postgres role, or CREATE a brand-new one
		if [ "$POSTGRES_USER" = 'postgres' ]; then
			op='ALTER'
		else
			op='CREATE'
		fi

		psql --username postgres <<-EOSQL
			$op USER "$POSTGRES_USER" WITH SUPERUSER $pass ;
		EOSQL
		echo

		echo
		# run init hooks: *.sh files are sourced, so they share this shell's
		# environment (and must not exit/exec); *.sql files run through psql
		for f in /docker-entrypoint-initdb.d/*; do
			case "$f" in
				*.sh) echo "$0: running $f"; . "$f" ;;
				*.sql)
					echo "$0: running $f";
					psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < "$f"
					echo
					;;
				*) echo "$0: ignoring $f" ;;
			esac
			echo
		done

		# init finished: stop the private server, re-enable remote listening
		gosu postgres pg_ctl -D "$PGDATA" -m fast -w stop
		set_listen_addresses '*'

		echo
		echo 'PostgreSQL init process complete; ready for start up.'
		echo
	fi

	# hand the container over to postgres running as the postgres user
	exec gosu postgres "$@"
fi

exec "$@"
109 |
--------------------------------------------------------------------------------
/versioning.mk:
--------------------------------------------------------------------------------
# Shared image-versioning rules. MUTABLE_VERSION is a floating tag (default
# "canary"); VERSION is an immutable per-commit tag derived from git.
MUTABLE_VERSION ?= canary
VERSION ?= git-$(shell git rev-parse --short HEAD)

# fully-qualified image references, e.g. <registry><prefix>/<name>:<tag>
IMAGE := ${DEIS_REGISTRY}${IMAGE_PREFIX}/${SHORT_NAME}:${VERSION}
MUTABLE_IMAGE := ${DEIS_REGISTRY}${IMAGE_PREFIX}/${SHORT_NAME}:${MUTABLE_VERSION}

# print the computed tags (useful when debugging CI)
info:
	@echo "Build tag: ${VERSION}"
	@echo "Registry: ${DEIS_REGISTRY}"
	@echo "Immutable tag: ${IMAGE}"
	@echo "Mutable tag: ${MUTABLE_IMAGE}"

# push both the immutable (per-commit) and mutable (floating) tags
.PHONY: docker-push
docker-push: docker-immutable-push docker-mutable-push

.PHONY: docker-immutable-push
docker-immutable-push:
	docker push ${IMAGE}

.PHONY: docker-mutable-push
docker-mutable-push:
	docker push ${MUTABLE_IMAGE}
--------------------------------------------------------------------------------