├── .dockercfg ├── .dockercfg.enc ├── .github ├── stale.yml └── workflows │ ├── docker-release.yml │ ├── integration-tests-docker.yml │ └── main.yml ├── .gitignore ├── .gitmodules ├── CHANGELOG.md ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── cmd ├── agent │ └── agent.go ├── all │ └── all.go ├── backup │ └── backup.go ├── cmd.go ├── info │ └── info.go ├── manager │ └── manager.go ├── restic │ └── restic.go ├── restore │ └── restore.go └── volumes │ └── volumes.go ├── contrib ├── charts │ └── bivac │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── README.md │ │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── configmap.yaml │ │ ├── deployment.yaml │ │ ├── role.yaml │ │ └── service.yaml │ │ └── values.yaml ├── examples │ └── docker-compose │ │ ├── alertmanager │ │ └── config.yml │ │ ├── docker-compose.yml │ │ ├── grafana │ │ └── provisioning │ │ │ ├── dashboards │ │ │ ├── Bivac.json │ │ │ └── dashboard.yml │ │ │ └── datasources │ │ │ └── datasource.yml │ │ └── prometheus │ │ ├── alert.rules │ │ └── prometheus.yml └── openshift │ ├── README.md │ ├── bivac-template.yaml │ └── bivac2-agent.template.yaml ├── go.mod ├── go.sum ├── img ├── bivac.png ├── bivac.svg ├── bivac_descr.png ├── bivac_descr.svg ├── bivac_handdrawn.svg ├── bivac_small.png ├── bivac_tent3_cont_orange_green.svg ├── bivac_tent3_cont_orange_green3_front.png ├── bivac_tent3_cont_orange_green3_front_descr.png ├── bivac_tent3_cont_orange_green3_front_descr_400.png ├── bivac_tent3_cont_orange_green3_front_persp.png ├── bivac_tent3_cont_orange_green3_front_persp_300dpi.png └── bivac_tent3_cont_orange_green_descr.svg ├── internal ├── agent │ └── agent.go ├── engine │ └── engine.go ├── manager │ ├── backup.go │ ├── manager.go │ ├── manager_test.go │ ├── provider.go │ ├── restore.go │ ├── server.go │ ├── server_test.go │ ├── volumes.go │ └── volumes_test.go └── utils │ ├── utils.go │ └── utils_test.go ├── main.go ├── mocks ├── mock_docker.go ├── mock_orchestrator.go └── mock_prometheus.go ├── pkg ├── client │ ├── client.go │ └── client_test.go ├── orchestrators │ ├── cattle.go │ ├── docker.go │ ├── docker_test.go │ ├── kubernetes.go │ └── orchestrators.go └── volume │ ├── volume.go │ └── volume_test.go ├── providers-config.default.toml ├── scripts └── build-release.sh └── test └── integration ├── cattle ├── Vagrantfile.builder ├── Vagrantfile.runner ├── build.sh ├── prepare.sh └── tests │ └── 01_basic └── docker ├── docker-compose.yml ├── mysql_seed.sql ├── postgres_seed.sql └── tests ├── mysql ├── postgres └── raw /.dockercfg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camptocamp/bivac/fea7057dd002ae6c79472a1f6480e158449215ad/.dockercfg -------------------------------------------------------------------------------- /.dockercfg.enc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/camptocamp/bivac/fea7057dd002ae6c79472a1f6480e158449215ad/.dockercfg.enc -------------------------------------------------------------------------------- /.github/stale.yml: -------------------------------------------------------------------------------- 1 | # Number of days of inactivity before an issue becomes stale 2 | daysUntilStale: 60 3 | # Number of days of inactivity before a stale issue is closed 4 | daysUntilClose: 7 5 | # Issues with these labels will never be considered stale 6 | exemptLabels: 7 | - pinned 8 | - security 9 | # Label to use when marking an issue as stale 10 | staleLabel: 
wontfix 11 | # Comment to post when marking an issue as stale. Set to `false` to disable 12 | markComment: > 13 | This issue has been automatically marked as stale because it has not had 14 | recent activity. It will be closed if no further activity occurs. Thank you 15 | for your contributions. 16 | # Comment to post when closing a stale issue. Set to `false` to disable 17 | closeComment: false 18 | -------------------------------------------------------------------------------- /.github/workflows/docker-release.yml: -------------------------------------------------------------------------------- 1 | name: Docker Image Release 2 | 3 | on: 4 | release: 5 | types: [published] 6 | 7 | env: 8 | IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/bivac 9 | DOCKER_CLI_EXPERIMENTAL: enabled 10 | 11 | jobs: 12 | push_to_registry: 13 | name: Push Docker images to the GitHub Container Registry 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Check out the repo 17 | uses: actions/checkout@v2 18 | with: 19 | fetch-depth: 0 20 | ref: ${{ github.event.release.tag_name }} 21 | 22 | - name: Login to Github Registry 23 | uses: docker/login-action@v1 24 | with: 25 | registry: ghcr.io 26 | username: ${{ github.repository_owner }} 27 | password: ${{ secrets.CR_PAT }} 28 | 29 | - name: Build and push docker images 30 | run: | 31 | IMAGE_NAME=${{ env.IMAGE_NAME }} IMAGE_VERSION=${{ github.event.release.tag_name }} KEEP_IMAGES=yes make docker-images 32 | 33 | - uses: actions-ecosystem/action-regex-match@v2 34 | id: regex-match 35 | with: 36 | text: ${{ github.event.release.tag_name }} 37 | regex: '^([0-9]+)\.([0-9]+)\.[0-9]+$' 38 | 39 | - name: Move tags and push to Github Registry 40 | if: ${{ steps.regex-match.outputs.match != '' }} 41 | run: | 42 | docker manifest create ${{ env.IMAGE_NAME }}:${{ steps.regex-match.outputs.group1 }}.${{ steps.regex-match.outputs.group2 }} \ 43 | ${{ env.IMAGE_NAME }}-linux-amd64:${{ github.event.release.tag_name }} \ 44 | ${{ env.IMAGE_NAME }}-linux-386:${{ github.event.release.tag_name }} \ 45 | ${{ env.IMAGE_NAME }}-linux-arm:${{ github.event.release.tag_name }} 46 | docker manifest annotate ${{ env.IMAGE_NAME }}:${{ steps.regex-match.outputs.group1 }}.${{ steps.regex-match.outputs.group2 }} \ 47 | ${{ env.IMAGE_NAME }}-linux-amd64:${{ github.event.release.tag_name }} --os linux --arch amd64 48 | docker manifest annotate ${{ env.IMAGE_NAME }}:${{ steps.regex-match.outputs.group1 }}.${{ steps.regex-match.outputs.group2 }} \ 49 | ${{ env.IMAGE_NAME }}-linux-386:${{ github.event.release.tag_name }} --os linux --arch 386 50 | docker manifest annotate ${{ env.IMAGE_NAME }}:${{ steps.regex-match.outputs.group1 }}.${{ steps.regex-match.outputs.group2 }} \ 51 | ${{ env.IMAGE_NAME }}-linux-arm:${{ github.event.release.tag_name }} --os linux --arch arm 52 | docker manifest push ${{ env.IMAGE_NAME }}:${{ steps.regex-match.outputs.group1 }}.${{ steps.regex-match.outputs.group2 }} 53 | 54 | docker manifest create ${{ env.IMAGE_NAME }}:${{ steps.regex-match.outputs.group1 }} \ 55 | ${{ env.IMAGE_NAME }}-linux-amd64:${{ github.event.release.tag_name }} \ 56 | ${{ env.IMAGE_NAME }}-linux-386:${{ github.event.release.tag_name }} \ 57 | ${{ env.IMAGE_NAME }}-linux-arm:${{ github.event.release.tag_name }} 58 | docker manifest annotate ${{ env.IMAGE_NAME }}:${{ steps.regex-match.outputs.group1 }} \ 59 | ${{ env.IMAGE_NAME }}-linux-amd64:${{ github.event.release.tag_name }} --os linux --arch amd64 60 | docker manifest annotate ${{ env.IMAGE_NAME }}:${{ steps.regex-match.outputs.group1 }} \ 61 | ${{ 
env.IMAGE_NAME }}-linux-386:${{ github.event.release.tag_name }} --os linux --arch 386 62 | docker manifest annotate ${{ env.IMAGE_NAME }}:${{ steps.regex-match.outputs.group1 }} \ 63 | ${{ env.IMAGE_NAME }}-linux-arm:${{ github.event.release.tag_name }} --os linux --arch arm 64 | docker manifest push ${{ env.IMAGE_NAME }}:${{ steps.regex-match.outputs.group1 }} 65 | -------------------------------------------------------------------------------- /.github/workflows/integration-tests-docker.yml: -------------------------------------------------------------------------------- 1 | name: Integration tests - Docker 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | integration-tests-docker-raw: 11 | name: Backup raw data 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Set up Go 1.x 15 | uses: actions/setup-go@v2 16 | with: 17 | go-version: ^1.14 18 | 19 | - name: Check out code into the Go module directory 20 | uses: actions/checkout@v2 21 | 22 | - name: Build docker image 23 | run: docker build --build-arg GO_VERSION=1.14 --build-arg GOOS=linux --build-arg GOARCH=amd64 -t bivac-testing . 24 | 25 | - name: Run raw data test case 26 | run: ./tests/raw 27 | working-directory: ./test/integration/docker/ 28 | 29 | integration-tests-docker-mysql: 30 | name: Backup MySQL database 31 | runs-on: ubuntu-latest 32 | steps: 33 | - name: Set up Go 1.x 34 | uses: actions/setup-go@v2 35 | with: 36 | go-version: ^1.14 37 | 38 | - name: Check out code into the Go module directory 39 | uses: actions/checkout@v2 40 | 41 | - name: Build docker image 42 | run: docker build --build-arg GO_VERSION=1.14 --build-arg GOOS=linux --build-arg GOARCH=amd64 -t bivac-testing . 43 | 44 | - name: Run MySQL test case 45 | run: ./tests/mysql 46 | working-directory: ./test/integration/docker/ 47 | 48 | integration-tests-docker-postgres: 49 | name: Backup PostgreSQL database 50 | runs-on: ubuntu-latest 51 | steps: 52 | - name: Set up Go 1.x 53 | uses: actions/setup-go@v2 54 | with: 55 | go-version: ^1.14 56 | 57 | - name: Check out code into the Go module directory 58 | uses: actions/checkout@v2 59 | 60 | - name: Build docker image 61 | run: docker build --build-arg GO_VERSION=1.14 --build-arg GOOS=linux --build-arg GOARCH=amd64 -t bivac-testing . 
62 | 63 | - name: Run PostgreSQL test case 64 | run: ./tests/postgres 65 | working-directory: ./test/integration/docker/ 66 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Binary 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | env: 10 | IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/bivac 11 | DOCKER_CLI_EXPERIMENTAL: enabled 12 | 13 | jobs: 14 | linting: 15 | name: Linting 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Set up Go 1.x 19 | uses: actions/setup-go@v2 20 | with: 21 | go-version: ^1.14 22 | 23 | - name: Check out code into the Go module directory 24 | uses: actions/checkout@v2 25 | 26 | - name: Run Go Vet 27 | run: make vet 28 | 29 | - name: Lint code 30 | run: make lint 31 | 32 | tests: 33 | name: Unit testing 34 | runs-on: ubuntu-latest 35 | steps: 36 | - name: Set up Go 1.x 37 | uses: actions/setup-go@v2 38 | with: 39 | go-version: ^1.14 40 | 41 | - name: Check out code into the Go module directory 42 | uses: actions/checkout@v2 43 | 44 | - name: Run unit tests 45 | run: make test 46 | 47 | - name: Send coverage 48 | uses: shogo82148/actions-goveralls@v1 49 | with: 50 | path-to-profile: coverage 51 | 52 | build-binary: 53 | name: Build Binary 54 | runs-on: ubuntu-latest 55 | steps: 56 | - name: Set up Go 1.x 57 | uses: actions/setup-go@v2 58 | with: 59 | go-version: ^1.14 60 | 61 | - name: Check out code into the Go module directory 62 | uses: actions/checkout@v2 63 | with: 64 | fetch-depth: 0 65 | 66 | - name: Build binary 67 | run: make bivac 68 | 69 | build-docker-image: 70 | name: Build Docker Image 71 | runs-on: ubuntu-latest 72 | steps: 73 | - name: Set up Go 1.x 74 | uses: actions/setup-go@v2 75 | with: 76 | go-version: ^1.14 77 | 78 | - name: Check out code into the Go module directory 79 | uses: actions/checkout@v2 80 | with: 81 | fetch-depth: 0 82 | 83 | - name: Build docker image 84 | run: docker build --build-arg GO_VERSION=1.14 --build-arg GOOS=linux --build-arg GOARCH=amd64 . 
85 | 86 | publish-docker-image-latest: 87 | name: Publish docker image tagged latest 88 | runs-on: ubuntu-latest 89 | if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }} 90 | steps: 91 | - name: Check out the repo 92 | uses: actions/checkout@v2 93 | with: 94 | fetch-depth: 0 95 | 96 | - name: Login to Github Registry 97 | uses: docker/login-action@v1 98 | with: 99 | registry: ghcr.io 100 | username: ${{ github.repository_owner }} 101 | password: ${{ secrets.CR_PAT }} 102 | 103 | - name: Build and push docker images 104 | run: | 105 | IMAGE_NAME=${{ env.IMAGE_NAME }} IMAGE_VERSION=latest KEEP_IMAGES=yes make docker-images 106 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .*.swp 2 | hugo/public 3 | *.out 4 | coverage 5 | *~ 6 | /bivac 7 | *.log 8 | *.box 9 | .vagrant/ 10 | release/ 11 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "hugo/themes/elate"] 2 | path = hugo/themes/elate 3 | url = https://github.com/camptocamp/hugo-elate-theme 4 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG GO_VERSION 2 | FROM golang:${GO_VERSION} as builder 3 | 4 | ARG GOOS 5 | ARG GOARCH 6 | ARG GOARM 7 | 8 | ENV GO111MODULE on 9 | ENV GOOS ${GOOS} 10 | ENV GOARCH ${GOARCH} 11 | ENV GOARM ${GOARM} 12 | 13 | # RClone 14 | RUN git clone https://github.com/rclone/rclone /go/src/github.com/rclone/rclone 15 | WORKDIR /go/src/github.com/rclone/rclone 16 | RUN git checkout v1.54.0 17 | RUN go get ./... 18 | RUN env ${BUILD_OPTS} go build 19 | 20 | # Restic 21 | RUN git clone https://github.com/restic/restic /go/src/github.com/restic/restic 22 | WORKDIR /go/src/github.com/restic/restic 23 | RUN git checkout v0.12.0 24 | RUN go get ./... 25 | RUN GOOS= GOARCH= GOARM= go run -mod=vendor build.go || go run build.go 26 | 27 | # Bivac 28 | WORKDIR /go/src/github.com/camptocamp/bivac 29 | COPY . . 
30 | RUN env ${BUILD_OPTS} make bivac 31 | 32 | FROM debian 33 | RUN apt-get update && \ 34 | apt-get install -y openssh-client procps && \ 35 | rm -rf /var/lib/apt/lists/* 36 | COPY --from=builder /etc/ssl /etc/ssl 37 | COPY --from=builder /go/src/github.com/camptocamp/bivac/bivac /bin/bivac 38 | COPY --from=builder /go/src/github.com/camptocamp/bivac/providers-config.default.toml / 39 | COPY --from=builder /go/src/github.com/restic/restic/restic /bin/restic 40 | COPY --from=builder /go/src/github.com/rclone/rclone/rclone /bin/rclone 41 | ENTRYPOINT ["/bin/bivac"] 42 | CMD [""] 43 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | DEPS = $(wildcard */*/*/*.go) 2 | VERSION = $(shell git describe --always --dirty) 3 | COMMIT_SHA1 = $(shell git rev-parse HEAD) 4 | BUILD_DATE = $(shell date +%Y-%m-%d) 5 | 6 | GO_VERSION = 1.14 7 | 8 | all: lint vet test bivac 9 | 10 | bivac: main.go $(DEPS) 11 | GO111MODULE=on CGO_ENABLED=0 GOARCH=$(GOARCH) GOOS=$(GOOS) GOARM=$(GOARM) \ 12 | go build \ 13 | -a -ldflags="-s -X main.version=$(VERSION) -X main.buildDate=$(BUILD_DATE) -X main.commitSha1=$(COMMIT_SHA1)" \ 14 | -installsuffix cgo -o $@ $< 15 | @if [ "${GOOS}" = "linux" ] && [ "${GOARCH}" = "amd64" ]; then strip $@; fi 16 | 17 | release: clean 18 | GO_VERSION=$(GO_VERSION) ./scripts/build-release.sh 19 | 20 | docker-images: clean 21 | @if [ -z "$(IMAGE_NAME)" ]; then echo "IMAGE_NAME cannot be empty."; exit 1; fi 22 | export IMAGE_NAME=$(IMAGE_NAME) 23 | # Linux/amd64 24 | docker build --no-cache --pull -t $(IMAGE_NAME)-linux-amd64:$(IMAGE_VERSION) \ 25 | --build-arg GO_VERSION=$(GO_VERSION) \ 26 | --build-arg GOOS=linux --build-arg GOARCH=amd64 . 27 | docker push $(IMAGE_NAME)-linux-amd64:$(IMAGE_VERSION) 28 | # Linux/386 29 | docker build --no-cache --pull -t $(IMAGE_NAME)-linux-386:$(IMAGE_VERSION) \ 30 | --build-arg GO_VERSION=${GO_VERSION} \ 31 | --build-arg GOOS=linux --build-arg GOARCH=386 . 32 | docker push $(IMAGE_NAME)-linux-386:$(IMAGE_VERSION) 33 | # Linux/arm 34 | docker build --no-cache --pull -t $(IMAGE_NAME)-linux-arm:$(IMAGE_VERSION) \ 35 | --build-arg GO_VERSION=${GO_VERSION} \ 36 | --build-arg GOOS=linux --build-arg GOARCH=arm --build-arg GOARM=7 . 37 | docker push $(IMAGE_NAME)-linux-arm:$(IMAGE_VERSION) 38 | # Linux/arm64 39 | docker build --no-cache --pull -t $(IMAGE_NAME)-linux-arm64:$(IMAGE_VERSION) \ 40 | --build-arg GO_VERSION=${GO_VERSION} \ 41 | --build-arg GOOS=linux --build-arg GOARCH=arm64 --build-arg GOARM=7 . 
42 | docker push $(IMAGE_NAME)-linux-arm64:$(IMAGE_VERSION) 43 | # Manifest 44 | docker manifest create $(IMAGE_NAME):$(IMAGE_VERSION) \ 45 | $(IMAGE_NAME)-linux-amd64:$(IMAGE_VERSION) \ 46 | $(IMAGE_NAME)-linux-386:$(IMAGE_VERSION) \ 47 | $(IMAGE_NAME)-linux-arm:$(IMAGE_VERSION) \ 48 | $(IMAGE_NAME)-linux-arm64:$(IMAGE_VERSION) 49 | docker manifest annotate $(IMAGE_NAME):$(IMAGE_VERSION) \ 50 | $(IMAGE_NAME)-linux-amd64:$(IMAGE_VERSION) --os linux --arch amd64 51 | docker manifest annotate $(IMAGE_NAME):$(IMAGE_VERSION) \ 52 | $(IMAGE_NAME)-linux-386:$(IMAGE_VERSION) --os linux --arch 386 53 | docker manifest annotate $(IMAGE_NAME):$(IMAGE_VERSION) \ 54 | $(IMAGE_NAME)-linux-arm:$(IMAGE_VERSION) --os linux --arch arm 55 | docker manifest annotate $(IMAGE_NAME):$(IMAGE_VERSION) \ 56 | $(IMAGE_NAME)-linux-arm64:$(IMAGE_VERSION) --os linux --arch arm64 57 | docker manifest push $(IMAGE_NAME):$(IMAGE_VERSION) 58 | 59 | lint: 60 | @GO111MODULE=off go get -u -v golang.org/x/lint/golint 61 | @for file in $$(go list ./... | grep -v '_workspace/' | grep -v 'vendor'); do \ 62 | export output="$$(golint $${file} | grep -v 'type name will be used as docker.DockerInfo')"; \ 63 | [ -n "$${output}" ] && echo "$${output}" && export status=1; \ 64 | done; \ 65 | exit $${status:-0} 66 | 67 | vet: main.go 68 | go vet $< 69 | 70 | clean: 71 | git clean -fXd -e \!vendor -e \!vendor/**/* && rm -f ./bivac 72 | 73 | test: 74 | go test -cover -coverprofile=coverage -v ./... 75 | 76 | .PHONY: all lint vet clean test 77 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Bivac: Backup Interface for Volumes Attached to Containers 2 | =========================================================== 3 | 4 | Website: [https://camptocamp.github.io/bivac](https://camptocamp.github.io/bivac) 5 | 6 | 7 | [![Docker Pulls](https://img.shields.io/docker/pulls/camptocamp/bivac.svg)](https://hub.docker.com/r/camptocamp/bivac/) 8 | [![Build Status](https://img.shields.io/travis/camptocamp/bivac/master.svg)](https://travis-ci.org/camptocamp/bivac) 9 | [![Coverage Status](https://img.shields.io/coveralls/camptocamp/bivac.svg)](https://coveralls.io/r/camptocamp/bivac?branch=master) 10 | [![Go Report Card](https://goreportcard.com/badge/github.com/camptocamp/bivac)](https://goreportcard.com/report/github.com/camptocamp/bivac) 11 | [![Gitter](https://img.shields.io/gitter/room/camptocamp/bivac.svg)](https://gitter.im/camptocamp/bivac) 12 | [![By Camptocamp](https://img.shields.io/badge/by-camptocamp-fb7047.svg)](http://www.camptocamp.com) 13 | 14 | 15 | Bivac lets you back up all your container volumes deployed on Docker Engine, Cattle or Kubernetes, using Restic.
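A quick way to try it, distilled from the docker-compose example shipped in contrib/examples/docker-compose/ further down this repository; the target bucket, pre-shared key, Restic password and AWS credentials below are placeholders to replace with your own:

```sh
docker run -d --name bivac \
  -p 8182:8182 \
  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  -e BIVAC_SERVER_PSK=super-secret-psk \
  -e RESTIC_PASSWORD=not-so-good-password \
  -e BIVAC_TARGET_URL=s3:my-bucket \
  -e AWS_ACCESS_KEY_ID=XXXXX \
  -e AWS_SECRET_ACCESS_KEY=XXXXX \
  ghcr.io/camptocamp/bivac:2.4 manager -v
```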
16 | 17 | ![Bivac](img/bivac_small.png) 18 | 19 | ## Documentation 20 | 21 | * [Overview](https://github.com/camptocamp/bivac/wiki/Home) 22 | * [Installation](https://github.com/camptocamp/bivac/wiki/Installation) 23 | - [Docker](https://github.com/camptocamp/bivac/wiki/Installation#docker) 24 | - [Rancher (Cattle)](https://github.com/camptocamp/bivac/wiki/Installation#rancher-cattle) 25 | - [Kubernetes](https://github.com/camptocamp/bivac/wiki/Installation#kubernetes) 26 | * [Usage](https://github.com/camptocamp/bivac/wiki/Usage) 27 | - [Backup a volume](https://github.com/camptocamp/bivac/wiki/Usage#backup-a-volume) 28 | - [Restore a volume](https://github.com/camptocamp/bivac/wiki/Usage#restore-a-volume) 29 | - [Manage a remote Restic repository](https://github.com/camptocamp/bivac/wiki/Usage#manage-a-remote-restic-repository) 30 | - [Troubleshooting](https://github.com/camptocamp/bivac/wiki/Usage#troubleshooting) 31 | * [API](https://github.com/camptocamp/bivac/wiki/API) 32 | -------------------------------------------------------------------------------- /cmd/agent/agent.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | 6 | "github.com/camptocamp/bivac/cmd" 7 | "github.com/camptocamp/bivac/internal/agent" 8 | ) 9 | 10 | var ( 11 | targetURL string 12 | backupPath string 13 | hostname string 14 | force bool 15 | logReceiver string 16 | snapshotName string 17 | ) 18 | 19 | var agentCmd = &cobra.Command{ 20 | Use: "agent", 21 | Short: "Run Bivac agent", 22 | Args: cobra.MinimumNArgs(1), // guard the args[0] access below 23 | Run: func(cmd *cobra.Command, args []string) { 24 | switch args[0] { 25 | case "backup": 26 | agent.Backup(targetURL, backupPath, hostname, force, logReceiver) 27 | case "restore": 28 | agent.Restore(targetURL, backupPath, hostname, force, logReceiver, snapshotName) 29 | } 30 | }, 31 | } 32 | 33 | func init() { 34 | agentCmd.Flags().StringVarP(&targetURL, "target.url", "r", "", "The target URL to push the backups to.") 35 | agentCmd.Flags().StringVarP(&backupPath, "backup.path", "p", "", "Path to the volume to back up.") 36 | agentCmd.Flags().StringVarP(&hostname, "host", "", "", "Custom hostname.") 37 | agentCmd.Flags().BoolVarP(&force, "force", "", false, "Force a backup by removing all locks.") 38 | agentCmd.Flags().StringVarP(&logReceiver, "log.receiver", "", "", "Address where the manager will collect the logs.") 39 | agentCmd.Flags().StringVarP(&snapshotName, "snapshot", "s", "latest", "Name of snapshot to restore") 40 | cmd.RootCmd.AddCommand(agentCmd) 41 | } 42 | -------------------------------------------------------------------------------- /cmd/all/all.go: -------------------------------------------------------------------------------- 1 | package all 2 | 3 | import ( 4 | // Run a Bivac agent 5 | _ "github.com/camptocamp/bivac/cmd/agent" 6 | // Backup a volume 7 | _ "github.com/camptocamp/bivac/cmd/backup" 8 | // Restore a volume 9 | _ "github.com/camptocamp/bivac/cmd/restore" 10 | // Get information about the Bivac manager 11 | _ "github.com/camptocamp/bivac/cmd/info" 12 | // Run a Bivac manager 13 | _ "github.com/camptocamp/bivac/cmd/manager" 14 | // Run a custom Restic command on a volume's remote repository 15 | _ "github.com/camptocamp/bivac/cmd/restic" 16 | // List volumes and display information about the backed-up volumes 17 | _ "github.com/camptocamp/bivac/cmd/volumes" 18 | ) 19 | -------------------------------------------------------------------------------- /cmd/backup/backup.go: 
-------------------------------------------------------------------------------- 1 | package backup 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | log "github.com/Sirupsen/logrus" 8 | "github.com/spf13/cobra" 9 | "github.com/tatsushid/go-prettytable" 10 | 11 | "github.com/camptocamp/bivac/cmd" 12 | "github.com/camptocamp/bivac/pkg/client" 13 | ) 14 | 15 | var ( 16 | remoteAddress string 17 | psk string 18 | force bool 19 | ) 20 | 21 | var envs = make(map[string]string) 22 | 23 | var backupCmd = &cobra.Command{ 24 | Use: "backup [VOLUME_ID]", 25 | Short: "Backup volumes", 26 | Args: cobra.MinimumNArgs(1), 27 | Run: func(cmd *cobra.Command, args []string) { 28 | c, err := client.NewClient(remoteAddress, psk) 29 | if err != nil { 30 | log.Errorf("failed to create new client: %s", err) 31 | return 32 | } 33 | 34 | for _, a := range args { 35 | fmt.Printf("Backing up `%s'...\n", a) 36 | err = c.BackupVolume(a, force) 37 | if err != nil { 38 | log.Errorf("failed to backup volume: %s", err) 39 | return 40 | } 41 | } 42 | 43 | volumes, err := c.GetVolumes() 44 | if err != nil { 45 | log.Errorf("failed to get volumes: %s", err) 46 | return 47 | } 48 | 49 | for _, a := range args { 50 | for _, v := range volumes { 51 | if v.ID == a { 52 | tbl, err := prettytable.NewTable([]prettytable.Column{ 53 | {}, 54 | {}, 55 | {}, 56 | }...) 57 | if err != nil { 58 | log.WithFields(log.Fields{ 59 | "volume": v.Name, 60 | "hostname": v.Hostname, 61 | }).Errorf("failed to format output: %s", err) 62 | return 63 | } 64 | tbl.Separator = "\t" 65 | 66 | fmt.Printf("ID: %s\n", v.ID) 67 | fmt.Printf("Name: %s\n", v.Name) 68 | fmt.Printf("Mountpoint: %s\n", v.Mountpoint) 69 | fmt.Printf("Backup date: %s\n", v.LastBackupDate) 70 | fmt.Printf("Backup status: %s\n", v.LastBackupStatus) 71 | fmt.Printf("Logs:\n") 72 | tbl.AddRow("", "testInit", strings.Replace(v.Logs["testInit"], "\n", "\n\t\t\t", -1)) 73 | tbl.AddRow("", "init", strings.Replace(v.Logs["init"], "\n", "\n\t\t\t", -1)) 74 | tbl.AddRow("", "backup", strings.Replace(v.Logs["backup"], "\n", "\n\t\t\t", -1)) 75 | tbl.AddRow("", "forget", strings.Replace(v.Logs["forget"], "\n", "\n\t\t\t", -1)) 76 | tbl.Print() 77 | } 78 | } 79 | } 80 | }, 81 | } 82 | 83 | func init() { 84 | backupCmd.Flags().StringVarP(&remoteAddress, "remote.address", "", "http://127.0.0.1:8182", "Address of the remote Bivac server.") 85 | envs["BIVAC_REMOTE_ADDRESS"] = "remote.address" 86 | 87 | backupCmd.Flags().StringVarP(&psk, "server.psk", "", "", "Pre-shared key.") 88 | envs["BIVAC_SERVER_PSK"] = "server.psk" 89 | 90 | backupCmd.Flags().BoolVarP(&force, "force", "", false, "Force backup by removing locks.") 91 | 92 | cmd.SetValuesFromEnv(envs, backupCmd.Flags()) 93 | cmd.RootCmd.AddCommand(backupCmd) 94 | } 95 | -------------------------------------------------------------------------------- /cmd/cmd.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | log "github.com/Sirupsen/logrus" 8 | "github.com/spf13/cobra" 9 | "github.com/spf13/pflag" 10 | "github.com/spf13/viper" 11 | 12 | "github.com/camptocamp/bivac/internal/utils" 13 | ) 14 | 15 | var ( 16 | verbose bool 17 | whitelist string 18 | blacklist string 19 | 20 | // BuildInfo contains the Bivac build informations (filled by main.go at build time) 21 | BuildInfo utils.BuildInfo 22 | ) 23 | 24 | var persistentEnvs = make(map[string]string) 25 | var localEnvs = make(map[string]string) 26 | 27 | // RootCmd is a global variable which will handle 
all subcommands 28 | var RootCmd = &cobra.Command{ 29 | Use: "bivac", 30 | } 31 | 32 | func initConfig() { 33 | if verbose { 34 | log.SetLevel(log.DebugLevel) 35 | } 36 | } 37 | 38 | func init() { 39 | cobra.OnInitialize(initConfig) 40 | viper.AutomaticEnv() 41 | RootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Enable verbose output") 42 | localEnvs["BIVAC_VERBOSE"] = "verbose" 43 | RootCmd.PersistentFlags().StringVarP(&whitelist, "whitelist", "w", "", "Only back up whitelisted volumes.") 44 | localEnvs["BIVAC_WHITELIST"] = "whitelist" 45 | localEnvs["BIVAC_VOLUMES_WHITELIST"] = "whitelist" 46 | RootCmd.PersistentFlags().StringVarP(&blacklist, "blacklist", "b", "", "Do not back up blacklisted volumes.") 47 | localEnvs["BIVAC_BLACKLIST"] = "blacklist" 48 | localEnvs["BIVAC_VOLUMES_BLACKLIST"] = "blacklist" 49 | 50 | SetValuesFromEnv(localEnvs, RootCmd.PersistentFlags()) 51 | SetValuesFromEnv(persistentEnvs, RootCmd.PersistentFlags()) 52 | } 53 | 54 | // Execute is the main thread, required by Cobra 55 | func Execute(buildInfo utils.BuildInfo) { 56 | BuildInfo = buildInfo 57 | if err := RootCmd.Execute(); err != nil { 58 | fmt.Println(err) 59 | os.Exit(1) 60 | } 61 | } 62 | 63 | // SetValuesFromEnv assigns values to Cobra variables from environment variables 64 | func SetValuesFromEnv(envs map[string]string, flags *pflag.FlagSet) { 65 | for env, flag := range envs { 66 | flag := flags.Lookup(flag) 67 | flag.Usage = fmt.Sprintf("%v [%v]", flag.Usage, env) 68 | if value := os.Getenv(env); value != "" { 69 | flag.Value.Set(value) 70 | } else { 71 | os.Setenv(env, flag.Value.String()) 72 | } 73 | } 74 | return 75 | } 76 | -------------------------------------------------------------------------------- /cmd/info/info.go: -------------------------------------------------------------------------------- 1 | package info 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | log "github.com/Sirupsen/logrus" 8 | "github.com/spf13/cobra" 9 | 10 | "github.com/camptocamp/bivac/cmd" 11 | "github.com/camptocamp/bivac/pkg/client" 12 | ) 13 | 14 | var ( 15 | remoteAddress string 16 | psk string 17 | ) 18 | 19 | var envs = make(map[string]string) 20 | 21 | var infoCmd = &cobra.Command{ 22 | Use: "info", 23 | Short: "Retrieve Bivac information", 24 | Run: func(cmd *cobra.Command, args []string) { 25 | c, err := client.NewClient(remoteAddress, psk) 26 | if err != nil { 27 | log.Errorf("failed to create a new client: %s", err) 28 | return 29 | } 30 | 31 | informations, err := c.GetInformations() 32 | if err != nil { 33 | log.Errorf("failed to get information: %s", err) 34 | return 35 | } 36 | 37 | for infok, infov := range informations { 38 | if infok == "volumes_count" { 39 | infok = "Managed volumes" 40 | } 41 | fmt.Printf("%s: %s\n", strings.Title(infok), infov) 42 | } 43 | }, 44 | } 45 | 46 | func init() { 47 | infoCmd.Flags().StringVarP(&remoteAddress, "remote.address", "", "http://127.0.0.1:8182", "Address of the remote Bivac server.") 48 | envs["BIVAC_REMOTE_ADDRESS"] = "remote.address" 49 | 50 | infoCmd.Flags().StringVarP(&psk, "server.psk", "", "", "Pre-shared key.") 51 | envs["BIVAC_SERVER_PSK"] = "server.psk" 52 | 53 | cmd.SetValuesFromEnv(envs, infoCmd.Flags()) 54 | cmd.RootCmd.AddCommand(infoCmd) 55 | 56 | } 57 | -------------------------------------------------------------------------------- /cmd/manager/manager.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | log "github.com/Sirupsen/logrus" 8 | "github.com/spf13/cobra" 9 | 10 | bivacCmd "github.com/camptocamp/bivac/cmd" 11 | "github.com/camptocamp/bivac/internal/manager" 12 | "github.com/camptocamp/bivac/internal/utils" 13 | "github.com/camptocamp/bivac/pkg/volume" 14 | ) 15 | 16 | var ( 17 | server manager.Server 18 | orchestrator string 19 | 20 | // Orchestrators is a copy of manager.Orchestrators which allows orchestrator 21 | // configuration from Cobra variables 22 | Orchestrators manager.Orchestrators 23 | 24 | dbPath string 25 | resticForgetArgs string 26 | 27 | providersFile string 28 | targetURL string 29 | retryCount int 30 | logServer string 31 | agentImage string 32 | whitelistVolumes string 33 | blacklistVolumes string 34 | whitelistAnnotation bool 35 | parallelCount int 36 | refreshRate string 37 | backupInterval string 38 | ) 39 | var envs = make(map[string]string) 40 | 41 | var managerCmd = &cobra.Command{ 42 | Use: "manager", 43 | Short: "Start Bivac backup manager", 44 | Run: func(cmd *cobra.Command, args []string) { 45 | volumesFilters := volume.Filters{ 46 | Blacklist: strings.Split(blacklistVolumes, ","), 47 | Whitelist: strings.Split(whitelistVolumes, ","), 48 | WhitelistAnnotation: whitelistAnnotation, 49 | } 50 | 51 | o, err := manager.GetOrchestrator(orchestrator, Orchestrators) 52 | if err != nil { 53 | log.Errorf("failed to retrieve orchestrator: %s", err) 54 | return 55 | } 56 | 57 | if agentImage == "" { 58 | managerVersion := bivacCmd.BuildInfo.Version 59 | agentImage = fmt.Sprintf("ghcr.io/camptocamp/bivac:%s", utils.ComputeDockerAgentImage(managerVersion)) 60 | } 61 | 62 | err = manager.Start(bivacCmd.BuildInfo, o, server, volumesFilters, providersFile, targetURL, logServer, agentImage, retryCount, parallelCount, refreshRate, backupInterval) 63 | if err != nil { 64 | log.Errorf("failed to start manager: %s", err) 65 | return 66 | } 67 | }, 68 | } 69 | 70 | func init() { 71 | managerCmd.Flags().StringVarP(&server.Address, "server.address", "", "0.0.0.0:8182", "Address to bind on.") 72 | envs["BIVAC_SERVER_ADDRESS"] = "server.address" 73 | managerCmd.Flags().StringVarP(&server.PSK, "server.psk", "", "", "Pre-shared key.") 74 | envs["BIVAC_SERVER_PSK"] = "server.psk" 75 | 76 | managerCmd.Flags().StringVarP(&orchestrator, "orchestrator", "o", "", "Orchestrator Bivac should connect to.") 77 | envs["BIVAC_ORCHESTRATOR"] = "orchestrator" 78 | 79 | managerCmd.Flags().StringVarP(&Orchestrators.Docker.Endpoint, "docker.endpoint", "", "unix:///var/run/docker.sock", "Docker endpoint.") 80 | envs["BIVAC_DOCKER_ENDPOINT"] = "docker.endpoint" 81 | managerCmd.Flags().StringVarP(&Orchestrators.Docker.Network, "docker.network", "", "bridge", "Docker network.") 82 | envs["BIVAC_DOCKER_NETWORK"] = "docker.network" 83 | 84 | managerCmd.Flags().StringVarP(&Orchestrators.Cattle.URL, "cattle.url", "", "", "The Cattle URL.") 85 | envs["CATTLE_URL"] = "cattle.url" 86 | managerCmd.Flags().StringVarP(&Orchestrators.Cattle.AccessKey, "cattle.accesskey", "", "", "The Cattle access key.") 87 | envs["CATTLE_ACCESS_KEY"] = "cattle.accesskey" 88 | managerCmd.Flags().StringVarP(&Orchestrators.Cattle.SecretKey, "cattle.secretkey", "", "", "The Cattle secret key.") 89 | envs["CATTLE_SECRET_KEY"] = "cattle.secretkey" 90 | 91 | managerCmd.Flags().StringVarP(&Orchestrators.Kubernetes.Namespace, "kubernetes.namespace", "", "", "Namespace where you want to run Bivac.") 92 | envs["KUBERNETES_NAMESPACE"] = "kubernetes.namespace" 93 | managerCmd.Flags().BoolVarP(&Orchestrators.Kubernetes.AllNamespaces, "kubernetes.all-namespaces", "", false, "Back up volumes from all namespaces.") 94 | envs["KUBERNETES_ALL_NAMESPACES"] = "kubernetes.all-namespaces" 95 | managerCmd.Flags().StringVarP(&Orchestrators.Kubernetes.KubeConfig, "kubernetes.kubeconfig", "", "", "Path to your kubeconfig file.") 96 | envs["KUBERNETES_KUBECONFIG"] = "kubernetes.kubeconfig" 97 | managerCmd.Flags().StringVarP(&Orchestrators.Kubernetes.AgentServiceAccount, "kubernetes.agent-service-account", "", "", "Specify service account for agents.") 98 | envs["KUBERNETES_AGENT_SERVICE_ACCOUNT"] = "kubernetes.agent-service-account" 99 | managerCmd.Flags().StringVarP(&Orchestrators.Kubernetes.AgentLabelsInline, "kubernetes.agent-labels", "", "app=bivac", "Additional labels for agents.") 100 | envs["KUBERNETES_AGENT_LABELS"] = "kubernetes.agent-labels" 101 | managerCmd.Flags().StringVarP(&Orchestrators.Kubernetes.AgentAnnotationsInline, "kubernetes.agent-annotations", "", "", "Additional annotations for agents.") 102 | envs["KUBERNETES_AGENT_ANNOTATIONS"] = "kubernetes.agent-annotations" 103 | 104 | managerCmd.Flags().StringVarP(&resticForgetArgs, "restic.forget.args", "", "--group-by host --keep-daily 15 --prune", "Restic forget arguments.") 105 | envs["RESTIC_FORGET_ARGS"] = "restic.forget.args" 106 | 107 | managerCmd.Flags().StringVarP(&providersFile, "providers.config", "", "/providers-config.default.toml", "Configuration file for providers.") 108 | envs["BIVAC_PROVIDERS_CONFIG"] = "providers.config" 109 | 110 | managerCmd.Flags().StringVarP(&targetURL, "target.url", "r", "", "The target URL to push the backups to.") 111 | envs["BIVAC_TARGET_URL"] = "target.url" 112 | 113 | managerCmd.Flags().IntVarP(&retryCount, "retry.count", "", 0, "Number of times to retry a volume backup when it fails.") 114 | envs["BIVAC_RETRY_COUNT"] = "retry.count" 115 | 116 | managerCmd.Flags().StringVarP(&logServer, "log.server", "", "", "Manager's API address that will receive logs from agents.") 117 | envs["BIVAC_LOG_SERVER"] = "log.server" 118 | 119 | managerCmd.Flags().StringVarP(&agentImage, "agent.image", "", "", "Agent's Docker image.") 120 | envs["BIVAC_AGENT_IMAGE"] = "agent.image" 121 | 122 | managerCmd.Flags().StringVarP(&whitelistVolumes, "whitelist", "", "", "Whitelist volumes.") 123 | envs["BIVAC_WHITELIST"] = "whitelist" 124 | envs["BIVAC_VOLUMES_WHITELIST"] = "whitelist" 125 | 126 | managerCmd.Flags().StringVarP(&blacklistVolumes, "blacklist", "", "", "Blacklist volumes.") 127 | envs["BIVAC_BLACKLIST"] = "blacklist" 128 | envs["BIVAC_VOLUMES_BLACKLIST"] = "blacklist" 129 | 130 | managerCmd.Flags().BoolVarP(&whitelistAnnotation, "whitelist.annotations", "", false, "Require the PVC whitelist annotation.") 131 | envs["BIVAC_WHITELIST_ANNOTATION"] = "whitelist.annotations" 132 | 133 | managerCmd.Flags().IntVarP(&parallelCount, "parallel.count", "", 2, "Number of agents to run in parallel.") 134 | envs["BIVAC_PARALLEL_COUNT"] = "parallel.count" 135 | 136 | managerCmd.Flags().StringVarP(&refreshRate, "refresh.rate", "", "10m", "The volume list refresh rate.") 137 | envs["BIVAC_REFRESH_RATE"] = "refresh.rate" 138 | 139 | managerCmd.Flags().StringVarP(&backupInterval, "backup.interval", "", "23h", "Interval between two backups of a volume.") 140 | envs["BIVAC_BACKUP_INTERVAL"] = "backup.interval" 141 | 142 | bivacCmd.SetValuesFromEnv(envs, managerCmd.Flags()) 143 | bivacCmd.RootCmd.AddCommand(managerCmd) 144 | } 145 | 
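Every flag registered in the init() above is also mirrored to the environment variable recorded in the envs map (via SetValuesFromEnv in cmd/cmd.go), so the manager can be configured entirely through the environment. A minimal sketch with hypothetical values, using only env names declared in this file:

```sh
export BIVAC_ORCHESTRATOR=kubernetes    # or docker / cattle
export KUBERNETES_ALL_NAMESPACES=true   # back up volumes from all namespaces
export BIVAC_TARGET_URL=s3:my-bucket    # where Restic pushes the backups
export BIVAC_SERVER_PSK=super-secret-psk
export BIVAC_PARALLEL_COUNT=4           # defaults to 2
export BIVAC_BACKUP_INTERVAL=23h        # default interval between two backups
bivac manager
```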
-------------------------------------------------------------------------------- /cmd/restic/restic.go: -------------------------------------------------------------------------------- 1 | package restic 2 | 3 | import ( 4 | "fmt" 5 | 6 | log "github.com/Sirupsen/logrus" 7 | "github.com/spf13/cobra" 8 | 9 | "github.com/camptocamp/bivac/cmd" 10 | "github.com/camptocamp/bivac/pkg/client" 11 | ) 12 | 13 | var ( 14 | remoteAddress string 15 | psk string 16 | volumeID string 17 | ) 18 | 19 | var envs = make(map[string]string) 20 | 21 | var resticCmd = &cobra.Command{ 22 | Use: "restic --volume [VOLUME_ID] -- [RESTIC_COMMAND]", 23 | Short: "Run Restic command on a volume's repository", 24 | Args: cobra.ArbitraryArgs, 25 | PreRun: func(cmd *cobra.Command, args []string) { 26 | if volumeID == "" { 27 | log.Fatal("You must provide a volume ID.") 28 | return 29 | } 30 | }, 31 | Run: func(cmd *cobra.Command, args []string) { 32 | c, err := client.NewClient(remoteAddress, psk) 33 | if err != nil { 34 | log.Errorf("failed to create new client: %s", err) 35 | return 36 | } 37 | 38 | output, err := c.RunRawCommand(volumeID, args) 39 | if err != nil { 40 | log.Errorf("failed to run command: %s", err) 41 | return 42 | } 43 | 44 | fmt.Println(output) 45 | }, 46 | } 47 | 48 | func init() { 49 | resticCmd.Flags().StringVarP(&remoteAddress, "remote.address", "", "http://127.0.0.1:8182", "Address of the remote Bivac server.") 50 | envs["BIVAC_REMOTE_ADDRESS"] = "remote.address" 51 | 52 | resticCmd.Flags().StringVarP(&psk, "server.psk", "", "", "Pre-shared key.") 53 | envs["BIVAC_SERVER_PSK"] = "server.psk" 54 | 55 | resticCmd.Flags().StringVarP(&volumeID, "volume", "", "", "Volume ID") 56 | 57 | cmd.SetValuesFromEnv(envs, resticCmd.Flags()) 58 | cmd.RootCmd.AddCommand(resticCmd) 59 | } 60 | -------------------------------------------------------------------------------- /cmd/restore/restore.go: -------------------------------------------------------------------------------- 1 | package restore 2 | 3 | import ( 4 | "fmt" 5 | log "github.com/Sirupsen/logrus" 6 | "github.com/camptocamp/bivac/cmd" 7 | "github.com/camptocamp/bivac/pkg/client" 8 | "github.com/spf13/cobra" 9 | "github.com/tatsushid/go-prettytable" 10 | ) 11 | 12 | var ( 13 | force bool 14 | psk string 15 | remoteAddress string 16 | snapshotName string 17 | ) 18 | 19 | var envs = make(map[string]string) 20 | 21 | var restoreCmd = &cobra.Command{ 22 | Use: "restore [VOLUME_ID]", 23 | Short: "Restore volumes", 24 | Args: cobra.MinimumNArgs(1), 25 | Run: func(cmd *cobra.Command, args []string) { 26 | c, err := client.NewClient(remoteAddress, psk) 27 | if err != nil { 28 | log.Errorf("failed to create new client: %s", err) 29 | return 30 | } 31 | for _, a := range args { 32 | fmt.Printf("Restoring `%s'...\n", a) 33 | err = c.RestoreVolume(a, force, snapshotName) 34 | if err != nil { 35 | log.Errorf("failed to restore volume: %s", err) 36 | return 37 | } 38 | } 39 | volumes, err := c.GetVolumes() 40 | if err != nil { 41 | log.Errorf("failed to get volumes: %s", err) 42 | return 43 | } 44 | for _, a := range args { 45 | for _, v := range volumes { 46 | if v.ID == a { 47 | tbl, err := prettytable.NewTable( 48 | []prettytable.Column{ 49 | {}, 50 | {}, 51 | }..., 52 | ) 53 | if err != nil { 54 | log.WithFields(log.Fields{ 55 | "volume": v.Name, 56 | "hostname": v.Hostname, 57 | }).Errorf( 58 | "failed to format output: %s", 59 | err, 60 | ) 61 | return 62 | } 63 | tbl.Separator = "\t" 64 | fmt.Printf("ID: %s\n", v.ID) 65 | fmt.Printf("Name: %s\n", v.Name) 66 | 
fmt.Printf( 67 | "Mountpoint: %s\n", 68 | v.Mountpoint, 69 | ) 70 | fmt.Printf( 71 | "Backup date: %s\n", 72 | v.LastBackupDate, 73 | ) 74 | fmt.Printf( 75 | "Backup status: %s\n", 76 | v.LastBackupStatus, 77 | ) 78 | fmt.Printf("Logs:\n") 79 | for stepKey, stepValue := range v.Logs { 80 | tbl.AddRow(stepKey, stepValue) 81 | } 82 | tbl.Print() 83 | } 84 | } 85 | } 86 | }, 87 | } 88 | 89 | func init() { 90 | restoreCmd.Flags().StringVarP( 91 | &remoteAddress, 92 | "remote.address", 93 | "", 94 | "http://127.0.0.1:8182", 95 | "Address of the remote Bivac server.", 96 | ) 97 | envs["BIVAC_REMOTE_ADDRESS"] = "remote.address" 98 | restoreCmd.Flags().StringVarP( 99 | &psk, 100 | "server.psk", 101 | "", 102 | "", 103 | "Pre-shared key.", 104 | ) 105 | envs["BIVAC_SERVER_PSK"] = "server.psk" 106 | restoreCmd.Flags().BoolVarP( 107 | &force, 108 | "force", 109 | "", 110 | false, 111 | "Force restore by removing locks.", 112 | ) 113 | restoreCmd.Flags().StringVarP( 114 | &snapshotName, 115 | "snapshot", 116 | "s", 117 | "latest", 118 | "Name of snapshot to restore", 119 | ) 120 | cmd.SetValuesFromEnv(envs, restoreCmd.Flags()) 121 | cmd.RootCmd.AddCommand(restoreCmd) 122 | } 123 | -------------------------------------------------------------------------------- /cmd/volumes/volumes.go: -------------------------------------------------------------------------------- 1 | package volumes 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "strings" 7 | 8 | log "github.com/Sirupsen/logrus" 9 | "github.com/spf13/cobra" 10 | "github.com/tatsushid/go-prettytable" 11 | 12 | "github.com/camptocamp/bivac/cmd" 13 | "github.com/camptocamp/bivac/pkg/client" 14 | ) 15 | 16 | var ( 17 | remoteAddress string 18 | psk string 19 | ) 20 | 21 | var envs = make(map[string]string) 22 | 23 | var volumesCmd = &cobra.Command{ 24 | Use: "volumes", 25 | Short: "Show volumes", 26 | Args: cobra.ArbitraryArgs, 27 | Run: func(cmd *cobra.Command, args []string) { 28 | c, err := client.NewClient(remoteAddress, psk) 29 | if err != nil { 30 | log.Errorf("failed to create new client: %s", err) 31 | return 32 | } 33 | volumes, err := c.GetVolumes() 34 | if err != nil { 35 | log.Errorf("failed to get volumes: %s", err) 36 | return 37 | } 38 | 39 | if len(args) == 0 { 40 | tbl, err := prettytable.NewTable([]prettytable.Column{ 41 | {Header: "ID"}, 42 | {Header: "Name"}, 43 | {Header: "Hostname"}, 44 | {Header: "Mountpoint"}, 45 | {Header: "LastBackupDate"}, 46 | {Header: "LastBackupStatus"}, 47 | {Header: "Backing up"}, 48 | }...) 49 | if err != nil { 50 | log.Errorf("failed to format output: %s", err) 51 | return 52 | } 53 | tbl.Separator = "\t" 54 | 55 | for _, v := range volumes { 56 | tbl.AddRow(v.ID, v.Name, v.Hostname, v.Mountpoint, v.LastBackupDate, v.LastBackupStatus, strconv.FormatBool(v.BackingUp)) 57 | } 58 | 59 | tbl.Print() 60 | return 61 | } 62 | 63 | for _, a := range args { 64 | for _, v := range volumes { 65 | if v.ID == a { 66 | tbl, err := prettytable.NewTable([]prettytable.Column{ 67 | {}, 68 | {}, 69 | {}, 70 | }...) 
71 | if err != nil { 72 | log.WithFields(log.Fields{ 73 | "volume": v.Name, 74 | "hostname": v.Hostname, 75 | }).Errorf("failed to format output: %s", err) 76 | return 77 | } 78 | tbl.Separator = "\t" 79 | 80 | fmt.Printf("ID: %s\n", v.ID) 81 | fmt.Printf("Name: %s\n", v.Name) 82 | fmt.Printf("Hostname: %s\n", v.Hostname) 83 | fmt.Printf("Mountpoint: %s\n", v.Mountpoint) 84 | fmt.Printf("Backup date: %s\n", v.LastBackupDate) 85 | fmt.Printf("Backup status: %s\n", v.LastBackupStatus) 86 | fmt.Printf("Logs:\n") 87 | tbl.AddRow("", "testInit", strings.Replace(v.Logs["testInit"], "\n", "\n\t\t\t", -1)) 88 | tbl.AddRow("", "init", strings.Replace(v.Logs["init"], "\n", "\n\t\t\t", -1)) 89 | tbl.AddRow("", "backup", strings.Replace(v.Logs["backup"], "\n", "\n\t\t\t", -1)) 90 | tbl.AddRow("", "forget", strings.Replace(v.Logs["forget"], "\n", "\n\t\t\t", -1)) 91 | tbl.Print() 92 | } 93 | } 94 | } 95 | }, 96 | } 97 | 98 | func init() { 99 | volumesCmd.Flags().StringVarP(&remoteAddress, "remote.address", "", "http://127.0.0.1:8182", "Address of the remote Bivac server.") 100 | envs["BIVAC_REMOTE_ADDRESS"] = "remote.address" 101 | 102 | volumesCmd.Flags().StringVarP(&psk, "server.psk", "", "", "Pre-shared key.") 103 | envs["BIVAC_SERVER_PSK"] = "server.psk" 104 | 105 | cmd.SetValuesFromEnv(envs, volumesCmd.Flags()) 106 | cmd.RootCmd.AddCommand(volumesCmd) 107 | } 108 | -------------------------------------------------------------------------------- /contrib/charts/bivac/.helmignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.swp 3 | -------------------------------------------------------------------------------- /contrib/charts/bivac/Chart.yaml: -------------------------------------------------------------------------------- 1 | name: bivac 2 | home: https://github.com/camptocamp/bivac 3 | icon: https://raw.githubusercontent.com/camptocamp/bivac/master/img/bivac_small.png 4 | version: 1.0.0 5 | appVersion: 2.4 6 | description: 'Backup Interface for Volumes Attached to Containers' 7 | keywords: 8 | - backup 9 | - bivac 10 | - restic 11 | - volume 12 | - kubernetes 13 | - openshift 14 | sources: 15 | - https://github.com/camptocamp/bivac 16 | maintainers: 17 | - name: Camptocamp 18 | email: info@camptocamp.com 19 | -------------------------------------------------------------------------------- /contrib/charts/bivac/README.md: -------------------------------------------------------------------------------- 1 | # Helm chart for Bivac 2 | 3 | > Backup Interface for Volumes Attached to Containers 4 | 5 | ## Configuration 6 | 7 | The following table lists the configurable parameters of the Bivac chart and their default values. 8 | 9 | | Parameter | Description | Default | 10 | | --------- | ----------- | ------- | 11 | | `image.repository` | Repository for the Bivac image. | `camptocamp/bivac` | 12 | | `image.tag` | Tag of the Bivac image. | `2.4` | 13 | | `image.pullPolicy` | Pull policy for the Bivac image. | `IfNotPresent` | 14 | | `orchestrator` | Orchestrator Bivac will run on. | `kubernetes` | 15 | | `watchAllNamespaces` | Let Bivac back up volumes from all namespaces. | `true` | 16 | | `targetURL` | URL where Restic should push the backups. This field is required. | `nil` | 17 | | `resticPassword` | Password used by Restic to encrypt the backups. If left empty, a generated one will be used. | `nil` | 18 | | `serverPSK` | Pre-shared key which protects the Bivac server. If left empty, a generated one will be used. 
| `nil` | 19 | | `extraEnv` | Additional environment variables. | `[]` | 20 | | `service.type` | Bivac server type. | `ClusterIP` | 21 | | `service.port` | Port to expose Bivac. | `8182` | 22 | | `resources` | Resource limits for Bivac. | `{}` | 23 | | `nodeSelector` | Define which Nodes the Pods are scheduled on. | `{}` | 24 | | `tolerations` | If specified, the pod's tolerations. | `[]` | 25 | | `affinity` | Assign custom affinity rules. | `{}` | 26 | 27 | -------------------------------------------------------------------------------- /contrib/charts/bivac/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Thank you for installing 2 | -------------------------------------------------------------------------------- /contrib/charts/bivac/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "bivac.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this 12 | (by the DNS naming spec). 13 | */}} 14 | {{- define "bivac.fullname" -}} 15 | {{- $name := default .Chart.Name .Values.nameOverride -}} 16 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 17 | {{- end -}} 18 | -------------------------------------------------------------------------------- /contrib/charts/bivac/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ .Release.Name }} 6 | labels: 7 | app: {{ template "bivac.name" . 
}} 8 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 9 | release: {{ .Release.Name }} 10 | heritage: {{ .Release.Service }} 11 | data: 12 | providers-config.toml: | 13 | [providers] 14 | [providers.mysql] 15 | pre_cmd = """ 16 | mkdir -p $volume/backups && \ 17 | echo $(if [ -z '$MYSQL_ROOT_PASSWORD' ]; then \ 18 | mysqldump --all-databases --extended-insert --user=$MYSQL_USER --password=$MYSQL_PASSWORD; else \ 19 | mysqldump --all-databases --extended-insert --password=$MYSQL_ROOT_PASSWORD; \ 20 | fi) > $volume/backups/all.sql""" 21 | detect_cmd = "[[ -d $volume/mysql ]]" 22 | post_cmd = "rm -rf $volume/backups" 23 | backup_dir = "backups" 24 | 25 | [providers.postgresql] 26 | pre_cmd = "mkdir -p $volume/backups && pg_dumpall --clean -Upostgres > $volume/backups/all.sql" 27 | post_cmd = "rm -rf $volume/backups" 28 | detect_cmd = "[[ -f $volume/PG_VERSION ]]" 29 | backup_dir = "backups" 30 | 31 | [providers.openldap] 32 | pre_cmd = "mkdir -p $volume/backups && slapcat > $volume/backups/all.ldif" 33 | detect_cmd = "[[ -f $volume/DB_CONFIG ]]" 34 | backup_dir = "backups" 35 | 36 | [providers.mongo] 37 | pre_cmd = """ 38 | if [ -z '$MONGO_INITDB_ROOT_USERNAME' ]; then \ 39 | mongodump -o $volume/backups; else \ 40 | mongodump -o $volume/backups --username=$MONGO_INITDB_ROOT_USERNAME --password=$MONGO_INITDB_ROOT_PASSWORD; \ 41 | fi""" 42 | post_cmd = "rm -rf $volume/backups" 43 | detect_cmd = "[[ -f $volume/mongod.lock ]]" 44 | backup_dir = "backups" 45 | -------------------------------------------------------------------------------- /contrib/charts/bivac/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ .Release.Name }} 5 | labels: 6 | app: {{ template "bivac.name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | {{- with .Values.labels }} 11 | {{ toYaml . | indent 4 }} 12 | {{- end }} 13 | {{- with .Values.annotations }} 14 | annotations: 15 | {{ toYaml . | indent 4 }} 16 | {{- end }} 17 | spec: 18 | selector: 19 | matchLabels: 20 | app: {{ template "bivac.name" . }} 21 | release: {{ .Release.Name }} 22 | template: 23 | metadata: 24 | labels: 25 | app: {{ template "bivac.name" . }} 26 | release: {{ .Release.Name }} 27 | {{- with .Values.labels }} 28 | {{ toYaml . | indent 8 }} 29 | {{- end }} 30 | {{- with .Values.annotations }} 31 | annotations: 32 | {{ toYaml . 
| indent 8 }} 33 | {{- end }} 34 | spec: 35 | serviceAccountName: {{ .Release.Name }} 36 | containers: 37 | - name: manager 38 | image: {{ .Values.image.repository }}:{{ .Values.image.tag }} 39 | imagePullPolicy: {{ .Values.image.pullPolicy }} 40 | command: 41 | - bivac 42 | - manager 43 | ports: 44 | - name: api 45 | containerPort: 8182 46 | env: 47 | - name: BIVAC_ORCHESTRATOR 48 | value: {{ .Values.orchestrator }} 49 | - name: KUBERNETES_ALL_NAMESPACES 50 | value: "{{ .Values.watchAllNamespaces }}" 51 | - name: BIVAC_TARGET_URL 52 | value: {{ required "A target URL must be specified" .Values.targetURL }} 53 | - name: RESTIC_PASSWORD 54 | {{- if .Values.resticPassword }} 55 | value: {{ .Values.resticPassword }} 56 | {{- else }} 57 | value: {{ randAlphaNum 15 }} 58 | {{- end }} 59 | - name: BIVAC_SERVER_PSK 60 | {{- if .Values.serverPSK }} 61 | value: {{ .Values.serverPSK }} 62 | {{- else }} 63 | value: {{ randAlphaNum 15 }} 64 | {{- end }} 65 | - name: BIVAC_LOG_SERVER 66 | value: "http://{{ .Release.Name }}.{{ .Release.Namespace }}.svc:{{ .Values.service.port }}" 67 | {{- with .Values.extraEnv }} 68 | {{ toYaml . | indent 12 }} 69 | {{- end }} 70 | livenessProbe: 71 | tcpSocket: 72 | port: api 73 | initialDelaySeconds: 120 74 | timeoutSeconds: 5 75 | failureThreshold: 6 76 | readinessProbe: 77 | tcpSocket: 78 | port: api 79 | initialDelaySeconds: 30 80 | timeoutSeconds: 5 81 | failureThreshold: 6 82 | volumeMounts: 83 | - name: bivac-providers-conf 84 | mountPath: /etc/bivac 85 | resources: 86 | {{ toYaml .Values.resources | indent 12 }} 87 | {{- with .Values.nodeSelector }} 88 | nodeSelector: 89 | {{ toYaml . | indent 8 }} 90 | {{- end }} 91 | {{- with .Values.affinity }} 92 | affinity: 93 | {{ toYaml . | indent 8 }} 94 | {{- end }} 95 | {{- with .Values.tolerations }} 96 | tolerations: 97 | {{ toYaml . | indent 8 }} 98 | {{- end }} 99 | volumes: 100 | - name: bivac-providers-conf 101 | configMap: 102 | name: {{ .Release.Name }} 103 | -------------------------------------------------------------------------------- /contrib/charts/bivac/templates/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: {{ .Release.Name }} 5 | labels: 6 | app: {{ template "bivac.name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | roleRef: 11 | apiGroup: rbac.authorization.k8s.io 12 | kind: ClusterRole 13 | name: {{ .Release.Name }} 14 | subjects: 15 | - kind: ServiceAccount 16 | name: {{ .Release.Name }} 17 | namespace: {{ .Release.Namespace }} 18 | --- 19 | apiVersion: rbac.authorization.k8s.io/v1 20 | kind: ClusterRole 21 | metadata: 22 | name: {{ .Release.Name }} 23 | labels: 24 | app: {{ template "bivac.name" . 
}} 25 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 26 | release: {{ .Release.Name }} 27 | heritage: {{ .Release.Service }} 28 | rules: 29 | - apiGroups: [''] 30 | resources: 31 | - pods 32 | verbs: 33 | - create 34 | - delete 35 | - get 36 | - list 37 | - apiGroups: [''] 38 | resources: 39 | - namespaces 40 | - nodes 41 | - persistentvolumeclaims 42 | - serviceaccounts 43 | verbs: 44 | - get 45 | - list 46 | - apiGroups: [''] 47 | resources: 48 | - pods/exec 49 | - pods/log 50 | verbs: 51 | - create 52 | - get 53 | - list 54 | - post 55 | --- 56 | apiVersion: v1 57 | kind: ServiceAccount 58 | {{ if .Values.image.pullSecret -}} 59 | imagePullSecrets: 60 | - name: {{ .Values.image.pullSecret }} 61 | {{- end }} 62 | metadata: 63 | name: {{ .Release.Name }} 64 | labels: 65 | app: {{ template "bivac.name" . }} 66 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 67 | release: {{ .Release.Name }} 68 | heritage: {{ .Release.Service }} 69 | -------------------------------------------------------------------------------- /contrib/charts/bivac/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Release.Name }} 5 | labels: 6 | app: {{ template "bivac.name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | type: {{ .Values.service.type }} 12 | ports: 13 | - name: api 14 | port: {{ .Values.service.port }} 15 | targetPort: api 16 | selector: 17 | app: {{ template "bivac.name" . }} 18 | release: {{ .Release.Name }} 19 | -------------------------------------------------------------------------------- /contrib/charts/bivac/values.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | repository: camptocamp/bivac 3 | tag: "2.4" 4 | pullPolicy: IfNotPresent 5 | 6 | annotations: 7 | foo: bar 8 | 9 | labels: {} 10 | 11 | ## Orchestrator Bivac will run on 12 | # 13 | orchestrator: kubernetes 14 | 15 | ## Let Bivac back up volumes from all namespaces 16 | # 17 | watchAllNamespaces: true 18 | 19 | ## URL where Restic should push the backups 20 | # This field is required 21 | # 22 | targetURL: "" 23 | 24 | ## Password used by Restic to encrypt the backups 25 | # If left empty, a generated one will be used 26 | # 27 | resticPassword: "" 28 | 29 | ## Pre-shared key which protects the Bivac server 30 | # If left empty, a generated one will be used 31 | # 32 | serverPSK: "" 33 | 34 | ## Additional environment variables 35 | # 36 | extraEnv: [] 37 | 38 | service: 39 | type: ClusterIP 40 | port: 8182 41 | 42 | resources: {} 43 | # We usually recommend not to specify default resources and to leave this as a conscious 44 | # choice for the user. This also increases chances charts run on environments with little 45 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 46 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
47 | # limits: 48 | # cpu: 100m 49 | # memory: 128Mi 50 | # requests: 51 | # cpu: 100m 52 | # memory: 128Mi 53 | 54 | nodeSelector: {} 55 | 56 | tolerations: [] 57 | 58 | affinity: {} 59 | -------------------------------------------------------------------------------- /contrib/examples/docker-compose/alertmanager/config.yml: -------------------------------------------------------------------------------- 1 | global: 2 | resolve_timeout: 5m 3 | route: 4 | group_wait: 30s 5 | group_interval: 5m 6 | repeat_interval: 12h 7 | receiver: default 8 | receivers: 9 | - name: default 10 | -------------------------------------------------------------------------------- /contrib/examples/docker-compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | bivac: 4 | image: ghcr.io/camptocamp/bivac:2.4 5 | command: "manager -v" 6 | ports: 7 | - "8182:8182" 8 | volumes: 9 | - "/var/run/docker.sock:/var/run/docker.sock:ro" 10 | environment: 11 | BIVAC_SERVER_PSK: super-secret-psk 12 | RESTIC_PASSWORD: not-so-good-password 13 | BIVAC_TARGET_URL: s3:my-bucket 14 | AWS_ACCESS_KEY_ID: XXXXX 15 | AWS_SECRET_ACCESS_KEY: XXXXX 16 | prometheus: 17 | image: prom/prometheus:latest 18 | volumes: 19 | - ./prometheus/:/etc/prometheus/ 20 | - prometheus_data:/prometheus 21 | command: 22 | - '--config.file=/etc/prometheus/prometheus.yml' 23 | - '--storage.tsdb.path=/prometheus' 24 | - '--web.console.libraries=/usr/share/prometheus/console_libraries' 25 | - '--web.console.templates=/usr/share/prometheus/consoles' 26 | ports: 27 | - "9090:9090" 28 | 29 | alertmanager: 30 | image: prom/alertmanager 31 | ports: 32 | - "9093:9093" 33 | volumes: 34 | - ./alertmanager/:/etc/alertmanager/ 35 | command: 36 | - '--config.file=/etc/alertmanager/config.yml' 37 | - '--storage.path=/alertmanager' 38 | 39 | grafana: 40 | image: grafana/grafana 41 | user: "104" 42 | ports: 43 | - "3000:3000" 44 | volumes: 45 | - grafana_data:/var/lib/grafana 46 | - ./grafana/provisioning/:/etc/grafana/provisioning/ 47 | environment: 48 | GF_SECURITY_ADMIN_PASSWORD: admin 49 | GF_USERS_ALLOW_SIGN_UP: "false" 50 | 51 | volumes: 52 | prometheus_data: 53 | grafana_data: 54 | -------------------------------------------------------------------------------- /contrib/examples/docker-compose/grafana/provisioning/dashboards/Bivac.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": "-- Grafana --", 7 | "enable": true, 8 | "hide": true, 9 | "iconColor": "rgba(0, 211, 255, 1)", 10 | "name": "Annotations & Alerts", 11 | "type": "dashboard" 12 | } 13 | ] 14 | }, 15 | "editable": true, 16 | "gnetId": null, 17 | "graphTooltip": 0, 18 | "id": null, 19 | "links": [], 20 | "panels": [ 21 | { 22 | "columns": [], 23 | "datasource": "Prometheus", 24 | "fontSize": "100%", 25 | "gridPos": { 26 | "h": 9, 27 | "w": 24, 28 | "x": 0, 29 | "y": 0 30 | }, 31 | "id": 2, 32 | "links": [], 33 | "options": {}, 34 | "pageSize": null, 35 | "scroll": true, 36 | "showHeader": true, 37 | "sort": { 38 | "col": 5, 39 | "desc": true 40 | }, 41 | "styles": [ 42 | { 43 | "alias": "Time", 44 | "dateFormat": "YYYY-MM-DD HH:mm:ss", 45 | "pattern": "Time", 46 | "type": "date" 47 | }, 48 | { 49 | "alias": "", 50 | "colorMode": "row", 51 | "colors": [ 52 | "rgba(50, 172, 45, 0.97)", 53 | "rgba(237, 129, 40, 0.89)", 54 | "rgba(245, 54, 54, 0.9)" 55 | ], 56 | "dateFormat": "YYYY-MM-DD HH:mm:ss", 57 | 
"decimals": 0, 58 | "mappingType": 1, 59 | "pattern": "Value", 60 | "thresholds": [ 61 | "1", 62 | "1" 63 | ], 64 | "type": "number", 65 | "unit": "short" 66 | } 67 | ], 68 | "targets": [ 69 | { 70 | "expr": "sum(bivac_backupExitCode) by (hostname, instance, volume_id, volume_name)", 71 | "format": "table", 72 | "instant": true, 73 | "intervalFactor": 1, 74 | "legendFormat": "", 75 | "refId": "A" 76 | } 77 | ], 78 | "timeFrom": null, 79 | "timeShift": null, 80 | "title": "Backups status", 81 | "transform": "table", 82 | "type": "table" 83 | }, 84 | { 85 | "columns": [], 86 | "datasource": "Prometheus", 87 | "fontSize": "100%", 88 | "gridPos": { 89 | "h": 9, 90 | "w": 24, 91 | "x": 0, 92 | "y": 9 93 | }, 94 | "id": 3, 95 | "links": [], 96 | "options": {}, 97 | "pageSize": null, 98 | "scroll": true, 99 | "showHeader": true, 100 | "sort": { 101 | "col": 5, 102 | "desc": true 103 | }, 104 | "styles": [ 105 | { 106 | "alias": "Time", 107 | "dateFormat": "YYYY-MM-DD HH:mm:ss", 108 | "pattern": "Time", 109 | "type": "date" 110 | }, 111 | { 112 | "alias": "", 113 | "colorMode": "row", 114 | "colors": [ 115 | "rgba(50, 172, 45, 0.97)", 116 | "rgba(237, 129, 40, 0.89)", 117 | "rgba(245, 54, 54, 0.9)" 118 | ], 119 | "dateFormat": "YYYY-MM-DD HH:mm:ss", 120 | "decimals": 0, 121 | "mappingType": 1, 122 | "pattern": "Value", 123 | "thresholds": [ 124 | "176400", 125 | "176400" 126 | ], 127 | "type": "number", 128 | "unit": "s" 129 | } 130 | ], 131 | "targets": [ 132 | { 133 | "expr": "time() - sum(bivac_lastBackup) by (hostname, instance, volume_id, volume_name, hostname)", 134 | "format": "table", 135 | "instant": true, 136 | "intervalFactor": 1, 137 | "legendFormat": "", 138 | "refId": "A" 139 | } 140 | ], 141 | "timeFrom": null, 142 | "timeShift": null, 143 | "title": "Last backups", 144 | "transform": "table", 145 | "type": "table" 146 | } 147 | ], 148 | "schemaVersion": 18, 149 | "style": "dark", 150 | "tags": [], 151 | "templating": { 152 | "list": [] 153 | }, 154 | "time": { 155 | "from": "now-6h", 156 | "to": "now" 157 | }, 158 | "timepicker": { 159 | "refresh_intervals": [ 160 | "5s", 161 | "10s", 162 | "30s", 163 | "1m", 164 | "5m", 165 | "15m", 166 | "30m", 167 | "1h", 168 | "2h", 169 | "1d" 170 | ], 171 | "time_options": [ 172 | "5m", 173 | "15m", 174 | "1h", 175 | "6h", 176 | "12h", 177 | "24h", 178 | "2d", 179 | "7d", 180 | "30d" 181 | ] 182 | }, 183 | "timezone": "", 184 | "title": "Bivac - Backups", 185 | "uid": null, 186 | "version": 0 187 | } 188 | -------------------------------------------------------------------------------- /contrib/examples/docker-compose/grafana/provisioning/dashboards/dashboard.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'Bivac - Backups' 5 | orgId: 1 6 | folder: '' 7 | type: file 8 | disableDeletion: false 9 | editable: true 10 | options: 11 | path: /etc/grafana/provisioning/dashboards 12 | -------------------------------------------------------------------------------- /contrib/examples/docker-compose/grafana/provisioning/datasources/datasource.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | deleteDatasources: 4 | - name: Prometheus 5 | orgId: 1 6 | 7 | datasources: 8 | - name: Prometheus 9 | type: prometheus 10 | access: proxy 11 | orgId: 1 12 | url: http://prometheus:9090 13 | editable: true 14 | -------------------------------------------------------------------------------- 
/contrib/examples/docker-compose/prometheus/alert.rules:
--------------------------------------------------------------------------------
1 | groups:
2 | - name: example
3 | rules:
4 | - alert: BackupError
5 | expr: bivac_backupExitCode{} > 0
6 | 
7 | - alert: BackupOutdated
8 | expr: time() - bivac_lastBackup{} > 49 * 3600
9 | 
--------------------------------------------------------------------------------
/contrib/examples/docker-compose/prometheus/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 | scrape_interval: 15s
3 | evaluation_interval: 15s
4 | 
5 | rule_files:
6 | - 'alert.rules'
7 | 
8 | alerting:
9 | alertmanagers:
10 | - scheme: http
11 | static_configs:
12 | - targets:
13 | - "alertmanager:9093"
14 | 
15 | scrape_configs:
16 | - job_name: 'prometheus'
17 | static_configs:
18 | - targets: ['localhost:9090']
19 | - job_name: 'bivac'
20 | static_configs:
21 | - targets: ['bivac:8182']
22 | 
--------------------------------------------------------------------------------
/contrib/openshift/README.md:
--------------------------------------------------------------------------------
1 | # Bivac OpenShift Template
2 | Templates to install Bivac in OpenShift/OKD without Helm.
3 | 
4 | * Install the template:
5 | ```bash
6 | oc create -f bivac-template.yaml
7 | ```
8 | * Instantiate the template directly from the file:
9 | ```bash
10 | oc process -f bivac-template.yaml \
11 | -p BIVAC_TARGET_URL=s3:s3.amazonaws.com/<bucket> \
12 | -p AWS_ACCESS_KEY_ID=<access_key> \
13 | -p AWS_SECRET_ACCESS_KEY=<secret_key> \
14 | -p RESTIC_PASSWORD=<password> \
15 | -p NAMESPACE=<namespace> | oc create -f -
16 | ```
17 | 
18 | This will create a new namespace with a bivac-manager deployment, including all required resources such as the serviceaccount and secret. Note that you will need to create serviceaccounts & rolebindings for all namespaces in which you wish to back up PVCs. You can use the second file (bivac2-agent.template.yaml) for this:
19 | ```bash
20 | oc process -f bivac2-agent.template.yaml -p NAMESPACE=<namespace>
21 | ```
22 | 
23 | * To delete Bivac and all related resources:
24 | ```bash
25 | oc delete -n <namespace> serviceaccount bivac
26 | oc delete -n <namespace> deploymentconfig bivac
27 | oc delete -n <namespace> secret bivac
28 | oc delete -n <namespace> service bivac
29 | oc delete -n <namespace> route bivac
30 | oc delete clusterrolebinding bivac
31 | oc delete clusterrole bivac
32 | oc delete namespace <namespace>
33 | ```
34 | 
--------------------------------------------------------------------------------
/contrib/openshift/bivac-template.yaml:
--------------------------------------------------------------------------------
1 | # yamllint disable rule:line-length
2 | ---
3 | apiVersion: v1
4 | kind: Template
5 | labels:
6 | app: ${APP_NAME}
7 | metadata:
8 | name: ${APP_NAME}
9 | annotations:
10 | description: "Bivac lets you back up all your container volumes on Amazon S3 using Restic."
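# Usage examples for this template (oc process ... | oc create -f -) are in contrib/openshift/README.md above.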
11 | tags: "backup,bivac,restic" 12 | objects: 13 | - kind: Namespace 14 | apiVersion: v1 15 | metadata: 16 | annotations: 17 | openshift.io/description: Bivac Backup Manager 18 | name: ${NAMESPACE} 19 | spec: 20 | finalizers: 21 | - openshift.io/origin 22 | - kubernetes 23 | - kind: ClusterRole 24 | apiVersion: rbac.authorization.k8s.io/v1 25 | metadata: 26 | name: bivac 27 | rules: 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - pods 32 | verbs: 33 | - create 34 | - delete 35 | - get 36 | - list 37 | - apiGroups: 38 | - "" 39 | resources: 40 | - persistentvolumeclaims 41 | - persistentvolumes 42 | - namespaces 43 | - nodes 44 | verbs: 45 | - get 46 | - list 47 | - apiGroups: 48 | - "" 49 | resources: 50 | - pods/log 51 | - pods/exec 52 | verbs: 53 | - get 54 | - post 55 | - create 56 | - kind: ClusterRoleBinding 57 | apiVersion: rbac.authorization.k8s.io/v1 58 | metadata: 59 | name: ${APP_NAME} 60 | roleRef: 61 | apiGroup: rbac.authorization.k8s.io 62 | kind: ClusterRole 63 | name: bivac 64 | subjects: 65 | - kind: ServiceAccount 66 | name: bivac 67 | namespace: ${NAMESPACE} 68 | - apiVersion: v1 69 | kind: ServiceAccount 70 | metadata: 71 | name: bivac 72 | namespace: ${NAMESPACE} 73 | secrets: 74 | - name: bivac 75 | - kind: DeploymentConfig 76 | apiVersion: apps.openshift.io/v1 77 | metadata: 78 | labels: 79 | app: ${APP_NAME} 80 | annotations: 81 | template.alpha.openshift.io/wait-for-ready: "true" 82 | name: ${APP_NAME} 83 | namespace: ${NAMESPACE} 84 | spec: 85 | replicas: 1 86 | selector: 87 | app: ${APP_NAME} 88 | deploymentconfig: ${APP_NAME} 89 | strategy: 90 | activeDeadlineSeconds: 21600 91 | resources: {} 92 | rollingParams: 93 | intervalSeconds: 1 94 | maxSurge: 25% 95 | maxUnavailable: 25% 96 | timeoutSeconds: 600 97 | updatePeriodSeconds: 1 98 | type: Rolling 99 | template: 100 | metadata: 101 | labels: 102 | app: ${APP_NAME} 103 | deploymentconfig: ${APP_NAME} 104 | annotations: ${BIVAC_ANNOTATIONS} 105 | spec: 106 | dnsPolicy: ClusterFirst 107 | restartPolicy: Always 108 | schedulerName: default-scheduler 109 | securityContext: {} 110 | serviceAccount: bivac 111 | serviceAccountName: bivac 112 | terminationGracePeriodSeconds: 30 113 | containers: 114 | - image: ${BIVAC_MANAGER_IMAGE} 115 | args: 116 | - manager 117 | imagePullPolicy: IfNotPresent 118 | name: bivac 119 | resources: 120 | requests: 121 | cpu: 100m 122 | memory: 200Mi 123 | terminationMessagePath: /dev/termination-log 124 | terminationMessagePolicy: File 125 | env: 126 | - name: BIVAC_ORCHESTRATOR 127 | value: kubernetes 128 | - name: RESTIC_FORGET_ARGS 129 | value: ${RESTIC_FORGET_ARGS} 130 | - name: KUBERNETES_AGENT_SERVICE_ACCOUNT 131 | value: bivac 132 | - name: BIVAC_TARGET_URL 133 | value: ${BIVAC_TARGET_URL} 134 | - name: BIVAC_RETRY_COUNT 135 | value: ${BIVAC_RETRY_COUNT} 136 | - name: KUBERNETES_NAMESPACE 137 | value: ${KUBERNETES_NAMESPACE} 138 | - name: KUBERNETES_ALL_NAMESPACES 139 | value: ${KUBERNETES_ALL_NAMESPACES} 140 | - name: BIVAC_WHITELIST 141 | value: ${BIVAC_WHITELIST} 142 | - name: BIVAC_BLACKLIST 143 | value: ${BIVAC_BLACKLIST} 144 | - name: BIVAC_VERBOSE 145 | value: ${BIVAC_VERBOSE} 146 | envFrom: 147 | - secretRef: 148 | name: ${APP_NAME} 149 | - apiVersion: v1 150 | kind: Secret 151 | metadata: 152 | name: ${APP_NAME} 153 | namespace: ${NAMESPACE} 154 | stringData: 155 | AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID} 156 | AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY} 157 | BIVAC_SERVER_PSK: ${BIVAC_SERVER_PSK} 158 | RESTIC_PASSWORD: ${RESTIC_PASSWORD} 159 | - kind: Service 160 | 
apiVersion: v1
161 | metadata:
162 | labels:
163 | app: ${APP_NAME}
164 | name: ${APP_NAME}
165 | namespace: ${NAMESPACE}
166 | spec:
167 | ports:
168 | - name: 8182-tcp
169 | port: 8182
170 | protocol: TCP
171 | targetPort: 8182
172 | selector:
173 | deploymentconfig: ${APP_NAME}
174 | sessionAffinity: None
175 | type: ClusterIP
176 | - kind: Route
177 | apiVersion: route.openshift.io/v1
178 | metadata:
179 | labels:
180 | app: ${APP_NAME}
181 | name: ${APP_NAME}
182 | namespace: ${NAMESPACE}
183 | spec:
184 | port:
185 | targetPort: 8182-tcp
186 | tls:
187 | insecureEdgeTerminationPolicy: Redirect
188 | termination: edge
189 | to:
190 | kind: Service
191 | name: ${APP_NAME}
192 | weight: 100
193 | wildcardPolicy: None
194 | parameters:
195 | - name: NAMESPACE
196 | description: Namespace to create for the Bivac manager
197 | required: true
198 | - name: APP_NAME
199 | description: Name used for most Bivac objects. This name should be unique across the project.
200 | value: bivac
201 | required: true
202 | - name: KUBERNETES_ALL_NAMESPACES
203 | value: "true"
204 | description: Back up volumes of all namespaces (bool)
205 | - name: KUBERNETES_NAMESPACE
206 | description: Namespace where you want to run Bivac
207 | - name: RESTIC_FORGET_ARGS
208 | description: Retention policy for Restic, in Restic syntax (https://restic.readthedocs.io/en/latest/060_forget.html#removing-snapshots-according-to-a-policy)
209 | value: "--keep-daily 15 --keep-weekly 5 --keep-hourly 48 --keep-monthly 13 --keep-last 50 --prune"
210 | - name: RESTIC_PASSWORD
211 | from: '[\w]{64}'
212 | generate: expression
213 | description: Password used by Restic to encrypt backups. Note that you will need this value to access your backups; if you don't provide one, it will be randomly generated. Please store a copy of this key securely off-site!
214 | required: true
215 | - name: BIVAC_TARGET_URL
216 | description: URL of the bucket where backups are to be stored, in the form s3://s3.amazonaws.com/bucketname/subdir
217 | required: true
218 | - name: AWS_ACCESS_KEY_ID
219 | required: true
220 | - name: AWS_SECRET_ACCESS_KEY
221 | required: true
222 | - name: BIVAC_SERVER_PSK
223 | from: '[\w]{64}'
224 | generate: expression
225 | - name: BIVAC_RETRY_COUNT
226 | description: Number of times to retry a volume backup when something goes wrong in Bivac.
227 | value: "0"
228 | - name: BIVAC_WHITELIST
229 | description: Only back up whitelisted volumes (comma-separated list of PVC names)
230 | - name: BIVAC_BLACKLIST
231 | description: Do not back up blacklisted volumes (comma-separated list of PVC names)
232 | - name: BIVAC_MANAGER_IMAGE
233 | value: ghcr.io/camptocamp/bivac:2.4
234 | description: Image used for the Bivac manager
235 | - name: BIVAC_VERBOSE
236 | description: Enable verbose output (bool)
237 | value: "true"
238 | - name: BIVAC_ANNOTATIONS
239 | description: Set of annotations for the Bivac manager, which will be inherited by the bivac-agent
240 | required: false
241 | 
--------------------------------------------------------------------------------
/contrib/openshift/bivac2-agent.template.yaml:
--------------------------------------------------------------------------------
1 | # yamllint disable rule:line-length
2 | ---
3 | apiVersion: v1
4 | kind: Template
5 | labels:
6 | app: bivac
7 | metadata:
8 | name: bivac
9 | annotations:
10 | description: "Required serviceaccount and permissions for bivac-agent. Needs to be instantiated in all namespaces in which PVCs should be backed up."
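# Instantiate once per namespace containing PVCs to back up; see contrib/openshift/README.md above.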
11 | tags: "backup,bivac,agent,restic" 12 | objects: 13 | - kind: ClusterRoleBinding 14 | apiVersion: rbac.authorization.k8s.io/v1 15 | metadata: 16 | name: bivac-${NAMESPACE} 17 | roleRef: 18 | apiGroup: rbac.authorization.k8s.io 19 | kind: ClusterRole 20 | name: bivac 21 | subjects: 22 | - kind: ServiceAccount 23 | name: bivac 24 | namespace: ${NAMESPACE} 25 | - apiVersion: v1 26 | kind: ServiceAccount 27 | metadata: 28 | name: bivac 29 | namespace: ${NAMESPACE} 30 | parameters: 31 | - name: NAMESPACE 32 | description: namespace in which to add the serviceaccount 33 | required: true 34 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/camptocamp/bivac 2 | 3 | go 1.14 4 | 5 | require ( 6 | github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect 7 | github.com/BurntSushi/toml v0.3.1 8 | github.com/Microsoft/go-winio v0.4.11 // indirect 9 | github.com/Sirupsen/logrus v1.0.6 10 | github.com/docker/distribution v2.7.1+incompatible // indirect 11 | github.com/docker/docker v0.0.0-20190121204153-8d7889e51013 12 | github.com/docker/go-connections v0.4.0 // indirect 13 | github.com/docker/go-units v0.3.3 // indirect 14 | github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect 15 | github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f // indirect 16 | github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f // indirect 17 | github.com/golang/mock v1.4.0 18 | github.com/googleapis/gnostic v0.2.0 // indirect 19 | github.com/gorilla/context v1.1.1 // indirect 20 | github.com/gorilla/mux v1.6.2 21 | github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f // indirect 22 | github.com/imdario/mergo v0.3.7 // indirect 23 | github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3 24 | github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect 25 | github.com/mattn/go-runewidth v0.0.8 // indirect 26 | github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect 27 | github.com/onsi/ginkgo v1.9.0 // indirect 28 | github.com/onsi/gomega v1.6.0 // indirect 29 | github.com/opencontainers/go-digest v1.0.0-rc1 // indirect 30 | github.com/opencontainers/image-spec v1.0.1 // indirect 31 | github.com/peterbourgon/diskv v2.0.1+incompatible // indirect 32 | github.com/pkg/errors v0.9.1 // indirect 33 | github.com/prometheus/client_golang v1.4.1 34 | github.com/rancher/go-rancher v0.0.0-20190109212254-cbc1b0a3f68d 35 | github.com/rancher/go-rancher-metadata v0.0.0-20170929155856-d2103caca587 36 | github.com/spf13/afero v1.2.0 // indirect 37 | github.com/spf13/cobra v1.0.0 38 | github.com/spf13/pflag v1.0.5 39 | github.com/spf13/viper v1.4.0 40 | github.com/stretchr/testify v1.5.1 41 | github.com/tatsushid/go-prettytable v0.0.0-20141013043238-ed2d14c29939 42 | golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect 43 | golang.org/x/net v0.0.0-20200625001655-4c5254603344 44 | golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect 45 | golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect 46 | golang.org/x/tools v0.0.0-20200828161849-5deb26317202 // indirect 47 | google.golang.org/appengine v1.6.5 // indirect 48 | google.golang.org/genproto v0.0.0-20200225123651-fc8f55426688 // indirect 49 | google.golang.org/grpc v1.27.1 // indirect 50 | gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect 51 | gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect 52 | 
gopkg.in/inf.v0 v0.9.1 // indirect
53 | gopkg.in/jarcoal/httpmock.v1 v1.0.0-20181117152235-275e9df93516
54 | gopkg.in/yaml.v2 v2.2.8 // indirect
55 | gotest.tools v2.2.0+incompatible // indirect
56 | k8s.io/api v0.0.0-20190126160303-ccdd560a045f
57 | k8s.io/apimachinery v0.0.0-20190126155707-0e6dcdd1b5ce
58 | k8s.io/client-go v0.0.0-20190126161006-6134db91200e
59 | k8s.io/klog v0.1.0 // indirect
60 | k8s.io/utils v0.0.0-20190131231213-4ae6e769426e // indirect
61 | sigs.k8s.io/yaml v1.1.0 // indirect
62 | )
63 | 
64 | replace github.com/docker/docker => github.com/moby/moby v0.7.3-0.20190217132422-c093c1e08b60
65 | 
--------------------------------------------------------------------------------
/img/bivac.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/camptocamp/bivac/fea7057dd002ae6c79472a1f6480e158449215ad/img/bivac.png
--------------------------------------------------------------------------------
/img/bivac.svg:
--------------------------------------------------------------------------------
[SVG markup lost in extraction; image text: "BIVAC"]
--------------------------------------------------------------------------------
/img/bivac_descr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/camptocamp/bivac/fea7057dd002ae6c79472a1f6480e158449215ad/img/bivac_descr.png
--------------------------------------------------------------------------------
/img/bivac_descr.svg:
--------------------------------------------------------------------------------
[SVG markup lost in extraction; image text: "BIVAC" / "Backup Interface for Volumes Attached to Containers"]
--------------------------------------------------------------------------------
/img/bivac_small.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/camptocamp/bivac/fea7057dd002ae6c79472a1f6480e158449215ad/img/bivac_small.png
--------------------------------------------------------------------------------
/img/bivac_tent3_cont_orange_green.svg:
--------------------------------------------------------------------------------
[SVG markup lost in extraction; image text: "BIVAC"]
--------------------------------------------------------------------------------
/img/bivac_tent3_cont_orange_green3_front.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/camptocamp/bivac/fea7057dd002ae6c79472a1f6480e158449215ad/img/bivac_tent3_cont_orange_green3_front.png
--------------------------------------------------------------------------------
/img/bivac_tent3_cont_orange_green3_front_descr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/camptocamp/bivac/fea7057dd002ae6c79472a1f6480e158449215ad/img/bivac_tent3_cont_orange_green3_front_descr.png
--------------------------------------------------------------------------------
/img/bivac_tent3_cont_orange_green3_front_descr_400.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/camptocamp/bivac/fea7057dd002ae6c79472a1f6480e158449215ad/img/bivac_tent3_cont_orange_green3_front_descr_400.png
--------------------------------------------------------------------------------
/img/bivac_tent3_cont_orange_green3_front_persp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/camptocamp/bivac/fea7057dd002ae6c79472a1f6480e158449215ad/img/bivac_tent3_cont_orange_green3_front_persp.png
--------------------------------------------------------------------------------
/img/bivac_tent3_cont_orange_green3_front_persp_300dpi.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/camptocamp/bivac/fea7057dd002ae6c79472a1f6480e158449215ad/img/bivac_tent3_cont_orange_green3_front_persp_300dpi.png
--------------------------------------------------------------------------------
/img/bivac_tent3_cont_orange_green_descr.svg:
--------------------------------------------------------------------------------
[SVG markup lost in extraction; image text: "BIVAC" / "Backup Interface for Volumes Attached to Containers"]
--------------------------------------------------------------------------------
/internal/agent/agent.go:
--------------------------------------------------------------------------------
1 | package agent
2 | 
3 | import (
4 | "bytes"
5 | "encoding/base64"
6 | "fmt"
7 | "io/ioutil"
8 | "net/http"
9 | "os"
10 | 
11 | log "github.com/Sirupsen/logrus"
12 | 
13 | "github.com/camptocamp/bivac/internal/engine"
14 | "github.com/camptocamp/bivac/internal/utils"
15 | )
16 | 
17 | // Backup runs Restic commands to back up a volume
18 | func Backup(targetURL, backupPath, hostname string, force bool, logReceiver string) {
19 | e := &engine.Engine{
20 | DefaultArgs: []string{
21 | "--no-cache",
22 | "--json",
23 | "-r",
24 | targetURL,
25 | },
26 | Output: make(map[string]utils.OutputFormat),
27 | }
28 | 
29 | output := e.Backup(backupPath, hostname, force)
30 | 
31 | if logReceiver != "" {
32 | data := `{"data":` + output + `}`
33 | req, err := http.NewRequest("POST", logReceiver, bytes.NewBuffer([]byte(data)))
34 | if err != nil {
35 | log.Errorf("failed to build new request: %s\n", err)
36 | return
37 | }
38 | req.Header.Set("Content-Type", "application/json")
39 | req.Header.Set("Authorization", "Bearer "+os.Getenv("BIVAC_SERVER_PSK"))
40 | 
41 | client := &http.Client{}
42 | resp, err := client.Do(req)
43 | if err != nil {
44 | log.Errorf("failed to send data: %s\n", err)
45 | return
46 | }
47 | defer resp.Body.Close()
48 | 
49 | b, err := ioutil.ReadAll(resp.Body)
50 | if err != nil {
51 | log.Errorf("failed to read body: %s\n", err)
52 | return
53 | }
54 | if resp.StatusCode != 200 {
55 | log.Infof("Response from API: %s", b)
56 | return
57 | }
58 | return
59 | }
60 | 
61 | fmt.Println(base64.StdEncoding.EncodeToString([]byte(output)))
62 | return
63 | }
64 | 
65 | // Restore runs Restic commands to restore backed up data to a new volume
66 | func Restore(
67 | targetURL,
68 | backupPath,
69 | hostname string,
70 | force bool,
71 | logReceiver string,
72 | snapshotName string,
73 | ) {
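// Restore builds the same default restic arguments as Backup above:
// JSON output, no local cache, and the repository URL passed via -r.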
| "--no-cache", 77 | "--json", 78 | "-r", 79 | targetURL, 80 | }, 81 | Output: make(map[string]utils.OutputFormat), 82 | } 83 | output := e.Restore(backupPath, hostname, force, snapshotName) 84 | if logReceiver != "" { 85 | data := `{"data":` + output + `}` 86 | req, err := http.NewRequest( 87 | "POST", 88 | logReceiver, 89 | bytes.NewBuffer([]byte(data)), 90 | ) 91 | if err != nil { 92 | log.Errorf("failed to build new request: %s\n", err) 93 | return 94 | } 95 | req.Header.Set("Content-Type", "application/json") 96 | req.Header.Set( 97 | "Authorization", 98 | "Bearer "+os.Getenv("BIVAC_SERVER_PSK"), 99 | ) 100 | client := &http.Client{} 101 | resp, err := client.Do(req) 102 | if err != nil { 103 | log.Errorf("failed to send data: %s\n", err) 104 | return 105 | } 106 | defer resp.Body.Close() 107 | b, err := ioutil.ReadAll(resp.Body) 108 | if err != nil { 109 | log.Errorf("failed to read body: %s\n", err) 110 | return 111 | } 112 | if resp.StatusCode != 200 { 113 | log.Infof("Response from API: %s", b) 114 | } 115 | return 116 | } 117 | 118 | fmt.Println(base64.StdEncoding.EncodeToString([]byte(output))) 119 | return 120 | } 121 | -------------------------------------------------------------------------------- /internal/engine/engine.go: -------------------------------------------------------------------------------- 1 | package engine 2 | 3 | import ( 4 | "encoding/base64" 5 | "encoding/json" 6 | "fmt" 7 | "io/ioutil" 8 | "os" 9 | "os/exec" 10 | "strings" 11 | "time" 12 | 13 | "github.com/camptocamp/bivac/internal/utils" 14 | ) 15 | 16 | // Engine stores informations to use Restic backup engine 17 | type Engine struct { 18 | DefaultArgs []string 19 | Output map[string]utils.OutputFormat 20 | } 21 | 22 | // Snapshot is a struct returned by the function snapshots() 23 | type Snapshot struct { 24 | Time time.Time `json:"time"` 25 | Parent string `json:"parent"` 26 | Tree string `json:"tree"` 27 | Path []string `json:"path"` 28 | Hostname string `json:"hostname"` 29 | ID string `json:"id"` 30 | ShortID string `json:"short_id"` 31 | } 32 | 33 | // GetName returns the engine name 34 | func (*Engine) GetName() string { 35 | return "restic" 36 | } 37 | 38 | // Backup performs the backup of the passed volume 39 | func (r *Engine) Backup(backupPath, hostname string, force bool) string { 40 | var err error 41 | 42 | err = r.initializeRepository() 43 | if err != nil { 44 | return utils.ReturnFormattedOutput(r.Output) 45 | } 46 | 47 | if force { 48 | err = r.unlockRepository() 49 | if err != nil { 50 | return utils.ReturnFormattedOutput(r.Output) 51 | } 52 | } 53 | 54 | err = r.backupVolume(hostname, backupPath) 55 | if err != nil { 56 | return utils.ReturnFormattedOutput(r.Output) 57 | } 58 | 59 | // A backup lock may remains. A retry loop with sleeps is probably the best solution to avoid lock errors. 
60 | for i := 0; i < 3; i++ { 61 | err = r.forget() 62 | if err == nil { 63 | break 64 | } 65 | time.Sleep(60 * time.Second) 66 | } 67 | if err != nil { 68 | return utils.ReturnFormattedOutput(r.Output) 69 | } 70 | 71 | for i := 0; i < 3; i++ { 72 | err = r.retrieveBackupsStats() 73 | if err == nil { 74 | break 75 | } 76 | time.Sleep(10 * time.Second) 77 | } 78 | 79 | return utils.ReturnFormattedOutput(r.Output) 80 | } 81 | 82 | // Restore performs the restore of the passed volume 83 | func (r *Engine) Restore( 84 | backupPath, 85 | hostname string, 86 | force bool, 87 | snapshotName string, 88 | ) string { 89 | var err error 90 | if force { 91 | err = r.unlockRepository() 92 | if err != nil { 93 | return utils.ReturnFormattedOutput(r.Output) 94 | } 95 | } 96 | err = r.restoreVolume(hostname, backupPath, snapshotName) 97 | if err != nil { 98 | return utils.ReturnFormattedOutput(r.Output) 99 | } 100 | for i := 0; i < 3; i++ { 101 | err = r.retrieveBackupsStats() 102 | if err == nil { 103 | break 104 | } 105 | time.Sleep(10 * time.Second) 106 | } 107 | if err != nil { 108 | return utils.ReturnFormattedOutput(r.Output) 109 | } 110 | return utils.ReturnFormattedOutput(r.Output) 111 | } 112 | 113 | func (r *Engine) initializeRepository() (err error) { 114 | rc := 0 115 | 116 | // Check if the remote repository exists 117 | output, err := exec.Command("restic", append(r.DefaultArgs, "snapshots")...).CombinedOutput() 118 | if err != nil { 119 | rc = utils.HandleExitCode(err) 120 | } 121 | if rc == 0 { 122 | return 123 | } 124 | r.Output["testInit"] = utils.OutputFormat{ 125 | Stdout: base64.StdEncoding.EncodeToString(output), 126 | ExitCode: rc, 127 | } 128 | err = nil 129 | 130 | rc = 0 131 | // Create remote repository 132 | output, err = exec.Command("restic", append(r.DefaultArgs, "init")...).CombinedOutput() 133 | if err != nil { 134 | rc = utils.HandleExitCode(err) 135 | } 136 | r.Output["init"] = utils.OutputFormat{ 137 | Stdout: base64.StdEncoding.EncodeToString(output), 138 | ExitCode: rc, 139 | } 140 | fmt.Printf("init: %s\n", output) 141 | return 142 | } 143 | 144 | func (r *Engine) backupVolume(hostname, backupPath string) (err error) { 145 | rc := 0 146 | output, err := exec.Command("restic", append(r.DefaultArgs, []string{"--host", hostname, "backup", backupPath}...)...).CombinedOutput() 147 | if err != nil { 148 | rc = utils.HandleExitCode(err) 149 | } 150 | r.Output["backup"] = utils.OutputFormat{ 151 | Stdout: base64.StdEncoding.EncodeToString(output), 152 | ExitCode: rc, 153 | } 154 | fmt.Printf("backup: %s\n", output) 155 | return 156 | } 157 | 158 | func (r *Engine) forget() (err error) { 159 | rc := 0 160 | cmd := append(r.DefaultArgs, "forget") 161 | cmd = append(cmd, strings.Split(os.Getenv("RESTIC_FORGET_ARGS"), " ")...) 
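// RESTIC_FORGET_ARGS is expected to hold space-separated restic flags,
// e.g. "--keep-daily 15 --keep-last 50 --prune" (the OpenShift template
// above ships a default of this shape). Note that when the variable is
// unset, strings.Split returns a single empty string, which is appended
// to the command line as an empty argument.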
162 | 163 | output, err := exec.Command("restic", cmd...).CombinedOutput() 164 | if err != nil { 165 | rc = utils.HandleExitCode(err) 166 | } 167 | r.Output["forget"] = utils.OutputFormat{ 168 | Stdout: base64.StdEncoding.EncodeToString(output), 169 | ExitCode: rc, 170 | } 171 | fmt.Printf("forget: %s\n", output) 172 | return 173 | } 174 | 175 | func (r *Engine) restoreVolume( 176 | hostname, 177 | backupPath string, 178 | snapshotName string, 179 | ) (err error) { 180 | rc := 0 181 | origionalBackupPath := r.getOrigionalBackupPath( 182 | hostname, 183 | backupPath, 184 | snapshotName, 185 | ) 186 | workingPath, err := utils.GetRandomFilePath(backupPath) 187 | workingPath = strings.ReplaceAll(workingPath, "//", "/") 188 | if err != nil { 189 | rc = utils.HandleExitCode(err) 190 | } 191 | err = os.MkdirAll(workingPath, 0700) 192 | if err != nil { 193 | rc = utils.HandleExitCode(err) 194 | } 195 | output, err := exec.Command( 196 | "restic", 197 | append( 198 | r.DefaultArgs, 199 | []string{ 200 | "restore", 201 | snapshotName, 202 | "--target", 203 | workingPath, 204 | }..., 205 | )..., 206 | ).CombinedOutput() 207 | restoreDumpPath := workingPath + origionalBackupPath 208 | files, err := ioutil.ReadDir(restoreDumpPath) 209 | if err != nil { 210 | rc = utils.HandleExitCode(err) 211 | } 212 | collisionName := "" 213 | for _, f := range files { 214 | fileName := f.Name() 215 | restoreSubPath := strings.ReplaceAll(backupPath+"/"+fileName, "//", "/") 216 | if restoreSubPath == workingPath { 217 | collisionName, err = utils.GetRandomFileName(workingPath) 218 | if err != nil { 219 | rc = utils.HandleExitCode(err) 220 | } 221 | restoreSubPath = strings.ReplaceAll(workingPath+"/"+collisionName, "//", "/") 222 | } 223 | err = utils.MergePaths( 224 | strings.ReplaceAll(restoreDumpPath+"/"+fileName, "//", "/"), 225 | restoreSubPath, 226 | ) 227 | if err != nil { 228 | rc = utils.HandleExitCode(err) 229 | } 230 | err = os.RemoveAll( 231 | strings.ReplaceAll(restoreDumpPath+"/"+fileName, "//", "/"), 232 | ) 233 | if err != nil { 234 | rc = utils.HandleExitCode(err) 235 | } 236 | } 237 | if len(collisionName) > 0 { 238 | tmpWorkingPath, err := utils.GetRandomFilePath(backupPath) 239 | if err != nil { 240 | rc = utils.HandleExitCode(err) 241 | } 242 | err = os.Rename( 243 | workingPath, 244 | tmpWorkingPath, 245 | ) 246 | if err != nil { 247 | rc = utils.HandleExitCode(err) 248 | } 249 | err = os.Rename( 250 | strings.ReplaceAll(tmpWorkingPath+"/"+collisionName, "//", "/"), 251 | workingPath, 252 | ) 253 | if err != nil { 254 | rc = utils.HandleExitCode(err) 255 | } 256 | err = os.RemoveAll(tmpWorkingPath) 257 | if err != nil { 258 | rc = utils.HandleExitCode(err) 259 | } 260 | } else { 261 | err = os.RemoveAll(workingPath) 262 | if err != nil { 263 | rc = utils.HandleExitCode(err) 264 | } 265 | } 266 | r.Output["restore"] = utils.OutputFormat{ 267 | Stdout: base64.StdEncoding.EncodeToString(output), 268 | ExitCode: rc, 269 | } 270 | err = nil 271 | return 272 | } 273 | 274 | func (r *Engine) getOrigionalBackupPath( 275 | hostname, 276 | backupPath string, 277 | snapshotName string, 278 | ) string { 279 | output, err := exec.Command( 280 | "restic", 281 | append( 282 | r.DefaultArgs, 283 | []string{"ls", snapshotName}..., 284 | )..., 285 | ).CombinedOutput() 286 | if err != nil { 287 | return err.Error() 288 | } 289 | type Header struct { 290 | Paths []string `json:"paths"` 291 | } 292 | headerJSON := []byte("{\"paths\": [\"\"]") 293 | jsons := strings.Split(string(output), "\n") 294 | for i := 0; i < 
len(jsons); i++ { 295 | if strings.Index(jsons[i], "\",\"paths\":[\"") > -1 { 296 | headerJSON = []byte(jsons[i]) 297 | break 298 | } 299 | } 300 | var header Header 301 | err = json.Unmarshal(headerJSON, &header) 302 | if err != nil { 303 | return "" 304 | } 305 | return header.Paths[0] 306 | } 307 | 308 | func (r *Engine) retrieveBackupsStats() (err error) { 309 | rc := 0 310 | output, err := exec.Command("restic", append(r.DefaultArgs, []string{"snapshots"}...)...).CombinedOutput() 311 | if err != nil { 312 | rc = utils.HandleExitCode(err) 313 | } 314 | r.Output["snapshots"] = utils.OutputFormat{ 315 | Stdout: base64.StdEncoding.EncodeToString(output), 316 | ExitCode: rc, 317 | } 318 | fmt.Printf("snapshots: %s\n", output) 319 | 320 | return 321 | } 322 | 323 | func (r *Engine) unlockRepository() (err error) { 324 | rc := 0 325 | output, err := exec.Command("restic", append(r.DefaultArgs, []string{"unlock", "--remove-all"}...)...).CombinedOutput() 326 | if err != nil { 327 | rc = utils.HandleExitCode(err) 328 | } 329 | r.Output["unlock"] = utils.OutputFormat{ 330 | Stdout: base64.StdEncoding.EncodeToString(output), 331 | ExitCode: rc, 332 | } 333 | fmt.Printf("unlock: %s\n", output) 334 | err = nil 335 | return 336 | } 337 | 338 | // GetBackupDates runs a Restic command locally to retrieve latest snapshot date 339 | func (r *Engine) GetBackupDates() (latestSnapshotDate, oldestSnapshotDate time.Time, err error) { 340 | output, _ := exec.Command("restic", append(r.DefaultArgs, []string{"snapshots"}...)...).CombinedOutput() 341 | 342 | var data []Snapshot 343 | err = json.Unmarshal(output, &data) 344 | if err != nil { 345 | return 346 | } 347 | 348 | if len(data) == 0 { 349 | return 350 | } 351 | 352 | latestSnapshot := data[len(data)-1] 353 | 354 | latestSnapshotDate = latestSnapshot.Time 355 | if err != nil { 356 | return 357 | } 358 | 359 | oldestSnapshot := data[0] 360 | 361 | oldestSnapshotDate = oldestSnapshot.Time 362 | if err != nil { 363 | return 364 | } 365 | return 366 | } 367 | 368 | // RawCommand runs a custom Restic command locally 369 | func (r *Engine) RawCommand(cmd []string) (err error) { 370 | rc := 0 371 | output, err := exec.Command("restic", append(r.DefaultArgs, cmd...)...).CombinedOutput() 372 | if err != nil { 373 | rc = utils.HandleExitCode(err) 374 | } 375 | r.Output["raw"] = utils.OutputFormat{ 376 | Stdout: base64.StdEncoding.EncodeToString(output), 377 | ExitCode: rc, 378 | } 379 | return 380 | } 381 | -------------------------------------------------------------------------------- /internal/manager/backup.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "encoding/base64" 5 | "encoding/json" 6 | "fmt" 7 | "os" 8 | "regexp" 9 | "strings" 10 | "time" 11 | 12 | log "github.com/Sirupsen/logrus" 13 | 14 | "github.com/camptocamp/bivac/internal/engine" 15 | "github.com/camptocamp/bivac/internal/utils" 16 | "github.com/camptocamp/bivac/pkg/volume" 17 | ) 18 | 19 | func backupVolume(m *Manager, v *volume.Volume, force bool) (err error) { 20 | 21 | v.BackingUp = true 22 | defer func() { 23 | v.BackingUp = false 24 | v.LastBackupStartDate = "" 25 | }() 26 | 27 | v.Mux.Lock() 28 | defer v.Mux.Unlock() 29 | 30 | useLogReceiver := false 31 | if m.LogServer != "" { 32 | useLogReceiver = true 33 | } 34 | 35 | v.LastBackupStartDate = time.Now().Format("2006-01-02 15:04:05") 36 | 37 | p, err := m.Providers.GetProvider(m.Orchestrator, v) 38 | if err != nil { 39 | err = fmt.Errorf("failed to get 
provider: %s", err) 40 | return 41 | } 42 | 43 | if p.PreCmd != "" { 44 | log.WithFields(log.Fields{ 45 | "volume": v.Name, 46 | "hostname": v.Hostname, 47 | "provider": p.Name, 48 | }).Debug("running pre-command...") 49 | 50 | err = RunCmd(p, m.Orchestrator, v, p.PreCmd, "precmd") 51 | if err != nil { 52 | log.WithFields(log.Fields{ 53 | "volume": v.Name, 54 | "hostname": v.Hostname, 55 | "provider": p.Name, 56 | }).Warningf("failed to run pre-command: %s", err) 57 | } 58 | } 59 | 60 | cmd := []string{ 61 | "agent", 62 | "backup", 63 | "-p", 64 | v.Mountpoint + v.SubPath + "/" + v.BackupDir, 65 | "-r", 66 | m.TargetURL + "/" + m.Orchestrator.GetPath(v) + "/" + v.RepoName, 67 | "--host", 68 | m.Orchestrator.GetPath(v), 69 | } 70 | 71 | if force { 72 | cmd = append(cmd, "--force") 73 | } 74 | 75 | if useLogReceiver { 76 | cmd = append(cmd, []string{"--log.receiver", m.LogServer + "/backup/" + v.ID + "/logs"}...) 77 | } 78 | 79 | log.WithFields(log.Fields{ 80 | "volume": v.Name, 81 | "hostname": v.Hostname, 82 | "agent_image": m.AgentImage, 83 | }).Debug("deploying agent...") 84 | 85 | _, output, err := m.Orchestrator.DeployAgent( 86 | m.AgentImage, 87 | cmd, 88 | os.Environ(), 89 | v, 90 | ) 91 | if err != nil { 92 | err = fmt.Errorf("failed to deploy agent: %s", err) 93 | return 94 | } 95 | 96 | if !useLogReceiver { 97 | decodedOutput, err := base64.StdEncoding.DecodeString(strings.Replace(output, " ", "", -1)) 98 | if err != nil { 99 | log.Errorf("failed to decode agent output of `%s` : %s -> `%s`", v.Name, err, strings.Replace(output, " ", "", -1)) 100 | } else { 101 | var agentOutput utils.MsgFormat 102 | err = json.Unmarshal(decodedOutput, &agentOutput) 103 | if err != nil { 104 | log.WithFields(log.Fields{ 105 | "volume": v.Name, 106 | "hostname": v.Hostname, 107 | }).Warningf("failed to unmarshal agent output: %s -> `%s`", err, strings.TrimSpace(output)) 108 | } 109 | 110 | m.updateBackupLogs(v, agentOutput) 111 | } 112 | } 113 | 114 | if p.PostCmd != "" { 115 | log.WithFields(log.Fields{ 116 | "volume": v.Name, 117 | "hostname": v.Hostname, 118 | "provider": p.Name, 119 | }).Debug("running post-command...") 120 | 121 | err = RunCmd(p, m.Orchestrator, v, p.PostCmd, "postcmd") 122 | if err != nil { 123 | log.WithFields(log.Fields{ 124 | "volume": v.Name, 125 | "hostname": v.Hostname, 126 | }).Warningf("failed to run post-command: %s", err) 127 | } 128 | } 129 | 130 | return 131 | } 132 | 133 | func (m *Manager) attachOrphanAgent(containerID string, v *volume.Volume) { 134 | defer func() { v.BackingUp = false }() 135 | 136 | p, err := m.Providers.GetProvider(m.Orchestrator, v) 137 | if err != nil { 138 | err = fmt.Errorf("failed to get provider: %s", err) 139 | return 140 | } 141 | useLogReceiver := false 142 | if m.LogServer != "" { 143 | useLogReceiver = true 144 | } 145 | 146 | _, output, err := m.Orchestrator.AttachOrphanAgent(containerID, v.Namespace) 147 | if err != nil { 148 | log.WithFields(log.Fields{ 149 | "volume": v.Name, 150 | "hostname": v.Hostname, 151 | }).Errorf("failed to attach orphan agent: %s", err) 152 | return 153 | } 154 | 155 | if !useLogReceiver { 156 | decodedOutput, err := base64.StdEncoding.DecodeString(strings.TrimSpace(output)) 157 | if err != nil { 158 | log.Errorf("failed to decode agent output of `%s` : %s -> `%s`", v.Name, err, output) 159 | } else { 160 | var agentOutput utils.MsgFormat 161 | err = json.Unmarshal(decodedOutput, &agentOutput) 162 | if err != nil { 163 | log.WithFields(log.Fields{ 164 | "volume": v.Name, 165 | "hostname": v.Hostname, 
166 | }).Warningf("failed to unmarshal agent output: %s -> `%s`", err, output) 167 | } 168 | 169 | m.updateBackupLogs(v, agentOutput) 170 | } 171 | } 172 | if p.PostCmd != "" { 173 | log.WithFields(log.Fields{ 174 | "volume": v.Name, 175 | "hostname": v.Hostname, 176 | "provider": p.Name, 177 | }).Debug("running post-command...") 178 | 179 | err = RunCmd(p, m.Orchestrator, v, p.PostCmd, "postcmd") 180 | if err != nil { 181 | log.WithFields(log.Fields{ 182 | "volume": v.Name, 183 | "hostname": v.Hostname, 184 | }).Warningf("failed to run post-command: %s", err) 185 | } 186 | } 187 | return 188 | } 189 | 190 | func (m *Manager) updateBackupLogs(v *volume.Volume, agentOutput utils.MsgFormat) { 191 | if agentOutput.Type != "success" { 192 | v.LastBackupStatus = "Failed" 193 | v.Metrics.LastBackupStatus.Set(1.0) 194 | } else { 195 | success := true 196 | v.Logs = make(map[string]string) 197 | for stepKey, stepValue := range agentOutput.Content.(map[string]interface{}) { 198 | if stepKey != "testInit" && stepValue.(map[string]interface{})["rc"].(float64) > 0.0 { 199 | success = false 200 | } 201 | stdout, _ := base64.StdEncoding.DecodeString(stepValue.(map[string]interface{})["stdout"].(string)) 202 | v.Logs[stepKey] = fmt.Sprintf("[%d] %s", int(stepValue.(map[string]interface{})["rc"].(float64)), stdout) 203 | } 204 | if success { 205 | v.LastBackupStatus = "Success" 206 | v.Metrics.LastBackupStatus.Set(0.0) 207 | err := m.setOldestBackupDate(v) 208 | if err != nil { 209 | log.Errorf("failed to set oldest backup date: %s", err) 210 | } 211 | } else { 212 | v.LastBackupStatus = "Failed" 213 | v.Metrics.LastBackupStatus.Set(1.0) 214 | } 215 | } 216 | 217 | v.LastBackupDate = time.Now().UTC().Format("2006-01-02 15:04:05") 218 | return 219 | } 220 | 221 | func (m *Manager) setOldestBackupDate(v *volume.Volume) (err error) { 222 | r, err := regexp.Compile(`\S{3} (.*)`) 223 | 224 | matches := r.FindStringSubmatch(v.Logs["snapshots"]) 225 | 226 | stdout := "" 227 | 228 | if len(matches) >= 1 { 229 | stdout = matches[1] 230 | } 231 | 232 | var snapshots []engine.Snapshot 233 | 234 | err = json.Unmarshal([]byte(stdout), &snapshots) 235 | if err != nil { 236 | err = fmt.Errorf("failed to unmarshal: %s", err) 237 | return 238 | } 239 | 240 | if len(snapshots) > 0 { 241 | v.Metrics.OldestBackupDate.Set(float64(snapshots[0].Time.Unix())) 242 | v.Metrics.LastBackupDate.Set(float64(snapshots[len(snapshots)-1].Time.Unix())) 243 | v.Metrics.BackupCount.Set(float64(len(snapshots))) 244 | } 245 | 246 | return 247 | } 248 | 249 | // RunResticCommand runs a custom Restic command 250 | func (m *Manager) RunResticCommand(v *volume.Volume, cmd []string) (output string, err error) { 251 | e := &engine.Engine{ 252 | DefaultArgs: []string{ 253 | "--no-cache", 254 | "-r", 255 | m.TargetURL + "/" + m.Orchestrator.GetPath(v) + "/" + v.RepoName, 256 | }, 257 | Output: make(map[string]utils.OutputFormat), 258 | } 259 | 260 | err = e.RawCommand(cmd) 261 | 262 | output = e.Output["raw"].Stdout 263 | return 264 | } 265 | -------------------------------------------------------------------------------- /internal/manager/manager.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | log "github.com/Sirupsen/logrus" 8 | 9 | "github.com/camptocamp/bivac/internal/utils" 10 | "github.com/camptocamp/bivac/pkg/orchestrators" 11 | "github.com/camptocamp/bivac/pkg/volume" 12 | ) 13 | 14 | // Orchestrators groups the parameters of all supported 
orchestrators in one structure
15 | type Orchestrators struct {
16 | Docker orchestrators.DockerConfig
17 | Cattle orchestrators.CattleConfig
18 | Kubernetes orchestrators.KubernetesConfig
19 | }
20 | 
21 | // Manager contains all information used by the Bivac manager
22 | type Manager struct {
23 | Orchestrator orchestrators.Orchestrator
24 | Volumes []*volume.Volume
25 | Server *Server
26 | Providers *Providers
27 | TargetURL string
28 | RetryCount int
29 | LogServer string
30 | BuildInfo utils.BuildInfo
31 | AgentImage string
32 | 
33 | backupSlots chan *volume.Volume
34 | }
35 | 
36 | // Start starts a Bivac manager which handles backup management
37 | func Start(buildInfo utils.BuildInfo, o orchestrators.Orchestrator, s Server, volumeFilters volume.Filters, providersFile, targetURL, logServer, agentImage string, retryCount, parallelCount int, refreshRate, backupInterval string) (err error) {
38 | p, err := LoadProviders(providersFile)
39 | if err != nil {
40 | err = fmt.Errorf("failed to read providers file: %s", err)
41 | return
42 | }
43 | 
44 | refreshInterval, err := time.ParseDuration(refreshRate)
45 | if err != nil {
46 | err = fmt.Errorf("failed to parse refresh time: %s", err)
47 | return
48 | }
49 | 
50 | backupInt, err := time.ParseDuration(backupInterval)
51 | if err != nil {
52 | err = fmt.Errorf("failed to parse backup interval: %s", err)
53 | return
54 | }
55 | 
56 | m := &Manager{
57 | Orchestrator: o,
58 | Server: &s,
59 | Providers: &p,
60 | TargetURL: targetURL,
61 | RetryCount: retryCount,
62 | LogServer: logServer,
63 | BuildInfo: buildInfo,
64 | AgentImage: agentImage,
65 | 
66 | backupSlots: make(chan *volume.Volume, 100),
67 | }
68 | 
69 | // Catch orphan agents
70 | orphanAgents, err := m.Orchestrator.RetrieveOrphanAgents()
71 | if err != nil {
72 | log.Errorf("failed to retrieve orphan agents: %s", err)
73 | }
74 | 
75 | // Manage volumes
76 | go func(m *Manager, volumeFilters volume.Filters) {
77 | 
78 | log.Debugf("Starting volume manager...")
79 | 
80 | for {
81 | err = retrieveVolumes(m, volumeFilters)
82 | if err != nil {
83 | log.Errorf("failed to retrieve volumes: %s", err)
84 | }
85 | 
86 | for _, v := range m.Volumes {
87 | if val, ok := orphanAgents[v.ID]; ok {
88 | v.BackingUp = true
89 | go m.attachOrphanAgent(val, v)
90 | delete(orphanAgents, v.ID)
91 | }
92 | 
93 | if !isBackupNeeded(v, backupInt) {
94 | continue
95 | }
96 | 
97 | m.backupSlots <- v
98 | }
99 | 
100 | time.Sleep(refreshInterval)
101 | }
102 | }(m, volumeFilters)
103 | 
104 | // Manage backups
105 | go func(m *Manager, parallelCount int) {
106 | slots := make(map[string](chan bool))
107 | 
108 | log.Infof("Starting backup manager...")
109 | 
110 | for {
111 | v := <-m.backupSlots
112 | if _, ok := slots[v.HostBind]; !ok {
113 | slots[v.HostBind] = make(chan bool, parallelCount)
114 | }
115 | select {
116 | case slots[v.HostBind] <- true:
117 | default:
118 | continue
119 | }
120 | if ok, _ := m.Orchestrator.IsNodeAvailable(v.HostBind); !ok && v.HostBind != "unbound" && m.Orchestrator.GetName() == "cattle" {
121 | log.WithFields(log.Fields{
122 | "node": v.HostBind,
123 | }).Warning("Node unavailable.")
124 | <-slots[v.HostBind]
125 | continue
126 | }
127 | 
128 | go func(v *volume.Volume) {
129 | var timedout bool
130 | tearDown := make(chan bool)
131 | 
132 | log.WithFields(log.Fields{
133 | "volume": v.Name,
134 | "hostname": v.Hostname,
135 | }).Debugf("Backing up volume.")
136 | defer func() {
137 | if !timedout {
138 | tearDown <- true
139 | <-slots[v.HostBind]
140 | }
141 | }()
142 | 
143 | // Workaround to avoid a stuck backup blocking the whole backup process.
144 | // If the backup process takes more than one hour,
145 | // the backup slot is released.
146 | go func() {
147 | timeout := time.After(1 * time.Hour)
148 | select {
149 | case <-tearDown:
150 | return
151 | case <-timeout:
152 | timedout = true
153 | <-slots[v.HostBind]
154 | }
155 | }()
156 | 
157 | err = nil
158 | for i := 0; i <= m.RetryCount; i++ {
159 | err = backupVolume(m, v, false)
160 | if err != nil {
161 | log.WithFields(log.Fields{
162 | "volume": v.Name,
163 | "hostname": v.Hostname,
164 | "try": i + 1,
165 | }).Errorf("failed to backup volume: %s", err)
166 | 
167 | time.Sleep(2 * time.Second)
168 | } else {
169 | break
170 | }
171 | }
172 | }(v)
173 | }
174 | }(m, parallelCount)
175 | 
176 | // Manage API server
177 | m.StartServer()
178 | 
179 | return
180 | }
181 | 
182 | func isBackupNeeded(v *volume.Volume, backupInt time.Duration) bool {
183 | if v.BackingUp {
184 | return false
185 | }
186 | 
187 | if v.LastBackupDate == "" {
188 | return true
189 | }
190 | 
191 | var dateRef string
192 | if v.LastBackupStartDate == "" {
193 | dateRef = v.LastBackupDate
194 | } else {
195 | dateRef = v.LastBackupStartDate
196 | }
197 | 
198 | lbd, err := time.Parse("2006-01-02 15:04:05", dateRef)
199 | if err != nil {
200 | log.WithFields(log.Fields{
201 | "volume": v.Name,
202 | "hostname": v.Hostname,
203 | }).Errorf("failed to parse backup date of volume `%s': %s", v.Name, err)
204 | return false
205 | }
206 | 
207 | if lbd.Add(time.Hour).Before(time.Now().UTC()) && v.LastBackupStatus == "Failed" {
208 | return true
209 | }
210 | 
211 | if lbd.Add(backupInt).Before(time.Now().UTC()) {
212 | return true
213 | }
214 | return false
215 | }
216 | 
217 | // GetOrchestrator returns an orchestrator interface based on the name you specified or on the orchestrator Bivac is running on
218 | func GetOrchestrator(name string, orchs Orchestrators) (o orchestrators.Orchestrator, err error) {
219 | if name != "" {
220 | log.Debugf("Choosing orchestrator based on configuration...")
221 | switch name {
222 | case "docker":
223 | o, err = orchestrators.NewDockerOrchestrator(&orchs.Docker)
224 | case "cattle":
225 | o, err = orchestrators.NewCattleOrchestrator(&orchs.Cattle)
226 | case "kubernetes":
227 | o, err = orchestrators.NewKubernetesOrchestrator(&orchs.Kubernetes)
228 | default:
229 | err = fmt.Errorf("'%s' is not a valid orchestrator", name)
230 | return
231 | }
232 | } else {
233 | log.Debugf("Trying to detect orchestrator based on environment...")
234 | 
235 | if orchestrators.DetectCattle() {
236 | o, err = orchestrators.NewCattleOrchestrator(&orchs.Cattle)
237 | } else if orchestrators.DetectKubernetes() {
238 | o, err = orchestrators.NewKubernetesOrchestrator(&orchs.Kubernetes)
239 | } else if orchestrators.DetectDocker(&orchs.Docker) {
240 | o, err = orchestrators.NewDockerOrchestrator(&orchs.Docker)
241 | } else {
242 | err = fmt.Errorf("no orchestrator detected")
243 | return
244 | }
245 | }
246 | if err == nil {
247 | log.Infof("Using orchestrator: %s", o.GetName())
248 | }
249 | return
250 | }
251 | 
252 | // BackupVolume does a backup of a volume
253 | func (m *Manager) BackupVolume(volumeID string, force bool) (err error) {
254 | for _, v := range m.Volumes {
255 | if v.ID == volumeID {
256 | log.WithFields(log.Fields{
257 | "volume": v.Name,
258 | "hostname": v.Hostname,
259 | }).Debug("Backup manually requested.")
260 | err = backupVolume(m, v, force)
261 | if err != nil {
262 | err = fmt.Errorf("failed to backup
volume: %s", err) 263 | return 264 | } 265 | } 266 | } 267 | return 268 | } 269 | 270 | // RestoreVolume does a restore of a volume 271 | func (m *Manager) RestoreVolume( 272 | volumeID string, 273 | force bool, 274 | snapshotName string, 275 | ) (err error) { 276 | for _, v := range m.Volumes { 277 | if v.ID == volumeID { 278 | log.WithFields(log.Fields{ 279 | "volume": v.Name, 280 | "hostname": v.Hostname, 281 | }).Debug("Restore manually requested.") 282 | err = restoreVolume(m, v, force, snapshotName) 283 | if err != nil { 284 | err = fmt.Errorf( 285 | "failed to restore volume: %s", 286 | err, 287 | ) 288 | return 289 | } 290 | } 291 | } 292 | return 293 | } 294 | 295 | // GetInformations returns informations regarding the Bivac manager 296 | func (m *Manager) GetInformations() (informations map[string]string) { 297 | informations = map[string]string{ 298 | "version": m.BuildInfo.Version, 299 | "build_date": m.BuildInfo.Date, 300 | "build_commit": m.BuildInfo.CommitSha1, 301 | "golang_version": m.BuildInfo.Runtime, 302 | "orchestrator": m.Orchestrator.GetName(), 303 | "address": m.Server.Address, 304 | "volumes_count": fmt.Sprintf("%d", len(m.Volumes)), 305 | } 306 | return 307 | } 308 | -------------------------------------------------------------------------------- /internal/manager/manager_test.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/camptocamp/bivac/pkg/volume" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | // isBackupNeeded 13 | func TestIsBackupNeededBackupIntervalStatusSuccess(t *testing.T) { 14 | givenVolume := &volume.Volume{ 15 | BackingUp: false, 16 | LastBackupDate: time.Now().UTC().Add(time.Hour * -2).Format("2006-01-02 15:04:05"), 17 | LastBackupStatus: "Success", 18 | Name: "foo", 19 | Hostname: "bar", 20 | } 21 | 22 | h, _ := time.ParseDuration("30m") 23 | assert.Equal(t, isBackupNeeded(givenVolume, h), true) 24 | h, _ = time.ParseDuration("12h") 25 | assert.Equal(t, isBackupNeeded(givenVolume, h), false) 26 | } 27 | 28 | func TestIsBackupNeededBackupIntervalStatusFailed(t *testing.T) { 29 | givenVolume := &volume.Volume{ 30 | BackingUp: false, 31 | LastBackupDate: time.Now().UTC().Add(time.Hour * -2).Format("2006-01-02 15:04:05"), 32 | LastBackupStatus: "Failed", 33 | Name: "foo", 34 | Hostname: "bar", 35 | } 36 | 37 | h, _ := time.ParseDuration("30m") 38 | assert.Equal(t, isBackupNeeded(givenVolume, h), true) 39 | h, _ = time.ParseDuration("12h") 40 | assert.Equal(t, isBackupNeeded(givenVolume, h), true) 41 | } 42 | -------------------------------------------------------------------------------- /internal/manager/provider.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/BurntSushi/toml" 8 | "github.com/camptocamp/bivac/pkg/orchestrators" 9 | "github.com/camptocamp/bivac/pkg/volume" 10 | ) 11 | 12 | // Providers stores the list of available providers 13 | type Providers struct { 14 | Providers map[string]Provider 15 | } 16 | 17 | // Provider stores data for one provider 18 | type Provider struct { 19 | Name string `toml:"-"` 20 | PreCmd string `toml:"pre_cmd"` 21 | PostCmd string `toml:"post_cmd"` 22 | DetectionCmd string `toml:"detect_cmd"` 23 | BackupDir string `toml:"backup_dir"` 24 | RestorePostCmd string `toml:"restore_post_cmd"` 25 | RestorePreCmd string `toml:"restore_pre_cmd"` 26 | } 27 | 28 | type 
-------------------------------------------------------------------------------- /internal/manager/provider.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/BurntSushi/toml" 8 | "github.com/camptocamp/bivac/pkg/orchestrators" 9 | "github.com/camptocamp/bivac/pkg/volume" 10 | ) 11 | 12 | // Providers stores the list of available providers 13 | type Providers struct { 14 | Providers map[string]Provider 15 | } 16 | 17 | // Provider stores data for one provider 18 | type Provider struct { 19 | Name string `toml:"-"` 20 | PreCmd string `toml:"pre_cmd"` 21 | PostCmd string `toml:"post_cmd"` 22 | DetectionCmd string `toml:"detect_cmd"` 23 | BackupDir string `toml:"backup_dir"` 24 | RestorePostCmd string `toml:"restore_post_cmd"` 25 | RestorePreCmd string `toml:"restore_pre_cmd"` 26 | } 27 | 28 | type configToml struct { 29 | Providers map[string]Provider `toml:"providers"` 30 | } 31 | 32 | // LoadProviders returns the list of providers from the provider config file 33 | func LoadProviders(path string) (providers Providers, err error) { 34 | c := &configToml{} 35 | providers.Providers = make(map[string]Provider) 36 | _, err = toml.DecodeFile(path, &c) 37 | if err != nil { 38 | err = fmt.Errorf("failed to load providers from config file: %s", err) 39 | return 40 | } 41 | 42 | for key, value := range c.Providers { 43 | provider := Provider{ 44 | Name: key, 45 | PreCmd: value.PreCmd, 46 | PostCmd: value.PostCmd, 47 | DetectionCmd: value.DetectionCmd, 48 | BackupDir: value.BackupDir, 49 | RestorePostCmd: value.RestorePostCmd, 50 | RestorePreCmd: value.RestorePreCmd, 51 | } 52 | providers.Providers[key] = provider 53 | } 54 | return 55 | } 56 | 57 | // GetProvider returns a provider based on detection commands 58 | func (providers *Providers) GetProvider(o orchestrators.Orchestrator, v *volume.Volume) (prov Provider, err error) { 59 | detectionCmds := []string{} 60 | for _, p := range providers.Providers { 61 | detectionCmds = append(detectionCmds, fmt.Sprintf("(%s && echo '%s')", p.DetectionCmd, p.Name)) 62 | } 63 | detectionCmds = append(detectionCmds, "true") 64 | fullDetectionCmd := strings.Join(detectionCmds, " || ") 65 | 66 | containers, err := o.GetContainersMountingVolume(v) 67 | if err != nil { 68 | return 69 | } 70 | if len(containers) < 1 { 71 | return 72 | } 73 | 74 | var stdout string 75 | for _, container := range containers { 76 | detectionCmd := strings.Replace(fullDetectionCmd, "$volume", container.Path, -1) // substitute per container so "$volume" is still present for the next iteration 77 | stdout, err = o.ContainerExec(container, []string{"bash", "-c", detectionCmd}) 78 | if err != nil { 79 | err = nil 80 | continue 81 | } 82 | stdout = strings.TrimSpace(stdout) 83 | 84 | for _, p := range providers.Providers { 85 | if p.Name == stdout { 86 | prov = p 87 | v.BackupDir = p.BackupDir 88 | return 89 | } 90 | } 91 | } 92 | return 93 | } 94 | 95 | // RunCmd runs a command in a container 96 | func RunCmd(p Provider, o orchestrators.Orchestrator, v *volume.Volume, cmd, cmdKey string) (err error) { 97 | containers, err := o.GetContainersMountingVolume(v) 98 | if err != nil { 99 | return err 100 | } 101 | 102 | cmdSuccess := false 103 | var stdout, containerCmd string 104 | for _, container := range containers { 105 | containerCmd = strings.Replace(cmd, "$volume", container.Path, -1) // keep cmd untouched so each container gets its own path 106 | 107 | stdout, err = o.ContainerExec(container, []string{"bash", "-c", containerCmd}) 108 | if err == nil { 109 | cmdSuccess = true 110 | break 111 | } 112 | } 113 | v.Logs[cmdKey] = fmt.Sprintf("$ %s\n%s", containerCmd, stdout) 114 | 115 | if !cmdSuccess { 116 | return fmt.Errorf("failed to run command \"%s\" in containers mounting volume %s", cmd, v.Name) 117 | } 118 | return 119 | } 120 |
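To make the string assembly in GetProvider concrete: with the mysql and postgresql entries from providers-config.default.toml, the composed detection command looks like the following (illustrative; provider order varies because Go map iteration is randomized):

    ([[ -d $volume/mysql ]] && echo 'mysql') || ([[ -f $volume/PG_VERSION ]] && echo 'postgresql') || true

$volume is substituted with each container's mount path before execution, and the provider name printed on stdout selects the matching Provider entry; the trailing `true` keeps the command from failing when nothing matches.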
-------------------------------------------------------------------------------- /internal/manager/restore.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "encoding/base64" 5 | "encoding/json" 6 | "fmt" 7 | log "github.com/Sirupsen/logrus" 8 | "github.com/camptocamp/bivac/internal/utils" 9 | "github.com/camptocamp/bivac/pkg/volume" 10 | "os" 11 | "strings" 12 | "time" 13 | ) 14 | 15 | func restoreVolume( 16 | m *Manager, 17 | v *volume.Volume, 18 | force bool, 19 | snapshotName string, 20 | ) (err error) { 21 | v.Mux.Lock() 22 | defer v.Mux.Unlock() 23 | useLogReceiver := false 24 | if m.LogServer != "" { 25 | useLogReceiver = true 26 | } 27 | p, err := m.Providers.GetProvider(m.Orchestrator, v) 28 | if err != nil { 29 | err = fmt.Errorf("failed to get provider: %s", err) 30 | return 31 | } 32 | if p.RestorePreCmd != "" { 33 | err = RunCmd(p, m.Orchestrator, v, p.RestorePreCmd, "precmd") 34 | if err != nil { 35 | log.WithFields(log.Fields{ 36 | "volume": v.Name, 37 | "hostname": v.Hostname, 38 | }).Warningf("failed to run pre-command: %s", err) 39 | } 40 | } 41 | cmd := []string{ 42 | "agent", 43 | "restore", 44 | "-p", 45 | v.Mountpoint + "/" + v.BackupDir, 46 | "-r", 47 | m.TargetURL + "/" + m.Orchestrator.GetPath(v) + "/" + v.Name, 48 | "-s", 49 | snapshotName, 50 | "--host", 51 | m.Orchestrator.GetPath(v), 52 | } 53 | if force { 54 | cmd = append(cmd, "--force") 55 | } 56 | if useLogReceiver { 57 | cmd = append(cmd, []string{"--log.receiver", m.LogServer + "/restore/" + v.ID + "/logs"}...) 58 | } 59 | _, output, err := m.Orchestrator.DeployAgent( 60 | m.AgentImage, 61 | cmd, 62 | os.Environ(), 63 | v, 64 | ) 65 | if err != nil { 66 | err = fmt.Errorf("failed to deploy agent: %s", err) 67 | return 68 | } 69 | if !useLogReceiver { 70 | decodedOutput, err := base64.StdEncoding.DecodeString(strings.Replace(output, " ", "", -1)) 71 | if err != nil { 72 | log.Errorf("failed to decode agent output of `%s` : %s -> `%s`", v.Name, err, strings.Replace(output, " ", "", -1)) 73 | } else { 74 | var agentOutput utils.MsgFormat 75 | err = json.Unmarshal(decodedOutput, &agentOutput) 76 | if err != nil { 77 | log.WithFields(log.Fields{ 78 | "volume": v.Name, 79 | "hostname": v.Hostname, 80 | }).Warningf("failed to unmarshal agent output: %s -> `%s`", err, strings.TrimSpace(output)) 81 | } 82 | 83 | m.updateRestoreLogs(v, agentOutput) 84 | } 85 | } else { 86 | if output != "" { 87 | log.WithFields(log.Fields{ 88 | "volume": v.Name, 89 | "hostname": v.Hostname, 90 | }).Errorf("failed to send output: %s", output) 91 | } 92 | } 93 | if p.RestorePostCmd != "" { 94 | err = RunCmd(p, m.Orchestrator, v, p.RestorePostCmd, "postcmd") 95 | if err != nil { 96 | log.WithFields(log.Fields{ 97 | "volume": v.Name, 98 | "hostname": v.Hostname, 99 | }).Warningf("failed to run post-command: %s", err) 100 | } 101 | } 102 | return 103 | } 104 | 105 | func (m *Manager) updateRestoreLogs(v *volume.Volume, agentOutput utils.MsgFormat) { 106 | if agentOutput.Type != "success" { 107 | v.LastBackupStatus = "Failed" 108 | v.Metrics.LastBackupStatus.Set(1.0) 109 | } else { 110 | success := true 111 | v.Logs = make(map[string]string) 112 | for stepKey, stepValue := range agentOutput.Content.(map[string]interface{}) { 113 | if stepKey != "testInit" && stepValue.(map[string]interface{})["rc"].(float64) > 0.0 { 114 | success = false 115 | } 116 | v.Logs[stepKey] = fmt.Sprintf("[%d] %s", int(stepValue.(map[string]interface{})["rc"].(float64)), stepValue.(map[string]interface{})["stdout"].(string)) 117 | } 118 | if success { 119 | v.LastBackupStatus = "Success" 120 | v.Metrics.LastBackupStatus.Set(0.0) 121 | } else { 122 | v.LastBackupStatus = "Failed" 123 | v.Metrics.LastBackupStatus.Set(1.0) 124 | } 125 | } 126 | v.LastBackupDate = time.Now().Format("2006-01-02 15:04:05") 127 | v.Metrics.LastBackupDate.SetToCurrentTime() 128 | return 129 | } 130 | -------------------------------------------------------------------------------- /internal/manager/server.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "net/http" 7 | "strconv" 8 | 9 | log "github.com/Sirupsen/logrus" 10 | "github.com/gorilla/mux" 11 | "github.com/prometheus/client_golang/prometheus" 12 | "github.com/prometheus/client_golang/prometheus/promhttp" 13 | 14 | "github.com/camptocamp/bivac/internal/utils" 15 | ) 16 | 17 | // Server contains information used by the server part 18 | type Server struct { 19 | Address string 20 | PSK string 21 | } 22 | 23 | // StartServer starts the API server 24 | func (m *Manager) StartServer() (err error) { 25 | router := mux.NewRouter().StrictSlash(true) 26 | 27 | setupMetrics(m.BuildInfo) 28 | 29 | router.Handle("/volumes", m.handleAPIRequest(http.HandlerFunc(m.getVolumes))) 30 | router.Handle("/ping", m.handleAPIRequest(http.HandlerFunc(m.ping))) 31 | router.Handle("/metrics", promhttp.Handler()).Methods("GET") 32 | router.Handle("/backup/{volumeName}", m.handleAPIRequest(http.HandlerFunc(m.backupVolume))).Queries("force", "{force}") 33 | router.Handle("/backup/{volumeID}/logs", m.handleAPIRequest(http.HandlerFunc(m.getBackupLogs))) 34 | router.Handle("/restore/{volumeName}", m.handleAPIRequest(http.HandlerFunc(m.restoreVolume))).Queries("force", "{force}") 35 | router.Handle("/restore/{volumeName}/{snapshotName}", m.handleAPIRequest(http.HandlerFunc(m.restoreVolume))).Queries("force", "{force}") 36 | router.Handle("/restic/{volumeID}", m.handleAPIRequest(http.HandlerFunc(m.runRawCommand))) 37 | router.Handle("/info", m.handleAPIRequest(http.HandlerFunc(m.info))) 38 | 39 | log.Infof("Listening on %s", m.Server.Address) 40 | log.Fatal(http.ListenAndServe(m.Server.Address, router)) 41 | return 42 | } 43 | 44 | func (m *Manager) handleAPIRequest(next http.Handler) http.Handler { 45 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 46 | if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", m.Server.PSK) { 47 | w.WriteHeader(http.StatusUnauthorized) 48 | w.Write([]byte("Unauthorized")) 49 | return 50 | } 51 | next.ServeHTTP(w, r) 52 | }) 53 | } 54 | 55 | func (m *Manager) getVolumes(w http.ResponseWriter, r *http.Request) { 56 | b, err := json.Marshal(m.Volumes) 57 | if err != nil { 58 | log.Errorf("failed to marshal volumes: %s", err) 59 | w.WriteHeader(http.StatusInternalServerError) 60 | w.Write([]byte("500 - Internal server error")) 61 | return 62 | } 63 | w.WriteHeader(http.StatusOK) 64 | w.Write(b) 65 | return 66 | } 67 | 68 | func (m *Manager) backupVolume(w http.ResponseWriter, r *http.Request) { 69 | params := mux.Vars(r) 70 | force, err := strconv.ParseBool(params["force"]) 71 | if err != nil { 72 | force = false 73 | err = nil 74 | } 75 | 76 | err = m.BackupVolume(params["volumeName"], force) 77 | if err != nil { 78 | w.WriteHeader(http.StatusInternalServerError) 79 | w.Write([]byte("500 - Internal server error")) 80 | return 81 | } 82 | w.WriteHeader(http.StatusOK) 83 | w.Write([]byte(`{"type": "success"}`)) 84 | return 85 | } 86 | 87 | func (m *Manager) restoreVolume(w http.ResponseWriter, r *http.Request) { 88 | params := mux.Vars(r) 89 | force, err := strconv.ParseBool(params["force"]) 90 | if err != nil { 91 | force = false 92 | err = nil 93 | } 94 | snapshotName := "latest" 95 | if _, ok := params["snapshotName"]; ok { 96 | snapshotName = params["snapshotName"] 97 | } 98 | err = m.RestoreVolume(params["volumeName"], force, snapshotName) 99 | if err != nil { 100 | w.WriteHeader(http.StatusInternalServerError) 101 | w.Write([]byte("500 - Internal server error")) 102 | return 103 | } 104 | w.WriteHeader(http.StatusOK) 105 | w.Write([]byte(`{"type": "success"}`)) 106 | return 107 | } 108 | 109 | func (m *Manager) info(w 
http.ResponseWriter, r *http.Request) { 110 | informations := m.GetInformations() 111 | 112 | data := map[string]interface{}{ 113 | "type": "success", 114 | "data": informations, 115 | } 116 | 117 | encodedData, _ := json.Marshal(data) 118 | 119 | w.WriteHeader(http.StatusOK) 120 | w.Write([]byte(encodedData)) 121 | return 122 | } 123 | 124 | func (m *Manager) getBackupLogs(w http.ResponseWriter, r *http.Request) { 125 | var data struct { 126 | Data utils.MsgFormat 127 | } 128 | 129 | params := mux.Vars(r) 130 | decoder := json.NewDecoder(r.Body) 131 | err := decoder.Decode(&data) 132 | if err != nil { 133 | w.WriteHeader(http.StatusInternalServerError) 134 | w.Write([]byte("500 - Internal server error: " + err.Error())) 135 | return 136 | } 137 | 138 | for _, v := range m.Volumes { 139 | if v.ID == params["volumeID"] { 140 | m.updateBackupLogs(v, data.Data) 141 | w.WriteHeader(http.StatusOK) 142 | w.Write([]byte(`{"type": "success"}`)) 143 | return 144 | } 145 | } 146 | w.WriteHeader(http.StatusNotFound) 147 | w.Write([]byte("404 - Volume not found")) 148 | return 149 | } 150 | 151 | func (m *Manager) runRawCommand(w http.ResponseWriter, r *http.Request) { 152 | params := mux.Vars(r) 153 | var err error 154 | var output string 155 | 156 | var postData map[string][]string 157 | 158 | decoder := json.NewDecoder(r.Body) 159 | err = decoder.Decode(&postData) 160 | if err != nil { 161 | w.WriteHeader(http.StatusInternalServerError) 162 | w.Write([]byte("500 - Internal server error: " + err.Error())) 163 | return 164 | } 165 | 166 | for _, v := range m.Volumes { 167 | if v.ID == params["volumeID"] { 168 | output, err = m.RunResticCommand(v, postData["cmd"]) 169 | } 170 | } 171 | 172 | data := map[string]string{ 173 | "type": "success", 174 | "data": output, 175 | } 176 | encodedData, _ := json.Marshal(data) 177 | 178 | w.WriteHeader(http.StatusOK) 179 | w.Write([]byte(encodedData)) 180 | return 181 | } 182 | func (m *Manager) ping(w http.ResponseWriter, r *http.Request) { 183 | w.WriteHeader(http.StatusOK) 184 | w.Write([]byte(`{"type":"pong"}`)) 185 | return 186 | } 187 | 188 | func setupMetrics(buildInfo utils.BuildInfo) { 189 | buildInfoMetric := prometheus.NewGaugeVec(prometheus.GaugeOpts{ 190 | Namespace: "bivac", 191 | Name: "build_info", 192 | Help: "Bivac build informations", 193 | }, []string{"version", "commit_sha", "build_date", "golang_version"}) 194 | buildInfoMetric.WithLabelValues(buildInfo.Version, buildInfo.CommitSha1, buildInfo.Date, buildInfo.Runtime).Set(1) 195 | prometheus.MustRegister(buildInfoMetric) 196 | } 197 | -------------------------------------------------------------------------------- /internal/manager/server_test.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | //"fmt" 5 | "testing" 6 | //"github.com/stretchr/testify/assert" 7 | ) 8 | 9 | // getVolumes 10 | func TestGetVolumes(t *testing.T) { 11 | } 12 | 13 | // handleAPIRequest 14 | func TestHandleAPIRequest(t *testing.T) { 15 | } 16 | -------------------------------------------------------------------------------- /internal/manager/volumes.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "sort" 5 | "unicode/utf8" 6 | 7 | "github.com/camptocamp/bivac/internal/engine" 8 | "github.com/camptocamp/bivac/pkg/volume" 9 | ) 10 | 11 | func retrieveVolumes(m *Manager, volumeFilters volume.Filters) (err error) { 12 | volumes, err := 
m.Orchestrator.GetVolumes(volume.Filters{}) 13 | if err != nil { 14 | return 15 | } 16 | 17 | var newVolumes []*volume.Volume 18 | for _, v := range volumes { 19 | b, _, _ := blacklistedVolume(v, volumeFilters) 20 | if !b { 21 | newVolumes = append(newVolumes, v) 22 | } 23 | } 24 | 25 | // Append new volumes 26 | var volumeManaged bool 27 | for _, nv := range newVolumes { 28 | volumeManaged = false 29 | for _, mv := range m.Volumes { 30 | if mv.ID == nv.ID { 31 | volumeManaged = true 32 | break 33 | } 34 | } 35 | if !volumeManaged { 36 | nv.SetupMetrics() 37 | getLastBackupDate(m, nv) 38 | m.Volumes = append(m.Volumes, nv) 39 | } 40 | } 41 | 42 | // Remove deleted volumes 43 | var vols []*volume.Volume 44 | for _, mv := range m.Volumes { 45 | volumeExists := false 46 | for _, nv := range newVolumes { 47 | if mv.ID == nv.ID { 48 | volumeExists = true 49 | break 50 | } 51 | } 52 | if volumeExists { 53 | vols = append(vols, mv) 54 | } else { 55 | mv.CleanupMetrics() 56 | mv = nil 57 | } 58 | } 59 | 60 | m.Volumes = vols 61 | return 62 | } 63 | 64 | func blacklistedVolume(vol *volume.Volume, volumeFilters volume.Filters) (bool, string, string) { 65 | if utf8.RuneCountInString(vol.Name) == 64 || vol.Name == "lost+found" { 66 | return true, "unnamed", "" 67 | } 68 | 69 | // Use whitelist if defined 70 | if l := volumeFilters.Whitelist; len(l) > 0 && l[0] != "" { 71 | sort.Strings(l) 72 | i := sort.SearchStrings(l, vol.Name) 73 | if i < len(l) && l[i] == vol.Name { 74 | return false, "", "" 75 | } 76 | return true, "blacklisted", "whitelist config" 77 | } 78 | 79 | if l := volumeFilters.Blacklist; len(l) > 0 && l[0] != "" { 80 | sort.Strings(l) 81 | i := sort.SearchStrings(l, vol.Name) 82 | if i < len(l) && l[i] == vol.Name { 83 | return true, "blacklisted", "blacklist config" 84 | } 85 | } 86 | return false, "", "" 87 | } 88 | 89 | func getLastBackupDate(m *Manager, v *volume.Volume) (err error) { 90 | e := &engine.Engine{ 91 | DefaultArgs: []string{ 92 | "--no-cache", 93 | "--json", 94 | "-r", 95 | m.TargetURL + "/" + m.Orchestrator.GetPath(v) + "/" + v.RepoName, 96 | }, 97 | } 98 | 99 | latestBackup, oldestBackup, err := e.GetBackupDates() 100 | if err != nil { 101 | return 102 | } 103 | 104 | v.LastBackupDate = latestBackup.Format("2006-01-02 15:04:05") 105 | v.LastBackupStatus = "Unknown" 106 | 107 | // Leads to several flaws, should be improved 108 | v.Metrics.LastBackupDate.Set(float64(latestBackup.Unix())) 109 | 110 | v.Metrics.OldestBackupDate.Set(float64(oldestBackup.Unix())) 111 | 112 | // Unknown status 113 | v.Metrics.LastBackupStatus.Set(-1) 114 | return 115 | } 116 | -------------------------------------------------------------------------------- /internal/manager/volumes_test.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | gomock "github.com/golang/mock/gomock" 8 | "github.com/stretchr/testify/assert" 9 | 10 | "github.com/camptocamp/bivac/mocks" 11 | "github.com/camptocamp/bivac/pkg/volume" 12 | ) 13 | 14 | // retrieveVolumes 15 | func TestRetrieveVolumesBasic(t *testing.T) { 16 | // Prepare test 17 | mockCtrl := gomock.NewController(t) 18 | defer mockCtrl.Finish() 19 | mockOrchestrator := mocks.NewMockOrchestrator(mockCtrl) 20 | 21 | givenVolumes := []*volume.Volume{ 22 | &volume.Volume{ 23 | ID: "foo", 24 | Name: "foo", 25 | HostBind: "localhost", 26 | }, 27 | &volume.Volume{ 28 | ID: "bar", 29 | Name: "bar", 30 | HostBind: "localhost", 31 | }, 32 | } 33 | givenFilters := 
volume.Filters{} 34 | expectedVolumes := []*volume.Volume{ 35 | &volume.Volume{ 36 | ID: "foo", 37 | Name: "foo", 38 | HostBind: "localhost", 39 | }, 40 | &volume.Volume{ 41 | ID: "bar", 42 | Name: "bar", 43 | HostBind: "localhost", 44 | }, 45 | } 46 | 47 | m := &Manager{ 48 | Orchestrator: mockOrchestrator, 49 | } 50 | 51 | // Run test 52 | mockOrchestrator.EXPECT().GetPath(gomock.Any()).Return("localhost").Times(2) 53 | mockOrchestrator.EXPECT().GetVolumes(volume.Filters{}).Return(givenVolumes, nil).Times(1) 54 | 55 | m.Volumes = []*volume.Volume{} 56 | err := retrieveVolumes(m, givenFilters) 57 | 58 | // Do not manage Metrics field 59 | // Should be properly fixed 60 | for k := range m.Volumes { 61 | m.Volumes[k].Metrics = nil 62 | } 63 | 64 | assert.Nil(t, err) 65 | assert.Equal(t, m.Volumes, expectedVolumes) 66 | } 67 | 68 | func TestRetrieveVolumesBlacklist(t *testing.T) { 69 | // Prepare test 70 | mockCtrl := gomock.NewController(t) 71 | defer mockCtrl.Finish() 72 | mockOrchestrator := mocks.NewMockOrchestrator(mockCtrl) 73 | 74 | givenVolumes := []*volume.Volume{ 75 | &volume.Volume{ 76 | ID: "foo", 77 | Name: "foo", 78 | }, 79 | &volume.Volume{ 80 | ID: "bar", 81 | Name: "bar", 82 | }, 83 | } 84 | givenFilters := volume.Filters{ 85 | Blacklist: []string{"foo"}, 86 | } 87 | expectedVolumes := []*volume.Volume{ 88 | &volume.Volume{ 89 | ID: "bar", 90 | Name: "bar", 91 | }, 92 | } 93 | 94 | m := &Manager{ 95 | Orchestrator: mockOrchestrator, 96 | } 97 | 98 | // Run test 99 | mockOrchestrator.EXPECT().GetPath(gomock.Any()).Return("localhost").Times(1) 100 | mockOrchestrator.EXPECT().GetVolumes(volume.Filters{}).Return(givenVolumes, nil).Times(1) 101 | 102 | m.Volumes = []*volume.Volume{} 103 | err := retrieveVolumes(m, givenFilters) 104 | 105 | // Do not manage Metrics field 106 | // Should be properly fixed 107 | for k := range m.Volumes { 108 | m.Volumes[k].Metrics = nil 109 | } 110 | 111 | assert.Nil(t, err) 112 | assert.Equal(t, m.Volumes, expectedVolumes) 113 | } 114 | 115 | /* 116 | func TestRetrieveVolumesWhitelist(t *testing.T) { 117 | // Prepare test 118 | mockCtrl := gomock.NewController(t) 119 | defer mockCtrl.Finish() 120 | mockOrchestrator := mocks.NewMockOrchestrator(mockCtrl) 121 | 122 | givenVolumes := []*volume.Volume{ 123 | &volume.Volume{ 124 | ID: "foo", 125 | Name: "foo", 126 | }, 127 | &volume.Volume{ 128 | ID: "bar", 129 | Name: "bar", 130 | }, 131 | } 132 | givenFilters := volume.Filters{ 133 | Whitelist: []string{"foo"}, 134 | } 135 | expectedVolumes := []*volume.Volume{ 136 | &volume.Volume{ 137 | ID: "foo", 138 | Name: "foo", 139 | }, 140 | } 141 | 142 | m := &Manager{ 143 | Orchestrator: mockOrchestrator, 144 | } 145 | 146 | // Run test 147 | mockOrchestrator.EXPECT().GetVolumes(volume.Filters{}).Return(givenVolumes, nil).Times(1) 148 | 149 | m.Volumes = []*volume.Volume{} 150 | err := retrieveVolumes(m, givenFilters) 151 | 152 | assert.Nil(t, err) 153 | assert.Equal(t, m.Volumes, expectedVolumes) 154 | } 155 | */ 156 | func TestRetrieveVolumesOrchestratorError(t *testing.T) { 157 | // Prepare test 158 | mockCtrl := gomock.NewController(t) 159 | defer mockCtrl.Finish() 160 | mockOrchestrator := mocks.NewMockOrchestrator(mockCtrl) 161 | 162 | givenVolumes := []*volume.Volume{ 163 | &volume.Volume{ 164 | ID: "foo", 165 | Name: "foo", 166 | }, 167 | } 168 | 169 | m := &Manager{ 170 | Orchestrator: mockOrchestrator, 171 | } 172 | 173 | // Run test 174 | mockOrchestrator.EXPECT().GetVolumes(volume.Filters{}).Return(givenVolumes, fmt.Errorf("error")).Times(1) 175 
| 176 | m.Volumes = []*volume.Volume{} 177 | err := retrieveVolumes(m, volume.Filters{}) 178 | 179 | assert.Equal(t, err.Error(), "error") 180 | assert.Equal(t, m.Volumes, []*volume.Volume{}) 181 | } 182 | 183 | func TestRetrieveVolumesAppend(t *testing.T) { 184 | // Prepare test 185 | mockCtrl := gomock.NewController(t) 186 | defer mockCtrl.Finish() 187 | mockOrchestrator := mocks.NewMockOrchestrator(mockCtrl) 188 | 189 | givenVolumes := []*volume.Volume{ 190 | &volume.Volume{ 191 | ID: "foo", 192 | Name: "foo", 193 | HostBind: "localhost", 194 | }, 195 | //&volume.Volume{ 196 | // ID: "bar", 197 | // Name: "bar", 198 | //}, 199 | } 200 | givenFilters := volume.Filters{} 201 | expectedVolumes := []*volume.Volume{ 202 | &volume.Volume{ 203 | ID: "foo", 204 | Name: "foo", 205 | HostBind: "localhost", 206 | }, 207 | //&volume.Volume{ 208 | // ID: "bar", 209 | // Name: "bar", 210 | //}, 211 | } 212 | 213 | m := &Manager{ 214 | Orchestrator: mockOrchestrator, 215 | } 216 | 217 | // Run test 218 | mockOrchestrator.EXPECT().GetVolumes(volume.Filters{}).Return(givenVolumes, nil).Times(1) 219 | 220 | m.Volumes = []*volume.Volume{ 221 | &volume.Volume{ 222 | ID: "foo", 223 | Name: "foo", 224 | HostBind: "localhost", 225 | }, 226 | } 227 | err := retrieveVolumes(m, givenFilters) 228 | 229 | assert.Nil(t, err) 230 | assert.Equal(t, m.Volumes, expectedVolumes) 231 | } 232 | 233 | func TestRetrieveVolumesRemove(t *testing.T) { 234 | // Prepare test 235 | mockCtrl := gomock.NewController(t) 236 | defer mockCtrl.Finish() 237 | mockOrchestrator := mocks.NewMockOrchestrator(mockCtrl) 238 | mockRegisterer := mocks.NewMockRegisterer(mockCtrl) 239 | 240 | givenVolumes := []*volume.Volume{ 241 | &volume.Volume{ 242 | ID: "bar", 243 | Name: "bar", 244 | HostBind: "bar", 245 | Hostname: "bar", 246 | }, 247 | } 248 | givenFilters := volume.Filters{} 249 | expectedVolumes := []*volume.Volume{ 250 | &volume.Volume{ 251 | ID: "bar", 252 | Name: "bar", 253 | HostBind: "bar", 254 | Hostname: "bar", 255 | }, 256 | } 257 | 258 | m := &Manager{ 259 | Orchestrator: mockOrchestrator, 260 | } 261 | 262 | // Run test 263 | mockOrchestrator.EXPECT().GetVolumes(volume.Filters{}).Return(givenVolumes, nil).Times(1) 264 | mockRegisterer.EXPECT().Unregister(gomock.Any()).Return(true).AnyTimes() 265 | 266 | m.Volumes = []*volume.Volume{ 267 | &volume.Volume{ 268 | ID: "foo", 269 | Name: "foo", 270 | HostBind: "foo", 271 | Hostname: "foo", 272 | }, 273 | &volume.Volume{ 274 | ID: "bar", 275 | Name: "bar", 276 | HostBind: "bar", 277 | Hostname: "bar", 278 | }, 279 | &volume.Volume{ 280 | ID: "fake", 281 | Name: "fake", 282 | HostBind: "fake", 283 | Hostname: "fake", 284 | }, 285 | } 286 | 287 | for _, v := range m.Volumes { 288 | v.SetupMetrics() 289 | } 290 | 291 | err := retrieveVolumes(m, givenFilters) 292 | 293 | // Do not manage Metrics field 294 | // Should be properly fixed 295 | for k := range m.Volumes { 296 | m.Volumes[k].Metrics = nil 297 | } 298 | 299 | assert.Nil(t, err) 300 | assert.Equal(t, m.Volumes, expectedVolumes) 301 | } 302 | 303 | // blacklistedVolume 304 | func TestBlacklistedVolumeValid(t *testing.T) { 305 | givenVolume := &volume.Volume{ 306 | ID: "foo", 307 | Name: "foo", 308 | } 309 | givenFilters := volume.Filters{} 310 | 311 | // Run test 312 | result0, result1, result2 := blacklistedVolume(givenVolume, givenFilters) 313 | 314 | assert.Equal(t, result0, false) 315 | assert.Equal(t, result1, "") 316 | assert.Equal(t, result2, "") 317 | } 318 | 319 | func TestBlacklistedVolumeUnnamedVolume(t *testing.T) { 
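// A 64-character hex string is how Docker names anonymous volumes;
// blacklistedVolume reports such names as "unnamed" (see internal/manager/volumes.go).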
320 | givenVolume := &volume.Volume{ 321 | ID: "acf1e8ec1e87191518f29ff5ef4d983384fd3dc2228265c09bb64b9747e5af67", 322 | Name: "acf1e8ec1e87191518f29ff5ef4d983384fd3dc2228265c09bb64b9747e5af67", 323 | } 324 | givenFilters := volume.Filters{} 325 | 326 | // Run test 327 | result0, result1, result2 := blacklistedVolume(givenVolume, givenFilters) 328 | 329 | assert.Equal(t, result0, true) 330 | assert.Equal(t, result1, "unnamed") 331 | assert.Equal(t, result2, "") 332 | } 333 | 334 | func TestBlacklistedVolumeBlacklisted(t *testing.T) { 335 | givenVolume := &volume.Volume{ 336 | ID: "foo", 337 | Name: "foo", 338 | } 339 | givenFilters := volume.Filters{ 340 | Blacklist: []string{"foo"}, 341 | } 342 | 343 | // Run test 344 | result0, result1, result2 := blacklistedVolume(givenVolume, givenFilters) 345 | 346 | assert.Equal(t, result0, true) 347 | assert.Equal(t, result1, "blacklisted") 348 | assert.Equal(t, result2, "blacklist config") 349 | } 350 | 351 | func TestBlacklistedVolumeWhitelisted(t *testing.T) { 352 | givenVolume := &volume.Volume{ 353 | ID: "foo", 354 | Name: "foo", 355 | } 356 | givenFilters := volume.Filters{ 357 | Whitelist: []string{"foo"}, 358 | } 359 | 360 | // Run test 361 | result0, result1, result2 := blacklistedVolume(givenVolume, givenFilters) 362 | 363 | assert.Equal(t, result0, false) 364 | assert.Equal(t, result1, "") 365 | assert.Equal(t, result2, "") 366 | } 367 | 368 | func TestBlacklistedVolumeBlacklistedBecauseWhitelist(t *testing.T) { 369 | givenVolume := &volume.Volume{ 370 | ID: "foo", 371 | Name: "foo", 372 | } 373 | givenFilters := volume.Filters{ 374 | Whitelist: []string{"bar"}, 375 | } 376 | 377 | // Run test 378 | result0, result1, result2 := blacklistedVolume(givenVolume, givenFilters) 379 | 380 | assert.Equal(t, result0, true) 381 | assert.Equal(t, result1, "blacklisted") 382 | assert.Equal(t, result2, "whitelist config") 383 | } 384 |
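The test suites above rely only on the standard toolchain plus testify and gomock; assuming a regular checkout, they run with:

    go test ./internal/... ./pkg/...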
-------------------------------------------------------------------------------- /internal/utils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "encoding/json" 5 | "io/ioutil" 6 | "math/rand" 7 | "os" 8 | "os/exec" 9 | "path/filepath" 10 | "strings" 11 | "syscall" 12 | "time" 13 | ) 14 | 15 | var seededRand = rand.New(rand.NewSource(time.Now().UnixNano())) 16 | 17 | // BuildInfo contains Bivac build information 18 | type BuildInfo struct { 19 | Version string 20 | Date string 21 | CommitSha1 string 22 | Runtime string 23 | } 24 | 25 | // OutputFormat stores output of Restic commands 26 | type OutputFormat struct { 27 | Stdout string `json:"stdout"` 28 | ExitCode int `json:"rc"` 29 | } 30 | 31 | // MsgFormat is a format used to communicate with the Bivac API 32 | type MsgFormat struct { 33 | Type string `json:"type"` 34 | Content interface{} `json:"content"` 35 | } 36 | 37 | // ReturnFormattedOutput returns a formatted message 38 | func ReturnFormattedOutput(output interface{}) string { 39 | m := MsgFormat{ 40 | Type: "success", 41 | Content: output, 42 | } 43 | b, err := json.Marshal(m) 44 | if err != nil { 45 | return ReturnError(err) 46 | } 47 | return string(b) 48 | } 49 | 50 | // ReturnError returns a formatted error 51 | func ReturnError(e error) string { 52 | msg := MsgFormat{ 53 | Type: "error", 54 | Content: e.Error(), 55 | } 56 | data, _ := json.Marshal(msg) 57 | return string(data) 58 | } 59 | 60 | // HandleExitCode retrieves a command's exit code from an error 61 | func HandleExitCode(err error) int { 62 | if exiterr, ok := err.(*exec.ExitError); ok { 63 | if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { 64 | return status.ExitStatus() 65 | } 66 | } 67 | return 0 68 | } 69 | 70 | // GenerateRandomString generates a random string 71 | func GenerateRandomString(length int) string { 72 | const charset = "abcdefghijklmnopqrstuvwxyz" + 73 | "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" 74 | stringByte := make([]byte, length) 75 | for i := range stringByte { 76 | stringByte[i] = charset[seededRand.Intn(len(charset))] 77 | } 78 | return string(stringByte) 79 | } 80 | 81 | // GetRandomFileName returns a random file name that does not collide with the files found in the parentPath 82 | func GetRandomFileName(parentPath string) (string, error) { 83 | randomFileName := GenerateRandomString(16) 84 | randomFilePath := strings.ReplaceAll(parentPath+"/"+randomFileName, "//", "/") 85 | _, err := os.Stat(randomFilePath) 86 | if err != nil { 87 | if os.IsNotExist(err) { 88 | return randomFileName, nil 89 | } 90 | return "", err 91 | } 92 | return GetRandomFileName(parentPath) 93 | } 94 | 95 | // GetRandomFilePath returns a random file path that does not collide with the file paths found in the parentPath 96 | func GetRandomFilePath(parentPath string) (string, error) { 97 | randomFileName, err := GetRandomFileName(parentPath) 98 | if err != nil { 99 | return "", err 100 | } 101 | randomFilePath := strings.ReplaceAll(parentPath+"/"+randomFileName, "//", "/") 102 | return randomFilePath, nil 103 | } 104 | 105 | // MergePaths merges a source path into a target path 106 | func MergePaths(rootSourcePath string, rootTargetDir string) error { 107 | rootSourceFInfo, err := os.Stat(rootSourcePath) 108 | if err != nil { 109 | return err 110 | } 111 | if !rootSourceFInfo.IsDir() { 112 | err = CopyFile(rootSourcePath, rootTargetDir) 113 | if err != nil { 114 | return err 115 | } 116 | return nil 117 | } 118 | rootTargetFInfo, err := os.Stat(rootTargetDir) 119 | if err != nil { 120 | if !os.IsNotExist(err) { 121 | return err 122 | } 123 | } else { 124 | if !rootTargetFInfo.IsDir() { 125 | err = os.Remove(rootTargetDir) 126 | if err != nil { 127 | return err 128 | } 129 | } 130 | } 131 | err = filepath.Walk( 132 | rootSourcePath, 133 | func( 134 | sourcePath string, 135 | sourceFInfo os.FileInfo, 136 | err error, 137 | ) error { 138 | sharedPath := sourcePath[len(rootSourcePath):] 139 | if err != nil { 140 | return err 141 | } 142 | targetPath := strings.ReplaceAll(rootTargetDir+"/"+sharedPath, "//", "/") 143 | if sourceFInfo.IsDir() { 144 | targetFInfo, err := os.Stat(targetPath) 145 | if err != nil { 146 | if !os.IsNotExist(err) { 147 | return err 148 | } 149 | } else { 150 | if !targetFInfo.IsDir() { 151 | err = os.Remove(targetPath) 152 | if err != nil { 153 | return err 154 | } 155 | } 156 | } 157 | os.MkdirAll(targetPath, sourceFInfo.Mode()) 158 | } else { 159 | err = CopyFile(sourcePath, targetPath) 160 | if err != nil { 161 | return err 162 | } 163 | } 164 | return nil 165 | }, 166 | ) 167 | if err != nil { 168 | return err 169 | } 170 | return nil 171 | } 172 | 173 | // CopyFile copies a file's binary contents to another file 174 | func CopyFile(sourcePath string, targetPath string) error { 175 | sourceFInfo, err := os.Stat(sourcePath) 176 | if err != nil { 177 | return err 178 | } 179 | if !sourceFInfo.Mode().IsRegular() { 180 | return nil 181 | } 182 | targetFInfo, err := os.Stat(targetPath) 183 | if err != nil { 184 | if !os.IsNotExist(err) { 185 | return err 186 | } 187 | } else if !targetFInfo.Mode().IsRegular() { 188 | if targetFInfo.IsDir() { 189 | err := 
os.RemoveAll(targetPath) 190 | if err != nil { 191 | return err 192 | } 193 | } else { 194 | return nil 195 | } 196 | } 197 | if os.SameFile(sourceFInfo, targetFInfo) { 198 | return nil 199 | } 200 | err = os.Link(sourcePath, targetPath) 201 | if err != nil { 202 | err = copyFileContents(sourcePath, targetPath) 203 | if err != nil { 204 | return err 205 | } 206 | } 207 | return nil 208 | } 209 | 210 | // slower but safer than creating a hardlink when a target file exists 211 | func copyFileContents(sourcePath string, targetPath string) error { 212 | sourceFInfo, err := os.Stat(sourcePath) 213 | if err != nil { 214 | return err 215 | } 216 | sourceData, err := ioutil.ReadFile(sourcePath) 217 | if err != nil { 218 | return err 219 | } 220 | err = ioutil.WriteFile(targetPath, sourceData, sourceFInfo.Mode()) 221 | if err != nil { 222 | if !os.IsNotExist(err) { 223 | return err 224 | } 225 | } 226 | return nil 227 | } 228 | 229 | // ComputeDockerAgentImage detects which Docker image to choose for the Agent 230 | // based on the manager's version 231 | func ComputeDockerAgentImage(managerVersion string) string { 232 | if strings.Contains(managerVersion, "-dirty") || managerVersion == "" { 233 | return "latest" 234 | } 235 | return managerVersion 236 | } 237 | -------------------------------------------------------------------------------- /internal/utils/utils_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestComputeDockerAgentImage(t *testing.T) { 10 | testCases := []struct { 11 | givenManagerVersion string 12 | expectedAgentVersion string 13 | }{ 14 | { 15 | "2.2.1-ad68ec-dirty", 16 | "latest", 17 | }, 18 | { 19 | "2.2.1", 20 | "2.2.1", 21 | }, 22 | { 23 | "", 24 | "latest", 25 | }, 26 | { 27 | "2.1.0-rc0", 28 | "2.1.0-rc0", 29 | }, 30 | } 31 | 32 | for _, testCase := range testCases { 33 | agentVersion := ComputeDockerAgentImage(testCase.givenManagerVersion) 34 | assert.Equal(t, agentVersion, testCase.expectedAgentVersion) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "runtime" 5 | 6 | "github.com/camptocamp/bivac/cmd" 7 | _ "github.com/camptocamp/bivac/cmd/all" 8 | "github.com/camptocamp/bivac/internal/utils" 9 | ) 10 | 11 | var ( 12 | exitCode int 13 | buildInfo utils.BuildInfo 14 | 15 | // Following variables are filled in by the build script 16 | version = "<<< filled in by build >>>" 17 | buildDate = "<<< filled in by build >>>" 18 | commitSha1 = "<<< filled in by build >>>" 19 | ) 20 | 21 | func main() { 22 | buildInfo.Version = version 23 | buildInfo.Date = buildDate 24 | buildInfo.CommitSha1 = commitSha1 25 | buildInfo.Runtime = runtime.Version() 26 | cmd.Execute(buildInfo) 27 | } 28 | -------------------------------------------------------------------------------- /mocks/mock_orchestrator.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: pkg/orchestrators/orchestrators.go 3 | 4 | // Package mocks is a generated GoMock package. 
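// Note: the mocks in this package are regenerated rather than edited by hand;
// assuming the stock mockgen CLI from github.com/golang/mock, a command along
// these lines rebuilds this file from the Source header above:
//
//   mockgen -source pkg/orchestrators/orchestrators.go -destination mocks/mock_orchestrator.go -package mocks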
5 | package mocks 6 | 7 | import ( 8 | volume "github.com/camptocamp/bivac/pkg/volume" 9 | gomock "github.com/golang/mock/gomock" 10 | reflect "reflect" 11 | ) 12 | 13 | // MockOrchestrator is a mock of Orchestrator interface 14 | type MockOrchestrator struct { 15 | ctrl *gomock.Controller 16 | recorder *MockOrchestratorMockRecorder 17 | } 18 | 19 | // MockOrchestratorMockRecorder is the mock recorder for MockOrchestrator 20 | type MockOrchestratorMockRecorder struct { 21 | mock *MockOrchestrator 22 | } 23 | 24 | // NewMockOrchestrator creates a new mock instance 25 | func NewMockOrchestrator(ctrl *gomock.Controller) *MockOrchestrator { 26 | mock := &MockOrchestrator{ctrl: ctrl} 27 | mock.recorder = &MockOrchestratorMockRecorder{mock} 28 | return mock 29 | } 30 | 31 | // EXPECT returns an object that allows the caller to indicate expected use 32 | func (m *MockOrchestrator) EXPECT() *MockOrchestratorMockRecorder { 33 | return m.recorder 34 | } 35 | 36 | // GetName mocks base method 37 | func (m *MockOrchestrator) GetName() string { 38 | m.ctrl.T.Helper() 39 | ret := m.ctrl.Call(m, "GetName") 40 | ret0, _ := ret[0].(string) 41 | return ret0 42 | } 43 | 44 | // GetName indicates an expected call of GetName 45 | func (mr *MockOrchestratorMockRecorder) GetName() *gomock.Call { 46 | mr.mock.ctrl.T.Helper() 47 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockOrchestrator)(nil).GetName)) 48 | } 49 | 50 | // GetPath mocks base method 51 | func (m *MockOrchestrator) GetPath(v *volume.Volume) string { 52 | m.ctrl.T.Helper() 53 | ret := m.ctrl.Call(m, "GetPath", v) 54 | ret0, _ := ret[0].(string) 55 | return ret0 56 | } 57 | 58 | // GetPath indicates an expected call of GetPath 59 | func (mr *MockOrchestratorMockRecorder) GetPath(v interface{}) *gomock.Call { 60 | mr.mock.ctrl.T.Helper() 61 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPath", reflect.TypeOf((*MockOrchestrator)(nil).GetPath), v) 62 | } 63 | 64 | // GetVolumes mocks base method 65 | func (m *MockOrchestrator) GetVolumes(volumeFilters volume.Filters) ([]*volume.Volume, error) { 66 | m.ctrl.T.Helper() 67 | ret := m.ctrl.Call(m, "GetVolumes", volumeFilters) 68 | ret0, _ := ret[0].([]*volume.Volume) 69 | ret1, _ := ret[1].(error) 70 | return ret0, ret1 71 | } 72 | 73 | // GetVolumes indicates an expected call of GetVolumes 74 | func (mr *MockOrchestratorMockRecorder) GetVolumes(volumeFilters interface{}) *gomock.Call { 75 | mr.mock.ctrl.T.Helper() 76 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVolumes", reflect.TypeOf((*MockOrchestrator)(nil).GetVolumes), volumeFilters) 77 | } 78 | 79 | // DeployAgent mocks base method 80 | func (m *MockOrchestrator) DeployAgent(image string, cmd, envs []string, volume *volume.Volume) (bool, string, error) { 81 | m.ctrl.T.Helper() 82 | ret := m.ctrl.Call(m, "DeployAgent", image, cmd, envs, volume) 83 | ret0, _ := ret[0].(bool) 84 | ret1, _ := ret[1].(string) 85 | ret2, _ := ret[2].(error) 86 | return ret0, ret1, ret2 87 | } 88 | 89 | // DeployAgent indicates an expected call of DeployAgent 90 | func (mr *MockOrchestratorMockRecorder) DeployAgent(image, cmd, envs, volume interface{}) *gomock.Call { 91 | mr.mock.ctrl.T.Helper() 92 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeployAgent", reflect.TypeOf((*MockOrchestrator)(nil).DeployAgent), image, cmd, envs, volume) 93 | } 94 | 95 | // GetContainersMountingVolume mocks base method 96 | func (m *MockOrchestrator) GetContainersMountingVolume(v *volume.Volume) 
([]*volume.MountedVolume, error) { 97 | m.ctrl.T.Helper() 98 | ret := m.ctrl.Call(m, "GetContainersMountingVolume", v) 99 | ret0, _ := ret[0].([]*volume.MountedVolume) 100 | ret1, _ := ret[1].(error) 101 | return ret0, ret1 102 | } 103 | 104 | // GetContainersMountingVolume indicates an expected call of GetContainersMountingVolume 105 | func (mr *MockOrchestratorMockRecorder) GetContainersMountingVolume(v interface{}) *gomock.Call { 106 | mr.mock.ctrl.T.Helper() 107 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContainersMountingVolume", reflect.TypeOf((*MockOrchestrator)(nil).GetContainersMountingVolume), v) 108 | } 109 | 110 | // ContainerExec mocks base method 111 | func (m *MockOrchestrator) ContainerExec(mountedVolumes *volume.MountedVolume, command []string) (string, error) { 112 | m.ctrl.T.Helper() 113 | ret := m.ctrl.Call(m, "ContainerExec", mountedVolumes, command) 114 | ret0, _ := ret[0].(string) 115 | ret1, _ := ret[1].(error) 116 | return ret0, ret1 117 | } 118 | 119 | // ContainerExec indicates an expected call of ContainerExec 120 | func (mr *MockOrchestratorMockRecorder) ContainerExec(mountedVolumes, command interface{}) *gomock.Call { 121 | mr.mock.ctrl.T.Helper() 122 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerExec", reflect.TypeOf((*MockOrchestrator)(nil).ContainerExec), mountedVolumes, command) 123 | } 124 | 125 | // IsNodeAvailable mocks base method 126 | func (m *MockOrchestrator) IsNodeAvailable(hostID string) (bool, error) { 127 | m.ctrl.T.Helper() 128 | ret := m.ctrl.Call(m, "IsNodeAvailable", hostID) 129 | ret0, _ := ret[0].(bool) 130 | ret1, _ := ret[1].(error) 131 | return ret0, ret1 132 | } 133 | 134 | // IsNodeAvailable indicates an expected call of IsNodeAvailable 135 | func (mr *MockOrchestratorMockRecorder) IsNodeAvailable(hostID interface{}) *gomock.Call { 136 | mr.mock.ctrl.T.Helper() 137 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsNodeAvailable", reflect.TypeOf((*MockOrchestrator)(nil).IsNodeAvailable), hostID) 138 | } 139 | 140 | // RetrieveOrphanAgents mocks base method 141 | func (m *MockOrchestrator) RetrieveOrphanAgents() (map[string]string, error) { 142 | m.ctrl.T.Helper() 143 | ret := m.ctrl.Call(m, "RetrieveOrphanAgents") 144 | ret0, _ := ret[0].(map[string]string) 145 | ret1, _ := ret[1].(error) 146 | return ret0, ret1 147 | } 148 | 149 | // RetrieveOrphanAgents indicates an expected call of RetrieveOrphanAgents 150 | func (mr *MockOrchestratorMockRecorder) RetrieveOrphanAgents() *gomock.Call { 151 | mr.mock.ctrl.T.Helper() 152 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RetrieveOrphanAgents", reflect.TypeOf((*MockOrchestrator)(nil).RetrieveOrphanAgents)) 153 | } 154 | 155 | // AttachOrphanAgent mocks base method 156 | func (m *MockOrchestrator) AttachOrphanAgent(containerID, namespace string) (bool, string, error) { 157 | m.ctrl.T.Helper() 158 | ret := m.ctrl.Call(m, "AttachOrphanAgent", containerID, namespace) 159 | ret0, _ := ret[0].(bool) 160 | ret1, _ := ret[1].(string) 161 | ret2, _ := ret[2].(error) 162 | return ret0, ret1, ret2 163 | } 164 | 165 | // AttachOrphanAgent indicates an expected call of AttachOrphanAgent 166 | func (mr *MockOrchestratorMockRecorder) AttachOrphanAgent(containerID, namespace interface{}) *gomock.Call { 167 | mr.mock.ctrl.T.Helper() 168 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachOrphanAgent", reflect.TypeOf((*MockOrchestrator)(nil).AttachOrphanAgent), containerID, namespace) 169 | } 170 | 
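A usage sketch for this mock, mirroring the pattern of internal/manager/volumes_test.go (variable names hypothetical):

    ctrl := gomock.NewController(t)
    defer ctrl.Finish()
    o := mocks.NewMockOrchestrator(ctrl)
    o.EXPECT().GetVolumes(volume.Filters{}).Return([]*volume.Volume{}, nil).Times(1)
    // Any code written against the Orchestrator interface can now run on canned data.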
-------------------------------------------------------------------------------- /mocks/mock_prometheus.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: github.com/prometheus/client_golang/prometheus (interfaces: Registerer) 3 | 4 | // Package mocks is a generated GoMock package. 5 | package mocks 6 | 7 | import ( 8 | gomock "github.com/golang/mock/gomock" 9 | prometheus "github.com/prometheus/client_golang/prometheus" 10 | reflect "reflect" 11 | ) 12 | 13 | // MockRegisterer is a mock of Registerer interface 14 | type MockRegisterer struct { 15 | ctrl *gomock.Controller 16 | recorder *MockRegistererMockRecorder 17 | } 18 | 19 | // MockRegistererMockRecorder is the mock recorder for MockRegisterer 20 | type MockRegistererMockRecorder struct { 21 | mock *MockRegisterer 22 | } 23 | 24 | // NewMockRegisterer creates a new mock instance 25 | func NewMockRegisterer(ctrl *gomock.Controller) *MockRegisterer { 26 | mock := &MockRegisterer{ctrl: ctrl} 27 | mock.recorder = &MockRegistererMockRecorder{mock} 28 | return mock 29 | } 30 | 31 | // EXPECT returns an object that allows the caller to indicate expected use 32 | func (m *MockRegisterer) EXPECT() *MockRegistererMockRecorder { 33 | return m.recorder 34 | } 35 | 36 | // MustRegister mocks base method 37 | func (m *MockRegisterer) MustRegister(arg0 ...prometheus.Collector) { 38 | m.ctrl.T.Helper() 39 | varargs := []interface{}{} 40 | for _, a := range arg0 { 41 | varargs = append(varargs, a) 42 | } 43 | m.ctrl.Call(m, "MustRegister", varargs...) 44 | } 45 | 46 | // MustRegister indicates an expected call of MustRegister 47 | func (mr *MockRegistererMockRecorder) MustRegister(arg0 ...interface{}) *gomock.Call { 48 | mr.mock.ctrl.T.Helper() 49 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MustRegister", reflect.TypeOf((*MockRegisterer)(nil).MustRegister), arg0...) 
50 | } 51 | 52 | // Register mocks base method 53 | func (m *MockRegisterer) Register(arg0 prometheus.Collector) error { 54 | m.ctrl.T.Helper() 55 | ret := m.ctrl.Call(m, "Register", arg0) 56 | ret0, _ := ret[0].(error) 57 | return ret0 58 | } 59 | 60 | // Register indicates an expected call of Register 61 | func (mr *MockRegistererMockRecorder) Register(arg0 interface{}) *gomock.Call { 62 | mr.mock.ctrl.T.Helper() 63 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockRegisterer)(nil).Register), arg0) 64 | } 65 | 66 | // Unregister mocks base method 67 | func (m *MockRegisterer) Unregister(arg0 prometheus.Collector) bool { 68 | m.ctrl.T.Helper() 69 | ret := m.ctrl.Call(m, "Unregister", arg0) 70 | ret0, _ := ret[0].(bool) 71 | return ret0 72 | } 73 | 74 | // Unregister indicates an expected call of Unregister 75 | func (mr *MockRegistererMockRecorder) Unregister(arg0 interface{}) *gomock.Call { 76 | mr.mock.ctrl.T.Helper() 77 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unregister", reflect.TypeOf((*MockRegisterer)(nil).Unregister), arg0) 78 | } 79 | -------------------------------------------------------------------------------- /pkg/client/client.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "bytes" 5 | "encoding/base64" 6 | "encoding/json" 7 | "fmt" 8 | "io/ioutil" 9 | "net/http" 10 | "strconv" 11 | "strings" 12 | 13 | "github.com/camptocamp/bivac/pkg/volume" 14 | ) 15 | 16 | // Client contains information needed to connect to a Bivac API 17 | type Client struct { 18 | remoteAddress string 19 | psk string 20 | } 21 | 22 | // NewClient returns a Bivac client 23 | func NewClient(remoteAddress string, psk string) (c *Client, err error) { 24 | c = &Client{ 25 | remoteAddress: remoteAddress, 26 | psk: psk, 27 | } 28 | 29 | var pingResponse map[string]string 30 | err = c.newRequest(&pingResponse, "GET", "/ping", "") 31 | if err != nil { 32 | err = fmt.Errorf("failed to connect to the remote Bivac instance: %s", err) 33 | return 34 | } 35 | if pingResponse["type"] != "pong" { 36 | err = fmt.Errorf("wrong response from the Bivac instance: %v", pingResponse) 37 | return 38 | } 39 | return 40 | } 41 | 42 | // GetVolumes returns the list of the volumes managed by Bivac 43 | func (c *Client) GetVolumes() (volumes []volume.Volume, err error) { 44 | err = c.newRequest(&volumes, "GET", "/volumes", "") 45 | if err != nil { 46 | err = fmt.Errorf("failed to connect to the remote Bivac instance: %s", err) 47 | return 48 | } 49 | return 50 | } 51 | 52 | // BackupVolume requests a backup of a volume 53 | func (c *Client) BackupVolume(volumeName string, force bool) (err error) { 54 | err = c.newRequest(nil, "POST", fmt.Sprintf("/backup/%s?force=%s", volumeName, strconv.FormatBool(force)), "") 55 | if err != nil { 56 | err = fmt.Errorf("failed to connect to the remote Bivac instance: %s", err) 57 | return 58 | } 59 | return 60 | } 61 | 62 | // RestoreVolume requests a restore of a volume 63 | func (c *Client) RestoreVolume( 64 | volumeName string, 65 | force bool, 66 | snapshotName string, 67 | ) (err error) { 68 | err = c.newRequest( 69 | nil, 70 | "POST", 71 | fmt.Sprintf( 72 | "/restore/%s/%s?force=%s", 73 | volumeName, 74 | snapshotName, 75 | strconv.FormatBool(force), 76 | ), 77 | "", 78 | ) 79 | if err != nil { 80 | err = fmt.Errorf( 81 | "failed to connect to the remote Bivac instance: %s", 82 | err, 83 | ) 84 | return 85 | } 86 | return 87 | } 88 | 89 | // RunRawCommand runs 
a custom Restic command on a volume's repository and returns the output 90 | func (c *Client) RunRawCommand(volumeID string, cmd []string) (output string, err error) { 91 | var response map[string]interface{} 92 | 93 | postValue := make(map[string][]string) 94 | postValue["cmd"] = cmd 95 | 96 | postValueEncoded, _ := json.Marshal(postValue) 97 | 98 | err = c.newRequest(&response, "POST", fmt.Sprintf("/restic/%s", volumeID), string(postValueEncoded)) 99 | if err != nil { 100 | err = fmt.Errorf("failed to connect to the remote Bivac instance: %s", err) 101 | return 102 | } 103 | decodedOutput, err := base64.StdEncoding.DecodeString(strings.Replace(response["data"].(string), " ", "", -1)) 104 | if err != nil { 105 | err = fmt.Errorf("failed to decode output: %s -> `%s`", err, strings.Replace(response["data"].(string), " ", "", -1)) 106 | } 107 | output = string(decodedOutput) 108 | return 109 | } 110 | 111 | // GetInformations returns information about the Bivac manager 112 | func (c *Client) GetInformations() (informations map[string]string, err error) { 113 | var data struct { 114 | Type string `json:"type"` 115 | Data map[string]string 116 | } 117 | err = c.newRequest(&data, "GET", "/info", "") 118 | if err != nil { 119 | err = fmt.Errorf("failed to connect to the remote Bivac instance: %s", err) 120 | return 121 | } 122 | informations = data.Data 123 | return 124 | } 125 | 126 | func (c *Client) newRequest(data interface{}, method, endpoint, value string) (err error) { 127 | client := &http.Client{} 128 | req, err := http.NewRequest(method, c.remoteAddress+endpoint, bytes.NewBuffer([]byte(value))) 129 | if err != nil { 130 | err = fmt.Errorf("failed to build request: %s", err) 131 | return 132 | } 133 | 134 | req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.psk)) 135 | 136 | res, err := client.Do(req) 137 | if err != nil { 138 | err = fmt.Errorf("failed to send request: %s", err) 139 | return 140 | } 141 | defer res.Body.Close() 142 | 143 | body, err := ioutil.ReadAll(res.Body) 144 | if err != nil { 145 | err = fmt.Errorf("failed to read body: %s", err) 146 | return 147 | } 148 | 149 | if res.StatusCode == http.StatusOK { 150 | if err := json.Unmarshal(body, &data); err != nil { 151 | err = fmt.Errorf("failed to unmarshal response from the Bivac instance: %s", err) 152 | return err 153 | } 154 | } else { 155 | err = fmt.Errorf("received wrong status code from the Bivac instance: [%d] %s", res.StatusCode, string(body)) 156 | return 157 | } 158 | return 159 | } 160 | -------------------------------------------------------------------------------- /pkg/client/client_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "gopkg.in/jarcoal/httpmock.v1" 9 | 10 | "github.com/camptocamp/bivac/pkg/volume" 11 | ) 12 | 13 | // NewClient 14 | func TestNewClientValid(t *testing.T) { 15 | // Prepare test 16 | httpmock.Activate() 17 | defer httpmock.DeactivateAndReset() 18 | 19 | expectedClient := &Client{ 20 | remoteAddress: "http://fakeserver", 21 | psk: "psk", 22 | } 23 | 24 | // Run test 25 | httpmock.RegisterResponder("GET", "http://fakeserver/ping", 26 | httpmock.NewStringResponder(200, `{"type": "pong"}`)) 27 | 28 | c, err := NewClient("http://fakeserver", "psk") 29 | 30 | assert.Nil(t, err) 31 | assert.Equal(t, c, expectedClient) 32 | } 33 | 34 | func TestNewClientFailedToConnect(t *testing.T) { 35 | // Prepare test 36 | httpmock.Activate() 37 | defer 
httpmock.DeactivateAndReset() 38 | 39 | expectedError := errors.New("failed to connect") 40 | expectedClient := &Client{ 41 | remoteAddress: "http://fakefakeserver", 42 | psk: "psk", 43 | } 44 | 45 | // Run test cases 46 | c, err := NewClient("http://fakefakeserver", "psk") 47 | 48 | assert.NotNil(t, err) 49 | assert.Contains(t, err.Error(), expectedError.Error()) 50 | assert.Equal(t, c, expectedClient) 51 | } 52 | 53 | func TestNewClientWrongResponse(t *testing.T) { 54 | // Prepare test 55 | httpmock.Activate() 56 | defer httpmock.DeactivateAndReset() 57 | 58 | expectedClient := &Client{ 59 | remoteAddress: "http://fakeserver", 60 | psk: "psk", 61 | } 62 | 63 | // Run test 64 | httpmock.RegisterResponder("GET", "http://fakeserver/ping", 65 | httpmock.NewStringResponder(200, `{"type": "foo"}`)) 66 | 67 | c, err := NewClient("http://fakeserver", "psk") 68 | 69 | assert.NotNil(t, err) 70 | assert.Contains(t, err.Error(), "wrong response") 71 | assert.Equal(t, c, expectedClient) 72 | } 73 | 74 | func TestNewClientFailedToUnmarshal(t *testing.T) { 75 | // Prepare test 76 | httpmock.Activate() 77 | defer httpmock.DeactivateAndReset() 78 | 79 | expectedClient := &Client{ 80 | remoteAddress: "http://fakeserver", 81 | psk: "psk", 82 | } 83 | 84 | // Run test 85 | httpmock.RegisterResponder("GET", "http://fakeserver/ping", 86 | httpmock.NewStringResponder(200, ``)) 87 | 88 | c, err := NewClient("http://fakeserver", "psk") 89 | 90 | assert.NotNil(t, err) 91 | assert.Contains(t, err.Error(), "unmarshal") 92 | assert.Equal(t, c, expectedClient) 93 | } 94 | 95 | func TestNewClientWrongStatusCode(t *testing.T) { 96 | // Prepare test 97 | httpmock.Activate() 98 | defer httpmock.DeactivateAndReset() 99 | 100 | expectedClient := &Client{ 101 | remoteAddress: "http://fakeserver", 102 | psk: "psk", 103 | } 104 | 105 | // Run test 106 | httpmock.RegisterResponder("GET", "http://fakeserver/ping", 107 | httpmock.NewStringResponder(404, ``)) 108 | 109 | c, err := NewClient("http://fakeserver", "psk") 110 | 111 | assert.NotNil(t, err) 112 | assert.Contains(t, err.Error(), "wrong status code") 113 | assert.Equal(t, c, expectedClient) 114 | } 115 | 116 | // GetVolumes 117 | func TestGetVolumesValid(t *testing.T) { 118 | // Prepare test 119 | httpmock.Activate() 120 | defer httpmock.DeactivateAndReset() 121 | 122 | fakeResponse := `[ 123 | { 124 | "id": "foo", 125 | "name": "foo", 126 | "mountpoint": "/foo" 127 | }, 128 | { 129 | "id": "bar", 130 | "name": "bar", 131 | "mountpoint": "/bar" 132 | } 133 | ]` 134 | 135 | expectedVolumes := []volume.Volume{ 136 | volume.Volume{ 137 | ID: "foo", 138 | Name: "foo", 139 | Mountpoint: "/foo", 140 | }, 141 | volume.Volume{ 142 | ID: "bar", 143 | Name: "bar", 144 | Mountpoint: "/bar", 145 | }, 146 | } 147 | 148 | // Run test 149 | httpmock.RegisterResponder("GET", "http://fakeserver/volumes", 150 | httpmock.NewStringResponder(200, fakeResponse)) 151 | 152 | c := &Client{ 153 | remoteAddress: "http://fakeserver", 154 | psk: "psk", 155 | } 156 | volumes, err := c.GetVolumes() 157 | 158 | assert.Nil(t, err) 159 | assert.Equal(t, volumes, expectedVolumes) 160 | } 161 | -------------------------------------------------------------------------------- /pkg/orchestrators/orchestrators.go: -------------------------------------------------------------------------------- 1 | package orchestrators 2 | 3 | import ( 4 | "github.com/camptocamp/bivac/pkg/volume" 5 | ) 6 | 7 | // Orchestrator implements a container Orchestrator interface 8 | type Orchestrator interface { 9 | GetName() string 
10 | GetPath(v *volume.Volume) string 11 | GetVolumes(volumeFilters volume.Filters) (volumes []*volume.Volume, err error) 12 | DeployAgent(image string, cmd []string, envs []string, volume *volume.Volume) (success bool, output string, err error) 13 | GetContainersMountingVolume(v *volume.Volume) (mountedVolumes []*volume.MountedVolume, err error) 14 | ContainerExec(mountedVolumes *volume.MountedVolume, command []string) (stdout string, err error) 15 | IsNodeAvailable(hostID string) (ok bool, err error) 16 | RetrieveOrphanAgents() (containers map[string]string, err error) 17 | AttachOrphanAgent(containerID, namespace string) (success bool, output string, err error) 18 | } 19 | -------------------------------------------------------------------------------- /pkg/volume/volume.go: -------------------------------------------------------------------------------- 1 | package volume 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/prometheus/client_golang/prometheus" 7 | "github.com/prometheus/client_golang/prometheus/promauto" 8 | ) 9 | 10 | // Volume provides backup methods for a single volume 11 | type Volume struct { 12 | ID string 13 | Name string 14 | BackupDir string 15 | Mount string 16 | Mountpoint string 17 | Driver string 18 | Labels map[string]string 19 | ReadOnly bool 20 | HostBind string 21 | Hostname string 22 | Namespace string 23 | RepoName string 24 | SubPath string 25 | 26 | BackingUp bool 27 | LastBackupDate string 28 | LastBackupStatus string 29 | LastBackupStartDate string 30 | Logs map[string]string 31 | 32 | Metrics *Metrics `json:"-"` 33 | 34 | Mux sync.Mutex 35 | } 36 | 37 | // Filters contains the volumes filters 38 | type Filters struct { 39 | Blacklist []string 40 | Whitelist []string 41 | WhitelistAnnotation bool 42 | } 43 | 44 | // Metrics are used to fill the Prometheus endpoint 45 | // TODO: Merge LastBackupDate and LastBackupStatus 46 | type Metrics struct { 47 | LastBackupDate prometheus.Gauge 48 | LastBackupStatus prometheus.Gauge 49 | OldestBackupDate prometheus.Gauge 50 | BackupCount prometheus.Gauge 51 | } 52 | 53 | // MountedVolume stores mounted volumes inside a container 54 | type MountedVolume struct { 55 | PodID string 56 | ContainerID string 57 | HostID string 58 | Volume *Volume 59 | Path string 60 | } 61 | 62 | // SetupMetrics initializes the volume's metrics 63 | func (v *Volume) SetupMetrics() { 64 | v.Metrics = &Metrics{} 65 | 66 | v.Metrics.LastBackupDate = promauto.NewGauge(prometheus.GaugeOpts{ 67 | Name: "bivac_lastBackup", 68 | Help: "Date of the last backup", 69 | ConstLabels: map[string]string{ 70 | "volume_id": v.ID, 71 | "volume_name": v.Name, 72 | "hostbind": v.HostBind, 73 | "hostname": v.Hostname, 74 | }, 75 | }) 76 | v.Metrics.LastBackupStatus = promauto.NewGauge(prometheus.GaugeOpts{ 77 | Name: "bivac_backupExitCode", 78 | Help: "Status of the last backup", 79 | ConstLabels: map[string]string{ 80 | "volume_id": v.ID, 81 | "volume_name": v.Name, 82 | "hostbind": v.HostBind, 83 | "hostname": v.Hostname, 84 | }, 85 | }) 86 | v.Metrics.OldestBackupDate = promauto.NewGauge(prometheus.GaugeOpts{ 87 | Name: "bivac_oldestBackup", 88 | Help: "Date of the oldest snapshot", 89 | ConstLabels: map[string]string{ 90 | "volume_id": v.ID, 91 | "volume_name": v.Name, 92 | "hostbind": v.HostBind, 93 | "hostname": v.Hostname, 94 | }, 95 | }) 96 | v.Metrics.BackupCount = promauto.NewGauge(prometheus.GaugeOpts{ 97 | Name: "bivac_backupCount", 98 | Help: "Backups count, should match --keep-* option", 99 | ConstLabels: map[string]string{ 100 | "volume_id": 
v.ID, 101 | "volume_name": v.Name, 102 | "hostbind": v.HostBind, 103 | "hostname": v.Hostname, 104 | }, 105 | }) 106 | 107 | return 108 | } 109 | 110 | // CleanupMetrics unregisters the volume's metrics 111 | func (v *Volume) CleanupMetrics() { 112 | prometheus.Unregister(v.Metrics.LastBackupDate) 113 | prometheus.Unregister(v.Metrics.LastBackupStatus) 114 | prometheus.Unregister(v.Metrics.OldestBackupDate) 115 | prometheus.Unregister(v.Metrics.BackupCount) 116 | return 117 | } 118 | -------------------------------------------------------------------------------- /pkg/volume/volume_test.go: -------------------------------------------------------------------------------- 1 | package volume 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | var fakeHostname, _ = os.Hostname() 11 | 12 | // TestSetupMetrics checks that a volume's metrics can be initialized 13 | func TestSetupMetrics(t *testing.T) { 14 | v := Volume{ 15 | ID: "bar", 16 | Name: "bar", 17 | Mountpoint: "/bar", 18 | HostBind: fakeHostname, 19 | Hostname: fakeHostname, 20 | Logs: make(map[string]string), 21 | BackingUp: false, 22 | RepoName: "bar", 23 | SubPath: "", 24 | } 25 | v.SetupMetrics() 26 | assert.Equal(t, v.ID, "bar") 27 | } 28 | -------------------------------------------------------------------------------- /providers-config.default.toml: -------------------------------------------------------------------------------- 1 | [providers] 2 | [providers.mysql] 3 | pre_cmd = """ 4 | mkdir -p $volume/backups && \ 5 | if [ "$MYSQL_ALLOW_EMPTY_PASSWORD" == "yes" ]; then \ 6 | mysqldump --opt --max_allowed_packet=16M --all-databases --extended-insert > $volume/backups/all.sql; \ 7 | elif [ -z ${MYSQL_ROOT_PASSWORD+x} ]; then \ 8 | mysqldump --opt --max_allowed_packet=16M --all-databases --extended-insert --user=$MYSQL_USER --password=$MYSQL_PASSWORD $MYSQL_DATABASE > $volume/backups/all.sql; \ 9 | else \ 10 | mysqldump --opt --max_allowed_packet=16M --all-databases --extended-insert --password=$MYSQL_ROOT_PASSWORD > $volume/backups/all.sql; \ 11 | fi""" 12 | detect_cmd = "[[ -d $volume/mysql ]]" 13 | post_cmd = "rm -rf $volume/backups" 14 | backup_dir = "backups" 15 | 16 | [providers.postgresql] 17 | pre_cmd = "mkdir -p $volume/backups && pg_dumpall --clean -Upostgres > $volume/backups/all.sql" 18 | post_cmd = "rm -rf $volume/backups" 19 | detect_cmd = "[[ -f $volume/PG_VERSION ]]" 20 | backup_dir = "backups" 21 | 22 | [providers.openldap] 23 | pre_cmd = "mkdir -p $volume/backups && slapcat > $volume/backups/all.ldif" 24 | detect_cmd = "[[ -f $volume/DB_CONFIG ]]" 25 | backup_dir = "backups" 26 | 27 | [providers.mongo] 28 | pre_cmd = """ 29 | if [ -z \"$MONGO_INITDB_ROOT_USERNAME\" ]; then \ 30 | mongodump -o $volume/backups; else \ 31 | mongodump -o $volume/backups --username=$MONGO_INITDB_ROOT_USERNAME --password=$MONGO_INITDB_ROOT_PASSWORD; \ 32 | fi""" 33 | post_cmd = "rm -rf $volume/backups" 34 | detect_cmd = "[[ -f $volume/mongod.lock ]]" 35 | backup_dir = "backups" 36 | -------------------------------------------------------------------------------- /scripts/build-release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -z "$GO_VERSION" ]; then 4 | GO_VERSION=latest 5 | fi 6 | 7 | docker pull golang:$GO_VERSION 8 | 9 | VERSION=$(git describe --always --dirty) 10 | 11 | PLATFORMS=(darwin linux windows) 12 | ARCHITECTURES=(386 amd64) 13 | 14 | mkdir -p release 15 | 16 | for platform in "${PLATFORMS[@]}"; do 17 | for arch in "${ARCHITECTURES[@]}"; do 18 | rm -f bivac 19 | docker
run -it --rm -w /go/src/github.com/camptocamp/bivac -v "$(pwd)":/go/src/github.com/camptocamp/bivac \ 20 | -e GOOS=${platform} \ 21 | -e GOARCH=${arch} \ 22 | golang:$GO_VERSION make bivac 23 | sha256sum bivac >> release/SHA256SUM.txt 24 | zip release/bivac_${VERSION}_${platform}_${arch}.zip bivac 25 | done 26 | done 27 | -------------------------------------------------------------------------------- /test/integration/cattle/Vagrantfile.builder: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | Vagrant.configure("2") do |config| 5 | config.vm.box = "ubuntu/bionic64" 6 | config.vm.hostname = "testing" 7 | 8 | config.ssh.insert_key = false 9 | 10 | 11 | config.vm.provider "virtualbox" do |v| 12 | v.memory = 4096 13 | v.cpus = 2 14 | end 15 | 16 | config.vm.provision "shell", inline: <<-SHELL 17 | sudo apt-get update 18 | sudo apt-get upgrade -y 19 | sudo apt-get install apt-transport-https ca-certificates curl software-properties-common jq wget net-tools iproute2 -y 20 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 21 | sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" 22 | sudo apt-get update 23 | sudo apt-get install docker-ce -y 24 | sudo curl -L "https://github.com/docker/compose/releases/download/1.22.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 25 | sudo chmod +x /usr/local/bin/docker-compose 26 | sudo usermod -aG docker vagrant 27 | echo '{"dns": ["8.8.8.8","8.8.4.4"]}' | sudo tee /etc/docker/daemon.json 28 | sudo systemctl restart docker 29 | 30 | # Add default vagrant key 31 | curl -k https://raw.githubusercontent.com/mitchellh/vagrant/master/keys/vagrant.pub > /home/vagrant/.ssh/authorized_keys 32 | chmod 0700 /home/vagrant/.ssh 33 | chmod 0600 /home/vagrant/.ssh/authorized_keys 34 | SHELL 35 | 36 | config.vm.provision "shell", path: "prepare.sh" 37 | end 38 | -------------------------------------------------------------------------------- /test/integration/cattle/Vagrantfile.runner: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | Vagrant.configure("2") do |config| 5 | config.vm.box = "bivac-cattle" 6 | config.vm.box_url = "file://bivac-cattle.box" 7 | config.vm.hostname = "testing" 8 | 9 | config.vm.provider "virtualbox" do |v| 10 | v.memory = 4096 11 | v.cpus = 2 12 | end 13 | 14 | config.ssh.insert_key = false 15 | end 16 | -------------------------------------------------------------------------------- /test/integration/cattle/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export VAGRANT_VAGRANTFILE=Vagrantfile.builder 4 | 5 | if [ -f bivac-cattle.box ]; then 6 | rm bivac-cattle.box 7 | fi 8 | 9 | vagrant up 10 | vagrant package --output bivac-cattle.box 11 | vagrant destroy -f 12 | 13 | vagrant box list | grep bivac-cattle && vagrant box remove bivac-cattle 14 | -------------------------------------------------------------------------------- /test/integration/cattle/prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "[*] Retrieving internal IP..." 4 | export INTERNAL_IP=$(ip -o -4 addr show dev `ls /sys/class/net | grep -E "^eth|^en" | head -n 1` | cut -d' ' -f7 | cut -d'/' -f1) 5 | 6 | echo "[*] Starting local registry..."
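# Note on the steps that follow: this script provisions everything the Cattle
# integration tests rely on — a local Docker registry, a Minio server used as
# the S3-compatible backup target for restic, a Rancher server with one
# registered host, and the Rancher CLI that the test scripts drive.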
7 | docker run --restart=always -d -p 5000:5000 --name registry registry:2 8 | sleep 10 9 | 10 | echo "[*] Starting Minio..." 11 | docker run --restart=always -d -p 9000:9000 -e MINIO_ACCESS_KEY=OBQZY3DV6VOEZ9PG6NIM -e MINIO_SECRET_KEY=7e88XeX0j3YdB6b1o0zU2GhG0dX6tFMy3Haty --name minio -v /root/minio:/data minio/minio server /data 12 | sleep 10 13 | docker pull minio/mc 14 | docker run --rm -e MC_HOST_minio=http://OBQZY3DV6VOEZ9PG6NIM:7e88XeX0j3YdB6b1o0zU2GhG0dX6tFMy3Haty@${INTERNAL_IP}:9000 minio/mc mb minio/bivac-testing 15 | 16 | echo "[*] Starting Rancher..." 17 | docker run -d --restart=unless-stopped -p 8080:8080 rancher/server:stable 18 | sleep 60 19 | curl 'http://localhost:8080/v2-beta/setting' -H 'Accept: application/json' -H 'content-type: application/json' --data '{"type":"setting","name":"telemetry.opt","value":"in"}' 20 | sleep 1 21 | curl 'http://localhost:8080/v2-beta/settings/api.host' -X PUT -H 'Accept: application/json' -H 'content-type: application/json' --data '{"id":"api.host","type":"activeSetting","baseType":"setting","name":"api.host","activeValue":null,"inDb":false,"source":null,"value":"http://'${INTERNAL_IP}':8080"}' 22 | sleep 1 23 | curl 'http://localhost:8080/v2-beta/projects/1a5/registrationtoken' --data '{"type":"registrationToken"}' 24 | sleep 1 25 | command=$(curl -s 'http://localhost:8080/v2-beta/projects/1a5/registrationtokens?state=active&limit=-1&sort=name' -H 'Accept: application/json' -H 'content-type: application/json' | jq -r ".data[0].command") 26 | echo $command 27 | $command 28 | 29 | echo "[*] Installing Rancher CLI..." 30 | wget https://releases.rancher.com/cli/v0.6.12/rancher-linux-amd64-v0.6.12.tar.gz 31 | tar zxvf rancher-linux-amd64-v0.6.12.tar.gz 32 | sudo cp ./rancher-v0.6.12/rancher /bin/rancher 33 | sudo chmod +x /bin/rancher 34 | rm -rf ./rancher-v0.6.12 35 | rm rancher-linux-amd64-v0.6.12.tar.gz 36 | rancher 37 | sync 38 | -------------------------------------------------------------------------------- /test/integration/cattle/tests/01_basic: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Prepare environment 4 | echo "[*] Retrieving internal IP..." 5 | export INTERNAL_IP=$(ip -o -4 addr show dev `ls /sys/class/net | grep -E "^eth|^en" | head -n 1` | cut -d' ' -f7 | cut -d'/' -f1) 6 | 7 | test_valid=true 8 | canary=8ft8HJ3teCg8S1WeH5bwhNBZEtBJNs 9 | export AWS_ACCESS_KEY_ID=OBQZY3DV6VOEZ9PG6NIM 10 | export AWS_SECRET_ACCESS_KEY=7e88XeX0j3YdB6b1o0zU2GhG0dX6tFMy3Haty 11 | 12 | export RANCHER_URL=http://${INTERNAL_IP}:8080/v2-beta 13 | 14 | echo "[*] Starting Rancher..." 15 | while true; do 16 | rancher stacks -s > /dev/null 2>&1 17 | if [ $? -ne 0 ]; then 18 | sleep 10 19 | continue 20 | fi 21 | 22 | sh -c "rancher stacks -s | grep -v unhealthy" > /dev/null 2>&1 23 | if [ $? -ne 0 ]; then 24 | sleep 10 25 | continue 26 | fi 27 | 28 | break 29 | done 30 | 31 | echo "[*] Starting services..." 
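# The heredoc below (see the stack definition that follows) describes the
# services under test: a one-shot "canary" container that seeds known data
# into its volume, plus PostgreSQL and MySQL services whose volumes exercise
# the matching Bivac providers. The heredoc delimiter is unquoted, so
# $canary and ${INTERNAL_IP} expand when the file is written, not at run time.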
32 | cat <<EOF > /tmp/services-docker-compose.yml 33 | --- 34 | version: '2' 35 | services: 36 | canary: 37 | image: busybox 38 | volumes: 39 | - canary:/data 40 | command: ["sh", "-c", "echo $canary > /data/canary; mkdir -p /data/foo/bar; echo $canary > /data/foo/bar/canary; ls -la /data; echo $canary > /data/canary"] 41 | labels: 42 | io.rancher.container.start_once: true 43 | 44 | postgresql: 45 | image: postgres:latest 46 | volumes: 47 | - postgres:/var/lib/postgresql/data 48 | 49 | mysql: 50 | image: mysql 51 | volumes: 52 | - mysql:/var/lib/mysql 53 | environment: 54 | MYSQL_DATABASE: foo 55 | MYSQL_USER: foo 56 | MYSQL_PASSWORD: bar 57 | MYSQL_ROOT_PASSWORD: root 58 | EOF 59 | 60 | services_stack=$(rancher stack create --start -f /tmp/services-docker-compose.yml services) 61 | while [[ $(rancher inspect $services_stack | jq -r ".healthState") != "healthy" ]]; do 62 | sleep 10 63 | done 64 | 65 | echo "[*] Starting Bivac..." 66 | cat <<EOF > /tmp/bivac-docker-compose.yml 67 | --- 68 | version: '2' 69 | services: 70 | bivac: 71 | image: $1 72 | environment: 73 | BIVAC_WHITELIST: "canary,mysql,postgres" 74 | BIVAC_TARGET_URL: s3:http://${INTERNAL_IP}:9000/bivac-testing 75 | BIVAC_LOG_LEVEL: $2 76 | AWS_ACCESS_KEY_ID: $AWS_ACCESS_KEY_ID 77 | AWS_SECRET_ACCESS_KEY: $AWS_SECRET_ACCESS_KEY 78 | RESTIC_PASSWORD: toto 79 | BIVAC_SERVER_PSK: toto 80 | BIVAC_REFRESH_RATE: 10s 81 | BIVAC_AGENT_IMAGE: $1 82 | BIVAC_LOG_SERVER: http://${INTERNAL_IP}:8182 83 | command: manager 84 | ports: 85 | - "8182:8182" 86 | labels: 87 | io.rancher.container.agent.role: environmentAdmin 88 | io.rancher.container.create_agent: 'true' 89 | EOF 90 | 91 | bivac_stack=$(rancher stack create --start -s -f /tmp/bivac-docker-compose.yml bivac) 92 | 93 | echo "[*] Waiting for backups..." 94 | 95 | canary_waiting=true 96 | mysql_waiting=true 97 | postgres_waiting=true 98 | 99 | while $canary_waiting || $mysql_waiting || $postgres_waiting; do 100 | canary_volume=$(curl -s -H "Authorization: Bearer toto" http://${INTERNAL_IP}:8182/volumes | jq -r '.[] | select(.Name | contains("canary"))') 101 | if [ "$(echo $canary_volume | jq -r '.LastBackupStatus')" = "Success" ]; then 102 | canary_waiting=false 103 | elif [ "$(echo $canary_volume | jq -r '.LastBackupStatus')" = "Failed" ]; then 104 | echo $canary_volume | jq -r '.Logs' 105 | canary_waiting=false 106 | elif [ "$(echo $canary_volume | jq -r '.LastBackupStatus')" = "Unknown" ]; then 107 | echo "Volume already backed up, the remote repository may not have been cleaned up." 108 | canary_waiting=false 109 | test_valid=false 110 | fi 111 | mysql_volume=$(curl -s -H "Authorization: Bearer toto" http://${INTERNAL_IP}:8182/volumes | jq -r '.[] | select(.Name | contains("mysql"))') 112 | if [ "$(echo $mysql_volume | jq -r '.LastBackupStatus')" = "Success" ]; then 113 | mysql_waiting=false 114 | elif [ "$(echo $mysql_volume | jq -r '.LastBackupStatus')" = "Failed" ]; then 115 | echo $mysql_volume | jq -r '.Logs' 116 | mysql_waiting=false 117 | elif [ "$(echo $mysql_volume | jq -r '.LastBackupStatus')" = "Unknown" ]; then 118 | echo "Volume already backed up, the remote repository may not have been cleaned up."
119 | mysql_waiting=false 120 | test_valid=false 121 | fi 122 | postgres_volume=$(curl -s -H "Authorization: Bearer toto" http://${INTERNAL_IP}:8182/volumes | jq -r '.[] | select(.Name | contains("postgres"))') 123 | if [ "$(echo $postgres_volume | jq -r '.LastBackupStatus')" = "Success" ]; then 124 | postgres_waiting=false 125 | elif [ "$(echo $postgres_volume | jq -r '.LastBackupStatus')" = "Failed" ]; then 126 | echo $postgres_volume | jq -r '.Logs' 127 | postgres_waiting=false 128 | elif [ "$(echo $postgres_volume | jq -r '.LastBackupStatus')" = "Unknown" ]; then 129 | echo "Volume already backed up, the remote repository may not have been cleaned up." 130 | postgres_waiting=false 131 | test_valid=false 132 | fi 133 | 134 | sleep 10 135 | done 136 | 137 | rancher logs bivac/bivac 138 | 139 | # Canaries 140 | dump=$(docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/testing/canary dump latest /data/canary) 141 | dump=${dump%$'\r'} 142 | if [[ "$dump" != "$canary" ]]; then 143 | echo -e "\e[31m[-] Basic : Cattle + Restic (canary short)\e[39m" 144 | echo $dump 145 | docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/testing/canary ls latest 146 | test_valid=false 147 | fi 148 | 149 | dump=$(docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/testing/canary dump latest /data/foo/bar/canary) 150 | dump=${dump%$'\r'} 151 | if [[ "$dump" != "$canary" ]]; then 152 | echo -e "\e[31m[-] Basic : Cattle + Restic (canary long)\e[39m" 153 | echo $dump 154 | docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/testing/canary ls latest 155 | test_valid=false 156 | fi 157 | 158 | # MySQL 159 | dump=$(docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/testing/mysql dump latest /var/lib/mysql/backups/all.sql) 160 | dump=${dump%$'\r'} 161 | if [[ $dump != *"Dump completed"* ]]; then 162 | echo -e "\e[31m[-] Basic : Cattle + Restic (mysql)\e[39m" 163 | echo $dump 164 | docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/testing/mysql ls latest 165 | test_valid=false 166 | fi 167 | 168 | # PostgreSQL 169 | dump=$(docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r s3:http://${INTERNAL_IP}:9000/bivac-testing/testing/postgres dump latest /var/lib/postgresql/data/backups/all.sql) 170 | dump=${dump%$'\r'} 171 | if [[ $dump != *"dump complete"* ]]; then 172 | echo -e "\e[31m[-] Basic : Cattle + Restic (postgresql)\e[39m" 173 | echo $dump 174 | docker run -it --rm -e RESTIC_PASSWORD=toto -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY restic/restic:latest -q -r 
s3:http://${INTERNAL_IP}:9000/bivac-testing/testing/postgres ls latest 175 | test_valid=false 176 | fi 177 | 178 | # Clean up environment 179 | echo "[*] Cleaning up environment..." 180 | rancher rm $services_stack 181 | rancher rm $bivac_stack 182 | rm /tmp/bivac-docker-compose.yml 183 | rm /tmp/services-docker-compose.yml 184 | docker pull minio/mc 185 | docker run --rm -e MC_HOST_minio=http://OBQZY3DV6VOEZ9PG6NIM:7e88XeX0j3YdB6b1o0zU2GhG0dX6tFMy3Haty@${INTERNAL_IP}:9000 minio/mc rb --force minio/bivac-testing 186 | docker run --rm -e MC_HOST_minio=http://OBQZY3DV6VOEZ9PG6NIM:7e88XeX0j3YdB6b1o0zU2GhG0dX6tFMy3Haty@${INTERNAL_IP}:9000 minio/mc mb minio/bivac-testing 187 | 188 | 189 | if [ "$test_valid" = true ]; then 190 | echo -e "\e[32m[+] Basic : Cattle + Restic\e[39m" 191 | else 192 | echo -e "\e[31m[-] Basic : Cattle + Restic\e[39m" 193 | exit 1 194 | fi 195 | 196 | -------------------------------------------------------------------------------- /test/integration/docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.3' 2 | services: 3 | minio: 4 | image: minio/minio 5 | environment: 6 | MINIO_ACCESS_KEY: the_access_key 7 | MINIO_SECRET_KEY: the_secret_key 8 | command: server /data/minio 9 | ports: 10 | - "9000:9000" 11 | 12 | bivac: 13 | image: bivac-testing 14 | command: manager 15 | ports: 16 | - "8182:8182" 17 | volumes: 18 | - "/var/run/docker.sock:/var/run/docker.sock:ro" 19 | - ./:/data 20 | environment: 21 | BIVAC_LOG_LEVEL: debug 22 | AWS_ACCESS_KEY_ID: the_access_key 23 | AWS_SECRET_ACCESS_KEY: the_secret_key 24 | BIVAC_TARGET_URL: ${BIVAC_TARGET_URL} 25 | RESTIC_PASSWORD: foo 26 | BIVAC_SERVER_PSK: foo 27 | BIVAC_AGENT_IMAGE: bivac-testing 28 | 29 | raw: 30 | image: ubuntu 31 | command: tail -F anything 32 | volumes: 33 | - raw_data:/data 34 | restart: always 35 | 36 | mysql: 37 | image: mysql 38 | volumes: 39 | - mysql_data:/var/lib/mysql 40 | restart: always 41 | environment: 42 | MYSQL_ROOT_PASSWORD: rootpassword 43 | MYSQL_DATABASE: bivac 44 | 45 | postgres: 46 | image: postgres 47 | volumes: 48 | - postgres_data:/var/lib/postgresql/data 49 | restart: always 50 | environment: 51 | POSTGRES_PASSWORD: bivac 52 | POSTGRES_DB: bivac 53 | 54 | restic: 55 | image: restic/restic 56 | environment: 57 | RESTIC_PASSWORD: foo 58 | AWS_ACCESS_KEY_ID: the_access_key 59 | AWS_SECRET_ACCESS_KEY: the_secret_key 60 | 61 | volumes: 62 | mysql_data: {} 63 | postgres_data: {} 64 | raw_data: {} 65 | -------------------------------------------------------------------------------- /test/integration/docker/mysql_seed.sql: -------------------------------------------------------------------------------- 1 | # 2 | # TABLE STRUCTURE FOR: authors 3 | # 4 | 5 | DROP TABLE IF EXISTS `authors`; 6 | 7 | CREATE TABLE `authors` ( 8 | `id` int(11) NOT NULL AUTO_INCREMENT, 9 | `first_name` varchar(50) COLLATE utf8_unicode_ci NOT NULL, 10 | `last_name` varchar(50) COLLATE utf8_unicode_ci NOT NULL, 11 | `email` varchar(100) COLLATE utf8_unicode_ci NOT NULL, 12 | `birthdate` date NOT NULL, 13 | `added` timestamp NOT NULL DEFAULT current_timestamp(), 14 | PRIMARY KEY (`id`), 15 | UNIQUE KEY `email` (`email`) 16 | ) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; 17 | 18 | INSERT INTO `authors` (`id`, `first_name`, `last_name`, `email`, `birthdate`, `added`) VALUES (1, 'Gardner', 'Feest', 'uschuster@example.com', '2011-03-29', '1971-01-19 20:22:59'); 19 | INSERT INTO `authors` (`id`, `first_name`, `last_name`,
`email`, `birthdate`, `added`) VALUES (2, 'Annie', 'Boyer', 'oma33@example.com', '2006-11-25', '2007-11-21 12:42:49'); 20 | INSERT INTO `authors` (`id`, `first_name`, `last_name`, `email`, `birthdate`, `added`) VALUES (3, 'Karson', 'Kihn', 'bertrand.parisian@example.net', '1992-02-26', '1991-04-15 17:17:49'); 21 | INSERT INTO `authors` (`id`, `first_name`, `last_name`, `email`, `birthdate`, `added`) VALUES (4, 'Karlee', 'Gulgowski', 'justus45@example.org', '1995-09-24', '1973-03-28 03:20:43'); 22 | 23 | 24 | # 25 | # TABLE STRUCTURE FOR: posts 26 | # 27 | 28 | DROP TABLE IF EXISTS `posts`; 29 | 30 | CREATE TABLE `posts` ( 31 | `id` int(11) NOT NULL AUTO_INCREMENT, 32 | `author_id` int(11) NOT NULL, 33 | `title` varchar(255) COLLATE utf8_unicode_ci NOT NULL, 34 | `description` varchar(500) COLLATE utf8_unicode_ci NOT NULL, 35 | `content` text COLLATE utf8_unicode_ci NOT NULL, 36 | `date` date NOT NULL, 37 | PRIMARY KEY (`id`) 38 | ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; 39 | 40 | -------------------------------------------------------------------------------- /test/integration/docker/postgres_seed.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE users( 2 | id SERIAL PRIMARY KEY, 3 | email VARCHAR(40) NOT NULL UNIQUE 4 | ); 5 | INSERT INTO users(email) 6 | SELECT 7 | 'user_' || seq || '@' || ( 8 | CASE (RANDOM() * 2)::INT 9 | WHEN 0 THEN 'gmail' 10 | WHEN 1 THEN 'hotmail' 11 | WHEN 2 THEN 'yahoo' 12 | END 13 | ) || '.com' AS email 14 | FROM GENERATE_SERIES(1, 10) seq; 15 | -------------------------------------------------------------------------------- /test/integration/docker/tests/mysql: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | set -e 5 | 6 | HOSTNAME=$(hostname) 7 | INTERNAL_IP=$(ip -4 addr show docker0 | grep -Po 'inet \K[\d.]+') 8 | 9 | echo "BIVAC_TARGET_URL=s3:http://${INTERNAL_IP}:9000/testing" > .env 10 | 11 | docker-compose up -d mysql minio 12 | 13 | sleep 30 14 | 15 | docker-compose exec -T mysql mysql -prootpassword bivac < mysql_seed.sql 16 | 17 | docker-compose up -d bivac 18 | 19 | docker-compose exec -T bivac bivac backup docker_mysql_data 20 | 21 | docker-compose exec -T bivac bash -c "restic -q -r s3:http://${INTERNAL_IP}:9000/testing/${HOSTNAME}/docker_mysql_data/ dump latest /var/lib/docker/volumes/docker_mysql_data/_data/backups/all.sql > /data/mysql_dump.sql" 22 | 23 | docker-compose down -v 24 | 25 | docker-compose up -d mysql 26 | 27 | sleep 30 28 | 29 | docker-compose exec -T mysql mysql -prootpassword bivac < mysql_dump.sql 30 | 31 | docker-compose exec -T mysql mysql -prootpassword bivac -e "select * from authors" 32 | 33 | rm -f mysql_dump.sql 34 | rm -f .env 35 | 36 | docker-compose down -v 37 | -------------------------------------------------------------------------------- /test/integration/docker/tests/postgres: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | set -e 5 | 6 | HOSTNAME=$(hostname) 7 | INTERNAL_IP=$(ip -4 addr show docker0 | grep -Po 'inet \K[\d.]+') 8 | 9 | echo "BIVAC_TARGET_URL=s3:http://${INTERNAL_IP}:9000/testing" > .env 10 | 11 | docker-compose up -d postgres minio 12 | 13 | sleep 30 14 | 15 | docker-compose exec -T postgres psql -U postgres bivac < postgres_seed.sql 16 | 17 | docker-compose up -d bivac 18 | 19 | docker-compose exec -T bivac bivac backup docker_postgres_data 20 | 21 | docker-compose run restic -q -r 
s3:http://${INTERNAL_IP}:9000/testing/${HOSTNAME}/docker_postgres_data/ dump latest /var/lib/docker/volumes/docker_postgres_data/_data/backups/all.sql > postgres_dump.sql 22 | 23 | docker-compose exec -T bivac bash -c "restic -q -r s3:http://${INTERNAL_IP}:9000/testing/${HOSTNAME}/docker_postgres_data/ dump latest /var/lib/docker/volumes/docker_postgres_data/_data/backups/all.sql > /data/postgres_dump.sql" 24 | 25 | docker-compose down -v 26 | 27 | docker-compose up -d postgres 28 | 29 | sleep 30 30 | 31 | docker-compose exec -T postgres psql -U postgres bivac < postgres_dump.sql 32 | 33 | docker-compose exec -T postgres psql -U postgres bivac -c "select * from users" 34 | 35 | rm -f postgres_dump.sql 36 | rm -f .env 37 | 38 | docker-compose down -v 39 | -------------------------------------------------------------------------------- /test/integration/docker/tests/raw: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | set -e 5 | 6 | HOSTNAME=$(hostname) 7 | INTERNAL_IP=$(ip -4 addr show docker0 | grep -Po 'inet \K[\d.]+') 8 | 9 | echo "BIVAC_TARGET_URL=s3:http://${INTERNAL_IP}:9000/testing" > .env 10 | 11 | docker-compose up -d raw minio 12 | 13 | docker-compose exec -T raw bash -c "echo 'foo' > /data/foo" 14 | docker-compose exec -T raw bash -c "mkdir -p /data/subdir && echo 'bar' > /data/subdir/bar" 15 | 16 | docker-compose up -d bivac 17 | 18 | docker-compose exec -T bivac bivac backup docker_raw_data 19 | 20 | TEST_1=$(docker-compose exec -T bivac bash -c "restic -q -r s3:http://${INTERNAL_IP}:9000/testing/${HOSTNAME}/docker_raw_data/ dump latest /var/lib/docker/volumes/docker_raw_data/_data/foo") 21 | TEST_2=$(docker-compose exec -T bivac bash -c "restic -q -r s3:http://${INTERNAL_IP}:9000/testing/${HOSTNAME}/docker_raw_data/ dump latest /var/lib/docker/volumes/docker_raw_data/_data/subdir/bar") 22 | 23 | if [ "${TEST_1}" != "foo" ]; then 24 | echo "${TEST_1} != \"foo\"." 25 | exit 1 26 | fi 27 | if [ "${TEST_2}" != "bar" ]; then 28 | echo "${TEST_2} != \"bar\"." 29 | exit 1 30 | fi 31 | 32 | docker-compose down -v 33 | 34 | rm -f .env 35 | --------------------------------------------------------------------------------
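As a closing reference, here is a minimal sketch of how the metrics lifecycle defined in pkg/volume/volume.go above is meant to be driven. Only SetupMetrics, CleanupMetrics and the Metrics gauges come from the source shown; the volume values, the port and the promhttp endpoint are illustrative assumptions, not code from the repository.

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/camptocamp/bivac/pkg/volume"
)

func main() {
	// Hypothetical volume; in Bivac these are discovered by the orchestrator.
	v := &volume.Volume{ID: "abc123", Name: "postgres_data", Hostname: "node1"}

	// SetupMetrics registers the four bivac_* gauges with the default
	// Prometheus registry via promauto.
	v.SetupMetrics()

	// After each backup run, the caller records its outcome on the gauges.
	v.Metrics.LastBackupDate.Set(float64(time.Now().Unix()))
	v.Metrics.LastBackupStatus.Set(0) // last backup exit code; 0 means success

	// CleanupMetrics unregisters the gauges when the volume disappears, so
	// stale series stop being exported (deferred here only for illustration).
	defer v.CleanupMetrics()

	// Serve the default registry over HTTP; port and path are illustrative.
	http.Handle("/metrics", promhttp.Handler())
	if err := http.ListenAndServe(":8182", nil); err != nil {
		log.Println(err)
	}
}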