├── .github └── dependabot.yml ├── .gitignore ├── .golangci.yml ├── .travis.yml ├── LICENSE ├── Makefile ├── README.md ├── backends ├── aws_s3_backend.go ├── aws_s3_backend_test.go ├── azure_backend.go ├── azure_backend_test.go ├── backblaze_b2_backend.go ├── backblaze_b2_backend_test.go ├── backends.go ├── backends_test.go ├── delete_backend.go ├── delete_backend_test.go ├── doc.go ├── file_backend.go ├── file_backend_test.go ├── gcs_backend.go ├── gcs_backend_test.go ├── ssh_backend.go └── ssh_backend_test.go ├── backup ├── backup.go ├── backup_test.go ├── clean.go ├── doc.go ├── list.go ├── restore.go └── sync.go ├── cmd ├── clean.go ├── doc.go ├── list.go ├── receive.go ├── root.go ├── send.go └── version.go ├── config ├── config.go └── version.go ├── files ├── doc.go ├── jobinfo.go └── volumeinfo.go ├── go.mod ├── go.sum ├── integration_test.go ├── integration_test.sh ├── log └── log.go ├── main.go ├── pgp └── pgp.go ├── travis-setup.sh ├── travis-teardown.sh └── zfs ├── doc.go └── zfs.go /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: gomod 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | open-pull-requests-limit: 10 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.dll 4 | *.so 5 | *.dylib 6 | zfsbackup* 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 15 | .glide/ 16 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | linters-settings: 2 | 
dupl: 3 | threshold: 100 4 | funlen: 5 | lines: 100 6 | statements: 50 7 | goconst: 8 | min-len: 2 9 | min-occurrences: 3 10 | gocritic: 11 | enabled-tags: 12 | - diagnostic 13 | - experimental 14 | - opinionated 15 | - performance 16 | - style 17 | gocyclo: 18 | min-complexity: 15 19 | goimports: 20 | local-prefixes: github.com/someone1/zfsbackup-go 21 | golint: 22 | min-confidence: 0 23 | gomnd: 24 | settings: 25 | mnd: 26 | # don't include the "operation" and "assign" 27 | checks: argument,case,condition,return 28 | govet: 29 | check-shadowing: true 30 | lll: 31 | line-length: 140 32 | maligned: 33 | suggest-new: true 34 | misspell: 35 | locale: US 36 | 37 | linters: 38 | # please, do not use `enable-all`: it's deprecated and will be removed soon. 39 | # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint 40 | disable-all: true 41 | enable: 42 | - bodyclose 43 | - deadcode 44 | - depguard 45 | - dogsled 46 | - dupl 47 | - errcheck 48 | - funlen 49 | - goconst 50 | - gocritic 51 | - gocyclo 52 | - gofmt 53 | - goimports 54 | - golint 55 | - goprintffuncname 56 | - gosec 57 | - gosimple 58 | - govet 59 | - ineffassign 60 | - interfacer 61 | - lll 62 | - misspell 63 | - nakedret 64 | - rowserrcheck 65 | - scopelint 66 | - staticcheck 67 | - structcheck 68 | - stylecheck 69 | - typecheck 70 | - unconvert 71 | - unparam 72 | - unused 73 | - varcheck 74 | - whitespace 75 | 76 | # don't enable: 77 | # - gochecknoglobals 78 | # - gocognit 79 | # - godox 80 | # - maligned 81 | # - prealloc 82 | # - gochecknoinits 83 | # - gomnd 84 | 85 | issues: 86 | # Excluding configuration per-path, per-linter, per-text and per-source 87 | exclude-rules: 88 | - path: _test\.go 89 | linters: 90 | - lll 91 | - funlen 92 | - gocyclo 93 | 94 | 95 | # golangci.com configuration 96 | # https://github.com/golangci/golangci/wiki/Configuration 97 | service: 98 | golangci-lint-version: 1.24.x # use the fixed version to not introduce new linters 
unexpectedly -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | sudo: required 4 | dist: bionic 5 | cache: 6 | directories: 7 | - $HOME/.ccache 8 | - $HOME/zfs 9 | 10 | services: 11 | - docker 12 | 13 | go: 14 | - 1.13.x 15 | - 1.14.x 16 | 17 | env: 18 | global: 19 | - AZURE_CUSTOM_ENDPOINT=http://127.0.0.1:10000/devstoreaccount1 20 | - AWS_S3_CUSTOM_ENDPOINT=http://127.0.0.1:9000 21 | - AWS_REGION=us-east-1 22 | - AWS_ACCESS_KEY_ID=minioadmin 23 | - AWS_SECRET_ACCESS_KEY=minioadmin 24 | - GCS_FAKE_SERVER="https://localhost:4443" 25 | matrix: 26 | - rel=0.7.13 27 | # - rel=0.8.3 # some feature preventing zpool creation (encryption)? 28 | 29 | before_install: 30 | - export MAKEFLAGS=-j$(($(grep -c '^processor' /proc/cpuinfo) * 2 + 1)) 31 | - export GOPATH=$HOME/go 32 | - export PATH=/usr/lib/ccache:$GOPATH/bin:$PATH 33 | - sudo apt-get -qq update 34 | - sudo apt-get install -y linux-headers-`uname -r` tree uuid-dev libattr1-dev libblkid-dev jq gnupg2 xz-utils gzip 35 | - mkdir -p $HOME/zfs 36 | - cd $HOME/zfs 37 | - "[[ -d spl-$rel.tar.gz ]] || curl -L https://github.com/openzfs/zfs/releases/download/zfs-$rel/spl-$rel.tar.gz | tar xz || true" 38 | - "[[ -d zfs-$rel.tar.gz ]] || curl -L https://github.com/openzfs/zfs/releases/download/zfs-$rel/zfs-$rel.tar.gz | tar xz" 39 | - (cd spl-$rel && ./configure --prefix=/usr && make && sudo make install) || true 40 | - (cd zfs-$rel && ./configure --prefix=/usr && make && sudo make install) 41 | - sudo modprobe zfs 42 | - cd $TRAVIS_BUILD_DIR 43 | - mkdir temp 44 | - export TMPDIR=$PWD/temp 45 | - export VDEV=$(mktemp) 46 | - chmod +x ./travis-setup.sh && ./travis-setup.sh 47 | 48 | install: 49 | - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $GOPATH/bin v1.24.0 50 | - GO111MODULE=off go get github.com/mattn/goveralls 51 | - 
GO111MODULE=off go get github.com/mitchellh/gox 52 | - make get 53 | 54 | script: 55 | - make build 56 | - chmod +x ./integration_test.sh && ./integration_test.sh 57 | - sudo -E TMPDIR=$TMPDIR $(which go) test -race -v -coverprofile=coverage.out -covermode=atomic -coverpkg=$(go list ./... | grep -v '/vendor/' | paste -sd, -) ./... 58 | - make lint 59 | 60 | after_success: 61 | - sudo -E $GOPATH/bin/goveralls -coverprofile=coverage.out -service=travis-ci 62 | 63 | after_script: 64 | - chmod +x ./travis-teardown.sh && ./travis-teardown.sh 65 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | TARGETS="freebsd/amd64 linux/amd64" 2 | COMMIT_HASH=`git rev-parse --short HEAD 2>/dev/null` 3 | 4 | check: lint test test-race 5 | 6 | fmt: 7 | @for d in $(DIRS) ; do \ 8 | if [ "`gofmt -s -w $$d/*.go | tee /dev/stderr`" ]; then \ 9 | echo "^ error formatting go files" && echo && exit 1; \ 10 | fi \ 11 | done 12 | 13 | lint: 14 | @if [ "`golangci-lint run | tee /dev/stderr`" ]; then \ 15 | echo "^ golangci-lint errors!" && echo && exit 1; \ 16 | fi 17 | 18 | get: 19 | go get -v -d -t ./... 20 | 21 | test: 22 | go test ./... 23 | 24 | test-race: 25 | go test -race ./... 26 | 27 | build: 28 | ${GOPATH}/bin/gox -ldflags="-w -s" -osarch=${TARGETS} 29 | 30 | build-dev: 31 | ${GOPATH}/bin/gox -osarch=${TARGETS} -output="{{.Dir}}_{{.OS}}_{{.Arch}}-${COMMIT_HASH}" 32 | -------------------------------------------------------------------------------- /backends/aws_s3_backend.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 
12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package backends 22 | 23 | import ( 24 | "context" 25 | "crypto/md5" //nolint:gosec // MD5 not used cryptographically here 26 | "encoding/base64" 27 | "encoding/hex" 28 | "fmt" 29 | "io" 30 | "os" 31 | "strconv" 32 | "strings" 33 | "sync" 34 | "time" 35 | 36 | "github.com/aws/aws-sdk-go/aws" 37 | "github.com/aws/aws-sdk-go/aws/awserr" 38 | "github.com/aws/aws-sdk-go/aws/request" 39 | "github.com/aws/aws-sdk-go/aws/session" 40 | "github.com/aws/aws-sdk-go/service/s3" 41 | "github.com/aws/aws-sdk-go/service/s3/s3iface" 42 | "github.com/aws/aws-sdk-go/service/s3/s3manager" 43 | "github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface" 44 | 45 | "github.com/someone1/zfsbackup-go/files" 46 | "github.com/someone1/zfsbackup-go/log" 47 | ) 48 | 49 | // AWSS3BackendPrefix is the URI prefix used for the AWSS3Backend. 50 | const AWSS3BackendPrefix = "s3" 51 | 52 | // AWSS3Backend integrates with Amazon Web Services' S3. 53 | type AWSS3Backend struct { 54 | conf *BackendConfig 55 | mutex sync.Mutex 56 | client s3iface.S3API 57 | uploader s3manageriface.UploaderAPI 58 | prefix string 59 | bucketName string 60 | } 61 | 62 | // Authenticate https://godoc.org/github.com/aws/aws-sdk-go/aws/session#hdr-Environment_Variables 63 | 64 | type logger struct{} 65 | 66 | func (l logger) Log(args ...interface{}) { 67 | log.AppLogger.Debugf("s3 backend:", args...) 
68 | } 69 | 70 | type withS3Client struct{ client s3iface.S3API } 71 | 72 | func (w withS3Client) Apply(b Backend) { 73 | if v, ok := b.(*AWSS3Backend); ok { 74 | v.client = w.client 75 | } 76 | } 77 | 78 | // WithS3Client will override an S3 backend's underlying API client with the one provided. 79 | // Primarily used to inject mock clients for testing. 80 | func WithS3Client(c s3iface.S3API) Option { 81 | return withS3Client{c} 82 | } 83 | 84 | type withS3Uploader struct{ uploader s3manageriface.UploaderAPI } 85 | 86 | func (w withS3Uploader) Apply(b Backend) { 87 | if v, ok := b.(*AWSS3Backend); ok { 88 | v.uploader = w.uploader 89 | } 90 | } 91 | 92 | // WithS3Uploader will override an S3 backend's underlying uploader client with the one provided. 93 | // Primarily used to inject mock clients for testing. 94 | func WithS3Uploader(c s3manageriface.UploaderAPI) Option { 95 | return withS3Uploader{c} 96 | } 97 | 98 | // Init will initialize the AWSS3Backend and verify the provided URI is valid/exists. 99 | func (a *AWSS3Backend) Init(ctx context.Context, conf *BackendConfig, opts ...Option) error { 100 | a.conf = conf 101 | 102 | cleanPrefix := strings.TrimPrefix(a.conf.TargetURI, AWSS3BackendPrefix+"://") 103 | if cleanPrefix == a.conf.TargetURI { 104 | return ErrInvalidURI 105 | } 106 | 107 | uriParts := strings.Split(cleanPrefix, "/") 108 | 109 | a.bucketName = uriParts[0] 110 | if len(uriParts) > 1 { 111 | a.prefix = strings.Join(uriParts[1:], "/") 112 | } 113 | 114 | for _, opt := range opts { 115 | opt.Apply(a) 116 | } 117 | 118 | if a.client == nil { 119 | awsconf := aws.NewConfig(). 120 | WithS3ForcePathStyle(true). 121 | WithEndpoint(os.Getenv("AWS_S3_CUSTOM_ENDPOINT")) 122 | if enableDebug, _ := strconv.ParseBool(os.Getenv("AWS_S3_ENABLE_DEBUG")); enableDebug { 123 | awsconf = awsconf.WithLogger(logger{}). 
124 | WithLogLevel(aws.LogDebugWithRequestRetries | aws.LogDebugWithRequestErrors) 125 | } 126 | 127 | sess, err := session.NewSession(awsconf) 128 | if err != nil { 129 | return err 130 | } 131 | 132 | a.client = s3.New(sess) 133 | } 134 | 135 | if a.uploader == nil { 136 | a.uploader = s3manager.NewUploaderWithClient(a.client, func(u *s3manager.Uploader) { 137 | u.Concurrency = conf.MaxParallelUploads 138 | }, func(u *s3manager.Uploader) { 139 | u.PartSize = int64(conf.UploadChunkSize) 140 | }) 141 | } 142 | 143 | listReq := &s3.ListObjectsV2Input{ 144 | Bucket: aws.String(a.bucketName), 145 | MaxKeys: aws.Int64(0), 146 | } 147 | 148 | _, err := a.client.ListObjectsV2WithContext(ctx, listReq) 149 | return err 150 | } 151 | 152 | func withContentMD5Header(md5sum string) request.Option { 153 | return func(ro *request.Request) { 154 | if md5sum != "" { 155 | ro.Handlers.Build.PushBack(func(r *request.Request) { 156 | r.HTTPRequest.Header.Set("Content-MD5", md5sum) 157 | }) 158 | } 159 | } 160 | } 161 | 162 | func withRequestLimiter(buffer chan bool) request.Option { 163 | return func(ro *request.Request) { 164 | ro.Handlers.Send.PushFront(func(r *request.Request) { 165 | buffer <- true 166 | }) 167 | 168 | ro.Handlers.Send.PushBack(func(r *request.Request) { 169 | <-buffer 170 | }) 171 | } 172 | } 173 | 174 | func withComputeMD5HashHandler(ro *request.Request) { 175 | ro.Handlers.Build.PushBack(func(r *request.Request) { 176 | reader := r.GetBody() 177 | if reader == nil { 178 | return 179 | } 180 | 181 | //nolint:gosec // MD5 not used cryptographically here 182 | md5Raw := md5.New() 183 | _, err := io.Copy(md5Raw, reader) 184 | if err != nil { 185 | r.Error = err 186 | return 187 | } 188 | _, r.Error = reader.Seek(0, io.SeekStart) 189 | b64md5 := base64.StdEncoding.EncodeToString(md5Raw.Sum(nil)) 190 | r.HTTPRequest.Header.Set("Content-MD5", b64md5) 191 | }) 192 | } 193 | 194 | type reader struct { 195 | r io.Reader 196 | } 197 | 198 | func (r *reader) Read(p 
[]byte) (int, error) { 199 | return r.r.Read(p) 200 | } 201 | 202 | // Upload will upload the provided volume to this AWSS3Backend's configured bucket+prefix 203 | // It utilizes multipart uploads to upload a single file in chunks concurrently. Impartial 204 | // uploads are cleaned up by the s3manager provided by the AWS Go SDK, but users can also 205 | // implement lifecycle rules, see: 206 | // https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config 207 | func (a *AWSS3Backend) Upload(ctx context.Context, vol *files.VolumeInfo) error { 208 | // We will achieve parallel upload by splitting a single upload into chunks 209 | // so don't let multiple calls to this function run in parallel. 210 | a.mutex.Lock() 211 | defer a.mutex.Unlock() 212 | 213 | key := a.prefix + vol.ObjectName 214 | var options []request.Option 215 | options = append(options, withRequestLimiter(a.conf.MaxParallelUploadBuffer)) 216 | var r io.Reader 217 | 218 | if !vol.IsUsingPipe() { 219 | r = vol 220 | if vol.Size < uint64(s3manager.MinUploadPartSize) { 221 | // It will not chunk the upload so we already know the md5 of the content 222 | md5Raw, merr := hex.DecodeString(vol.MD5Sum) 223 | if merr != nil { 224 | return merr 225 | } 226 | b64md5 := base64.StdEncoding.EncodeToString(md5Raw) 227 | options = append(options, withContentMD5Header(b64md5)) 228 | } else { 229 | options = append(options, withComputeMD5HashHandler) 230 | } 231 | } else { 232 | r = &reader{vol} // Remove the Seek interface since we are using a Pipe 233 | } 234 | 235 | // Do a MultiPart Upload - force the s3manager to compute each chunks md5 hash 236 | _, err := a.uploader.UploadWithContext(ctx, &s3manager.UploadInput{ 237 | Bucket: aws.String(a.bucketName), 238 | Key: aws.String(key), 239 | Body: r, 240 | StorageClass: getS3EnvironmentOverride("AWS_S3_STORAGE_CLASS"), 241 | }, s3manager.WithUploaderRequestOptions(options...)) 242 | 243 | if err != nil { 244 | 
log.AppLogger.Debugf("s3 backend: Error while uploading volume %s - %v", vol.ObjectName, err) 245 | } 246 | return err 247 | } 248 | 249 | // Delete will delete the given object from the configured bucket 250 | func (a *AWSS3Backend) Delete(ctx context.Context, key string) error { 251 | _, err := a.client.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{ 252 | Bucket: aws.String(a.bucketName), 253 | Key: aws.String(a.prefix + key), 254 | }) 255 | 256 | return err 257 | } 258 | 259 | // PreDownload will restore objects from Glacier as required. 260 | func (a *AWSS3Backend) PreDownload(ctx context.Context, keys []string) error { 261 | // First Let's check if any objects are on the GLACIER storage class 262 | toRestore := make([]string, 0, len(keys)) 263 | restoreTier := os.Getenv("AWS_S3_GLACIER_RESTORE_TIER") 264 | if restoreTier == "" { 265 | restoreTier = s3.TierBulk 266 | } 267 | var bytesToRestore int64 268 | log.AppLogger.Debugf("s3 backend: will use the %s restore tier when trying to restore from Glacier.", restoreTier) 269 | for _, key := range keys { 270 | key = a.prefix + key 271 | resp, err := a.client.HeadObjectWithContext(ctx, &s3.HeadObjectInput{ 272 | Bucket: aws.String(a.bucketName), 273 | Key: aws.String(key), 274 | }) 275 | if err != nil { 276 | return err 277 | } 278 | if resp.StorageClass != nil && *resp.StorageClass == s3.ObjectStorageClassGlacier { 279 | log.AppLogger.Debugf("s3 backend: key %s will be restored from the Glacier storage class.", key) 280 | bytesToRestore += *resp.ContentLength 281 | // Let's Start a restore 282 | toRestore = append(toRestore, key) 283 | _, rerr := a.client.RestoreObjectWithContext(ctx, &s3.RestoreObjectInput{ 284 | Bucket: aws.String(a.bucketName), 285 | Key: aws.String(key), 286 | RestoreRequest: &s3.RestoreRequest{ 287 | Days: aws.Int64(3), 288 | GlacierJobParameters: &s3.GlacierJobParameters{ 289 | Tier: aws.String(restoreTier), 290 | }, 291 | }, 292 | }) 293 | if rerr != nil { 294 | if aerr, ok := 
rerr.(awserr.Error); ok && aerr.Code() != "RestoreAlreadyInProgress" { 295 | log.AppLogger.Debugf("s3 backend: error trying to restore key %s - %s: %s", key, aerr.Code(), aerr.Message()) 296 | return rerr 297 | } 298 | } 299 | } 300 | } 301 | if len(toRestore) > 0 { 302 | log.AppLogger.Infof( 303 | "s3 backend: waiting for %d objects to restore from Glacier totaling %d bytes (this could take several hours)", 304 | len(toRestore), bytesToRestore, 305 | ) 306 | // Now wait for the objects to be restored 307 | backoffCount := 1 308 | for idx := 0; idx < len(toRestore); idx++ { 309 | key := toRestore[idx] 310 | resp, err := a.client.HeadObjectWithContext(ctx, &s3.HeadObjectInput{ 311 | Bucket: aws.String(a.bucketName), 312 | Key: aws.String(key), 313 | }) 314 | if err != nil { 315 | return err 316 | } 317 | if *resp.Restore == "ongoing-request=\"true\"" { 318 | time.Sleep(time.Duration(backoffCount) * time.Minute) 319 | idx-- 320 | backoffCount++ 321 | if backoffCount > 10 { 322 | backoffCount = 10 323 | } 324 | } else { 325 | backoffCount = 1 326 | log.AppLogger.Debugf("s3 backend: key %s restored.", key) 327 | } 328 | } 329 | } 330 | return nil 331 | } 332 | 333 | // Download will download the requseted object which can be read from the returned io.ReadCloser 334 | func (a *AWSS3Backend) Download(ctx context.Context, key string) (io.ReadCloser, error) { 335 | resp, err := a.client.GetObjectWithContext(ctx, &s3.GetObjectInput{ 336 | Bucket: aws.String(a.bucketName), 337 | Key: aws.String(a.prefix + key), 338 | }) 339 | if err != nil { 340 | return nil, err 341 | } 342 | return resp.Body, nil 343 | } 344 | 345 | // Close will release any resources used by the AWS S3 backend. 346 | func (a *AWSS3Backend) Close() error { 347 | a.client = nil 348 | a.uploader = nil 349 | return nil 350 | } 351 | 352 | // List will iterate through all objects in the configured AWS S3 bucket and return 353 | // a list of keys, filtering by the provided prefix. 
354 | func (a *AWSS3Backend) List(ctx context.Context, prefix string) ([]string, error) { 355 | resp, err := a.client.ListObjectsV2WithContext(ctx, &s3.ListObjectsV2Input{ 356 | Bucket: aws.String(a.bucketName), 357 | MaxKeys: aws.Int64(1000), 358 | Prefix: aws.String(a.prefix + prefix), 359 | }) 360 | if err != nil { 361 | return nil, err 362 | } 363 | 364 | l := make([]string, 0, 1000) 365 | for { 366 | for _, obj := range resp.Contents { 367 | l = append(l, strings.TrimPrefix(*obj.Key, a.prefix)) 368 | } 369 | 370 | if !*resp.IsTruncated { 371 | break 372 | } 373 | 374 | resp, err = a.client.ListObjectsV2WithContext(ctx, &s3.ListObjectsV2Input{ 375 | Bucket: aws.String(a.bucketName), 376 | MaxKeys: aws.Int64(1000), 377 | Prefix: aws.String(prefix), 378 | ContinuationToken: resp.NextContinuationToken, 379 | }) 380 | if err != nil { 381 | return nil, fmt.Errorf("s3 backend: could not list bucket due to error - %v", err) 382 | } 383 | } 384 | 385 | return l, nil 386 | } 387 | 388 | func getS3EnvironmentOverride(envVar string) *string { 389 | storageClass := os.Getenv(envVar) 390 | if storageClass != "" { 391 | return aws.String(storageClass) 392 | } 393 | return nil 394 | } 395 | -------------------------------------------------------------------------------- /backends/aws_s3_backend_test.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this 
permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package backends 22 | 23 | // AWS S3 + 3rd Party S3 compatible destination integration test 24 | // Expectation is that environment variables will be set properly to run tests with 25 | 26 | import ( 27 | "context" 28 | "os" 29 | "strconv" 30 | "testing" 31 | 32 | "github.com/aws/aws-sdk-go/aws" 33 | "github.com/aws/aws-sdk-go/aws/awserr" 34 | "github.com/aws/aws-sdk-go/aws/request" 35 | "github.com/aws/aws-sdk-go/aws/session" 36 | "github.com/aws/aws-sdk-go/service/s3" 37 | "github.com/aws/aws-sdk-go/service/s3/s3iface" 38 | "github.com/aws/aws-sdk-go/service/s3/s3manager" 39 | "github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface" 40 | 41 | "github.com/someone1/zfsbackup-go/files" 42 | ) 43 | 44 | type mockS3Client struct { 45 | s3iface.S3API 46 | 47 | headcallcount int 48 | } 49 | 50 | type mockS3Uploader struct { 51 | s3manageriface.UploaderAPI 52 | } 53 | 54 | var ( 55 | s3BadBucket = "badbucket" 56 | s3BadKey = "badkey" 57 | ) 58 | 59 | const ( 60 | s3TestBucketName = "s3bucketbackendtest" 61 | alreadyRestoring = "alreadyrestoring" 62 | ) 63 | 64 | func (m *mockS3Client) DeleteObjectWithContext( 65 | ctx aws.Context, 66 | in *s3.DeleteObjectInput, 67 | _ ...request.Option, 68 | ) (*s3.DeleteObjectOutput, error) { 69 | if *in.Key == s3BadKey { 70 | return nil, errTest 71 | } 72 | 73 | return nil, nil 74 | } 75 | 76 | func (m 
*mockS3Client) GetObjectWithContext(ctx aws.Context, in *s3.GetObjectInput, _ ...request.Option) (*s3.GetObjectOutput, error) { 77 | if *in.Key == s3BadKey { 78 | return nil, errTest 79 | } 80 | 81 | return &s3.GetObjectOutput{}, nil 82 | } 83 | 84 | func (m *mockS3Client) ListObjectsV2WithContext( 85 | ctx aws.Context, 86 | in *s3.ListObjectsV2Input, 87 | _ ...request.Option, 88 | ) (*s3.ListObjectsV2Output, error) { 89 | if *in.Bucket == s3BadBucket || (in.Prefix != nil && *in.Prefix == s3BadKey) { 90 | return nil, errTest 91 | } 92 | 93 | responses := make(map[string]*s3.ListObjectsV2Output) 94 | responses[""] = &s3.ListObjectsV2Output{ 95 | IsTruncated: aws.Bool(true), 96 | NextContinuationToken: aws.String("call2"), 97 | Contents: []*s3.Object{ 98 | { 99 | Key: aws.String("random"), 100 | }, 101 | { 102 | Key: aws.String("random"), 103 | }, 104 | { 105 | Key: aws.String("random"), 106 | }, 107 | }, 108 | } 109 | 110 | responses["call2"] = &s3.ListObjectsV2Output{ 111 | IsTruncated: aws.Bool(false), 112 | Contents: []*s3.Object{ 113 | { 114 | Key: aws.String("random"), 115 | }, 116 | }, 117 | } 118 | token := "" 119 | if in.ContinuationToken != nil { 120 | token = *in.ContinuationToken 121 | } 122 | 123 | if v, ok := responses[token]; ok { 124 | return v, nil 125 | } 126 | return nil, errTest 127 | } 128 | 129 | func (m *mockS3Client) HeadObjectWithContext(ctx aws.Context, in *s3.HeadObjectInput, _ ...request.Option) (*s3.HeadObjectOutput, error) { 130 | switch *in.Key { 131 | case s3BadKey: 132 | return nil, errTest 133 | case alreadyRestoring: 134 | m.headcallcount++ 135 | restoreString := "ongoing-request=\"true\"" 136 | if m.headcallcount >= 3 { 137 | restoreString = "" 138 | } 139 | return &s3.HeadObjectOutput{ 140 | StorageClass: aws.String(s3.ObjectStorageClassGlacier), 141 | ContentLength: aws.Int64(50), 142 | Restore: aws.String(restoreString), 143 | }, nil 144 | case "needsrestore": 145 | return &s3.HeadObjectOutput{ 146 | StorageClass: 
aws.String(s3.ObjectStorageClassGlacier), 147 | ContentLength: aws.Int64(50), 148 | Restore: aws.String("ongoing-request=\"false\", expiry-date=\"Wed, 07 Nov 2012 00:00:00 GMT\""), 149 | }, nil 150 | default: 151 | return &s3.HeadObjectOutput{ 152 | StorageClass: aws.String(s3.ObjectStorageClassStandard), 153 | ContentLength: aws.Int64(50), 154 | }, nil 155 | } 156 | } 157 | 158 | func (m *mockS3Client) RestoreObjectWithContext( 159 | ctx aws.Context, 160 | in *s3.RestoreObjectInput, 161 | _ ...request.Option, 162 | ) (*s3.RestoreObjectOutput, error) { 163 | switch *in.Key { 164 | case s3BadKey: 165 | return nil, errTest 166 | case alreadyRestoring: 167 | return nil, awserr.New("RestoreAlreadyInProgress", "", errTest) 168 | } 169 | return nil, nil 170 | } 171 | 172 | func (m *mockS3Uploader) UploadWithContext( 173 | ctx aws.Context, 174 | in *s3manager.UploadInput, 175 | _ ...func(*s3manager.Uploader), 176 | ) (*s3manager.UploadOutput, error) { 177 | if *in.Key == s3BadKey { 178 | return nil, errTest 179 | } 180 | return nil, nil 181 | } 182 | 183 | func TestS3GetBackendForURI(t *testing.T) { 184 | b, err := GetBackendForURI(AWSS3BackendPrefix + "://bucket_name") 185 | if err != nil { 186 | t.Errorf("Error while trying to get backend: %v", err) 187 | } 188 | if _, ok := b.(*AWSS3Backend); !ok { 189 | t.Errorf("Expected to get a backend of type AWSS3Backend, but did not.") 190 | } 191 | } 192 | 193 | func getOptions() []Option { 194 | // If we have a local minio target to test against, let's not use the mock clients 195 | if ok, _ := strconv.ParseBool(os.Getenv("S3_TEST_WITH_MINIO")); ok { 196 | return nil 197 | } 198 | return []Option{WithS3Client(&mockS3Client{}), WithS3Uploader(&mockS3Uploader{})} 199 | } 200 | 201 | func TestS3Init(t *testing.T) { 202 | testCases := []struct { 203 | conf *BackendConfig 204 | errTest errTestFunc 205 | prefix string 206 | }{ 207 | { 208 | conf: &BackendConfig{ 209 | TargetURI: AWSS3BackendPrefix + "://goodbucket", 210 | }, 211 | 
errTest: nilErrTest, 212 | }, 213 | { 214 | conf: &BackendConfig{ 215 | TargetURI: AWSS3BackendPrefix + "://" + s3BadBucket, 216 | }, 217 | errTest: errTestErrTest, 218 | }, 219 | { 220 | conf: &BackendConfig{ 221 | TargetURI: "nots3://goodbucket", 222 | }, 223 | errTest: errInvalidURIErrTest, 224 | }, 225 | { 226 | conf: &BackendConfig{ 227 | TargetURI: AWSS3BackendPrefix + "://goodbucket/prefix", 228 | }, 229 | errTest: nilErrTest, 230 | prefix: "prefix", 231 | }, 232 | } 233 | 234 | for idx, c := range testCases { 235 | b := &AWSS3Backend{} 236 | if err := b.Init(context.Background(), c.conf, getOptions()...); !c.errTest(err) { 237 | t.Errorf("%d: Did not get expected error, got %v instead", idx, err) 238 | } 239 | if b.prefix != c.prefix { 240 | t.Errorf("%d: Expected prefix %v, got %v", idx, c.prefix, b.prefix) 241 | } 242 | } 243 | } 244 | 245 | func TestS3Close(t *testing.T) { 246 | testCases := []struct { 247 | conf *BackendConfig 248 | errTest errTestFunc 249 | }{ 250 | { 251 | conf: &BackendConfig{ 252 | TargetURI: AWSS3BackendPrefix + "://goodbucket", 253 | }, 254 | errTest: nilErrTest, 255 | }, 256 | } 257 | 258 | for idx, c := range testCases { 259 | b := &AWSS3Backend{} 260 | if err := b.Init(context.Background(), c.conf, getOptions()...); err != nil { 261 | t.Errorf("%d: Did not get expected nil error on Init, got %v instead", idx, err) 262 | } 263 | 264 | if err := b.Close(); !c.errTest(err) { 265 | t.Errorf("%d: Did not get expected error, got %v instead", idx, err) 266 | } else if err == nil { 267 | if b.client != nil { 268 | t.Errorf("%d: expected client to be nil after closing, but its not.", idx) 269 | } 270 | if b.uploader != nil { 271 | t.Errorf("%d: expected uploader to be nil after closing, but its not.", idx) 272 | } 273 | } 274 | } 275 | } 276 | 277 | func TestS3Delete(t *testing.T) { 278 | testCases := []struct { 279 | conf *BackendConfig 280 | errTest errTestFunc 281 | key string 282 | }{ 283 | { 284 | conf: &BackendConfig{ 285 | 
TargetURI: AWSS3BackendPrefix + "://goodbucket", 286 | }, 287 | errTest: nilErrTest, 288 | key: "goodkey", 289 | }, 290 | { 291 | conf: &BackendConfig{ 292 | TargetURI: AWSS3BackendPrefix + "://goodbucket", 293 | }, 294 | errTest: errTestErrTest, 295 | key: s3BadKey, 296 | }, 297 | } 298 | 299 | for idx, c := range testCases { 300 | b := &AWSS3Backend{} 301 | if err := b.Init(context.Background(), c.conf, getOptions()...); err != nil { 302 | t.Errorf("%d: Did not get expected nil error on Init, got %v instead", idx, err) 303 | } 304 | if err := b.Delete(context.Background(), c.key); !c.errTest(err) { 305 | t.Errorf("%d: Did not get expected error, got %v instead", idx, err) 306 | } 307 | } 308 | } 309 | 310 | func TestS3Download(t *testing.T) { 311 | testCases := []struct { 312 | conf *BackendConfig 313 | errTest errTestFunc 314 | key string 315 | }{ 316 | { 317 | conf: &BackendConfig{ 318 | TargetURI: AWSS3BackendPrefix + "://goodbucket", 319 | }, 320 | errTest: nilErrTest, 321 | key: "goodkey", 322 | }, 323 | { 324 | conf: &BackendConfig{ 325 | TargetURI: AWSS3BackendPrefix + "://goodbucket", 326 | }, 327 | errTest: errTestErrTest, 328 | key: s3BadKey, 329 | }, 330 | } 331 | 332 | for idx, c := range testCases { 333 | b := &AWSS3Backend{} 334 | if err := b.Init(context.Background(), c.conf, getOptions()...); err != nil { 335 | t.Errorf("%d: Did not get expected nil error on Init, got %v instead", idx, err) 336 | } 337 | if _, err := b.Download(context.Background(), c.key); !c.errTest(err) { 338 | t.Errorf("%d: Did not get expected error, got %v instead", idx, err) 339 | } 340 | } 341 | } 342 | 343 | func TestS3Upload(t *testing.T) { 344 | _, goodvol, badvol, err := prepareTestVols() 345 | if err != nil { 346 | t.Fatalf("error preparing volume for testing - %v", err) 347 | } 348 | _, md5mismatchvol, _, err := prepareTestVols() 349 | if err != nil { 350 | t.Fatalf("error preparing volume for testing - %v", err) 351 | } 352 | md5mismatchvol.MD5Sum = 
"thisisn'thexdecodeable" 353 | md5mismatchvol.Size = uint64(s3manager.MinUploadPartSize - 1) 354 | 355 | testCases := []struct { 356 | conf *BackendConfig 357 | errTest errTestFunc 358 | key string 359 | vol *files.VolumeInfo 360 | }{ 361 | { 362 | conf: &BackendConfig{ 363 | TargetURI: AWSS3BackendPrefix + "://goodbucket", 364 | }, 365 | errTest: nilErrTest, 366 | key: "goodkey", 367 | vol: goodvol, 368 | }, 369 | { 370 | conf: &BackendConfig{ 371 | TargetURI: AWSS3BackendPrefix + "://goodbucket", 372 | }, 373 | errTest: errTestErrTest, 374 | key: s3BadKey, 375 | vol: badvol, 376 | }, 377 | { 378 | conf: &BackendConfig{ 379 | TargetURI: AWSS3BackendPrefix + "://goodbucket", 380 | }, 381 | errTest: invalidByteErrTest, 382 | key: "goodkey", 383 | vol: md5mismatchvol, 384 | }, 385 | } 386 | 387 | if err = goodvol.OpenVolume(); err != nil { 388 | t.Errorf("could not open good volume due to error %v", err) 389 | } 390 | 391 | for idx, c := range testCases { 392 | b := &AWSS3Backend{} 393 | if err := b.Init(context.Background(), c.conf, getOptions()...); err != nil { 394 | t.Errorf("%d: Did not get expected nil error on Init, got %v instead", idx, err) 395 | } 396 | c.vol.ObjectName = c.key 397 | if err := b.Upload(context.Background(), c.vol); !c.errTest(err) { 398 | t.Errorf("%d: Did not get expected error, got %v instead", idx, err) 399 | } 400 | } 401 | } 402 | 403 | func TestS3List(t *testing.T) { 404 | testCases := []struct { 405 | conf *BackendConfig 406 | errTest errTestFunc 407 | prefix string 408 | }{ 409 | { 410 | conf: &BackendConfig{ 411 | TargetURI: AWSS3BackendPrefix + "://goodbucket", 412 | }, 413 | errTest: nilErrTest, 414 | }, 415 | { 416 | conf: &BackendConfig{ 417 | TargetURI: AWSS3BackendPrefix + "://goodbucket", 418 | }, 419 | errTest: errTestErrTest, 420 | prefix: s3BadKey, 421 | }, 422 | } 423 | 424 | for idx, c := range testCases { 425 | b := &AWSS3Backend{} 426 | if err := b.Init(context.Background(), c.conf, getOptions()...); err != nil { 427 
| t.Errorf("%d: Did not get expected nil error on Init, got %v instead", idx, err) 428 | } 429 | if l, err := b.List(context.Background(), c.prefix); !c.errTest(err) { 430 | t.Errorf("%d: Did not get expected error, got %v instead", idx, err) 431 | } else if err == nil { 432 | if len(l) != 4 { 433 | t.Errorf("%d: Did not get expected amount of items in the list, expected 4 but got %d", idx, len(l)) 434 | } 435 | for _, key := range l { 436 | if key != "random" { 437 | t.Errorf("%d: Expected all entries to be of value random, got %s instead", idx, key) 438 | } 439 | } 440 | } 441 | } 442 | } 443 | 444 | func TestS3PreDownload(t *testing.T) { 445 | if testing.Short() { 446 | t.Skip("skipping test in short mode") 447 | } 448 | 449 | testCases := []struct { 450 | conf *BackendConfig 451 | errTest errTestFunc 452 | keys []string 453 | }{ 454 | { 455 | conf: &BackendConfig{ 456 | TargetURI: AWSS3BackendPrefix + "://goodbucket", 457 | }, 458 | errTest: nilErrTest, 459 | keys: []string{"good", "needsrestore", alreadyRestoring}, 460 | }, 461 | { 462 | conf: &BackendConfig{ 463 | TargetURI: AWSS3BackendPrefix + "://goodbucket", 464 | }, 465 | errTest: errTestErrTest, 466 | keys: []string{"good", s3BadKey, "good2"}, 467 | }, 468 | } 469 | 470 | for idx, c := range testCases { 471 | b := &AWSS3Backend{} 472 | if err := b.Init(context.Background(), c.conf, getOptions()...); err != nil { 473 | t.Errorf("%d: Did not get expected nil error on Init, got %v instead", idx, err) 474 | } 475 | if err := b.PreDownload(context.Background(), c.keys); !c.errTest(err) { 476 | t.Errorf("%d: Did not get expected error, got %v instead", idx, err) 477 | } 478 | } 479 | } 480 | 481 | func TestS3Backend(t *testing.T) { 482 | t.Parallel() 483 | 484 | if os.Getenv("AWS_S3_CUSTOM_ENDPOINT") == "" { 485 | t.Skip("No custom S3 Endpoint provided to test against") 486 | } 487 | 488 | b, err := GetBackendForURI(AWSS3BackendPrefix + "://bucket_name") 489 | if err != nil { 490 | t.Fatalf("Error while 
trying to get backend: %v", err) 491 | } 492 | 493 | ctx, cancel := context.WithCancel(context.Background()) 494 | defer cancel() 495 | 496 | awsconf := aws.NewConfig(). 497 | WithS3ForcePathStyle(true). 498 | WithEndpoint(os.Getenv("AWS_S3_CUSTOM_ENDPOINT")) 499 | 500 | sess, err := session.NewSession(awsconf) 501 | if err != nil { 502 | t.Fatalf("could not create AWS client due to error: %v", err) 503 | } 504 | client := s3.New(sess) 505 | _, err = client.CreateBucket(&s3.CreateBucketInput{ 506 | Bucket: aws.String(s3TestBucketName), 507 | }) 508 | if err != nil { 509 | if aerr, ok := err.(awserr.Error); ok { 510 | if aerr.Code() != "BucketAlreadyOwnedByYou" { 511 | t.Fatalf("could not create S3 bucket due to error: %v", err) 512 | } 513 | } 514 | } 515 | 516 | defer func() { 517 | if _, err := client.DeleteBucket(&s3.DeleteBucketInput{ 518 | Bucket: aws.String(s3TestBucketName), 519 | }); err != nil { 520 | t.Errorf("could not delete bucket - %v", err) 521 | } 522 | }() 523 | 524 | BackendTest(ctx, AWSS3BackendPrefix, s3TestBucketName, false, b)(t) 525 | } 526 | -------------------------------------------------------------------------------- /backends/azure_backend.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package backends

import (
	"bytes"
	"context"
	"crypto/md5" // nolint:gosec // MD5 not used for cryptographic purposes
	"encoding/base64"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"io"
	"net/url"
	"os"
	"strings"
	"sync"

	"github.com/Azure/azure-storage-blob-go/azblob"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"

	"github.com/someone1/zfsbackup-go/files"
	"github.com/someone1/zfsbackup-go/log"
)

// AzureBackendPrefix is the URI prefix used for the AzureBackend.
const (
	AzureBackendPrefix = "azure"
	blobAPIURL         = "blob.core.windows.net" // default public Azure Blob endpoint host suffix
)

// errContainerMismatch is returned by Init when the container named in the
// AZURE_SAS_URI does not match the container named in the target URI.
var errContainerMismatch = errors.New("container name in SAS URI is different than destination container provided")

// AzureBackend integrates with Microsoft's Azure Storage Services.
type AzureBackend struct {
	conf          *BackendConfig      // backend configuration captured during Init
	mutex         sync.Mutex          // serializes Upload calls; chunks of a single volume upload in parallel instead
	accountName   string              // from AZURE_ACCOUNT_NAME
	accountKey    string              // from AZURE_ACCOUNT_KEY
	containersas  string              // optional SAS URI from AZURE_SAS_URI; used in place of the shared key when set
	azureURL      string              // service endpoint; AZURE_CUSTOM_ENDPOINT or derived from the account name
	prefix        string              // object-name prefix taken from the path portion of the target URI
	containerName string              // container name taken from the first path segment of the target URI
	containerSvc  azblob.ContainerURL // container-scoped client used for all blob operations
}

// Init will initialize the AzureBackend and verify the provided URI is valid/exists.
67 | func (a *AzureBackend) Init(ctx context.Context, conf *BackendConfig, opts ...Option) error { 68 | a.conf = conf 69 | 70 | cleanPrefix := strings.TrimPrefix(a.conf.TargetURI, AzureBackendPrefix+"://") 71 | if cleanPrefix == a.conf.TargetURI { 72 | return ErrInvalidURI 73 | } 74 | 75 | a.accountName = os.Getenv("AZURE_ACCOUNT_NAME") 76 | a.accountKey = os.Getenv("AZURE_ACCOUNT_KEY") 77 | a.containersas = os.Getenv("AZURE_SAS_URI") 78 | a.azureURL = os.Getenv("AZURE_CUSTOM_ENDPOINT") 79 | if a.azureURL == "" { 80 | a.azureURL = fmt.Sprintf("https://%s.%s", a.accountName, blobAPIURL) 81 | } 82 | 83 | uriParts := strings.Split(cleanPrefix, "/") 84 | 85 | a.containerName = uriParts[0] 86 | if len(uriParts) > 1 { 87 | a.prefix = strings.Join(uriParts[1:], "/") 88 | } 89 | 90 | for _, opt := range opts { 91 | opt.Apply(a) 92 | } 93 | 94 | // No need for this logging 95 | pipelineOpts := azblob.PipelineOptions{ 96 | RequestLog: azblob.RequestLogOptions{ 97 | LogWarningIfTryOverThreshold: -1, 98 | }, 99 | } 100 | if a.containersas != "" { 101 | parsedsas, err := url.Parse(a.containersas) 102 | if err != nil { 103 | return errors.Wrap(err, "failed to parse SAS URI") 104 | } 105 | pipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), pipelineOpts) 106 | sasParts := azblob.NewBlobURLParts(*parsedsas) 107 | if sasParts.ContainerName != a.containerName { 108 | return errContainerMismatch 109 | } 110 | a.containerSvc = azblob.NewContainerURL(*parsedsas, pipeline) 111 | } else { 112 | credential, err := azblob.NewSharedKeyCredential(a.accountName, a.accountKey) 113 | if err != nil { 114 | return errors.Wrap(err, "failed to initilze Azure credential") 115 | } 116 | destURL, err := url.Parse(a.azureURL) 117 | if err != nil { 118 | return errors.Wrap(err, "failed to construct Azure API URL") 119 | } 120 | pipeline := azblob.NewPipeline(credential, pipelineOpts) 121 | svcURL := azblob.NewServiceURL(*destURL, pipeline) 122 | a.containerSvc = 
svcURL.NewContainerURL(a.containerName) 123 | } 124 | 125 | _, err := a.containerSvc.ListBlobsFlatSegment(ctx, azblob.Marker{}, azblob.ListBlobsSegmentOptions{MaxResults: 0}) 126 | return err 127 | } 128 | 129 | // Upload will upload the provided volume to this AzureBackend's configured container+prefix 130 | // It utilizes Azure Block Blob uploads to upload chunks of a single file concurrently. Partial 131 | // uploads are automatically garbage collected after one week. See: 132 | // https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-list 133 | func (a *AzureBackend) Upload(ctx context.Context, vol *files.VolumeInfo) error { 134 | // We will achieve parallel upload by splitting a single upload into chunks 135 | // so don't let multiple calls to this function run in parallel. 136 | a.mutex.Lock() 137 | defer a.mutex.Unlock() 138 | 139 | name := a.prefix + vol.ObjectName 140 | blobURL := a.containerSvc.NewBlockBlobURL(name) 141 | 142 | // We will PutBlock for chunks of UploadChunkSize and then finalize the block with a PutBlockList call 143 | // Staging blocks does not allow MD5 checksums: https://github.com/Azure/azure-storage-blob-go/issues/56 144 | 145 | // https://godoc.org/github.com/Azure/azure-storage-blob-go/2018-03-28/azblob#NewBlockBlobURL 146 | // These helper functions convert a binary block ID to a base-64 string and vice versa 147 | // NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the block must be the same length 148 | blockIDBinaryToBase64 := func(blockID []byte) string { return base64.StdEncoding.EncodeToString(blockID) } 149 | // These helper functions convert an int block ID to a base-64 string and vice versa 150 | blockIDIntToBase64 := func(blockID int) string { 151 | binaryBlockID := (&[4]byte{})[:] // All block IDs are 4 bytes long 152 | binary.LittleEndian.PutUint32(binaryBlockID, uint32(blockID)) 153 | return blockIDBinaryToBase64(binaryBlockID) 154 | } 155 | 156 | var ( 157 | blockIDs []string 158 | errg 
errgroup.Group 159 | blockid int 160 | readBytes uint64 161 | ) 162 | 163 | // Currently, we can only have a max of 50000 blocks, 100MiB each, but we don't expect chunks that large 164 | // Upload the object in chunks 165 | for { 166 | blockID := blockIDIntToBase64(blockid) 167 | blockIDs = append(blockIDs, blockID) 168 | blockid++ 169 | 170 | blockSize := uint64(a.conf.UploadChunkSize) 171 | if !vol.IsUsingPipe() && blockSize > vol.Size-readBytes { 172 | blockSize = vol.Size - readBytes 173 | } 174 | 175 | buf := make([]byte, blockSize) 176 | n, rerr := io.ReadFull(vol, buf) 177 | if rerr != nil && rerr != io.ErrUnexpectedEOF { 178 | return rerr 179 | } 180 | 181 | readBytes += uint64(n) 182 | if n > 0 { 183 | // nolint:gosec // MD5 not used for cryptopgraphic purposes 184 | md5sum := md5.Sum(buf[:n]) 185 | 186 | select { 187 | case <-ctx.Done(): 188 | return ctx.Err() 189 | case a.conf.MaxParallelUploadBuffer <- true: 190 | errg.Go(func() error { 191 | defer func() { <-a.conf.MaxParallelUploadBuffer }() 192 | _, err := blobURL.StageBlock(ctx, blockID, bytes.NewReader(buf[:n]), azblob.LeaseAccessConditions{}, md5sum[:]) 193 | return err 194 | }) 195 | } 196 | } 197 | 198 | if !vol.IsUsingPipe() && readBytes == vol.Size || rerr == io.ErrUnexpectedEOF { 199 | break 200 | } 201 | } 202 | 203 | err := errg.Wait() 204 | if err != nil { 205 | log.AppLogger.Debugf("azure backend: Error while uploading volume %s - %v", vol.ObjectName, err) 206 | return err 207 | } 208 | 209 | md5Raw, merr := hex.DecodeString(vol.MD5Sum) 210 | if merr != nil { 211 | return merr 212 | } 213 | 214 | // Finally, finalize the storage blob by giving Azure the block list order 215 | _, err = blobURL.CommitBlockList( 216 | ctx, blockIDs, azblob.BlobHTTPHeaders{ContentMD5: md5Raw}, azblob.Metadata{}, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, azblob.BlobTagsMap{}, 217 | ) 218 | if err != nil { 219 | log.AppLogger.Debugf("azure backend: Error while finalizing volume %s - %v", 
vol.ObjectName, err) 220 | } 221 | return err 222 | } 223 | 224 | // Delete will delete the given object from the configured container 225 | func (a *AzureBackend) Delete(ctx context.Context, name string) error { 226 | blobURL := a.containerSvc.NewBlobURL(a.prefix + name) 227 | _, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) 228 | return err 229 | } 230 | 231 | // PreDownload will do nothing for this backend. 232 | func (a *AzureBackend) PreDownload(ctx context.Context, keys []string) error { 233 | return nil 234 | } 235 | 236 | // Download will download the requseted object which can be read from the returned io.ReadCloser 237 | func (a *AzureBackend) Download(ctx context.Context, name string) (io.ReadCloser, error) { 238 | blobURL := a.containerSvc.NewBlobURL(a.prefix + name) 239 | resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false) 240 | if err != nil { 241 | return nil, err 242 | } 243 | return resp.Body(azblob.RetryReaderOptions{}), nil 244 | } 245 | 246 | // Close will release any resources used by the Azure backend. 247 | func (a *AzureBackend) Close() error { 248 | return nil 249 | } 250 | 251 | // List will iterate through all objects in the configured Azure Storage Container and return 252 | // a list of blob names, filtering by the provided prefix. 
253 | func (a *AzureBackend) List(ctx context.Context, prefix string) ([]string, error) { 254 | l := make([]string, 0, 5000) 255 | 256 | for marker := (azblob.Marker{}); marker.NotDone(); { 257 | resp, err := a.containerSvc.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{ 258 | Prefix: a.prefix + prefix, 259 | MaxResults: 5000, 260 | }) 261 | if err != nil { 262 | return nil, errors.Wrap(err, "error while listing blobs from container") 263 | } 264 | 265 | for idx := range resp.Segment.BlobItems { 266 | l = append(l, strings.TrimPrefix(resp.Segment.BlobItems[idx].Name, a.prefix)) 267 | } 268 | 269 | marker = resp.NextMarker 270 | } 271 | 272 | return l, nil 273 | } 274 | -------------------------------------------------------------------------------- /backends/azure_backend_test.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package backends 22 | 23 | import ( 24 | "context" 25 | "net/url" 26 | "os" 27 | "testing" 28 | "time" 29 | 30 | "github.com/Azure/azure-sdk-for-go/storage" 31 | "github.com/Azure/azure-storage-blob-go/azblob" 32 | ) 33 | 34 | func TestAzureGetBackendForURI(t *testing.T) { 35 | b, err := GetBackendForURI(AzureBackendPrefix + "://bucket_name") 36 | if err != nil { 37 | t.Errorf("Error while trying to get backend: %v", err) 38 | } 39 | if _, ok := b.(*AzureBackend); !ok { 40 | t.Errorf("Expected to get a backend of type AzureBackend, but did not.") 41 | } 42 | } 43 | 44 | const ( 45 | azureTestBucketName = "azuretestbucket" 46 | ) 47 | 48 | func TestAzureBackend(t *testing.T) { 49 | t.Parallel() 50 | 51 | if os.Getenv("AZURE_CUSTOM_ENDPOINT") == "" { 52 | t.Skip("No custom Azure Endpoint provided to test against") 53 | } 54 | err := os.Setenv("AZURE_ACCOUNT_NAME", storage.StorageEmulatorAccountName) 55 | if err != nil { 56 | t.Fatalf("could not set environmental variable due to error: %v", err) 57 | } 58 | err = os.Setenv("AZURE_ACCOUNT_KEY", storage.StorageEmulatorAccountKey) 59 | if err != nil { 60 | t.Fatalf("could not set environmental variable due to error: %v", err) 61 | } 62 | 63 | ctx, cancel := context.WithCancel(context.Background()) 64 | defer cancel() 65 | 66 | credential, err := azblob.NewSharedKeyCredential(storage.StorageEmulatorAccountName, storage.StorageEmulatorAccountKey) 67 | if err != nil { 68 | t.Fatalf("failed to parse SAS key: %v", err) 69 | } 70 | destURL, err := url.Parse(os.Getenv("AZURE_CUSTOM_ENDPOINT")) 71 | if err != nil { 72 | t.Fatalf("failed to construct Azure API URL: %v", err) 73 | } 74 | pipeline := azblob.NewPipeline(credential, 
azblob.PipelineOptions{}) 75 | svcURL := azblob.NewServiceURL(*destURL, pipeline) 76 | containerSvc := svcURL.NewContainerURL(azureTestBucketName) 77 | if _, err = containerSvc.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone); err != nil { 78 | t.Fatalf("error while creating bucket: %v", err) 79 | } 80 | 81 | defer func() { 82 | if _, err = containerSvc.Delete(ctx, azblob.ContainerAccessConditions{}); err != nil { 83 | t.Logf("could not delete container: %v", err) 84 | } 85 | }() 86 | 87 | b, err := GetBackendForURI(AzureBackendPrefix + "://bucket_name") 88 | if err != nil { 89 | t.Fatalf("Error while trying to get backend: %v", err) 90 | } 91 | 92 | BackendTest(ctx, AzureBackendPrefix, azureTestBucketName, false, b)(t) 93 | 94 | if err = os.Unsetenv("AZURE_ACCOUNT_KEY"); err != nil { 95 | t.Fatalf("could not unset env var - %v", err) 96 | } 97 | if err = os.Unsetenv("AZURE_ACCOUNT_NAME"); err != nil { 98 | t.Fatalf("could not unset env var - %v", err) 99 | } 100 | 101 | t.Run("SASURI", func(t *testing.T) { 102 | b, err = GetBackendForURI(AzureBackendPrefix + "://bucket_name") 103 | if err != nil { 104 | t.Fatalf("Error while trying to get backend: %v", err) 105 | } 106 | 107 | sasURI := generateSASURI(t, containerSvc.String(), azureTestBucketName, storage.StorageEmulatorAccountName, storage.StorageEmulatorAccountKey, 3600) 108 | 109 | if err = os.Setenv("AZURE_SAS_URI", sasURI); err != nil { 110 | t.Fatalf("could not set environmental variable due to error: %v", err) 111 | } 112 | 113 | // Different Container name 114 | conf := &BackendConfig{ 115 | TargetURI: AzureBackendPrefix + "://" + azureTestBucketName + "bad", 116 | UploadChunkSize: 8 * 1024 * 1024, 117 | MaxParallelUploads: 5, 118 | MaxParallelUploadBuffer: make(chan bool, 5), 119 | } 120 | err = b.Init(ctx, conf) 121 | if err != errContainerMismatch { 122 | t.Errorf("Expected container mismatch error initilazing AzureBackend w/ SAS URI, got %v", err) 123 | } 124 | 125 | BackendTest(ctx, 
AzureBackendPrefix, azureTestBucketName, true, b)(t) 126 | 127 | // Get rid of the env variable 128 | if err = os.Unsetenv("AZURE_SAS_URI"); err != nil { 129 | t.Fatalf("could not unset environmental variable due to error: %v", err) 130 | } 131 | }) 132 | } 133 | 134 | func generateSASURI(t *testing.T, svcURL, container, accountName, key string, expireSecs int) string { 135 | t.Helper() 136 | 137 | perms := azblob.ContainerSASPermissions{Add: true, Create: true, Delete: true, List: true, Read: true, Write: true} 138 | sigValues := azblob.BlobSASSignatureValues{ 139 | Version: "2019-07-07", 140 | Protocol: azblob.SASProtocolHTTPSandHTTP, 141 | Permissions: perms.String(), 142 | ExpiryTime: time.Now().Add(time.Second * time.Duration(expireSecs)).UTC(), 143 | ContainerName: container, 144 | } 145 | creds, err := azblob.NewSharedKeyCredential(accountName, key) 146 | if err != nil { 147 | t.Fatalf("could not generate creds: %v", err) 148 | } 149 | 150 | params, err := sigValues.NewSASQueryParameters(creds) 151 | if err != nil { 152 | t.Fatalf("could not generate params: %v", err) 153 | } 154 | 155 | u, _ := url.Parse(svcURL) 156 | u.RawQuery = params.Encode() 157 | 158 | return u.String() 159 | } 160 | -------------------------------------------------------------------------------- /backends/backblaze_b2_backend.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice 
and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package backends 22 | 23 | import ( 24 | "context" 25 | "io" 26 | "net/http" 27 | "os" 28 | "strings" 29 | "sync" 30 | 31 | "github.com/kurin/blazer/b2" 32 | 33 | "github.com/someone1/zfsbackup-go/files" 34 | "github.com/someone1/zfsbackup-go/log" 35 | ) 36 | 37 | // B2BackendPrefix is the URI prefix used for the B2Backend. 38 | const B2BackendPrefix = "b2" 39 | 40 | // B2Backend integrates with BackBlaze's B2 storage service. 41 | type B2Backend struct { 42 | conf *BackendConfig 43 | bucketCli *b2.Bucket 44 | mutex sync.Mutex 45 | prefix string 46 | bucketName string 47 | } 48 | 49 | type bufferedRT struct { 50 | bufChan chan bool 51 | } 52 | 53 | func (b bufferedRT) RoundTrip(r *http.Request) (*http.Response, error) { 54 | b.bufChan <- true 55 | defer func() { <-b.bufChan }() 56 | return http.DefaultTransport.RoundTrip(r) 57 | } 58 | 59 | // Init will initialize the B2Backend and verify the provided URI is valid/exists. 
60 | func (b *B2Backend) Init(ctx context.Context, conf *BackendConfig, opts ...Option) error { 61 | b.conf = conf 62 | 63 | cleanPrefix := strings.TrimPrefix(b.conf.TargetURI, B2BackendPrefix+"://") 64 | if cleanPrefix == b.conf.TargetURI { 65 | return ErrInvalidURI 66 | } 67 | 68 | accountID := os.Getenv("B2_ACCOUNT_ID") 69 | accountKey := os.Getenv("B2_ACCOUNT_KEY") 70 | 71 | uriParts := strings.Split(cleanPrefix, "/") 72 | 73 | b.bucketName = uriParts[0] 74 | if len(uriParts) > 1 { 75 | b.prefix = strings.Join(uriParts[1:], "/") 76 | } 77 | 78 | for _, opt := range opts { 79 | opt.Apply(b) 80 | } 81 | 82 | var cliopts []b2.ClientOption 83 | if conf.MaxParallelUploadBuffer != nil { 84 | cliopts = append(cliopts, b2.Transport(bufferedRT{b.conf.MaxParallelUploadBuffer})) 85 | } 86 | 87 | client, err := b2.NewClient(ctx, accountID, accountKey, cliopts...) 88 | if err != nil { 89 | return err 90 | } 91 | 92 | b.bucketCli, err = client.Bucket(ctx, b.bucketName) 93 | if err != nil { 94 | return err 95 | } 96 | 97 | // Poke the bucket to ensure it exists. 98 | iter := b.bucketCli.List(ctx, b2.ListPageSize(1)) 99 | iter.Next() 100 | return iter.Err() 101 | } 102 | 103 | // Upload will upload the provided volume to this B2Backend's configured bucket+prefix 104 | // It utilizes B2 Large File Upload API to upload chunks of a single file concurrently. Partial 105 | // uploads need to be manually cleaned, which the b2 Go client should take care of for us. 106 | func (b *B2Backend) Upload(ctx context.Context, vol *files.VolumeInfo) error { 107 | ctx, cancel := context.WithCancel(ctx) 108 | defer cancel() 109 | 110 | // We will be doing multipart uploads, no need to allow multiple calls of Upload to initiate new uploads. 
111 | b.mutex.Lock() 112 | defer b.mutex.Unlock() 113 | 114 | name := b.prefix + vol.ObjectName 115 | w := b.bucketCli.Object(name).NewWriter(ctx) 116 | 117 | w.ConcurrentUploads = b.conf.MaxParallelUploads 118 | w.ChunkSize = b.conf.UploadChunkSize 119 | 120 | sha1Opt := b2.WithAttrsOption(&b2.Attrs{SHA1: vol.SHA1Sum}) 121 | sha1Opt(w) 122 | b2.WithCancelOnError(func() context.Context { return ctx }, func(err error) { 123 | log.AppLogger.Warningf("b2 backend: Error canceling large file upload %s - %v", vol.ObjectName, err) 124 | }) 125 | 126 | if _, err := io.Copy(w, vol); err != nil { 127 | log.AppLogger.Debugf("b2 backend: Error while uploading volume %s - %v", vol.ObjectName, err) 128 | return err 129 | } 130 | 131 | return w.Close() 132 | } 133 | 134 | // Delete will delete the object with the given name from the configured bucket 135 | func (b *B2Backend) Delete(ctx context.Context, name string) error { 136 | return b.bucketCli.Object(b.prefix + name).Delete(ctx) 137 | } 138 | 139 | // PreDownload will do nothing for this backend. 140 | func (b *B2Backend) PreDownload(ctx context.Context, keys []string) error { 141 | return nil 142 | } 143 | 144 | // Download will download the requseted object which can be read from the returned io.ReadCloser 145 | func (b *B2Backend) Download(ctx context.Context, name string) (io.ReadCloser, error) { 146 | r := b.bucketCli.Object(b.prefix + name).NewReader(ctx) 147 | _, err := r.Read(make([]byte, 0)) 148 | return r, err 149 | } 150 | 151 | // Close will release any resources used by the B2 backend. 152 | func (b *B2Backend) Close() error { 153 | b.bucketCli = nil 154 | return nil 155 | } 156 | 157 | // List will iterate through all objects in the configured B2 bucket and return 158 | // a list of object names, filtering by the provided prefix. 
159 | func (b *B2Backend) List(ctx context.Context, prefix string) ([]string, error) { 160 | var l []string 161 | iter := b.bucketCli.List(ctx, b2.ListPrefix(b.prefix+prefix)) 162 | for iter.Next() { 163 | obj := iter.Object() 164 | l = append(l, strings.TrimPrefix(obj.Name(), b.prefix)) 165 | } 166 | if err := iter.Err(); err != nil { 167 | return nil, err 168 | } 169 | return l, nil 170 | } 171 | -------------------------------------------------------------------------------- /backends/backblaze_b2_backend_test.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 
20 | 21 | package backends 22 | 23 | import ( 24 | "context" 25 | "os" 26 | "testing" 27 | 28 | "github.com/kurin/blazer/b2" 29 | ) 30 | 31 | func TestB2GetBackendForURI(t *testing.T) { 32 | b, err := GetBackendForURI(B2BackendPrefix + "://bucket_name") 33 | if err != nil { 34 | t.Errorf("Error while trying to get backend: %v", err) 35 | } 36 | if _, ok := b.(*B2Backend); !ok { 37 | t.Errorf("Expected to get a backend of type B2Backend, but did not.") 38 | } 39 | } 40 | 41 | func TestB2Backend(t *testing.T) { 42 | t.Parallel() 43 | 44 | if os.Getenv("B2_ACCOUNT_ID") == "" { 45 | t.Skip("No test B2 credentials provided to test against") 46 | } 47 | 48 | ctx, cancel := context.WithCancel(context.Background()) 49 | defer cancel() 50 | 51 | b2TestBucketName := "zfsbackup-go" 52 | 53 | // Setup Test Bucket 54 | client, err := b2.NewClient(ctx, os.Getenv("B2_ACCOUNT_ID"), os.Getenv("B2_ACCOUNT_KEY")) 55 | if err != nil { 56 | t.Fatal(err) 57 | } 58 | 59 | bucketCli, err := client.NewBucket(ctx, b2TestBucketName, nil) 60 | if err != nil { 61 | t.Fatalf("could not create bucket; %v", err) 62 | } 63 | 64 | defer func() { 65 | it := bucketCli.List(ctx) 66 | for { 67 | if !it.Next() { 68 | break 69 | } 70 | 71 | if err = it.Object().Delete(ctx); err != nil { 72 | t.Errorf("could not delete object: %v", err) 73 | } 74 | } 75 | if it.Err() != nil { 76 | t.Errorf("could not list all files - %v", it.Err()) 77 | } 78 | if err = bucketCli.Delete(ctx); err != nil { 79 | t.Errorf("could not delete bucket: %v", err) 80 | } 81 | }() 82 | 83 | b, err := GetBackendForURI(B2BackendPrefix + "://bucket_name") 84 | if err != nil { 85 | t.Fatalf("Error while trying to get backend: %v", err) 86 | } 87 | 88 | BackendTest(ctx, B2BackendPrefix, b2TestBucketName, false, b)(t) 89 | } 90 | -------------------------------------------------------------------------------- /backends/backends.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 
Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package backends 22 | 23 | import ( 24 | "context" 25 | "errors" 26 | "io" 27 | "strings" 28 | "time" 29 | 30 | "github.com/someone1/zfsbackup-go/files" 31 | ) 32 | 33 | // Backend is an interface type that defines the functions and functionality required for different backend implementations. 34 | // nolint:lll // It's neater this way 35 | type Backend interface { 36 | Init(ctx context.Context, conf *BackendConfig, opts ...Option) error // Verifies settings required for backend are present and valid, does basic initialization of backend 37 | Upload(ctx context.Context, vol *files.VolumeInfo) error // Upload the volume provided 38 | List(ctx context.Context, prefix string) ([]string, error) // Lists all files in the backend, filtering by the provided prefix. 
39 | Close() error // Release any resources in use 40 | PreDownload(ctx context.Context, objects []string) error // PreDownload will prepare the provided files for download (think restoring from Glacier to S3) 41 | Download(ctx context.Context, filename string) (io.ReadCloser, error) // Download the requested file that can be read from the returned io.ReaderCloser 42 | Delete(ctx context.Context, filename string) error // Delete the file specified on the configured backend 43 | } 44 | 45 | // Option lets users inject functionality to specific backends 46 | type Option interface { 47 | Apply(Backend) 48 | } 49 | 50 | // BackendConfig holds values that relate to backend configurations 51 | type BackendConfig struct { 52 | MaxParallelUploadBuffer chan bool 53 | MaxParallelUploads int 54 | MaxBackoffTime time.Duration 55 | MaxRetryTime time.Duration 56 | TargetURI string 57 | UploadChunkSize int 58 | } 59 | 60 | var ( 61 | // ErrInvalidURI is returned when a backend determines that the provided URI is malformed/invalid. 62 | ErrInvalidURI = errors.New("backends: invalid URI provided to backend") 63 | // ErrInvalidPrefix is returned when a backend destination is provided with a URI prefix that isn't registered. 64 | ErrInvalidPrefix = errors.New("backends: the provided prefix does not exist") 65 | ) 66 | 67 | // GetBackendForURI will try and parse the URI for a matching backend to use. 
68 | func GetBackendForURI(uri string) (Backend, error) { 69 | prefix := strings.Split(uri, "://") 70 | if len(prefix) < 2 { 71 | return nil, ErrInvalidURI 72 | } 73 | 74 | switch prefix[0] { 75 | case DeleteBackendPrefix: 76 | return &DeleteBackend{}, nil 77 | case GoogleCloudStorageBackendPrefix: 78 | return &GoogleCloudStorageBackend{}, nil 79 | case AWSS3BackendPrefix: 80 | return &AWSS3Backend{}, nil 81 | case FileBackendPrefix: 82 | return &FileBackend{}, nil 83 | case AzureBackendPrefix: 84 | return &AzureBackend{}, nil 85 | case B2BackendPrefix: 86 | return &B2Backend{}, nil 87 | case SSHBackendPrefix: 88 | return &SSHBackend{}, nil 89 | default: 90 | return nil, ErrInvalidPrefix 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /backends/backends_test.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package backends 22 | 23 | import ( 24 | "bytes" 25 | "context" 26 | "crypto/rand" 27 | "encoding/hex" 28 | "errors" 29 | "io" 30 | "reflect" 31 | "strings" 32 | "testing" 33 | 34 | "github.com/someone1/zfsbackup-go/files" 35 | ) 36 | 37 | var ( 38 | errTest = errors.New("used for testing") 39 | ) 40 | 41 | type errTestFunc func(error) bool 42 | 43 | func nilErrTest(e error) bool { return e == nil } 44 | func errTestErrTest(e error) bool { return e == errTest } 45 | func errInvalidURIErrTest(e error) bool { return e == ErrInvalidURI } 46 | func invalidByteErrTest(e error) bool { 47 | _, ok := e.(hex.InvalidByteError) 48 | return ok 49 | } 50 | 51 | func prepareTestVols() (payload []byte, goodVol, badVol *files.VolumeInfo, err error) { 52 | payload = make([]byte, 10*1024*1024) 53 | if _, err = rand.Read(payload); err != nil { 54 | return 55 | } 56 | reader := bytes.NewReader(payload) 57 | goodVol, err = files.CreateSimpleVolume(context.Background(), false) 58 | if err != nil { 59 | return 60 | } 61 | _, err = io.Copy(goodVol, reader) 62 | if err != nil { 63 | return 64 | } 65 | err = goodVol.Close() 66 | if err != nil { 67 | return 68 | } 69 | goodVol.ObjectName = strings.Join([]string{"this", "is", "just", "a", "test"}, "-") + ".ext" 70 | 71 | badVol, err = files.CreateSimpleVolume(context.Background(), false) 72 | if err != nil { 73 | return 74 | } 75 | err = badVol.Close() 76 | if err != nil { 77 | return 78 | } 79 | badVol.ObjectName = strings.Join([]string{"this", "is", "just", "a", "badtest"}, "-") + ".ext" 80 | 81 | err = badVol.DeleteVolume() 82 | 83 | return payload, goodVol, badVol, err 84 | } 85 | 86 | func TestGetBackendForURI(t *testing.T) { 87 | _, err := 
GetBackendForURI("thiswon'texist://") 88 | if err != ErrInvalidPrefix { 89 | t.Errorf("Expecting err %v, got %v for non-existent prefix", ErrInvalidPrefix, err) 90 | } 91 | 92 | _, err = GetBackendForURI("thisisinvalid") 93 | if err != ErrInvalidURI { 94 | t.Errorf("Expecting err %v, got %v for invalid URI", ErrInvalidURI, err) 95 | } 96 | } 97 | 98 | func BackendTest(ctx context.Context, prefix, uri string, skipPrefix bool, b Backend, opts ...Option) func(*testing.T) { 99 | return func(t *testing.T) { 100 | testPayLoad, goodVol, badVol, perr := prepareTestVols() 101 | if perr != nil { 102 | t.Fatalf("Error while creating test volumes: %v", perr) 103 | } 104 | defer func() { 105 | if err := goodVol.DeleteVolume(); err != nil { 106 | t.Errorf("could not delete good vol - %v", err) 107 | } 108 | }() 109 | 110 | t.Run("Init", func(t *testing.T) { 111 | // Bad TargetURI 112 | conf := &BackendConfig{ 113 | TargetURI: "notvalid://" + uri, 114 | UploadChunkSize: 8 * 1024 * 1024, 115 | MaxParallelUploads: 5, 116 | MaxParallelUploadBuffer: make(chan bool, 5), 117 | } 118 | err := b.Init(ctx, conf) 119 | if err != ErrInvalidURI { 120 | t.Fatalf("Expected Invalid URI error, got %v", err) 121 | } 122 | 123 | // Good TargetURI 124 | conf = &BackendConfig{ 125 | TargetURI: prefix + "://" + uri, 126 | UploadChunkSize: 8 * 1024 * 1024, 127 | MaxParallelUploads: 5, 128 | MaxParallelUploadBuffer: make(chan bool, 5), 129 | } 130 | err = b.Init(ctx, conf, opts...) 131 | if err != nil { 132 | t.Fatalf("Issue initilazing backend: %v", err) 133 | } 134 | 135 | if !skipPrefix { 136 | // Good TargetURI with URI prefix 137 | uri += "/prefix/" 138 | conf = &BackendConfig{ 139 | TargetURI: prefix + "://" + uri, 140 | UploadChunkSize: 8 * 1024 * 1024, 141 | MaxParallelUploads: 5, 142 | MaxParallelUploadBuffer: make(chan bool, 5), 143 | } 144 | err = b.Init(ctx, conf, opts...) 
145 | if err != nil { 146 | t.Fatalf("Issue initilazing backend: %v", err) 147 | } 148 | } 149 | }) 150 | 151 | t.Run("Upload", func(t *testing.T) { 152 | err := goodVol.OpenVolume() 153 | if err != nil { 154 | t.Errorf("could not open good volume due to error %v", err) 155 | } 156 | defer goodVol.Close() 157 | err = b.Upload(ctx, goodVol) 158 | if err != nil { 159 | t.Fatalf("Issue uploading goodvol: %v", err) 160 | } 161 | 162 | err = b.Upload(ctx, badVol) 163 | if err == nil { 164 | t.Errorf("Expecting non-nil error uploading badvol, got nil instead.") 165 | } 166 | }) 167 | 168 | t.Run("List", func(t *testing.T) { 169 | names, err := b.List(ctx, "") 170 | if err != nil { 171 | t.Fatalf("Issue listing backend: %v", err) 172 | } 173 | 174 | if len(names) != 1 { 175 | t.Fatalf("Expecting exactly one name from list, got %d instead.", len(names)) 176 | } 177 | 178 | if names[0] != goodVol.ObjectName { 179 | t.Fatalf("Expecting name '%s', got '%s' instead", goodVol.ObjectName, names[0]) 180 | } 181 | 182 | names, err = b.List(ctx, "badprefix") 183 | if err != nil { 184 | t.Fatalf("Issue listing container: %v", err) 185 | } 186 | 187 | if len(names) != 0 { 188 | t.Fatalf("Expecting exactly zero names from list, got %d instead.", len(names)) 189 | } 190 | }) 191 | 192 | t.Run("PreDownload", func(t *testing.T) { 193 | err := b.PreDownload(ctx, []string{goodVol.ObjectName}) 194 | if err != nil { 195 | t.Fatalf("Issue calling PreDownload: %v", err) 196 | } 197 | }) 198 | 199 | t.Run("Download", func(t *testing.T) { 200 | r, err := b.Download(ctx, goodVol.ObjectName) 201 | if err != nil { 202 | t.Fatalf("Issue calling Download: %v", err) 203 | } 204 | defer r.Close() 205 | 206 | buf := bytes.NewBuffer(nil) 207 | _, err = io.Copy(buf, r) 208 | if err != nil { 209 | t.Fatalf("error reading: %v", err) 210 | } 211 | 212 | if !reflect.DeepEqual(testPayLoad, buf.Bytes()) { 213 | t.Fatalf("downloaded object does not equal expected payload") 214 | } 215 | 216 | _, err = 
b.Download(ctx, badVol.ObjectName) 217 | if err == nil { 218 | t.Fatalf("expecting non-nil response, got nil instead") 219 | } 220 | }) 221 | 222 | t.Run("Delete", func(t *testing.T) { 223 | err := b.Delete(ctx, goodVol.ObjectName) 224 | if err != nil { 225 | t.Errorf("Issue calling Delete: %v", err) 226 | } 227 | 228 | names, err := b.List(ctx, "") 229 | if err != nil { 230 | t.Errorf("Issue listing backend: %v", err) 231 | } 232 | 233 | if len(names) != 0 { 234 | t.Errorf("Expecting exactly zero names from list, got %d instead.", len(names)) 235 | } 236 | }) 237 | 238 | t.Run("Close", func(t *testing.T) { 239 | err := b.Close() 240 | if err != nil { 241 | t.Fatalf("Issue closing backend: %v", err) 242 | } 243 | }) 244 | } 245 | } 246 | -------------------------------------------------------------------------------- /backends/delete_backend.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package backends 22 | 23 | import ( 24 | "context" 25 | "errors" 26 | "io" 27 | 28 | "github.com/someone1/zfsbackup-go/files" 29 | "github.com/someone1/zfsbackup-go/log" 30 | ) 31 | 32 | // DeleteBackendPrefix is the URI prefix used for the DeleteBackend. 33 | const DeleteBackendPrefix = "delete" 34 | 35 | // DeleteBackend is a special Backend used to delete the files after they've been uploaded 36 | type DeleteBackend struct { 37 | conf *BackendConfig 38 | } 39 | 40 | // Init will initialize the DeleteBackend (aka do nothing) 41 | func (d *DeleteBackend) Init(ctx context.Context, conf *BackendConfig, opts ...Option) error { 42 | d.conf = conf 43 | return nil 44 | } 45 | 46 | // Delete should not be used with this backend 47 | func (d *DeleteBackend) Delete(ctx context.Context, filename string) error { 48 | return errors.New("delete backend: Delete is invalid for this backend") 49 | } 50 | 51 | // PreDownload should not be used with this backend 52 | func (d *DeleteBackend) PreDownload(ctx context.Context, objects []string) error { 53 | return errors.New("delete backend: PreDownload is invalid for this backend") 54 | } 55 | 56 | // Download should not be used with this backend 57 | func (d *DeleteBackend) Download(ctx context.Context, filename string) (io.ReadCloser, error) { 58 | return nil, errors.New("delete backend: Get is invalid for this backend") 59 | } 60 | 61 | // Close does nothing for this backend. 
62 | func (d *DeleteBackend) Close() error { 63 | return nil 64 | } 65 | 66 | // List should not be used with this backend 67 | func (d *DeleteBackend) List(ctx context.Context, prefix string) ([]string, error) { 68 | return nil, errors.New("delete backend: List is invalid for this backend") 69 | } 70 | 71 | // Upload will delete the provided volume, usually found in a temporary folder 72 | func (d *DeleteBackend) Upload(ctx context.Context, vol *files.VolumeInfo) error { 73 | if err := vol.DeleteVolume(); err != nil { 74 | log.AppLogger.Errorf("delete backend: could not delete volume %s due to error: %v", vol.ObjectName, err) 75 | return err 76 | } 77 | log.AppLogger.Debugf("delete backend: Deleted Volume %s", vol.ObjectName) 78 | 79 | return nil 80 | } 81 | -------------------------------------------------------------------------------- /backends/delete_backend_test.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package backends 22 | 23 | import ( 24 | "context" 25 | "os" 26 | "testing" 27 | "time" 28 | 29 | "github.com/someone1/zfsbackup-go/files" 30 | ) 31 | 32 | func TestDeleteGetBackendForURI(t *testing.T) { 33 | b, err := GetBackendForURI(DeleteBackendPrefix + "://") 34 | if err != nil { 35 | t.Errorf("Error while trying to get backend: %v", err) 36 | } 37 | if _, ok := b.(*DeleteBackend); !ok { 38 | t.Errorf("Expected to get a backend of type DeleteBackend, but did not.") 39 | } 40 | } 41 | 42 | func TestDeleteInit(t *testing.T) { 43 | b := &DeleteBackend{} 44 | if err := b.Init(context.Background(), nil); err != nil { 45 | t.Errorf("Expected error %v, got %v", nil, err) 46 | } 47 | } 48 | func TestDeleteClose(t *testing.T) { 49 | b := &DeleteBackend{} 50 | if err := b.Init(context.Background(), nil); err != nil { 51 | t.Errorf("Expected error %v, got %v", nil, err) 52 | } else { 53 | err = b.Close() 54 | if err != nil { 55 | t.Errorf("Expected %v, got %v", nil, err) 56 | } 57 | } 58 | } 59 | 60 | func TestDeletePreDownload(t *testing.T) { 61 | b := &DeleteBackend{} 62 | if err := b.Init(context.Background(), nil); err != nil { 63 | t.Errorf("Expected error %v, got %v", nil, err) 64 | } else { 65 | err = b.PreDownload(context.Background(), nil) 66 | if err == nil { 67 | t.Errorf("Expected non-nil error, got %v", err) 68 | } 69 | } 70 | } 71 | 72 | func TestDeleteDelete(t *testing.T) { 73 | b := &DeleteBackend{} 74 | if err := b.Init(context.Background(), nil); err != nil { 75 | t.Errorf("Expected error %v, got %v", nil, err) 76 | } else { 77 | err = b.Delete(context.Background(), "") 78 | if err == nil { 79 | t.Errorf("Expected non-nil error, got %v", err) 80 | } 81 | } 
82 | } 83 | 84 | func TestDeleteList(t *testing.T) { 85 | b := &DeleteBackend{} 86 | if err := b.Init(context.Background(), nil); err != nil { 87 | t.Errorf("Expected error %v, got %v", nil, err) 88 | } else { 89 | _, err = b.List(context.Background(), "") 90 | if err == nil { 91 | t.Errorf("Expected non-nil error, got %v", err) 92 | } 93 | } 94 | } 95 | 96 | func TestDeleteDownload(t *testing.T) { 97 | b := &DeleteBackend{} 98 | if err := b.Init(context.Background(), nil); err != nil { 99 | t.Errorf("Expected error %v, got %v", nil, err) 100 | } else { 101 | _, err = b.Download(context.Background(), "") 102 | if err == nil { 103 | t.Errorf("Expected non-nil error, got %v", err) 104 | } 105 | } 106 | } 107 | 108 | func TestDeleteStartUpload(t *testing.T) { 109 | _, goodVol, badVol, err := prepareTestVols() 110 | if err != nil { 111 | t.Fatalf("error preparing good volume for testing - %v", err) 112 | } 113 | 114 | config := &BackendConfig{ 115 | TargetURI: "delete://", 116 | MaxBackoffTime: 1 * time.Second, 117 | MaxRetryTime: 5 * time.Second, 118 | } 119 | 120 | testCases := []struct { 121 | vol *files.VolumeInfo 122 | valid errTestFunc 123 | }{ 124 | { 125 | vol: goodVol, 126 | valid: nilErrTest, 127 | }, 128 | { 129 | vol: badVol, 130 | valid: os.IsNotExist, 131 | }, 132 | } 133 | 134 | for idx, testCase := range testCases { 135 | b := &DeleteBackend{} 136 | if err := b.Init(context.Background(), config); err != nil { 137 | t.Errorf("%d: Expected error %v, got %v", idx, nil, err) 138 | } else { 139 | if errResult := b.Upload(context.Background(), testCase.vol); !testCase.valid(errResult) { 140 | t.Errorf("%d: error %v id not pass validation function", idx, errResult) 141 | } else if errResult == nil { 142 | err = testCase.vol.OpenVolume() 143 | if !os.IsNotExist(err) { 144 | t.Errorf("expected not exist error, got %v instead", err) 145 | } 146 | } 147 | } 148 | } 149 | } 150 | -------------------------------------------------------------------------------- 
/backends/doc.go: -------------------------------------------------------------------------------- 1 | // Package backends implements the Backend interface for various targets. 2 | package backends 3 | -------------------------------------------------------------------------------- /backends/file_backend.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package backends 22 | 23 | import ( 24 | "context" 25 | "io" 26 | "os" 27 | "path/filepath" 28 | "strings" 29 | 30 | "github.com/someone1/zfsbackup-go/files" 31 | "github.com/someone1/zfsbackup-go/log" 32 | ) 33 | 34 | // FileBackendPrefix is the URI prefix used for the FileBackend. 
35 | const FileBackendPrefix = "file" 36 | 37 | // FileBackend provides a local destination storage option. 38 | type FileBackend struct { 39 | conf *BackendConfig 40 | localPath string 41 | } 42 | 43 | // Init will initialize the FileBackend and verify the provided URI is valid/exists. 44 | func (f *FileBackend) Init(ctx context.Context, conf *BackendConfig, opts ...Option) error { 45 | f.conf = conf 46 | 47 | cleanPrefix := strings.TrimPrefix(f.conf.TargetURI, FileBackendPrefix+"://") 48 | if cleanPrefix == f.conf.TargetURI { 49 | return ErrInvalidURI 50 | } 51 | 52 | absLocalPath, err := filepath.Abs(cleanPrefix) 53 | if err != nil { 54 | log.AppLogger.Errorf("file backend: Error while verifying path %s - %v", cleanPrefix, err) 55 | return err 56 | } 57 | 58 | fi, err := os.Stat(absLocalPath) 59 | if err != nil { 60 | log.AppLogger.Errorf("file backend: Error while verifying path %s - %v", absLocalPath, err) 61 | return err 62 | } 63 | 64 | if !fi.IsDir() { 65 | log.AppLogger.Errorf("file backend: Provided path is not a directory!") 66 | return ErrInvalidURI 67 | } 68 | 69 | f.localPath = absLocalPath 70 | return nil 71 | } 72 | 73 | // Upload will copy the provided VolumeInfo to the backend's configured local destination 74 | func (f *FileBackend) Upload(ctx context.Context, vol *files.VolumeInfo) error { 75 | f.conf.MaxParallelUploadBuffer <- true 76 | defer func() { 77 | <-f.conf.MaxParallelUploadBuffer 78 | }() 79 | 80 | destinationPath := filepath.Join(f.localPath, vol.ObjectName) 81 | destinationDir := filepath.Dir(destinationPath) 82 | 83 | if err := os.MkdirAll(destinationDir, os.ModePerm); err != nil { 84 | log.AppLogger.Debugf("file backend: Could not create path %s due to error - %v", destinationDir, err) 85 | return err 86 | } 87 | 88 | w, err := os.Create(destinationPath) 89 | if err != nil { 90 | log.AppLogger.Debugf("file backend: Could not create file %s due to error - %v", destinationPath, err) 91 | return err 92 | } 93 | 94 | _, err = 
io.Copy(w, vol) 95 | if err != nil { 96 | if closeErr := w.Close(); closeErr != nil { 97 | log.AppLogger.Warningf("file backend: Error closing volume %s - %v", vol.ObjectName, closeErr) 98 | } 99 | if deleteErr := os.Remove(destinationPath); deleteErr != nil { 100 | log.AppLogger.Warningf("file backend: Error deleting failed upload file %s - %v", destinationPath, deleteErr) 101 | } 102 | log.AppLogger.Debugf("file backend: Error while copying volume %s - %v", vol.ObjectName, err) 103 | return err 104 | } 105 | 106 | return w.Close() 107 | } 108 | 109 | // Delete will delete the given object from the provided path 110 | func (f *FileBackend) Delete(ctx context.Context, filename string) error { 111 | return os.Remove(filepath.Join(f.localPath, filename)) 112 | } 113 | 114 | // PreDownload does nothing on this backend. 115 | func (f *FileBackend) PreDownload(ctx context.Context, objects []string) error { 116 | return nil 117 | } 118 | 119 | // Download will open the file for reading 120 | func (f *FileBackend) Download(ctx context.Context, filename string) (io.ReadCloser, error) { 121 | return os.Open(filepath.Join(f.localPath, filename)) 122 | } 123 | 124 | // Close does nothing for this backend. 
125 | func (f *FileBackend) Close() error { 126 | return nil 127 | } 128 | 129 | // List will return a list of all files matching the provided prefix 130 | func (f *FileBackend) List(ctx context.Context, prefix string) ([]string, error) { 131 | l := make([]string, 0, 1000) 132 | err := filepath.Walk(f.localPath, func(path string, fi os.FileInfo, werr error) error { 133 | if werr != nil { 134 | return werr 135 | } 136 | 137 | trimmedPath := strings.TrimPrefix(path, f.localPath+string(filepath.Separator)) 138 | if !fi.IsDir() && strings.HasPrefix(trimmedPath, prefix) { 139 | l = append(l, trimmedPath) 140 | } 141 | return nil 142 | }) 143 | 144 | return l, err 145 | } 146 | -------------------------------------------------------------------------------- /backends/file_backend_test.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package backends 22 | 23 | import ( 24 | "context" 25 | "io/ioutil" 26 | "os" 27 | "testing" 28 | ) 29 | 30 | func TestFileGetBackendForURI(t *testing.T) { 31 | b, err := GetBackendForURI(FileBackendPrefix + "://") 32 | if err != nil { 33 | t.Errorf("Error while trying to get backend: %v", err) 34 | } 35 | if _, ok := b.(*FileBackend); !ok { 36 | t.Errorf("Expected to get a backend of type FileBackend, but did not.") 37 | } 38 | } 39 | 40 | func TestFileInit(t *testing.T) { 41 | testCases := []struct { 42 | uri string 43 | errTest errTestFunc 44 | }{ 45 | { 46 | uri: "file://", 47 | errTest: nilErrTest, 48 | }, 49 | { 50 | uri: "file:///", 51 | errTest: nilErrTest, 52 | }, 53 | { 54 | uri: "notvalid://", 55 | errTest: errInvalidURIErrTest, 56 | }, 57 | { 58 | uri: "file://this/should/not/exist", 59 | errTest: os.IsNotExist, 60 | }, 61 | { 62 | uri: "file:///dev/urandom", 63 | errTest: errInvalidURIErrTest, 64 | }, 65 | } 66 | for idx, testCase := range testCases { 67 | b := &FileBackend{} 68 | conf := &BackendConfig{TargetURI: testCase.uri} 69 | if err := b.Init(context.Background(), conf); !testCase.errTest(err) { 70 | t.Errorf("%d: Unexpected error, got %v", idx, err) 71 | } 72 | } 73 | } 74 | 75 | func TestFileBackend(t *testing.T) { 76 | t.Parallel() 77 | 78 | ctx, cancel := context.WithCancel(context.Background()) 79 | defer cancel() 80 | 81 | tempPath, err := ioutil.TempDir("", t.Name()) 82 | if err != nil { 83 | t.Fatalf("could not create temp dir: %v", err) 84 | } 85 | 86 | defer func() { 87 | if err = os.RemoveAll(tempPath); err != nil { 88 | t.Errorf("could not clearn temp dir: %v", err) 89 | } 90 | }() 91 | 92 | b, err := GetBackendForURI(FileBackendPrefix + 
"://" + tempPath) 93 | if err != nil { 94 | t.Fatalf("Error while trying to get backend: %v", err) 95 | } 96 | 97 | BackendTest(ctx, FileBackendPrefix, tempPath, true, b)(t) 98 | } 99 | -------------------------------------------------------------------------------- /backends/gcs_backend.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 
20 | 21 | package backends 22 | 23 | import ( 24 | "context" 25 | "fmt" 26 | "io" 27 | "strings" 28 | 29 | "cloud.google.com/go/storage" 30 | "google.golang.org/api/iterator" 31 | "google.golang.org/api/option" 32 | 33 | "github.com/someone1/zfsbackup-go/files" 34 | "github.com/someone1/zfsbackup-go/log" 35 | ) 36 | 37 | // GoogleCloudStorageBackendPrefix is the URI prefix used for the GoogleCloudStorageBackend. 38 | const GoogleCloudStorageBackendPrefix = "gs" 39 | 40 | // Authenticate: https://developers.google.com/identity/protocols/application-default-credentials 41 | 42 | // GoogleCloudStorageBackend integrates with Google Cloud Storage. 43 | type GoogleCloudStorageBackend struct { 44 | conf *BackendConfig 45 | client *storage.Client 46 | prefix string 47 | bucketName string 48 | } 49 | 50 | type withGoogleCloudStorageClient struct{ client *storage.Client } 51 | 52 | func (w withGoogleCloudStorageClient) Apply(b Backend) { 53 | if v, ok := b.(*GoogleCloudStorageBackend); ok { 54 | v.client = w.client 55 | } 56 | } 57 | 58 | // WithGoogleCloudStorageClient will override an GCS backend's underlying API client with the one provided. 59 | func WithGoogleCloudStorageClient(c *storage.Client) Option { 60 | return withGoogleCloudStorageClient{c} 61 | } 62 | 63 | // Init will initialize the GoogleCloudStorageBackend and verify the provided URI is valid/exists. 
64 | func (g *GoogleCloudStorageBackend) Init(ctx context.Context, conf *BackendConfig, opts ...Option) error { 65 | g.conf = conf 66 | 67 | cleanPrefix := strings.TrimPrefix(g.conf.TargetURI, "gs://") 68 | if cleanPrefix == g.conf.TargetURI { 69 | return ErrInvalidURI 70 | } 71 | 72 | uriParts := strings.Split(cleanPrefix, "/") 73 | 74 | g.bucketName = uriParts[0] 75 | if len(uriParts) > 1 { 76 | g.prefix = strings.Join(uriParts[1:], "/") 77 | } 78 | 79 | for _, opt := range opts { 80 | opt.Apply(g) 81 | } 82 | 83 | if g.client == nil { 84 | client, err := storage.NewClient(ctx, option.WithScopes(storage.ScopeReadWrite)) 85 | if err != nil { 86 | return err 87 | } 88 | g.client = client 89 | } 90 | 91 | if _, err := g.client.Bucket(g.bucketName).Attrs(ctx); err != nil { 92 | return err 93 | } 94 | 95 | return nil 96 | } 97 | 98 | // Upload will upload the provided VolumeInfo to Google's Cloud Storage 99 | // It utilizes Resumeable Uploads to upload one file at a time serially. 100 | // Incomplete uploads auto-expire after one week. 
See: 101 | // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload 102 | func (g *GoogleCloudStorageBackend) Upload(ctx context.Context, vol *files.VolumeInfo) error { 103 | ctx, cancel := context.WithCancel(ctx) 104 | defer cancel() 105 | 106 | g.conf.MaxParallelUploadBuffer <- true 107 | defer func() { 108 | <-g.conf.MaxParallelUploadBuffer 109 | }() 110 | 111 | objName := g.prefix + vol.ObjectName 112 | w := g.client.Bucket(g.bucketName).Object(objName).NewWriter(ctx) 113 | w.CRC32C = vol.CRC32CSum32 114 | w.SendCRC32C = true 115 | w.ChunkSize = g.conf.UploadChunkSize 116 | 117 | if _, err := io.Copy(w, vol); err != nil { 118 | log.AppLogger.Debugf("gs backend: Error while uploading volume %s - %v", vol.ObjectName, err) 119 | return err 120 | } 121 | return w.Close() 122 | } 123 | 124 | // Delete will delete the given object from the configured bucket 125 | func (g *GoogleCloudStorageBackend) Delete(ctx context.Context, filename string) error { 126 | return g.client.Bucket(g.bucketName).Object(g.prefix + filename).Delete(ctx) 127 | } 128 | 129 | // PreDownload does nothing on this backend. 130 | func (g *GoogleCloudStorageBackend) PreDownload(ctx context.Context, objects []string) error { 131 | return nil 132 | } 133 | 134 | // Download will download the requseted object which can be read from the return io.ReadCloser. 135 | func (g *GoogleCloudStorageBackend) Download(ctx context.Context, filename string) (io.ReadCloser, error) { 136 | return g.client.Bucket(g.bucketName).Object(g.prefix + filename).NewReader(ctx) 137 | } 138 | 139 | // Close will release any resources used by the GCS backend. 140 | func (g *GoogleCloudStorageBackend) Close() error { 141 | // Close the storage client as well 142 | err := g.client.Close() 143 | g.client = nil 144 | return err 145 | } 146 | 147 | // List will iterate through all objects in the configured GCS bucket and return 148 | // a list of object names, filtering by the prefix provided. 
149 | func (g *GoogleCloudStorageBackend) List(ctx context.Context, prefix string) ([]string, error) { 150 | q := &storage.Query{Prefix: g.prefix + prefix} 151 | objects := g.client.Bucket(g.bucketName).Objects(ctx, q) 152 | l := make([]string, 0, 1000) 153 | for { 154 | attrs, err := objects.Next() 155 | if err == iterator.Done { 156 | break 157 | } 158 | if err != nil { 159 | return nil, fmt.Errorf("gs backend: could not list bucket due to error - %v", err) 160 | } 161 | 162 | l = append(l, strings.TrimPrefix(attrs.Name, g.prefix)) 163 | } 164 | return l, nil 165 | } 166 | -------------------------------------------------------------------------------- /backends/gcs_backend_test.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 
20 | 21 | package backends 22 | 23 | import ( 24 | "context" 25 | "crypto/tls" 26 | "net/http" 27 | "os" 28 | "testing" 29 | 30 | "cloud.google.com/go/storage" 31 | "google.golang.org/api/option" 32 | ) 33 | 34 | func TestGCSBackend(t *testing.T) { 35 | t.Parallel() 36 | 37 | if os.Getenv("GCS_FAKE_SERVER") == "" { 38 | t.Skip("No test GCS Endpoint provided to test against") 39 | } 40 | 41 | ctx, cancel := context.WithCancel(context.Background()) 42 | defer cancel() 43 | 44 | gcsTestBucketName := "test_bucket" 45 | 46 | // Setup Test Bucket 47 | transCfg := &http.Transport{ 48 | // nolint:gosec // Purposely connecting to a local self-signed certificate endpoint 49 | TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // ignore expired SSL certificates 50 | } 51 | httpClient := &http.Client{Transport: transCfg} 52 | client, err := storage.NewClient(ctx, option.WithEndpoint(os.Getenv("GCS_FAKE_SERVER")+"/storage/v1/"), option.WithHTTPClient(httpClient)) 53 | if err != nil { 54 | t.Fatal(err) 55 | } 56 | 57 | if err = client.Bucket(gcsTestBucketName).Create(ctx, "test-project", nil); err != nil { 58 | t.Fatalf("could not create bucket; %v", err) 59 | } 60 | 61 | defer func() { 62 | // nolint:gocritic // Re-enable if this gets resolved: https://github.com/fsouza/fake-gcs-server/issues/214 63 | // it := testBucket.Objects(ctx, nil) 64 | // for { 65 | // attrs, iErr := it.Next() 66 | // if iErr == iterator.Done { 67 | // break 68 | // } 69 | // if iErr != nil { 70 | // t.Fatalf("could not list objects: %v", iErr) 71 | // } 72 | // if err = testBucket.Object(attrs.Name).Delete(ctx); err != nil { 73 | // t.Errorf("could not delete object: %v", err) 74 | // } 75 | // } 76 | // if err = client.Bucket(gcsTestBucketName).Delete(ctx); err != nil { 77 | // t.Errorf("could not delete bucket: %v", err) 78 | // } 79 | }() 80 | 81 | b, err := GetBackendForURI(GoogleCloudStorageBackendPrefix + "://bucket_name") 82 | if err != nil { 83 | t.Fatalf("Error while trying to get 
backend: %v", err) 84 | } 85 | 86 | BackendTest(ctx, GoogleCloudStorageBackendPrefix, gcsTestBucketName, false, b, WithGoogleCloudStorageClient(client))(t) 87 | } 88 | -------------------------------------------------------------------------------- /backends/ssh_backend.go: -------------------------------------------------------------------------------- 1 | package backends 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "io" 7 | "net" 8 | "net/url" 9 | "os" 10 | "os/user" 11 | "path/filepath" 12 | "strings" 13 | "time" 14 | 15 | "github.com/pkg/sftp" 16 | "github.com/someone1/zfsbackup-go/files" 17 | "github.com/someone1/zfsbackup-go/log" 18 | "golang.org/x/crypto/ssh" 19 | "golang.org/x/crypto/ssh/agent" 20 | "golang.org/x/crypto/ssh/knownhosts" 21 | ) 22 | 23 | // SSHBackendPrefix is the URI prefix used for the SSHBackend. 24 | const SSHBackendPrefix = "ssh" 25 | 26 | // SSHBackend provides a ssh/sftp storage option. 27 | type SSHBackend struct { 28 | conf *BackendConfig 29 | sshClient *ssh.Client 30 | sftpClient *sftp.Client 31 | remotePath string 32 | } 33 | 34 | // buildSshSigner reads the private key file at privateKeyPath and transforms it into a ssh.Signer, 35 | // using password to decrypt the key if required. 36 | func buildSshSigner(privateKeyPath string, password string) (ssh.Signer, error) { 37 | privateKey, err := os.ReadFile(privateKeyPath) 38 | if err != nil { 39 | return nil, err 40 | } 41 | 42 | signer, err := ssh.ParsePrivateKey(privateKey) 43 | _, isMissingPassword := err.(*ssh.PassphraseMissingError) 44 | if isMissingPassword && password != "" { 45 | signer, err = ssh.ParsePrivateKeyWithPassphrase(privateKey, []byte(password)) 46 | } 47 | 48 | return signer, err 49 | } 50 | 51 | // buildAuthMethods builds ssh auth methods based on the provided password and private keys in the the users home directory. 52 | // To use a specific key instead of the default files set the env variable SSH_KEY_FILE. 
53 | // buildAuthMethods also adds ssh-agent auth if the env variable SSH_AUTH_SOCK exists. 54 | func buildAuthMethods(userHomeDir string, password string) (sshAuths []ssh.AuthMethod, err error) { 55 | sshAuthSock := os.Getenv("SSH_AUTH_SOCK") 56 | if sshAuthSock != "" { 57 | sshAgent, err := net.Dial("unix", sshAuthSock) 58 | if err != nil { 59 | return nil, err 60 | } 61 | sshAuths = append(sshAuths, ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers)) 62 | } 63 | 64 | sshKeyFile := os.Getenv("SSH_KEY_FILE") 65 | if sshKeyFile != "" { 66 | signer, err := buildSshSigner(sshKeyFile, password) 67 | if err != nil { 68 | return nil, err 69 | } 70 | sshAuths = append(sshAuths, ssh.PublicKeys(signer)) 71 | } else { 72 | signers := make([]ssh.Signer, 0) 73 | 74 | defaultKeys := []string{ 75 | filepath.Join(userHomeDir, ".ssh/id_rsa"), 76 | filepath.Join(userHomeDir, ".ssh/id_cdsa"), 77 | filepath.Join(userHomeDir, ".ssh/id_ecdsa_sk"), 78 | filepath.Join(userHomeDir, ".ssh/id_ed25519"), 79 | filepath.Join(userHomeDir, ".ssh/id_ed25519_sk"), 80 | filepath.Join(userHomeDir, ".ssh/id_dsa"), 81 | } 82 | 83 | for _, keyPath := range defaultKeys { 84 | signer, err := buildSshSigner(keyPath, password) 85 | if err != nil { 86 | if !errors.Is(err, os.ErrNotExist) { 87 | log.AppLogger.Warningf("ssh backend: Failed to use ssh key at %s - %v", keyPath, err) 88 | } 89 | continue 90 | } 91 | signers = append(signers, signer) 92 | } 93 | if len(signers) > 0 { 94 | sshAuths = append(sshAuths, ssh.PublicKeys(signers...)) 95 | } 96 | } 97 | 98 | if password != "" { 99 | sshAuths = append(sshAuths, ssh.Password(password)) 100 | } 101 | 102 | return sshAuths, nil 103 | } 104 | 105 | // buildHostKeyCallback builds a ssh.HostKeyCallback that uses the known_hosts file from the users home directory. 106 | // Or from a custom file specified by the SSH_KNOWN_HOSTS env variable. 107 | // If SSH_KNOWN_HOSTS is set to "ignore" host key checking is disabled. 
108 | func buildHostKeyCallback(userHomeDir string) (callback ssh.HostKeyCallback, err error) { 109 | knownHostsFile := os.Getenv("SSH_KNOWN_HOSTS") 110 | if knownHostsFile == "" { 111 | knownHostsFile = filepath.Join(userHomeDir, ".ssh/known_hosts") 112 | } 113 | if knownHostsFile == "ignore" { 114 | callback = ssh.InsecureIgnoreHostKey() 115 | } else { 116 | callback, err = knownhosts.New(knownHostsFile) 117 | } 118 | return callback, err 119 | } 120 | 121 | // Init will initialize the SSHBackend and verify the provided URI is valid and the target directory exists. 122 | func (s *SSHBackend) Init(ctx context.Context, conf *BackendConfig, opts ...Option) (err error) { 123 | s.conf = conf 124 | 125 | if !strings.HasPrefix(s.conf.TargetURI, SSHBackendPrefix+"://") { 126 | return ErrInvalidURI 127 | } 128 | 129 | targetUrl, err := url.Parse(s.conf.TargetURI) 130 | if err != nil { 131 | log.AppLogger.Errorf("ssh backend: Error while parsing target uri %s - %v", s.conf.TargetURI, err) 132 | return err 133 | } 134 | 135 | s.remotePath = strings.TrimSuffix(targetUrl.Path, "/") 136 | if s.remotePath == "" && targetUrl.Path != "/" { // allow root path 137 | log.AppLogger.Errorf("ssh backend: No remote path provided!") 138 | return ErrInvalidURI 139 | } 140 | 141 | username := os.Getenv("SSH_USERNAME") 142 | password := os.Getenv("SSH_PASSWORD") 143 | if targetUrl.User != nil { 144 | urlUsername := targetUrl.User.Username() 145 | if urlUsername != "" { 146 | username = urlUsername 147 | } 148 | urlPassword, _ := targetUrl.User.Password() 149 | if urlPassword != "" { 150 | password = urlPassword 151 | } 152 | } 153 | 154 | userInfo, err := user.Current() 155 | if err != nil { 156 | return err 157 | } 158 | if username == "" { 159 | username = userInfo.Username 160 | } 161 | 162 | sshAuths, err := buildAuthMethods(userInfo.HomeDir, password) 163 | if err != nil { 164 | return err 165 | } 166 | 167 | hostKeyCallback, err := buildHostKeyCallback(userInfo.HomeDir) 168 | if err 
!= nil { 169 | return err 170 | } 171 | 172 | sshConfig := &ssh.ClientConfig{ 173 | User: username, 174 | Auth: sshAuths, 175 | HostKeyCallback: hostKeyCallback, 176 | Timeout: 30 * time.Second, 177 | } 178 | 179 | hostname := targetUrl.Host 180 | if !strings.Contains(hostname, ":") { 181 | hostname = hostname + ":22" 182 | } 183 | s.sshClient, err = ssh.Dial("tcp", hostname, sshConfig) 184 | if err != nil { 185 | return err 186 | } 187 | 188 | s.sftpClient, err = sftp.NewClient(s.sshClient) 189 | if err != nil { 190 | return err 191 | } 192 | 193 | fi, err := s.sftpClient.Stat(s.remotePath) 194 | if err != nil { 195 | log.AppLogger.Errorf("ssh backend: Error while verifying remote path %s - %v", s.remotePath, err) 196 | return err 197 | } 198 | 199 | if !fi.IsDir() { 200 | log.AppLogger.Errorf("ssh backend: Provided remote path is not a directory!") 201 | return ErrInvalidURI 202 | } 203 | 204 | return nil 205 | } 206 | 207 | // Upload will upload the provided VolumeInfo to the remote sftp server. 
208 | func (s *SSHBackend) Upload(ctx context.Context, vol *files.VolumeInfo) error { 209 | s.conf.MaxParallelUploadBuffer <- true 210 | defer func() { 211 | <-s.conf.MaxParallelUploadBuffer 212 | }() 213 | 214 | destinationPath := filepath.Join(s.remotePath, vol.ObjectName) 215 | destinationDir := filepath.Dir(destinationPath) 216 | 217 | if err := s.sftpClient.MkdirAll(destinationDir); err != nil { 218 | log.AppLogger.Debugf("ssh backend: Could not create path %s due to error - %v", destinationDir, err) 219 | return err 220 | } 221 | 222 | w, err := s.sftpClient.Create(destinationPath) 223 | if err != nil { 224 | log.AppLogger.Debugf("ssh backend: Could not create file %s due to error - %v", destinationPath, err) 225 | return err 226 | } 227 | 228 | _, err = io.Copy(w, vol) 229 | if err != nil { 230 | if closeErr := w.Close(); closeErr != nil { 231 | log.AppLogger.Warningf("ssh backend: Error closing volume %s - %v", vol.ObjectName, closeErr) 232 | } 233 | if deleteErr := os.Remove(destinationPath); deleteErr != nil { 234 | log.AppLogger.Warningf("ssh backend: Error deleting failed upload file %s - %v", destinationPath, deleteErr) 235 | } 236 | log.AppLogger.Debugf("ssh backend: Error while copying volume %s - %v", vol.ObjectName, err) 237 | return err 238 | } 239 | 240 | return w.Close() 241 | } 242 | 243 | // List will return a list of all files matching the provided prefix. 244 | func (s *SSHBackend) List(ctx context.Context, prefix string) ([]string, error) { 245 | l := make([]string, 0, 1000) 246 | 247 | w := s.sftpClient.Walk(s.remotePath) 248 | for w.Step() { 249 | if err := w.Err(); err != nil { 250 | return l, err 251 | } 252 | 253 | trimmedPath := strings.TrimPrefix(w.Path(), s.remotePath+string(filepath.Separator)) 254 | if !w.Stat().IsDir() && strings.HasPrefix(trimmedPath, prefix) { 255 | l = append(l, trimmedPath) 256 | } 257 | } 258 | 259 | return l, nil 260 | } 261 | 262 | // Close will release any resources used by SSHBackend. 
263 | func (s *SSHBackend) Close() (err error) { 264 | if s.sftpClient != nil { 265 | err = s.sftpClient.Close() 266 | s.sftpClient = nil 267 | } 268 | if s.sshClient != nil { 269 | sshErr := s.sshClient.Close() 270 | if sshErr == nil && err == nil { 271 | err = sshErr 272 | } 273 | s.sshClient = nil 274 | } 275 | return err 276 | } 277 | 278 | // PreDownload does nothing on this backend. 279 | func (s *SSHBackend) PreDownload(ctx context.Context, objects []string) error { 280 | return nil 281 | } 282 | 283 | // Download will open the remote file for reading. 284 | func (s *SSHBackend) Download(ctx context.Context, filename string) (io.ReadCloser, error) { 285 | return s.sftpClient.Open(filepath.Join(s.remotePath, filename)) 286 | } 287 | 288 | // Delete will delete the given object from the provided path. 289 | func (s *SSHBackend) Delete(ctx context.Context, filename string) error { 290 | return s.sftpClient.Remove(filepath.Join(s.remotePath, filename)) 291 | } 292 | 293 | var _ Backend = (*SSHBackend)(nil) 294 | -------------------------------------------------------------------------------- /backends/ssh_backend_test.go: -------------------------------------------------------------------------------- 1 | package backends 2 | 3 | import ( 4 | "context" 5 | "crypto/rand" 6 | "crypto/x509" 7 | "encoding/pem" 8 | "errors" 9 | "fmt" 10 | "io" 11 | "net" 12 | "os" 13 | "testing" 14 | 15 | "github.com/pkg/sftp" 16 | "golang.org/x/crypto/ed25519" 17 | "golang.org/x/crypto/ssh" 18 | ) 19 | 20 | func generateSSHPrivateKey() (ssh.Signer, error) { 21 | _, priv, err := ed25519.GenerateKey(rand.Reader) 22 | if err != nil { 23 | return nil, err 24 | } 25 | 26 | keyBytes, err := x509.MarshalPKCS8PrivateKey(priv) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | signer, err := ssh.ParsePrivateKey(pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: keyBytes})) 32 | if err != nil { 33 | return nil, err 34 | } 35 | 36 | return signer, nil 37 | } 38 | 39 | // 
startSftpServer starts a very hacky sftp server based on 40 | // https://github.com/pkg/sftp/blob/v1.13.5/examples/go-sftp-server/main.go 41 | func startSftpServer(t testing.TB, user string, password string) net.Listener { 42 | t.Helper() 43 | 44 | config := &ssh.ServerConfig{ 45 | PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { 46 | if c.User() == user && string(pass) == password { 47 | return nil, nil 48 | } 49 | return nil, fmt.Errorf("password rejected for %q", c.User()) 50 | }, 51 | } 52 | signer, err := generateSSHPrivateKey() 53 | if err != nil { 54 | t.Fatal("failed to generate key", err) 55 | } 56 | config.AddHostKey(signer) 57 | 58 | listener, err := net.Listen("tcp", "127.0.0.1:0") 59 | if err != nil { 60 | t.Fatal("failed to listen for connection", err) 61 | } 62 | 63 | fmt.Println("Listening on", listener.Addr()) 64 | 65 | go func() { 66 | for { 67 | acceptedConn, err := listener.Accept() 68 | if err != nil { 69 | if !errors.Is(err, net.ErrClosed) { 70 | t.Fatal("failed to accept incoming connection", err) 71 | } 72 | return 73 | } 74 | 75 | go func(conn net.Conn) { 76 | // Before use, a handshake must be performed on the incoming net.Conn. 77 | _, chans, reqs, err := ssh.NewServerConn(conn, config) 78 | if err != nil { 79 | t.Fatal("failed to handshake", err) 80 | } 81 | fmt.Println("SSH server established") 82 | 83 | // The incoming Request channel must be serviced. 84 | go ssh.DiscardRequests(reqs) 85 | 86 | // Service the incoming Channel channel. 87 | for newChannel := range chans { 88 | // Channels have a type, depending on the application level 89 | // protocol intended. 
In the case of an SFTP session, this is "subsystem" 90 | // with a payload string of "sftp" 91 | fmt.Println("Incoming channel:", newChannel.ChannelType()) 92 | if newChannel.ChannelType() != "session" { 93 | _ = newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") 94 | fmt.Println("Unknown channel type:", newChannel.ChannelType()) 95 | continue 96 | } 97 | channel, requests, err := newChannel.Accept() 98 | if err != nil { 99 | t.Fatal("could not accept channel.", err) 100 | } 101 | fmt.Println("Channel accepted") 102 | 103 | // Sessions have out-of-band requests such as "shell", 104 | // "pty-req" and "env". Here we handle only the 105 | // "subsystem" request. 106 | go func(in <-chan *ssh.Request) { 107 | for req := range in { 108 | fmt.Println("Request:", req.Type) 109 | ok := false 110 | switch req.Type { 111 | case "subsystem": 112 | fmt.Printf("Subsystem: %s\n", req.Payload[4:]) 113 | if string(req.Payload[4:]) == "sftp" { 114 | ok = true 115 | } 116 | } 117 | fmt.Println(" - accepted:", ok) 118 | _ = req.Reply(ok, nil) 119 | } 120 | }(requests) 121 | 122 | server, err := sftp.NewServer(channel) 123 | if err != nil { 124 | t.Fatal(err) 125 | } 126 | if err := server.Serve(); err == io.EOF { 127 | _ = server.Close() 128 | fmt.Println("sftp client exited session.") 129 | } else if err != nil { 130 | t.Fatal("sftp server completed with error:", err) 131 | } 132 | } 133 | }(acceptedConn) 134 | } 135 | }() 136 | 137 | return listener 138 | } 139 | 140 | func TestSSHBackend(t *testing.T) { 141 | t.Parallel() 142 | 143 | sftpListener := startSftpServer(t, "test", "password") 144 | defer func() { 145 | _ = sftpListener.Close() 146 | }() 147 | 148 | tempPath := t.TempDir() 149 | 150 | backendUriEnd := "test:password@" + sftpListener.Addr().String() + tempPath 151 | 152 | err := os.Setenv("SSH_KNOWN_HOSTS", "ignore") 153 | if err != nil { 154 | t.Fatalf("Failed to disable host key checking: %v", err) 155 | } 156 | 157 | b, err := 
GetBackendForURI(SSHBackendPrefix + "://" + backendUriEnd) 158 | if err != nil { 159 | t.Fatalf("Error while trying to get backend: %v", err) 160 | } 161 | 162 | ctx, cancel := context.WithCancel(context.Background()) 163 | defer cancel() 164 | 165 | BackendTest(ctx, SSHBackendPrefix, backendUriEnd, true, b)(t) 166 | } 167 | -------------------------------------------------------------------------------- /backup/backup_test.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 
20 | 21 | package backup 22 | 23 | import ( 24 | "bytes" 25 | "context" 26 | "crypto/rand" 27 | "io" 28 | "io/ioutil" 29 | "os" 30 | "testing" 31 | "time" 32 | 33 | "github.com/someone1/zfsbackup-go/backends" 34 | "github.com/someone1/zfsbackup-go/files" 35 | ) 36 | 37 | // Truly a useless backend 38 | type mockBackend struct{} 39 | 40 | func (m *mockBackend) Init(ctx context.Context, conf *backends.BackendConfig, opts ...backends.Option) error { 41 | return nil 42 | } 43 | 44 | func (m *mockBackend) Upload(ctx context.Context, vol *files.VolumeInfo) error { 45 | // make sure we can read the volume 46 | _, err := ioutil.ReadAll(vol) 47 | return err 48 | } 49 | 50 | func (m *mockBackend) List(ctx context.Context, prefix string) ([]string, error) { 51 | return nil, nil 52 | } 53 | 54 | func (m *mockBackend) Close() error { return nil } 55 | 56 | func (m *mockBackend) PreDownload(ctx context.Context, objects []string) error { return nil } 57 | 58 | func (m *mockBackend) Download(ctx context.Context, filename string) (io.ReadCloser, error) { 59 | return nil, nil 60 | } 61 | 62 | func (m *mockBackend) Delete(ctx context.Context, filename string) error { return nil } 63 | 64 | type errTestFunc func(error) bool 65 | 66 | func nilErrTest(e error) bool { return e == nil } 67 | 68 | func TestRetryUploadChainer(t *testing.T) { 69 | _, goodVol, badVol, err := prepareTestVols() 70 | if err != nil { 71 | t.Fatalf("error preparing volumes for testing - %v", err) 72 | } 73 | 74 | testCases := []struct { 75 | vol *files.VolumeInfo 76 | valid errTestFunc 77 | }{ 78 | { 79 | vol: goodVol, 80 | valid: nilErrTest, 81 | }, 82 | { 83 | vol: badVol, 84 | valid: os.IsNotExist, 85 | }, 86 | } 87 | 88 | j := &files.JobInfo{ 89 | MaxParallelUploads: 1, 90 | MaxBackoffTime: 5 * time.Second, 91 | MaxRetryTime: 1 * time.Minute, 92 | } 93 | 94 | for idx, testCase := range testCases { 95 | b := &mockBackend{} 96 | if err := b.Init(context.Background(), nil); err != nil { 97 | t.Errorf("%d: 
Expected error %v, got %v", idx, nil, err) 98 | } else { 99 | in := make(chan *files.VolumeInfo, 1) 100 | out, wg := retryUploadChainer(context.Background(), in, b, j, "mock://") 101 | in <- testCase.vol 102 | close(in) 103 | outVol := <-out 104 | if errResult := wg.Wait(); !testCase.valid(errResult) { 105 | t.Errorf("%d: error %v id not pass validation function", idx, errResult) 106 | } else if errResult == nil { 107 | // Verify we got the same vol we passed in! 108 | if outVol != testCase.vol { 109 | t.Errorf("did not get same volume passed in back out") 110 | } 111 | } 112 | } 113 | } 114 | } 115 | 116 | func prepareTestVols() (payload []byte, goodVol, badVol *files.VolumeInfo, err error) { 117 | payload = make([]byte, 10*1024*1024) 118 | if _, err = rand.Read(payload); err != nil { 119 | return 120 | } 121 | reader := bytes.NewReader(payload) 122 | goodVol, err = files.CreateSimpleVolume(context.Background(), false) 123 | if err != nil { 124 | return 125 | } 126 | _, err = io.Copy(goodVol, reader) 127 | if err != nil { 128 | return 129 | } 130 | err = goodVol.Close() 131 | if err != nil { 132 | return 133 | } 134 | 135 | badVol, err = files.CreateSimpleVolume(context.Background(), false) 136 | if err != nil { 137 | return 138 | } 139 | err = badVol.Close() 140 | if err != nil { 141 | return 142 | } 143 | 144 | err = badVol.DeleteVolume() 145 | 146 | return payload, goodVol, badVol, err 147 | } 148 | -------------------------------------------------------------------------------- /backup/clean.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, 
and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package backup 22 | 23 | import ( 24 | "context" 25 | "crypto/md5" // nolint:gosec // MD5 not used for cryptographic purposes here 26 | "fmt" 27 | "os" 28 | "path/filepath" 29 | "strings" 30 | "time" 31 | 32 | "github.com/cenkalti/backoff" 33 | "golang.org/x/sync/errgroup" 34 | 35 | "github.com/someone1/zfsbackup-go/files" 36 | "github.com/someone1/zfsbackup-go/log" 37 | ) 38 | 39 | // Clean will remove files found in the desination that are not found in any of the manifests found locally or in the destination. 40 | // If cleanLocal is true, then local manifests not found in the destination are ignored and deleted. This function will optionally 41 | // delete broken backup sets in the destination if the --force flag is provided. 
42 | // nolint:funlen,gocyclo // Difficult to break this up 43 | func Clean(pctx context.Context, jobInfo *files.JobInfo, cleanLocal bool) error { 44 | ctx, cancel := context.WithCancel(pctx) 45 | defer cancel() 46 | 47 | // Prepare the backend client 48 | target := jobInfo.Destinations[0] 49 | backend, berr := prepareBackend(ctx, jobInfo, target, nil) 50 | if berr != nil { 51 | log.AppLogger.Errorf("Could not initialize backend for target %s due to error - %v.", target, berr) 52 | return berr 53 | } 54 | defer backend.Close() 55 | 56 | // Get the local cache dir 57 | localCachePath, cerr := getCacheDir(target) 58 | if cerr != nil { 59 | log.AppLogger.Errorf("Could not get cache dir for target %s due to error - %v.", target, cerr) 60 | return cerr 61 | } 62 | 63 | // Sync the local cache 64 | safeManifests, localOnlyFiles, serr := syncCache(ctx, jobInfo, localCachePath, backend) 65 | if serr != nil { 66 | log.AppLogger.Errorf("Could not sync cache dir for target %s due to error - %v.", target, serr) 67 | return serr 68 | } 69 | 70 | // Read in Manifests 71 | decodedManifests := make([]*files.JobInfo, 0, len(safeManifests)) 72 | for _, manifest := range safeManifests { 73 | manifestPath := filepath.Join(localCachePath, manifest) 74 | decodedManifest, oerr := readManifest(ctx, manifestPath, jobInfo) 75 | if oerr != nil { 76 | log.AppLogger.Errorf("Could not read manifest %s due to error - %v", manifestPath, oerr) 77 | return oerr 78 | } 79 | decodedManifests = append(decodedManifests, decodedManifest) 80 | } 81 | 82 | if !cleanLocal { 83 | if len(localOnlyFiles) > 0 { 84 | // nolint:lll // Long log message 85 | log.AppLogger.Noticef( 86 | "There are %d local manifests not found in the destination, use --cleanLocal to delete these locally and any of their volumes found in the destination.", 87 | len(localOnlyFiles), 88 | ) 89 | for _, manifest := range localOnlyFiles { 90 | manifestPath := filepath.Join(localCachePath, manifest) 91 | decodedManifest, oerr := 
readManifest(ctx, manifestPath, jobInfo) 92 | if oerr != nil { 93 | log.AppLogger.Errorf("Could not read manifest %s due to error - %v", manifestPath, oerr) 94 | return oerr 95 | } 96 | decodedManifests = append(decodedManifests, decodedManifest) 97 | } 98 | } 99 | } else { 100 | for _, manifest := range localOnlyFiles { 101 | manifestPath := filepath.Join(localCachePath, manifest) 102 | err := os.Remove(manifestPath) 103 | if err != nil { 104 | log.AppLogger.Errorf("Could not delete local manifest %s due to error - %v", manifestPath, err) 105 | return err 106 | } 107 | log.AppLogger.Debugf("Deleted %s.", manifestPath) 108 | } 109 | } 110 | 111 | // TODO: The following can be done in a much more efficient way (probably) 112 | allObjects, err := backend.List(ctx, "") 113 | if err != nil { 114 | log.AppLogger.Errorf("Could not list objects in backend %s due to error - %v", target, err) 115 | return err 116 | } 117 | 118 | // Remove Manifest Files 119 | for idx := 0; idx < len(allObjects); idx++ { 120 | if strings.HasPrefix(allObjects[idx], jobInfo.ManifestPrefix) { 121 | allObjects = append(allObjects[:idx], allObjects[idx+1:]...) 122 | idx-- 123 | } 124 | } 125 | 126 | // Go through all manifests and remove from the allObjects list what we know should exist 127 | for _, manifest := range decodedManifests { 128 | for vidx, vol := range manifest.Volumes { 129 | found := false 130 | for idx := range allObjects { 131 | if strings.Compare(vol.ObjectName, allObjects[idx]) == 0 { 132 | allObjects = append(allObjects[:idx], allObjects[idx+1:]...) 133 | found = true 134 | break 135 | } 136 | } 137 | 138 | if !found { 139 | // Broken backup set! inform the user! 140 | if jobInfo.Force { 141 | log.AppLogger.Warningf( 142 | "The following backup set is missing volume %s. 
Removing entire backupset:\n\n%s", 143 | vol.ObjectName, manifest.String(), 144 | ) 145 | 146 | // Compute the manifest object name and cache name to delete 147 | manifest.ManifestPrefix = jobInfo.ManifestPrefix 148 | manifest.SignKey = jobInfo.SignKey 149 | manifest.EncryptKey = jobInfo.EncryptKey 150 | tempManifest, terr := files.CreateManifestVolume(ctx, manifest) 151 | if terr != nil { 152 | log.AppLogger.Errorf("Could not compute manifest path due to error - %v.", terr) 153 | return terr 154 | } 155 | allObjects = append(allObjects, tempManifest.ObjectName) 156 | if err = tempManifest.Close(); err != nil { 157 | log.AppLogger.Warningf("Could not close temporary manifest %v", err) 158 | } 159 | if err = tempManifest.DeleteVolume(); err != nil { 160 | log.AppLogger.Warningf("Could not delete temporary manifest %v", err) 161 | } 162 | // nolint:gosec // MD5 not used for cryptographic purposes here 163 | manifestPath := filepath.Join(localCachePath, fmt.Sprintf("%x", md5.Sum([]byte(tempManifest.ObjectName)))) 164 | err = os.Remove(manifestPath) 165 | if err != nil { 166 | log.AppLogger.Errorf("Could not delete local manifest %s due to error - %v. 
Continuing.", manifestPath, err) 167 | } 168 | 169 | // Delete all volumes already processed in the manifest 170 | for i := 0; i < vidx; i++ { 171 | allObjects = append(allObjects, manifest.Volumes[i].ObjectName) 172 | } 173 | break 174 | } else { 175 | log.AppLogger.Warningf( 176 | "The following backup set is missing volume %s:\n\n%s\n\nPass the --force flag to delete this backup set.", 177 | vol.ObjectName, manifest.String(), 178 | ) 179 | } 180 | } 181 | } 182 | } 183 | 184 | log.AppLogger.Noticef("Starting to delete %d objects in destination.", len(allObjects)) 185 | 186 | // Whatever is left in allObjects was not found in any manifest, delete 'em 187 | var group *errgroup.Group 188 | group, ctx = errgroup.WithContext(ctx) 189 | 190 | deleteChan := make(chan string, len(allObjects)) 191 | for _, obj := range allObjects { 192 | deleteChan <- obj 193 | } 194 | close(deleteChan) 195 | 196 | // Let's not slam the endpoint with a lot of concurrent requests, pick a sensible default and stick to it 197 | for i := 0; i < 5; i++ { 198 | group.Go(func() error { 199 | for { 200 | select { 201 | case <-ctx.Done(): 202 | return ctx.Err() 203 | case objectPath, ok := <-deleteChan: 204 | if !ok { 205 | return nil 206 | } 207 | 208 | be := backoff.NewExponentialBackOff() 209 | be.MaxInterval = time.Minute 210 | be.MaxElapsedTime = 10 * time.Minute 211 | retryconf := backoff.WithContext(be, ctx) 212 | 213 | operation := func() error { 214 | return backend.Delete(ctx, objectPath) 215 | } 216 | 217 | if berr := backoff.Retry(operation, retryconf); berr != nil { 218 | log.AppLogger.Errorf("Could not delete object %s in due to error - %v", objectPath, berr) 219 | return berr 220 | } 221 | 222 | log.AppLogger.Debugf("Deleted %s.", filepath.Join(target, objectPath)) 223 | } 224 | } 225 | }) 226 | } 227 | 228 | log.AppLogger.Debugf("Waiting to delete %d objects in destination.", len(allObjects)) 229 | err = group.Wait() 230 | if err != nil { 231 | log.AppLogger.Errorf("Could not 
finish clean operation due to error, aborting: %v", err) 232 | return err 233 | } 234 | 235 | log.AppLogger.Noticef("Done.") 236 | return nil 237 | } 238 | -------------------------------------------------------------------------------- /backup/doc.go: -------------------------------------------------------------------------------- 1 | // Package backup handles all the logic around backing up and restoring snapshots 2 | package backup 3 | -------------------------------------------------------------------------------- /backup/list.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 
20 | 21 | package backup 22 | 23 | import ( 24 | "context" 25 | "crypto/md5" // nolint:gosec // MD5 not used for cryptographic purposes here 26 | "encoding/json" 27 | "fmt" 28 | "path/filepath" 29 | "sort" 30 | "strings" 31 | "time" 32 | 33 | "github.com/someone1/zfsbackup-go/config" 34 | "github.com/someone1/zfsbackup-go/files" 35 | "github.com/someone1/zfsbackup-go/log" 36 | ) 37 | 38 | // List will sync the manifests found in the target destination to the local cache 39 | // and then read and output the manifest information describing the backup sets 40 | // found in the target destination. 41 | // TODO: Group by volume name? 42 | // nolint:gocyclo // Difficult to break this up 43 | func List(pctx context.Context, jobInfo *files.JobInfo, startswith string, before, after time.Time) error { 44 | ctx, cancel := context.WithCancel(pctx) 45 | defer cancel() 46 | 47 | // Prepare the backend client 48 | target := jobInfo.Destinations[0] 49 | backend, berr := prepareBackend(ctx, jobInfo, target, nil) 50 | if berr != nil { 51 | log.AppLogger.Errorf("Could not initialize backend for target %s due to error - %v.", target, berr) 52 | return berr 53 | } 54 | defer backend.Close() 55 | 56 | // Get the local cache dir 57 | localCachePath, cerr := getCacheDir(jobInfo.Destinations[0]) 58 | if cerr != nil { 59 | log.AppLogger.Errorf("Could not get cache dir for target %s due to error - %v.", target, cerr) 60 | return cerr 61 | } 62 | 63 | // Sync the local cache 64 | safeManifests, localOnlyFiles, serr := syncCache(ctx, jobInfo, localCachePath, backend) 65 | if serr != nil { 66 | log.AppLogger.Errorf("Could not sync cache dir for target %s due to error - %v.", target, serr) 67 | return serr 68 | } 69 | 70 | decodedManifests, derr := readAndSortManifests(ctx, localCachePath, safeManifests, jobInfo) 71 | if derr != nil { 72 | return derr 73 | } 74 | 75 | // Filter Manifests to only results we care about 76 | filteredResults := decodedManifests[:0] 77 | for _, manifest := range 
decodedManifests { 78 | if startswith != "" { 79 | if startswith[len(startswith)-1:] == "*" { 80 | if len(startswith) != 1 && !strings.HasPrefix(manifest.VolumeName, startswith[:len(startswith)-1]) { 81 | continue 82 | } 83 | } else if strings.Compare(startswith, manifest.VolumeName) != 0 { 84 | continue 85 | } 86 | } 87 | 88 | if !before.IsZero() && !manifest.BaseSnapshot.CreationTime.Before(before) { 89 | continue 90 | } 91 | 92 | if !after.IsZero() && !manifest.BaseSnapshot.CreationTime.After(after) { 93 | continue 94 | } 95 | 96 | filteredResults = append(filteredResults, manifest) 97 | } 98 | 99 | decodedManifests = filteredResults 100 | 101 | if !config.JSONOutput { 102 | var output []string 103 | 104 | output = append(output, fmt.Sprintf("Found %d backup sets:\n", len(decodedManifests))) 105 | for _, manifest := range decodedManifests { 106 | output = append(output, manifest.String()) 107 | } 108 | 109 | if len(localOnlyFiles) > 0 { 110 | output = append(output, fmt.Sprintf("There are %d manifests found locally that are not on the target destination.", len(localOnlyFiles))) 111 | localOnlyOuput := []string{"The following manifests were found locally and can be removed using the clean command."} 112 | for _, filename := range localOnlyFiles { 113 | manifestPath := filepath.Join(localCachePath, filename) 114 | decodedManifest, derr := readManifest(ctx, manifestPath, jobInfo) 115 | if derr != nil { 116 | log.AppLogger.Warningf("Could not read local only manifest %s due to error %v", manifestPath, derr) 117 | continue 118 | } 119 | localOnlyOuput = append(localOnlyOuput, decodedManifest.String()) 120 | } 121 | log.AppLogger.Infof(strings.Join(localOnlyOuput, "\n")) 122 | } 123 | fmt.Fprintln(config.Stdout, strings.Join(output, "\n")) 124 | } else { 125 | organizedManifests := linkManifests(decodedManifests) 126 | j, jerr := json.Marshal(organizedManifests) 127 | if jerr != nil { 128 | log.AppLogger.Errorf("could not marshal results to JSON - %v", jerr) 129 | 
return jerr 130 | } 131 | 132 | fmt.Fprintln(config.Stdout, string(j)) 133 | } 134 | 135 | return nil 136 | } 137 | 138 | func readAndSortManifests( 139 | ctx context.Context, 140 | localCachePath string, 141 | manifests []string, 142 | jobInfo *files.JobInfo, 143 | ) ([]*files.JobInfo, error) { 144 | // Read in Manifests and display 145 | decodedManifests := make([]*files.JobInfo, 0, len(manifests)) 146 | for _, manifest := range manifests { 147 | manifestPath := filepath.Join(localCachePath, manifest) 148 | decodedManifest, oerr := readManifest(ctx, manifestPath, jobInfo) 149 | if oerr != nil { 150 | log.AppLogger.Errorf("Could not read manifest %s due to error - %v", manifestPath, oerr) 151 | return nil, oerr 152 | } 153 | decodedManifests = append(decodedManifests, decodedManifest) 154 | } 155 | 156 | sort.SliceStable(decodedManifests, func(i, j int) bool { 157 | cmp := strings.Compare(decodedManifests[i].VolumeName, decodedManifests[j].VolumeName) 158 | if cmp == 0 { 159 | return decodedManifests[i].BaseSnapshot.CreationTime.Before(decodedManifests[j].BaseSnapshot.CreationTime) 160 | } 161 | return cmp < 0 162 | }) 163 | 164 | return decodedManifests, nil 165 | } 166 | 167 | // linkManifests will group manifests by Volume and link parents to their children 168 | func linkManifests(manifests []*files.JobInfo) map[string][]*files.JobInfo { 169 | if manifests == nil { 170 | return nil 171 | } 172 | manifestTree := make(map[string][]*files.JobInfo) 173 | manifestsByID := make(map[string]*files.JobInfo) 174 | for idx := range manifests { 175 | key := manifests[idx].VolumeName 176 | 177 | // nolint:gosec // MD5 not used for cryptographic purposes here 178 | manifestID := fmt.Sprintf("%x", md5.Sum([]byte( 179 | fmt.Sprintf("%s%s%v", key, manifests[idx].BaseSnapshot.Name, manifests[idx].BaseSnapshot.CreationTime), 180 | ))) 181 | 182 | manifestTree[key] = append(manifestTree[key], manifests[idx]) 183 | 184 | // Case 1: Full Backups, nothing to link 185 | if 
manifests[idx].IncrementalSnapshot.Name == "" { 186 | // We will always assume full backups are ideal when selecting a parent 187 | manifestsByID[manifestID] = manifests[idx] 188 | } else if _, ok := manifestsByID[manifestID]; !ok { 189 | // Case 2: Incremental Backup - only make it the designated parent if we haven't gone one already 190 | manifestsByID[manifestID] = manifests[idx] 191 | } 192 | } 193 | 194 | // Link up parents 195 | for _, snapList := range manifestTree { 196 | for _, val := range snapList { 197 | if val.IncrementalSnapshot.Name == "" { 198 | // Full backup, no parent 199 | continue 200 | } 201 | // nolint:gosec // MD5 not used for cryptographic purposes here 202 | manifestID := fmt.Sprintf("%x", md5.Sum([]byte( 203 | fmt.Sprintf("%s%s%v", val.VolumeName, val.IncrementalSnapshot.Name, val.IncrementalSnapshot.CreationTime), 204 | ))) 205 | if psnap, ok := manifestsByID[manifestID]; ok { 206 | val.ParentSnap = psnap 207 | } else { 208 | log.AppLogger.Warningf("Could not find matching parent for %v", val) 209 | } 210 | } 211 | } 212 | return manifestTree 213 | } 214 | 215 | func readManifest(ctx context.Context, manifestPath string, j *files.JobInfo) (*files.JobInfo, error) { 216 | decodedManifest := new(files.JobInfo) 217 | manifestVol, err := files.ExtractLocal(ctx, j, manifestPath, true) 218 | if err != nil { 219 | return nil, err 220 | } 221 | defer manifestVol.Close() 222 | decoder := json.NewDecoder(manifestVol) 223 | err = decoder.Decode(decodedManifest) 224 | if err != nil { 225 | return nil, err 226 | } 227 | 228 | return decodedManifest, nil 229 | } 230 | -------------------------------------------------------------------------------- /backup/sync.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files 
(the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 
20 | 21 | package backup 22 | 23 | import ( 24 | "context" 25 | "crypto/md5" // nolint:gosec // MD5 not used for cryptographic purposes here 26 | "fmt" 27 | "io/ioutil" 28 | "os" 29 | "path/filepath" 30 | "strings" 31 | 32 | "github.com/someone1/zfsbackup-go/backends" 33 | "github.com/someone1/zfsbackup-go/config" 34 | "github.com/someone1/zfsbackup-go/files" 35 | "github.com/someone1/zfsbackup-go/log" 36 | "github.com/someone1/zfsbackup-go/zfs" 37 | ) 38 | 39 | func prepareBackend(ctx context.Context, j *files.JobInfo, backendURI string, uploadBuffer chan bool) (backends.Backend, error) { 40 | log.AppLogger.Debugf("Initializing Backend %s", backendURI) 41 | conf := &backends.BackendConfig{ 42 | MaxParallelUploadBuffer: uploadBuffer, 43 | TargetURI: backendURI, 44 | MaxParallelUploads: j.MaxParallelUploads, 45 | MaxBackoffTime: j.MaxBackoffTime, 46 | MaxRetryTime: j.MaxRetryTime, 47 | UploadChunkSize: j.UploadChunkSize * 1024 * 1024, 48 | } 49 | 50 | backend, err := backends.GetBackendForURI(backendURI) 51 | if err != nil { 52 | return nil, err 53 | } 54 | 55 | err = backend.Init(ctx, conf) 56 | 57 | return backend, err 58 | } 59 | 60 | func getCacheDir(backendURI string) (string, error) { 61 | // nolint:gosec // MD5 not used for cryptographic purposes here 62 | safeFolder := fmt.Sprintf("%x", md5.Sum([]byte(backendURI))) 63 | dest := filepath.Join(config.WorkingDir, "cache", safeFolder) 64 | oerr := os.MkdirAll(dest, os.ModePerm) 65 | if oerr != nil { 66 | return "", fmt.Errorf("could not create cache directory %s due to an error: %v", dest, oerr) 67 | } 68 | 69 | return dest, nil 70 | } 71 | 72 | // Returns local manifest paths that exist in the backend and those that do not 73 | // nolint:gocritic // Don't need to name the results 74 | func syncCache(ctx context.Context, j *files.JobInfo, localCache string, backend backends.Backend) ([]string, []string, error) { 75 | // List all manifests at the destination 76 | manifests, merr := backend.List(ctx, 
j.ManifestPrefix) 77 | if merr != nil { 78 | return nil, nil, fmt.Errorf("could not list manifest files from the backed due to error - %v", merr) 79 | } 80 | 81 | // Make it safe for local file system storage 82 | safeManifests := make([]string, len(manifests)) 83 | for idx := range manifests { 84 | // nolint:gosec // MD5 not used for cryptographic purposes here 85 | safeManifests[idx] = fmt.Sprintf("%x", md5.Sum([]byte(manifests[idx]))) 86 | } 87 | 88 | // Check what manifests we have locally, and if we are missing any, download them 89 | manifestFiles, ferr := ioutil.ReadDir(localCache) 90 | if ferr != nil { 91 | return nil, nil, fmt.Errorf("could not list files from the local cache dir due to error - %v", ferr) 92 | } 93 | 94 | var localOnlyFiles []string 95 | var foundFiles []string 96 | for _, file := range manifestFiles { 97 | if file.IsDir() { 98 | continue 99 | } 100 | found := false 101 | for idx := range manifests { 102 | if strings.Compare(file.Name(), safeManifests[idx]) != 0 { 103 | continue 104 | } 105 | 106 | found = true 107 | foundFiles = append(foundFiles, safeManifests[idx]) 108 | manifests = append(manifests[:idx], manifests[idx+1:]...) 109 | safeManifests = append(safeManifests[:idx], safeManifests[idx+1:]...) 
110 | break 111 | } 112 | if !found { 113 | localOnlyFiles = append(localOnlyFiles, file.Name()) 114 | } 115 | } 116 | 117 | pderr := backend.PreDownload(ctx, manifests) 118 | if pderr != nil { 119 | return nil, nil, fmt.Errorf("could not prepare manifests for download due to error - %v", pderr) 120 | } 121 | 122 | if len(manifests) > 0 { 123 | log.AppLogger.Debugf("Syncing %d manifests to local cache.", len(manifests)) 124 | 125 | // manifests should only contain what we don't have locally 126 | for idx, manifest := range manifests { 127 | if err := downloadTo(ctx, backend, manifest, filepath.Join(localCache, safeManifests[idx])); err != nil { 128 | return nil, nil, err 129 | } 130 | } 131 | } 132 | 133 | safeManifests = append(safeManifests, foundFiles...) 134 | 135 | return safeManifests, localOnlyFiles, nil 136 | } 137 | 138 | // nolint:unparam // Some errors are not ok to ignore 139 | func validateSnapShotExists(ctx context.Context, snapshot *files.SnapshotInfo, target string, includeBookmarks bool) (bool, error) { 140 | snapshots, err := zfs.GetSnapshotsAndBookmarks(ctx, target) 141 | if err != nil { 142 | log.AppLogger.Debugf("Could not list snapshots for %s: %v", target, err) 143 | // TODO: There are some error cases that are ok to ignore! 
144 | return false, nil 145 | } 146 | return validateSnapShotExistsFromSnaps(snapshot, snapshots, includeBookmarks), nil 147 | } 148 | 149 | func validateSnapShotExistsFromSnaps(snapshot *files.SnapshotInfo, snapshots []files.SnapshotInfo, includeBookmarks bool) bool { 150 | for _, snap := range snapshots { 151 | if !includeBookmarks && snap.Bookmark { 152 | continue 153 | } 154 | if snap.Equal(snapshot) { 155 | // Flag the snapshot as a bookmark if it is one 156 | snapshot.Bookmark = snap.Bookmark 157 | return true 158 | } 159 | } 160 | 161 | return false 162 | } 163 | -------------------------------------------------------------------------------- /cmd/clean.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2017 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 
20 | 21 | package cmd 22 | 23 | import ( 24 | "github.com/spf13/cobra" 25 | 26 | "github.com/someone1/zfsbackup-go/backup" 27 | ) 28 | 29 | var cleanLocal bool 30 | 31 | // cleanCmd represents the clean command 32 | var cleanCmd = &cobra.Command{ 33 | Use: "clean [flags] uri", 34 | Short: "Clean will delete any objects in the target that are not found in the manifest files found in the target.", 35 | Long: `Clean will delete any objects in the target that are not found in the manifest files found in the target.`, 36 | SilenceErrors: true, 37 | PreRunE: validateCleanFlags, 38 | RunE: func(cmd *cobra.Command, args []string) error { 39 | jobInfo.Destinations = []string{args[0]} 40 | return backup.Clean(cmd.Context(), &jobInfo, cleanLocal) 41 | }, 42 | } 43 | 44 | func init() { 45 | RootCmd.AddCommand(cleanCmd) 46 | 47 | cleanCmd.Flags().BoolVarP(&cleanLocal, "cleanLocal", "", false, "Delete any files found in the local cache that shouldn't be there.") 48 | cleanCmd.Flags().BoolVarP(&jobInfo.Force, "force", "", false, 49 | "This will force the deletion of broken backup sets (sets where volumes expected in the manifest file are not found). 
Use with caution.", 50 | ) 51 | } 52 | 53 | func validateCleanFlags(cmd *cobra.Command, args []string) error { 54 | if len(args) != 1 { 55 | _ = cmd.Usage() 56 | return errInvalidInput 57 | } 58 | 59 | if err := loadReceiveKeys(); err != nil { 60 | return err 61 | } 62 | 63 | return nil 64 | } 65 | -------------------------------------------------------------------------------- /cmd/doc.go: -------------------------------------------------------------------------------- 1 | // Package cmd implements the command line options to configure and run the application 2 | package cmd 3 | -------------------------------------------------------------------------------- /cmd/list.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 
20 | 21 | package cmd 22 | 23 | import ( 24 | "time" 25 | 26 | "github.com/spf13/cobra" 27 | 28 | "github.com/someone1/zfsbackup-go/backup" 29 | "github.com/someone1/zfsbackup-go/log" 30 | ) 31 | 32 | var ( 33 | startsWith string 34 | beforeStr string 35 | afterStr string 36 | before time.Time 37 | after time.Time 38 | ) 39 | 40 | // listCmd represents the list command 41 | var listCmd = &cobra.Command{ 42 | Use: "list [flags] uri", 43 | Short: "List all backup sets found at the provided target.", 44 | Long: `List all backup sets found at the provided target.`, 45 | PreRunE: validateListFlags, 46 | RunE: func(cmd *cobra.Command, args []string) error { 47 | if startsWith != "" { 48 | if startsWith[len(startsWith)-1:] == "*" { 49 | log.AppLogger.Infof("Listing all backup jobs for volumes starting with %s", startsWith) 50 | } else { 51 | log.AppLogger.Infof("Listing all backup jobs for volume %s", startsWith) 52 | } 53 | } 54 | 55 | if !before.IsZero() { 56 | log.AppLogger.Infof("Listing all back jobs of snapshots taken before %v", before) 57 | } 58 | 59 | if !after.IsZero() { 60 | log.AppLogger.Infof("Listing all back jobs of snapshots taken after %v", after) 61 | } 62 | 63 | jobInfo.Destinations = []string{args[0]} 64 | return backup.List(cmd.Context(), &jobInfo, startsWith, before, after) 65 | }, 66 | } 67 | 68 | func init() { 69 | RootCmd.AddCommand(listCmd) 70 | 71 | listCmd.Flags().StringVar( 72 | &startsWith, 73 | "volumeName", 74 | "", 75 | "Filter results to only this volume name, can end with a '*' to match as only a prefix", 76 | ) 77 | listCmd.Flags().StringVar( 78 | &beforeStr, 79 | "before", 80 | "", 81 | "Filter results to only this backups before this specified date & time (format: yyyy-MM-ddTHH:mm:ss, parsed in local TZ)", 82 | ) 83 | listCmd.Flags().StringVar( 84 | &afterStr, 85 | "after", 86 | "", 87 | "Filter results to only this backups after this specified date & time (format: yyyy-MM-ddTHH:mm:ss, parsed in local TZ)", 88 | ) 89 | } 90 | 91 | 
func validateListFlags(cmd *cobra.Command, args []string) error { 92 | if len(args) != 1 { 93 | _ = cmd.Usage() 94 | return errInvalidInput 95 | } 96 | 97 | if err := loadReceiveKeys(); err != nil { 98 | return err 99 | } 100 | 101 | if beforeStr != "" { 102 | parsed, perr := time.ParseInLocation(time.RFC3339[:19], beforeStr, time.Local) 103 | if perr != nil { 104 | log.AppLogger.Errorf("could not parse before time '%s' due to error: %v", beforeStr, perr) 105 | return perr 106 | } 107 | before = parsed 108 | } 109 | 110 | if afterStr != "" { 111 | parsed, perr := time.ParseInLocation(time.RFC3339[:19], afterStr, time.Local) 112 | if perr != nil { 113 | log.AppLogger.Errorf("could not parse before time '%s' due to error: %v", beforeStr, perr) 114 | return perr 115 | } 116 | after = parsed 117 | } 118 | return nil 119 | } 120 | 121 | // ResetListJobInfo exists solely for integration testing 122 | func ResetListJobInfo() { 123 | resetRootFlags() 124 | startsWith = "" 125 | beforeStr = "" 126 | afterStr = "" 127 | before = time.Time{} 128 | after = time.Time{} 129 | } 130 | -------------------------------------------------------------------------------- /cmd/receive.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 
12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package cmd 22 | 23 | import ( 24 | "context" 25 | "fmt" 26 | "strings" 27 | "time" 28 | 29 | "github.com/spf13/cobra" 30 | 31 | "github.com/someone1/zfsbackup-go/backends" 32 | "github.com/someone1/zfsbackup-go/backup" 33 | "github.com/someone1/zfsbackup-go/files" 34 | "github.com/someone1/zfsbackup-go/log" 35 | "github.com/someone1/zfsbackup-go/zfs" 36 | ) 37 | 38 | // receiveCmd represents the receive command 39 | var receiveCmd = &cobra.Command{ 40 | Use: "receive [flags] filesystem|volume|snapshot-to-restore uri local_volume", 41 | Short: "receive will restore a snapshot of a ZFS volume similar to how the \"zfs recv\" command works.", 42 | Long: `receive will restore a snapshot of a ZFS volume similar to how the "zfs recv" command works.`, 43 | PreRunE: validateReceiveFlags, 44 | RunE: func(cmd *cobra.Command, args []string) error { 45 | log.AppLogger.Infof("Limiting the number of active files to %d", jobInfo.MaxFileBuffer) 46 | 47 | if jobInfo.AutoRestore { 48 | return backup.AutoRestore(cmd.Context(), &jobInfo) 49 | } 50 | return backup.Receive(cmd.Context(), &jobInfo) 51 | }, 52 | } 53 | 54 | func init() { 55 | RootCmd.AddCommand(receiveCmd) 56 | 57 | // ZFS recv command options 58 | receiveCmd.Flags().BoolVar( 59 | &jobInfo.AutoRestore, 60 | "auto", 61 | false, 62 | "Automatically restore to the snapshot provided, or to the latest snapshot of the volume provided, cannot be "+ 63 | "used with the --incremental flag.", 64 | ) 65 | 
receiveCmd.Flags().BoolVarP( 66 | &jobInfo.FullPath, 67 | "fullPath", 68 | "d", 69 | false, "See the -d flag on zfs recv for more information", 70 | ) 71 | receiveCmd.Flags().BoolVarP( 72 | &jobInfo.LastPath, 73 | "lastPath", 74 | "e", 75 | false, 76 | "See the -e flag for zfs recv for more information.", 77 | ) 78 | receiveCmd.Flags().BoolVarP( 79 | &jobInfo.Force, 80 | "force", 81 | "F", 82 | false, 83 | "See the -F flag for zfs recv for more information.", 84 | ) 85 | receiveCmd.Flags().BoolVarP( 86 | &jobInfo.NotMounted, 87 | "unmounted", 88 | "u", 89 | false, 90 | "See the -u flag for zfs recv for more information.", 91 | ) 92 | receiveCmd.Flags().StringVarP( 93 | &jobInfo.Origin, 94 | "origin", 95 | "o", 96 | "", 97 | "See the -o flag on zfs recv for more information.", 98 | ) 99 | receiveCmd.Flags().StringVarP( 100 | &jobInfo.IncrementalSnapshot.Name, 101 | "incremental", 102 | "i", 103 | "", 104 | "Used to specify the snapshot target to restore from.", 105 | ) 106 | receiveCmd.Flags().IntVar( 107 | &jobInfo.MaxFileBuffer, 108 | "maxFileBuffer", 109 | 5, 110 | "the maximum number of files to have active during the upload process. Should be set to at least the number "+ 111 | "of max parallel uploads. Set to 0 to bypass local storage and upload straight to your destination - this will "+ 112 | "limit you to a single destination and disable any hash checks for the upload where available.", 113 | ) 114 | receiveCmd.Flags().DurationVar( 115 | &jobInfo.MaxRetryTime, 116 | "maxRetryTime", 117 | 12*time.Hour, 118 | "the maximum time that can elapse when retrying a failed download. 
// validateReceiveFlags validates the positional arguments and flag
// combinations for the receive command and primes jobInfo before it runs.
// Expected arguments: <volume[@snapshot]> <uri[,uri...]> <local_volume>.
// Returns errInvalidInput on any bad combination; key-loading errors are
// returned unchanged.
// nolint:gocyclo // Will do later
func validateReceiveFlags(cmd *cobra.Command, args []string) error {
	if len(args) != 3 {
		_ = cmd.Usage()
		return errInvalidInput
	}

	if err := loadReceiveKeys(); err != nil {
		return err
	}

	jobInfo.StartTime = time.Now()

	// Split "volume@snapshot"; omitting the snapshot part is only valid
	// together with the --auto flag.
	parts := strings.Split(args[0], "@")
	if len(parts) != 2 && !jobInfo.AutoRestore {
		log.AppLogger.Errorf("Invalid base snapshot provided. Expected format @, got %s instead", args[0])
		return errInvalidInput
	} else if len(parts) == 2 {
		jobInfo.BaseSnapshot = files.SnapshotInfo{Name: parts[1]}
	}

	// -d (fullPath) and -e (lastPath) mirror the zfs recv flags and are
	// mutually exclusive, just as in zfs itself.
	if jobInfo.FullPath && jobInfo.LastPath {
		log.AppLogger.Errorf("The -d and -e options are mutually exclusive, please select only one!")
		return errInvalidInput
	}

	jobInfo.VolumeName = parts[0]
	jobInfo.Destinations = strings.Split(args[1], ",")
	jobInfo.LocalVolume = args[2]

	// Intelligently restore to the snapshot wanted
	if jobInfo.AutoRestore && jobInfo.IncrementalSnapshot.Name != "" {
		log.AppLogger.Errorf("Cannot request auto restore option and provide an incremental snapshot to restore from.")
		return errInvalidInput
	}

	// Remove 'origin=' from beginning of -o argument
	jobInfo.Origin = strings.TrimPrefix(jobInfo.Origin, "origin=")

	if !jobInfo.AutoRestore {
		// Let's see if we already have this snap shot locally; a lookup
		// failure is intentionally ignored (the snapshot may not exist yet).
		creationTime, err := zfs.GetCreationDate(context.TODO(), fmt.Sprintf("%s@%s", jobInfo.LocalVolume, jobInfo.BaseSnapshot.Name))
		if err == nil {
			jobInfo.BaseSnapshot.CreationTime = creationTime
		}
		if jobInfo.IncrementalSnapshot.Name != "" {
			// Accept either "snap" or "volume@snap" for --incremental by
			// stripping the volume name and separator if present.
			jobInfo.IncrementalSnapshot.Name = strings.TrimPrefix(jobInfo.IncrementalSnapshot.Name, jobInfo.VolumeName)
			jobInfo.IncrementalSnapshot.Name = strings.TrimPrefix(jobInfo.IncrementalSnapshot.Name, "@")
			creationTime, err = zfs.GetCreationDate(context.TODO(), fmt.Sprintf("%s@%s", jobInfo.LocalVolume, jobInfo.IncrementalSnapshot.Name))
			if err == nil {
				jobInfo.IncrementalSnapshot.CreationTime = creationTime
			}
		}
	}

	// Reject destination URIs whose scheme no backend recognizes before
	// doing any real work; other backend errors are deferred to runtime.
	for _, destination := range jobInfo.Destinations {
		_, err := backends.GetBackendForURI(destination)
		if err == backends.ErrInvalidPrefix {
			log.AppLogger.Errorf("Unsupported prefix provided in destination URI, was given %s", destination)
			return errInvalidInput
		} else if err == backends.ErrInvalidURI {
			log.AppLogger.Errorf("Invalid destination URI, was given %s", destination)
			return errInvalidInput
		}
	}

	return nil
}
20 | 21 | package cmd 22 | 23 | import ( 24 | "context" 25 | "errors" 26 | "fmt" 27 | "io/ioutil" 28 | "os" 29 | "os/user" 30 | "path/filepath" 31 | "runtime" 32 | "strings" 33 | 34 | humanize "github.com/dustin/go-humanize" 35 | "github.com/juju/ratelimit" 36 | "github.com/op/go-logging" 37 | "github.com/spf13/cobra" 38 | "golang.org/x/crypto/openpgp" 39 | "golang.org/x/crypto/ssh/terminal" 40 | 41 | "github.com/someone1/zfsbackup-go/config" 42 | "github.com/someone1/zfsbackup-go/files" 43 | "github.com/someone1/zfsbackup-go/log" 44 | "github.com/someone1/zfsbackup-go/pgp" 45 | "github.com/someone1/zfsbackup-go/zfs" 46 | ) 47 | 48 | var ( 49 | numCores int 50 | logLevel string 51 | secretKeyRingPath string 52 | publicKeyRingPath string 53 | workingDirectory string 54 | errInvalidInput = errors.New("invalid input") 55 | ) 56 | 57 | // RootCmd represents the base command when called without any subcommands 58 | var RootCmd = &cobra.Command{ 59 | Use: "zfsbackup", 60 | Short: "zfsbackup is a tool used to do off-site backups of ZFS volumes.", 61 | Long: `zfsbackup is a tool used to do off-site backups of ZFS volumes. 62 | It leverages the built-in snapshot capabilities of ZFS in order to export ZFS 63 | volumes for long-term storage. 64 | 65 | zfsbackup uses the "zfs send" command to export, and optionally compress, sign, 66 | encrypt, and split the send stream to files that are then transferred to a 67 | destination of your choosing.`, 68 | PersistentPreRunE: processFlags, 69 | PersistentPostRun: postRunCleanup, 70 | SilenceErrors: true, 71 | SilenceUsage: true, 72 | } 73 | 74 | // Execute adds all child commands to the root command sets flags appropriately. 75 | // This is called by main.main(). It only needs to happen once to the rootCmd. 
76 | func Execute(ctx context.Context) { 77 | if err := RootCmd.ExecuteContext(ctx); err != nil { 78 | os.Exit(-1) 79 | } 80 | } 81 | 82 | func init() { 83 | RootCmd.PersistentFlags().IntVar( 84 | &numCores, 85 | "numCores", 86 | 2, 87 | "number of CPU cores to utilize. Do not exceed the number of CPU cores on the system.", 88 | ) 89 | RootCmd.PersistentFlags().StringVar( 90 | &logLevel, 91 | "logLevel", 92 | "notice", 93 | "this controls the verbosity level of logging. Possible values are critical, error, warning, notice, info, debug.", 94 | ) 95 | RootCmd.PersistentFlags().StringVar( 96 | &secretKeyRingPath, 97 | "secretKeyRingPath", 98 | "", 99 | "the path to the PGP secret key ring", 100 | ) 101 | RootCmd.PersistentFlags().StringVar( 102 | &publicKeyRingPath, 103 | "publicKeyRingPath", 104 | "", 105 | "the path to the PGP public key ring", 106 | ) 107 | RootCmd.PersistentFlags().StringVar( 108 | &workingDirectory, 109 | "workingDirectory", 110 | "~/.zfsbackup", 111 | "the working directory path for zfsbackup.", 112 | ) 113 | RootCmd.PersistentFlags().StringVar( 114 | &jobInfo.ManifestPrefix, 115 | "manifestPrefix", 116 | "manifests", "the prefix to use for all manifest files.", 117 | ) 118 | RootCmd.PersistentFlags().StringVar( 119 | &jobInfo.EncryptTo, 120 | "encryptTo", 121 | "", 122 | "the email of the user to encrypt the data to from the provided public keyring.", 123 | ) 124 | RootCmd.PersistentFlags().StringVar( 125 | &jobInfo.SignFrom, 126 | "signFrom", 127 | "", 128 | "the email of the user to sign on behalf of from the provided private keyring.", 129 | ) 130 | RootCmd.PersistentFlags().StringVar( 131 | &zfs.ZFSPath, 132 | "zfsPath", 133 | "zfs", 134 | "the path to the zfs executable.", 135 | ) 136 | RootCmd.PersistentFlags().BoolVar( 137 | &config.JSONOutput, 138 | "jsonOutput", 139 | false, 140 | "dump results as a JSON string - on success only", 141 | ) 142 | passphrase = []byte(os.Getenv("PGP_PASSPHRASE")) 143 | } 144 | 145 | func resetRootFlags() { 
146 | jobInfo = files.JobInfo{} 147 | numCores = 2 148 | logLevel = "notice" 149 | secretKeyRingPath = "" 150 | publicKeyRingPath = "" 151 | workingDirectory = "~/.zfsbackup" 152 | jobInfo.ManifestPrefix = "manifests" 153 | jobInfo.EncryptTo = "" 154 | jobInfo.SignFrom = "" 155 | zfs.ZFSPath = "zfs" 156 | config.JSONOutput = false 157 | } 158 | 159 | // nolint:gocyclo,funlen // Will do later 160 | func processFlags(cmd *cobra.Command, args []string) error { 161 | switch strings.ToLower(logLevel) { 162 | case "critical": 163 | logging.SetLevel(logging.CRITICAL, log.LogModuleName) 164 | case "error": 165 | logging.SetLevel(logging.ERROR, log.LogModuleName) 166 | case "warning", "warn": 167 | logging.SetLevel(logging.WARNING, log.LogModuleName) 168 | case "notice": 169 | logging.SetLevel(logging.NOTICE, log.LogModuleName) 170 | case "info": 171 | logging.SetLevel(logging.INFO, log.LogModuleName) 172 | case "debug": 173 | logging.SetLevel(logging.DEBUG, log.LogModuleName) 174 | default: 175 | log.AppLogger.Errorf("Invalid log level provided. Was given %s", logLevel) 176 | return errInvalidInput 177 | } 178 | 179 | if numCores <= 0 { 180 | log.AppLogger.Errorf("The number of cores to use provided is an invalid value. It must be greater than 0. 
%d was given.", numCores) 181 | return errInvalidInput 182 | } 183 | 184 | if numCores > runtime.NumCPU() { 185 | log.AppLogger.Warningf( 186 | "Ignoring user provided number of cores (%d) and using the number of detected cores (%d).", 187 | numCores, runtime.NumCPU(), 188 | ) 189 | numCores = runtime.NumCPU() 190 | } 191 | log.AppLogger.Infof("Setting number of cores to: %d", numCores) 192 | runtime.GOMAXPROCS(numCores) 193 | 194 | if secretKeyRingPath != "" { 195 | if err := pgp.LoadPrivateRing(secretKeyRingPath); err != nil { 196 | log.AppLogger.Errorf("Could not load private keyring due to an error - %v", err) 197 | return errInvalidInput 198 | } 199 | } 200 | log.AppLogger.Infof("Loaded private key ring %s", secretKeyRingPath) 201 | 202 | if publicKeyRingPath != "" { 203 | if err := pgp.LoadPublicRing(publicKeyRingPath); err != nil { 204 | log.AppLogger.Errorf("Could not load public keyring due to an error - %v", err) 205 | return errInvalidInput 206 | } 207 | } 208 | log.AppLogger.Infof("Loaded public key ring %s", publicKeyRingPath) 209 | 210 | if err := setupGlobalVars(); err != nil { 211 | return err 212 | } 213 | log.AppLogger.Infof("Setting working directory to %s", workingDirectory) 214 | pgp.PrintPGPDebugInformation() 215 | return nil 216 | } 217 | 218 | func getAndDecryptPrivateKey(email string) (*openpgp.Entity, error) { 219 | var entity *openpgp.Entity 220 | if entity = pgp.GetPrivateKeyByEmail(email); entity == nil { 221 | log.AppLogger.Errorf("Could not find private key for %s", email) 222 | return nil, errInvalidInput 223 | } 224 | 225 | if entity.PrivateKey != nil && entity.PrivateKey.Encrypted { 226 | validatePassphrase() 227 | if err := entity.PrivateKey.Decrypt(passphrase); err != nil { 228 | log.AppLogger.Errorf("Error decrypting private key: %v", err) 229 | return nil, errInvalidInput 230 | } 231 | } 232 | 233 | for _, subkey := range entity.Subkeys { 234 | if subkey.PrivateKey != nil && subkey.PrivateKey.Encrypted { 235 | 
validatePassphrase() 236 | if err := subkey.PrivateKey.Decrypt(passphrase); err != nil { 237 | log.AppLogger.Errorf("Error decrypting subkey's private key: %v", err) 238 | return nil, errInvalidInput 239 | } 240 | } 241 | } 242 | 243 | return entity, nil 244 | } 245 | 246 | func loadSendKeys() error { 247 | if jobInfo.EncryptTo != "" { 248 | if usingSmartOption() && secretKeyRingPath == "" { 249 | log.AppLogger.Errorf("You must specify a secret keyring path if you use a smart option with encryptTo") 250 | return errInvalidInput 251 | } else if publicKeyRingPath == "" { 252 | log.AppLogger.Errorf("You must specify a public keyring path if you provide an encryptTo option") 253 | return errInvalidInput 254 | } 255 | } 256 | 257 | if jobInfo.SignFrom != "" && secretKeyRingPath == "" { 258 | log.AppLogger.Errorf("You must specify a secret keyring path if you provide a signFrom option") 259 | return errInvalidInput 260 | } 261 | 262 | if jobInfo.EncryptTo != "" { 263 | if usingSmartOption() { 264 | var err error 265 | jobInfo.EncryptKey, err = getAndDecryptPrivateKey(jobInfo.EncryptTo) 266 | if err != nil { 267 | return err 268 | } 269 | } else if jobInfo.EncryptKey = pgp.GetPublicKeyByEmail(jobInfo.EncryptTo); jobInfo.EncryptKey == nil { 270 | log.AppLogger.Errorf("Could not find public key for %s", jobInfo.EncryptTo) 271 | return errInvalidInput 272 | } 273 | } 274 | 275 | if jobInfo.SignFrom != "" { 276 | var err error 277 | jobInfo.SignKey, err = getAndDecryptPrivateKey(jobInfo.SignFrom) 278 | if err != nil { 279 | return err 280 | } 281 | } 282 | 283 | return nil 284 | } 285 | 286 | func loadReceiveKeys() error { 287 | if jobInfo.EncryptTo != "" && secretKeyRingPath == "" { 288 | log.AppLogger.Errorf("You must specify a secret keyring path if you provide an encryptTo option") 289 | return errInvalidInput 290 | } 291 | 292 | if jobInfo.SignFrom != "" && publicKeyRingPath == "" { 293 | log.AppLogger.Errorf("You must specify a public keyring path if you provide a 
signFrom option") 294 | return errInvalidInput 295 | } 296 | 297 | if jobInfo.EncryptTo != "" { 298 | var err error 299 | jobInfo.EncryptKey, err = getAndDecryptPrivateKey(jobInfo.EncryptTo) 300 | if err != nil { 301 | return err 302 | } 303 | } 304 | 305 | if jobInfo.SignFrom != "" { 306 | if jobInfo.SignKey = pgp.GetPublicKeyByEmail(jobInfo.SignFrom); jobInfo.SignKey == nil { 307 | log.AppLogger.Errorf("Could not find public key for %s", jobInfo.SignFrom) 308 | return errInvalidInput 309 | } 310 | } 311 | 312 | return nil 313 | } 314 | 315 | func postRunCleanup(cmd *cobra.Command, args []string) { 316 | err := os.RemoveAll(config.BackupTempdir) 317 | if err != nil { 318 | log.AppLogger.Errorf("Could not clean working temporary directory - %v", err) 319 | } 320 | } 321 | 322 | // nolint:gocyclo,funlen // Will do later 323 | func setupGlobalVars() error { 324 | // Setup Tempdir 325 | 326 | if strings.HasPrefix(workingDirectory, "~") { 327 | usr, err := user.Current() 328 | if err != nil { 329 | log.AppLogger.Errorf("Could not get current user due to error - %v", err) 330 | return err 331 | } 332 | workingDirectory = filepath.Join(usr.HomeDir, strings.TrimPrefix(workingDirectory, "~")) 333 | } 334 | 335 | if dir, serr := os.Stat(workingDirectory); serr == nil && !dir.IsDir() { 336 | log.AppLogger.Errorf( 337 | "Cannot create working directory because another non-directory object already exists in that path (%s)", 338 | workingDirectory, 339 | ) 340 | return errInvalidInput 341 | } else if serr != nil { 342 | err := os.Mkdir(workingDirectory, 0755) 343 | if err != nil { 344 | log.AppLogger.Errorf("Could not create working directory %s due to error - %v", workingDirectory, err) 345 | return err 346 | } 347 | } 348 | 349 | dirPath := filepath.Join(workingDirectory, "temp") 350 | if dir, serr := os.Stat(dirPath); serr == nil && !dir.IsDir() { 351 | log.AppLogger.Errorf( 352 | "Cannot create temp dir in working directory because another non-directory object already 
exists in that path (%s)", 353 | dirPath, 354 | ) 355 | return errInvalidInput 356 | } else if serr != nil { 357 | err := os.Mkdir(dirPath, 0755) 358 | if err != nil { 359 | log.AppLogger.Errorf("Could not create temp directory %s due to error - %v", dirPath, err) 360 | return err 361 | } 362 | } 363 | 364 | tempdir, err := ioutil.TempDir(dirPath, config.ProgramName) 365 | if err != nil { 366 | log.AppLogger.Errorf("Could not create temp directory due to error - %v", err) 367 | return err 368 | } 369 | 370 | config.BackupTempdir = tempdir 371 | config.WorkingDir = workingDirectory 372 | 373 | dirPath = filepath.Join(workingDirectory, "cache") 374 | if dir, serr := os.Stat(dirPath); serr == nil && !dir.IsDir() { 375 | log.AppLogger.Errorf( 376 | "Cannot create cache dir in working directory because another non-directory object already exists in that path (%s)", 377 | dirPath, 378 | ) 379 | return errInvalidInput 380 | } else if serr != nil { 381 | err := os.Mkdir(dirPath, 0755) 382 | if err != nil { 383 | log.AppLogger.Errorf("Could not create cache directory %s due to error - %v", dirPath, err) 384 | return err 385 | } 386 | } 387 | 388 | if maxUploadSpeed != 0 { 389 | log.AppLogger.Infof("Limiting the upload speed to %s/s.", humanize.Bytes(maxUploadSpeed*humanize.KByte)) 390 | config.BackupUploadBucket = ratelimit.NewBucketWithRate(float64(maxUploadSpeed*humanize.KByte), int64(maxUploadSpeed*humanize.KByte)) 391 | } 392 | return nil 393 | } 394 | 395 | func validatePassphrase() { 396 | var err error 397 | if len(passphrase) == 0 { 398 | fmt.Fprint(config.Stdout, "Enter passphrase to decrypt encryption key: ") 399 | passphrase, err = terminal.ReadPassword(0) 400 | if err != nil { 401 | log.AppLogger.Errorf("Error reading user input for encryption key passphrase: %v", err) 402 | panic(err) 403 | } 404 | } 405 | } 406 | -------------------------------------------------------------------------------- /cmd/send.go: 
-------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 
20 | 21 | package cmd 22 | 23 | import ( 24 | "context" 25 | "fmt" 26 | "strings" 27 | "time" 28 | 29 | "github.com/spf13/cobra" 30 | 31 | "github.com/someone1/zfsbackup-go/backends" 32 | "github.com/someone1/zfsbackup-go/backup" 33 | "github.com/someone1/zfsbackup-go/config" 34 | "github.com/someone1/zfsbackup-go/files" 35 | "github.com/someone1/zfsbackup-go/log" 36 | "github.com/someone1/zfsbackup-go/zfs" 37 | ) 38 | 39 | var ( 40 | jobInfo files.JobInfo 41 | fullIncremental string 42 | maxUploadSpeed uint64 43 | passphrase []byte 44 | ) 45 | 46 | // sendCmd represents the send command 47 | var sendCmd = &cobra.Command{ 48 | Use: "send [flags] filesystem|volume|snapshot uri(s)", 49 | Short: "send will backup of a ZFS volume similar to how the \"zfs send\" command works.", 50 | Long: `send take a subset of the`, 51 | PreRunE: validateSendFlags, 52 | RunE: func(cmd *cobra.Command, args []string) error { 53 | log.AppLogger.Infof("Limiting the number of active files to %d", jobInfo.MaxFileBuffer) 54 | log.AppLogger.Infof("Limiting the number of parallel uploads to %d", jobInfo.MaxParallelUploads) 55 | log.AppLogger.Infof("Max Backoff Time will be %v", jobInfo.MaxBackoffTime) 56 | log.AppLogger.Infof("Max Upload Retry Time will be %v", jobInfo.MaxRetryTime) 57 | log.AppLogger.Infof("Upload Chunk Size will be %dMiB", jobInfo.UploadChunkSize) 58 | if jobInfo.EncryptKey != nil { 59 | log.AppLogger.Infof("Will be using encryption key for %s", jobInfo.EncryptTo) 60 | } 61 | 62 | if jobInfo.SignKey != nil { 63 | log.AppLogger.Infof("Will be signed from %s", jobInfo.SignFrom) 64 | } 65 | 66 | return backup.Backup(cmd.Context(), &jobInfo) 67 | }, 68 | } 69 | 70 | // nolint:funlen,gocyclo // Will do later 71 | func init() { 72 | RootCmd.AddCommand(sendCmd) 73 | 74 | // ZFS send command options 75 | sendCmd.Flags().BoolVarP(&jobInfo.Replication, "replication", "R", false, "See the -R flag on zfs send for more information") 76 | sendCmd.Flags().BoolVarP(&jobInfo.SkipMissing, 
"skip-missing", "s", false, "See the -s flag on zfs send for more information") 77 | sendCmd.Flags().BoolVarP(&jobInfo.Deduplication, "deduplication", "D", false, "See the -D flag for zfs send for more information.") 78 | sendCmd.Flags().StringVarP(&jobInfo.IncrementalSnapshot.Name, "incremental", "i", "", "See the -i flag on zfs send for more information") 79 | sendCmd.Flags().StringVarP(&fullIncremental, "intermediary", "I", "", "See the -I flag on zfs send for more information") 80 | sendCmd.Flags().BoolVarP(&jobInfo.Properties, "properties", "p", false, "See the -p flag on zfs send for more information.") 81 | sendCmd.Flags().BoolVarP(&jobInfo.Raw, "raw", "w", false, "See the -w flag on zfs send for more information.") 82 | 83 | // Specific to download only 84 | sendCmd.Flags().Uint64Var( 85 | &jobInfo.VolumeSize, 86 | "volsize", 87 | 200, 88 | "the maximum size (in MiB) a volume should be before splitting to a new volume. Note: zfsbackup will try its best to stay close/under "+ 89 | "this limit but it is not guaranteed.", 90 | ) 91 | sendCmd.Flags().IntVar( 92 | &jobInfo.CompressionLevel, 93 | "compressionLevel", 94 | 6, 95 | "the compression level to use with the compressor. Valid values are between 1-9.", 96 | ) 97 | sendCmd.Flags().BoolVar( 98 | &jobInfo.Resume, 99 | "resume", 100 | false, 101 | "set this flag to true when you want to try and resume a previously cancled or failed backup. 
It is up to the caller to ensure the same "+ 102 | "command line arguments are provided between the original backup and the resumed one.", 103 | ) 104 | sendCmd.Flags().BoolVar( 105 | &jobInfo.Full, 106 | "full", 107 | false, 108 | "set this flag to take a full backup of the specified volume using the most recent snapshot.", 109 | ) 110 | sendCmd.Flags().BoolVar( 111 | &jobInfo.Incremental, 112 | "increment", 113 | false, 114 | "set this flag to do an incremental backup of the most recent snapshot from the most recent snapshot found in the target.", 115 | ) 116 | sendCmd.Flags().StringVar( 117 | &jobInfo.SnapshotPrefix, 118 | "snapshotPrefix", 119 | "", 120 | "Only consider snapshots starting with the given snapshot prefix", 121 | ) 122 | sendCmd.Flags().DurationVar( 123 | &jobInfo.FullIfOlderThan, 124 | "fullIfOlderThan", 125 | -1*time.Minute, 126 | "set this flag to do an incremental backup of the most recent snapshot from the most recent snapshot found in the target unless the "+ 127 | "it's been greater than the time specified in this flag, then do a full backup.", 128 | ) 129 | sendCmd.Flags().StringVar( 130 | &jobInfo.Compressor, 131 | "compressor", 132 | files.InternalCompressor, 133 | "specify to use the internal (parallel) gzip implementation or an external binary (e.g. gzip, bzip2, pigz, lzma, xz, etc.) Syntax "+ 134 | "must be similar to the gzip compression tool) to compress the stream for storage. Please take into consideration time, memory, "+ 135 | "and CPU usage for any of the compressors used. All manifests utilize the internal compressor. If value is zfs, the zfs stream "+ 136 | "will be created compressed. See the -c flag on zfs send for more information.", 137 | ) 138 | sendCmd.Flags().IntVar( 139 | &jobInfo.MaxFileBuffer, 140 | "maxFileBuffer", 141 | 5, 142 | "the maximum number of files to have active during the upload process. Should be set to at least the number of max parallel uploads. 
"+ 143 | "Set to 0 to bypass local storage and upload straight to your destination - this will limit you to a single destination and disable "+ 144 | "any hash checks for the upload where available.", 145 | ) 146 | sendCmd.Flags().IntVar( 147 | &jobInfo.MaxParallelUploads, 148 | "maxParallelUploads", 149 | 4, 150 | "the maximum number of uploads to run in parallel.", 151 | ) 152 | sendCmd.Flags().Uint64Var( 153 | &maxUploadSpeed, 154 | "maxUploadSpeed", 155 | 0, 156 | "the maximum upload speed (in KB/s) the program should use between all upload workers. Use 0 for no limit", 157 | ) 158 | sendCmd.Flags().DurationVar( 159 | &jobInfo.MaxRetryTime, 160 | "maxRetryTime", 161 | 12*time.Hour, 162 | "the maximum time that can elapse when retrying a failed upload. Use 0 for no limit.", 163 | ) 164 | sendCmd.Flags().DurationVar( 165 | &jobInfo.MaxBackoffTime, 166 | "maxBackoffTime", 167 | 30*time.Minute, 168 | "the maximum delay you'd want a worker to sleep before retrying an upload.", 169 | ) 170 | sendCmd.Flags().StringVar( 171 | &jobInfo.Separator, 172 | "separator", 173 | "|", 174 | "the separator to use between object component names.", 175 | ) 176 | sendCmd.Flags().IntVar( 177 | &jobInfo.UploadChunkSize, 178 | "uploadChunkSize", 179 | 10, 180 | "the chunk size, in MiB, to use when uploading. 
A minimum of 5MiB and maximum of 100MiB is enforced.", 181 | ) 182 | } 183 | 184 | // ResetSendJobInfo exists solely for integration testing 185 | func ResetSendJobInfo() { 186 | resetRootFlags() 187 | // ZFS send command options 188 | jobInfo.Replication = false 189 | jobInfo.SkipMissing = false 190 | jobInfo.Deduplication = false 191 | jobInfo.IncrementalSnapshot = files.SnapshotInfo{} 192 | jobInfo.BaseSnapshot = files.SnapshotInfo{} 193 | fullIncremental = "" 194 | jobInfo.Properties = false 195 | 196 | // Specific to download only 197 | jobInfo.VolumeSize = 200 198 | jobInfo.CompressionLevel = 6 199 | jobInfo.Resume = false 200 | jobInfo.Full = false 201 | jobInfo.Incremental = false 202 | jobInfo.FullIfOlderThan = -1 * time.Minute 203 | 204 | jobInfo.MaxFileBuffer = 5 205 | jobInfo.MaxParallelUploads = 4 206 | maxUploadSpeed = 0 207 | jobInfo.MaxRetryTime = 12 * time.Hour 208 | jobInfo.MaxBackoffTime = 30 * time.Minute 209 | jobInfo.Separator = "|" 210 | jobInfo.UploadChunkSize = 10 211 | jobInfo.Compressor = files.InternalCompressor 212 | } 213 | 214 | // nolint:gocyclo,funlen // Will do later 215 | func updateJobInfo(args []string) error { 216 | jobInfo.StartTime = time.Now() 217 | jobInfo.Version = config.VersionNumber 218 | 219 | if fullIncremental != "" { 220 | jobInfo.IncrementalSnapshot.Name = fullIncremental 221 | jobInfo.IntermediaryIncremental = true 222 | } 223 | 224 | parts := strings.Split(args[0], "@") 225 | jobInfo.VolumeName = parts[0] 226 | jobInfo.Destinations = strings.Split(args[1], ",") 227 | 228 | if len(jobInfo.Destinations) > 1 && jobInfo.MaxFileBuffer == 0 { 229 | log.AppLogger.Errorf("Specifying multiple destinations and a MaxFileBuffer size of 0 is unsupported.") 230 | return errInvalidInput 231 | } 232 | 233 | for _, destination := range jobInfo.Destinations { 234 | _, err := backends.GetBackendForURI(destination) 235 | if err == backends.ErrInvalidPrefix { 236 | log.AppLogger.Errorf("Unsupported prefix provided in destination URI, 
was given %s", destination) 237 | return err 238 | } else if err == backends.ErrInvalidURI { 239 | log.AppLogger.Errorf("Unsupported destination URI, was given %s", destination) 240 | return err 241 | } 242 | } 243 | 244 | // If we aren't using a "smart" option, rely on the user to provide the snapshots to use! 245 | if !usingSmartOption() { 246 | if len(parts) != 2 { 247 | log.AppLogger.Errorf("Invalid base snapshot provided. Expected format @, got %s instead", args[0]) 248 | return errInvalidInput 249 | } 250 | jobInfo.BaseSnapshot = files.SnapshotInfo{Name: parts[1]} 251 | creationTime, err := zfs.GetCreationDate(context.TODO(), args[0]) 252 | if err != nil { 253 | log.AppLogger.Errorf("Error trying to get creation date of specified base snapshot - %v", err) 254 | return err 255 | } 256 | jobInfo.BaseSnapshot.CreationTime = creationTime 257 | 258 | if jobInfo.IncrementalSnapshot.Name != "" { 259 | var targetName string 260 | jobInfo.IncrementalSnapshot.Name = strings.TrimPrefix(jobInfo.IncrementalSnapshot.Name, jobInfo.VolumeName) 261 | if strings.HasPrefix(jobInfo.IncrementalSnapshot.Name, "#") { 262 | jobInfo.IncrementalSnapshot.Name = strings.TrimPrefix(jobInfo.IncrementalSnapshot.Name, "#") 263 | targetName = fmt.Sprintf("%s#%s", jobInfo.VolumeName, jobInfo.IncrementalSnapshot.Name) 264 | jobInfo.IncrementalSnapshot.Bookmark = true 265 | } else { 266 | jobInfo.IncrementalSnapshot.Name = strings.TrimPrefix(jobInfo.IncrementalSnapshot.Name, "@") 267 | targetName = fmt.Sprintf("%s@%s", jobInfo.VolumeName, jobInfo.IncrementalSnapshot.Name) 268 | } 269 | 270 | creationTime, err = zfs.GetCreationDate(context.TODO(), targetName) 271 | if err != nil { 272 | log.AppLogger.Errorf("Error trying to get creation date of specified incremental snapshot/bookmark - %v", err) 273 | return err 274 | } 275 | jobInfo.IncrementalSnapshot.CreationTime = creationTime 276 | } 277 | } else { 278 | // Some basic checks here 279 | onlyOneCheck := 0 280 | if jobInfo.Full { 281 | 
onlyOneCheck++ 282 | } 283 | if jobInfo.Incremental { 284 | onlyOneCheck++ 285 | } 286 | if jobInfo.FullIfOlderThan != -1*time.Minute { 287 | onlyOneCheck++ 288 | } 289 | if onlyOneCheck > 1 { 290 | log.AppLogger.Errorf("Please specify only one \"smart\" option at a time") 291 | return errInvalidInput 292 | } 293 | if len(parts) != 1 { 294 | log.AppLogger.Errorf("When using a smart option, please only specify the volume to backup, do not include any snapshot information.") 295 | return errInvalidInput 296 | } 297 | if err := backup.ProcessSmartOptions(context.Background(), &jobInfo); err != nil { 298 | log.AppLogger.Errorf("Error while trying to process smart option - %v", err) 299 | return err 300 | } 301 | log.AppLogger.Debugf("Utilizing smart option.") 302 | } 303 | 304 | return nil 305 | } 306 | 307 | func usingSmartOption() bool { 308 | return jobInfo.Full || jobInfo.Incremental || jobInfo.FullIfOlderThan != -1*time.Minute 309 | } 310 | 311 | func validateSendFlags(cmd *cobra.Command, args []string) error { 312 | if len(args) != 2 { 313 | _ = cmd.Usage() 314 | return errInvalidInput 315 | } 316 | 317 | if err := loadSendKeys(); err != nil { 318 | return err 319 | } 320 | 321 | if jobInfo.IncrementalSnapshot.Name != "" && fullIncremental != "" { 322 | log.AppLogger.Errorf("The flags -i and -I are mutually exclusive. 
Please specify only one of these flags.") 323 | return errInvalidInput 324 | } 325 | 326 | if err := jobInfo.ValidateSendFlags(); err != nil { 327 | log.AppLogger.Error(err) 328 | return err 329 | } 330 | 331 | return updateJobInfo(args) 332 | } 333 | -------------------------------------------------------------------------------- /cmd/version.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 
20 | 21 | package cmd 22 | 23 | import ( 24 | "encoding/json" 25 | "fmt" 26 | "runtime" 27 | 28 | "github.com/spf13/cobra" 29 | 30 | "github.com/someone1/zfsbackup-go/config" 31 | "github.com/someone1/zfsbackup-go/log" 32 | ) 33 | 34 | // versionCmd represents the version command 35 | var versionCmd = &cobra.Command{ 36 | Use: "version", 37 | Short: "Print the version of zfsbackup in use and relevant compile information", 38 | Long: `This will output the version of zfsbackup in use and information about 39 | the runtime and architecture.`, 40 | RunE: func(cmd *cobra.Command, args []string) error { 41 | var output string 42 | if config.JSONOutput { 43 | j, err := json.Marshal(struct { 44 | Name string 45 | Version string 46 | OS string 47 | Arch string 48 | Compiled string 49 | GoVersion string 50 | }{ 51 | Name: config.ProgramName, 52 | Version: config.Version(), 53 | OS: runtime.GOOS, 54 | Arch: runtime.GOARCH, 55 | Compiled: runtime.Compiler, 56 | GoVersion: runtime.Version(), 57 | }) 58 | if err != nil { 59 | log.AppLogger.Errorf("could not dump version info to JSON - %v", err) 60 | return err 61 | } 62 | output = string(j) 63 | } else { 64 | output = fmt.Sprintf( 65 | "\tProgram Name:\t%s\n\tVersion:\tv%s\n\tOS Target:\t%s\n\tArch Target:\t%s\n\tCompiled With:\t%s\n\tGo Version:\t%s", 66 | config.ProgramName, config.Version(), runtime.GOOS, runtime.GOARCH, runtime.Compiler, runtime.Version()) 67 | } 68 | fmt.Fprintln(config.Stdout, output) 69 | return nil 70 | }, 71 | } 72 | 73 | func init() { 74 | RootCmd.AddCommand(versionCmd) 75 | } 76 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "io" 5 | "os" 6 | 7 | "github.com/juju/ratelimit" 8 | ) 9 | 10 | var ( 11 | // Stdout is where to output standard messaging to 12 | Stdout io.Writer = os.Stdout 13 | // JSONOutput will signal if we should 
dump the results to Stdout JSON formatted 14 | JSONOutput = false 15 | // BackupUploadBucket is the bandwidth rate-limit bucket if we need one. 16 | BackupUploadBucket *ratelimit.Bucket 17 | // BackupTempdir is the scratch space for our output 18 | BackupTempdir string 19 | // WorkingDir is the directory that all the cache/scratch work is done for this program 20 | WorkingDir string 21 | ) 22 | -------------------------------------------------------------------------------- /config/version.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 
const (
	// VersionNumber represents the current version of zfsbackup.
	VersionNumber = 0.3
	// ProgramName is the name for zfsbackup.
	ProgramName = "zfsbackup"
)

// Version will return the current version of zfsbackup as a string
// (e.g. "0.3"). The "%g" verb with default precision uses the smallest
// number of digits that round-trips, so a future version such as 0.315
// is not silently truncated the way the previous "%.2g" would have
// rendered it ("0.32").
func Version() string {
	return fmt.Sprintf("%g", VersionNumber)
}
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package files 22 | 23 | import ( 24 | "fmt" 25 | "regexp" 26 | "strings" 27 | "time" 28 | 29 | humanize "github.com/dustin/go-humanize" 30 | "golang.org/x/crypto/openpgp" 31 | 32 | "github.com/someone1/zfsbackup-go/log" 33 | ) 34 | 35 | var disallowedSeps = regexp.MustCompile(`^[\w\-:\.]+`) // Disallowed by ZFS 36 | 37 | // JobInfo represents the relevant information for a job that can be used to read 38 | // in details of that job at a later time. 39 | type JobInfo struct { 40 | StartTime time.Time 41 | EndTime time.Time 42 | VolumeName string 43 | BaseSnapshot SnapshotInfo 44 | IncrementalSnapshot SnapshotInfo 45 | SnapshotPrefix string 46 | Raw bool 47 | Compressor string 48 | CompressionLevel int 49 | Separator string 50 | ZFSCommandLine string 51 | ZFSStreamBytes uint64 52 | Volumes []*VolumeInfo 53 | Version float64 54 | EncryptTo string 55 | SignFrom string 56 | Replication bool 57 | SkipMissing bool 58 | Deduplication bool 59 | Properties bool 60 | IntermediaryIncremental bool 61 | Resume bool `json:"-"` 62 | // "Smart" Options 63 | Full bool `json:"-"` 64 | Incremental bool `json:"-"` 65 | FullIfOlderThan time.Duration `json:"-"` 66 | 67 | // ZFS Receive options 68 | Force bool `json:"-"` 69 | FullPath bool `json:"-"` 70 | LastPath bool `json:"-"` 71 | NotMounted bool `json:"-"` 72 | Origin string `json:"-"` 73 | LocalVolume string `json:"-"` 74 | AutoRestore bool `json:"-"` 75 | 76 | Destinations []string `json:"-"` 77 | VolumeSize uint64 `json:"-"` 78 | ManifestPrefix string `json:"-"` 79 | MaxBackoffTime time.Duration `json:"-"` 80 | MaxRetryTime time.Duration `json:"-"` 81 | MaxParallelUploads int `json:"-"` 82 | MaxFileBuffer int `json:"-"` 83 | EncryptKey 
*openpgp.Entity `json:"-"` 84 | SignKey *openpgp.Entity `json:"-"` 85 | ParentSnap *JobInfo `json:"-"` 86 | UploadChunkSize int `json:"-"` 87 | } 88 | 89 | // SnapshotInfo represents a snapshot with relevant information. 90 | type SnapshotInfo struct { 91 | CreationTime time.Time 92 | Name string 93 | Bookmark bool 94 | } 95 | 96 | // Equal will test two SnapshotInfo objects for equality. This is based on the snapshot name and the time of creation 97 | func (s *SnapshotInfo) Equal(t *SnapshotInfo) bool { 98 | if s == nil || t == nil { 99 | return s == t 100 | } 101 | return strings.Compare(s.Name, t.Name) == 0 && s.CreationTime.Equal(t.CreationTime) 102 | } 103 | 104 | // TotalBytesWritten will sum up the size of all underlying Volumes to give a total 105 | // that represents how many bytes have been written. 106 | func (j *JobInfo) TotalBytesWritten() uint64 { 107 | var total uint64 108 | 109 | for _, vol := range j.Volumes { 110 | total += vol.Size 111 | } 112 | 113 | return total 114 | } 115 | 116 | // String will return a string representation of this JobInfo. 
117 | func (j *JobInfo) String() string { 118 | var output []string 119 | output = append( 120 | output, 121 | fmt.Sprintf("Volume: %s", j.VolumeName), 122 | fmt.Sprintf("Snapshot: %s (%v)", j.BaseSnapshot.Name, j.BaseSnapshot.CreationTime), 123 | ) 124 | if j.IncrementalSnapshot.Name != "" { 125 | output = append( 126 | output, 127 | fmt.Sprintf("Incremental From Snapshot: %s (%v)", j.IncrementalSnapshot.Name, j.IncrementalSnapshot.CreationTime), 128 | fmt.Sprintf("Intermediary: %v", j.IntermediaryIncremental), 129 | ) 130 | } 131 | 132 | totalWrittenBytes := j.TotalBytesWritten() 133 | 134 | output = append( 135 | output, 136 | fmt.Sprintf("Replication: %v", j.Replication), 137 | fmt.Sprintf("SkipMissing: %v", j.SkipMissing), 138 | fmt.Sprintf("Raw: %v", j.Raw), 139 | fmt.Sprintf("Archives: %d - %d bytes (%s)", len(j.Volumes), totalWrittenBytes, humanize.IBytes(totalWrittenBytes)), 140 | fmt.Sprintf("Volume Size (Raw): %d bytes (%s)", j.ZFSStreamBytes, humanize.IBytes(j.ZFSStreamBytes)), 141 | fmt.Sprintf("Uploaded: %v (took %v)\n\n", j.StartTime, j.EndTime.Sub(j.StartTime)), 142 | ) 143 | return strings.Join(output, "\n\t") 144 | } 145 | 146 | // TotalBytesStreamedAndVols will sum up the streamed bytes of all underlying Volumes to give a total 147 | // that represents how many bytes have been streamed. It will stop at any out of order volume number. 148 | func (j *JobInfo) TotalBytesStreamedAndVols() (total uint64, volnum int64) { 149 | volnum = 1 150 | if len(j.Volumes) > 0 { 151 | for _, vol := range j.Volumes { 152 | if vol.VolumeNumber != volnum { 153 | break 154 | } 155 | total += vol.ZFSStreamBytes 156 | volnum++ 157 | } 158 | j.Volumes = j.Volumes[:volnum-1] 159 | } 160 | return 161 | } 162 | 163 | // ValidateSendFlags will check if the options assigned to this JobInfo object is 164 | // properly within the bounds for a send backup operation. 
165 | // nolint:golint,stylecheck // Error strings used as log messages for user 166 | func (j *JobInfo) ValidateSendFlags() error { 167 | if j.MaxFileBuffer < 0 { 168 | return fmt.Errorf("The number of active files must be set to a value greater than or equal to 0. Was given %d", j.MaxFileBuffer) 169 | } 170 | 171 | if j.MaxParallelUploads <= 0 { 172 | return fmt.Errorf("The number of parallel uploads must be set to a value greater than 0. Was given %d", j.MaxParallelUploads) 173 | } 174 | 175 | if j.MaxFileBuffer < j.MaxParallelUploads { 176 | log.AppLogger.Warningf( 177 | "The number of parallel uploads (%d) is greater than the number of active files allowed (%d), this may result in an unachievable max parallel upload target.", //nolint:lll // Long log output 178 | j.MaxParallelUploads, 179 | j.MaxFileBuffer, 180 | ) 181 | } 182 | 183 | if j.MaxRetryTime < 0 { 184 | return fmt.Errorf("The max retry time must be set to a value greater than or equal to 0. Was given %d", j.MaxRetryTime) 185 | } 186 | 187 | if j.MaxBackoffTime <= 0 { 188 | return fmt.Errorf("The max backoff time must be set to a value greater than 0. Was given %d", j.MaxBackoffTime) 189 | } 190 | 191 | if j.CompressionLevel < 1 || j.CompressionLevel > 9 { 192 | return fmt.Errorf("The compression level specified must be between 1 and 9. 
Was given %d", j.CompressionLevel) 193 | } 194 | 195 | if disallowedSeps.MatchString(j.Separator) { 196 | return fmt.Errorf( 197 | "The separator provided (%s) should not be used as it can conflict with allowed characters in zfs components", 198 | j.Separator, 199 | ) 200 | } 201 | 202 | if j.UploadChunkSize < 5 || j.UploadChunkSize > 100 { 203 | return fmt.Errorf("The uploadChunkSize provided (%d) is not between 5 and 100", j.UploadChunkSize) 204 | } 205 | 206 | return nil 207 | } 208 | 209 | func (j *JobInfo) ManifestObjectName() string { 210 | extensions := []string{"manifest"} 211 | nameParts := []string{j.ManifestPrefix} 212 | 213 | baseParts, ext := j.volumeNameParts(true) 214 | extensions = append(extensions, ext...) 215 | nameParts = append(nameParts, baseParts...) 216 | 217 | return fmt.Sprintf("%s.%s", strings.Join(nameParts, j.Separator), strings.Join(extensions, ".")) 218 | } 219 | 220 | func (j *JobInfo) BackupVolumeObjectName(volumeNumber int64) string { 221 | extensions := []string{"zstream"} 222 | 223 | nameParts, ext := j.volumeNameParts(false) 224 | extensions = append(extensions, ext...) 225 | extensions = append(extensions, fmt.Sprintf("vol%d", volumeNumber)) 226 | 227 | return fmt.Sprintf("%s.%s", strings.Join(nameParts, j.Separator), strings.Join(extensions, ".")) 228 | } 229 | 230 | func (j *JobInfo) volumeNameParts(isManifest bool) (nameParts, extensions []string) { 231 | extensions = make([]string, 0, 2) 232 | 233 | if j.EncryptKey != nil || j.SignKey != nil { 234 | extensions = append(extensions, "pgp") 235 | } 236 | 237 | compressorName := j.Compressor 238 | if isManifest { 239 | compressorName = InternalCompressor 240 | } 241 | 242 | switch compressorName { 243 | case InternalCompressor: 244 | extensions = append([]string{"gz"}, extensions...) 245 | case "", ZfsCompressor: 246 | default: 247 | extensions = append([]string{compressorName}, extensions...) 
248 | } 249 | 250 | nameParts = []string{j.VolumeName} 251 | if j.IncrementalSnapshot.Name != "" { 252 | nameParts = append(nameParts, j.IncrementalSnapshot.Name, "to", j.BaseSnapshot.Name) 253 | } else { 254 | nameParts = append(nameParts, j.BaseSnapshot.Name) 255 | } 256 | 257 | return nameParts, extensions 258 | } 259 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/someone1/zfsbackup-go 2 | 3 | go 1.18 4 | 5 | require ( 6 | cloud.google.com/go/storage v1.27.0 7 | github.com/Azure/azure-sdk-for-go v66.0.0+incompatible 8 | github.com/Azure/azure-storage-blob-go v0.11.0 9 | github.com/aws/aws-sdk-go v1.44.104 10 | github.com/cenkalti/backoff v2.2.1+incompatible 11 | github.com/dustin/go-humanize v1.0.0 12 | github.com/juju/ratelimit v1.0.2 13 | github.com/klauspost/pgzip v1.2.5 14 | github.com/kurin/blazer v0.5.3 15 | github.com/miolini/datacounter v1.0.3 16 | github.com/nightlyone/lockfile v1.0.0 17 | github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 18 | github.com/pkg/errors v0.9.1 19 | github.com/pkg/sftp v1.13.5 20 | github.com/spf13/cobra v1.5.0 21 | golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 22 | golang.org/x/sync v0.0.0-20220907140024-f12130a52804 23 | google.golang.org/api v0.97.0 24 | ) 25 | 26 | require ( 27 | cloud.google.com/go v0.104.0 // indirect 28 | cloud.google.com/go/compute v1.10.0 // indirect 29 | cloud.google.com/go/iam v0.4.0 // indirect 30 | github.com/Azure/azure-pipeline-go v0.2.3 // indirect 31 | github.com/Azure/go-autorest v14.2.0+incompatible // indirect 32 | github.com/Azure/go-autorest/autorest v0.10.2 // indirect 33 | github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect 34 | github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect 35 | github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect 36 | github.com/Azure/go-autorest/logger v0.2.1 // 
indirect 37 | github.com/Azure/go-autorest/tracing v0.6.0 // indirect 38 | github.com/davecgh/go-spew v1.1.1 // indirect 39 | github.com/dnaeon/go-vcr v1.1.0 // indirect 40 | github.com/form3tech-oss/jwt-go v3.2.2+incompatible // indirect 41 | github.com/gofrs/uuid v4.3.0+incompatible // indirect 42 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 43 | github.com/golang/protobuf v1.5.2 // indirect 44 | github.com/google/go-cmp v0.5.9 // indirect 45 | github.com/google/uuid v1.3.0 // indirect 46 | github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect 47 | github.com/googleapis/gax-go/v2 v2.5.1 // indirect 48 | github.com/inconshreveable/mousetrap v1.0.1 // indirect 49 | github.com/jmespath/go-jmespath v0.4.0 // indirect 50 | github.com/klauspost/compress v1.15.10 // indirect 51 | github.com/kr/fs v0.1.0 // indirect 52 | github.com/mattn/go-ieproxy v0.0.9 // indirect 53 | github.com/spf13/pflag v1.0.5 // indirect 54 | go.opencensus.io v0.23.0 // indirect 55 | golang.org/x/net v0.0.0-20220921203646-d300de134e69 // indirect 56 | golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect 57 | golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect 58 | golang.org/x/term v0.0.0-20220919170432-7a66f970e087 // indirect 59 | golang.org/x/text v0.3.7 // indirect 60 | golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect 61 | google.golang.org/appengine v1.6.7 // indirect 62 | google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737 // indirect 63 | google.golang.org/grpc v1.49.0 // indirect 64 | google.golang.org/protobuf v1.28.1 // indirect 65 | ) 66 | -------------------------------------------------------------------------------- /integration_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -ev 2 | 3 | SCRATCHDIR=$(mktemp -d) 4 | mkdir -p $SCRATCHDIR/integrationtest 5 | 6 | TARGET="file://$SCRATCHDIR/integrationtest/" 7 | 
USER="test@example.com" 8 | 9 | echo "Basic send/recieve test with encryption and compression" 10 | go build ./ 11 | 12 | sudo ./zfsbackup-go send \ 13 | --logLevel=debug \ 14 | --workingDirectory=$SCRATCHDIR \ 15 | --compressor=xz \ 16 | --compressionLevel=2 \ 17 | --publicKeyRingPath=public.pgp \ 18 | --encryptTo=$USER \ 19 | "tank/data@a" \ 20 | $TARGET 21 | 22 | echo "Test manifest file" 23 | gpg2 --output data.manifest.gz --decrypt $SCRATCHDIR/integrationtest/manifests\|tank/data\|a.manifest.gz.pgp 24 | gzip -d data.manifest.gz 25 | cat data.manifest | jq '.VolumeName' | grep -q 'tank/data' 26 | 27 | echo "Test data files" 28 | gpg2 --output data.vol1.xz --decrypt $SCRATCHDIR/integrationtest/tank/data\|a.zstream.xz.pgp.vol1 29 | gpg2 --output data.vol2.xz --decrypt $SCRATCHDIR/integrationtest/tank/data\|a.zstream.xz.pgp.vol2 30 | xz -d data.vol1.xz 31 | xz -d data.vol2.xz 32 | cat data.vol1 data.vol2 | sudo zfs receive -F tank/integrationtest 33 | 34 | echo "Cleaning up" 35 | rm data.vol1 data.vol2 data.manifest "zfsbackup-go" 36 | sudo rm -rf $SCRATCHDIR 37 | sudo zfs destroy -f -r tank/integrationtest 38 | -------------------------------------------------------------------------------- /log/log.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 
12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package log 22 | 23 | import ( 24 | "github.com/op/go-logging" 25 | 26 | "github.com/someone1/zfsbackup-go/config" 27 | ) 28 | 29 | // LogModuleName is the module name for our application's logger. 30 | const LogModuleName = config.ProgramName 31 | 32 | // AppLogger is our application's logger. 33 | var AppLogger = logging.MustGetLogger(LogModuleName) 34 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 20 | 21 | package main 22 | 23 | import ( 24 | "context" 25 | 26 | "github.com/someone1/zfsbackup-go/cmd" 27 | ) 28 | 29 | func main() { 30 | ctx, cancel := context.WithCancel(context.Background()) 31 | defer cancel() 32 | 33 | cmd.Execute(ctx) 34 | } 35 | -------------------------------------------------------------------------------- /pgp/pgp.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2016 Prateek Malhotra (someone1@gmail.com) 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | // THE SOFTWARE. 
20 | 21 | package pgp 22 | 23 | import ( 24 | "fmt" 25 | "os" 26 | "strings" 27 | 28 | "golang.org/x/crypto/openpgp" 29 | 30 | "github.com/someone1/zfsbackup-go/log" 31 | ) 32 | 33 | var ( 34 | pubRing openpgp.EntityList 35 | secRing openpgp.EntityList 36 | ) 37 | 38 | // GetPublicKeyByEmail will return the key from the public PGP ring (if available) matching 39 | // the provided email address. 40 | func GetPublicKeyByEmail(email string) *openpgp.Entity { 41 | return getKeyByEmail(pubRing, email) 42 | } 43 | 44 | // GetPrivateKeyByEmail will return the key from the secret PGP ring (if available) matching 45 | // the provided email address. 46 | func GetPrivateKeyByEmail(email string) *openpgp.Entity { 47 | return getKeyByEmail(secRing, email) 48 | } 49 | 50 | // GetCombinedKeyRing will return both the public and secret key rings combined 51 | func GetCombinedKeyRing() openpgp.KeyRing { 52 | return append(pubRing, secRing...) 53 | } 54 | 55 | // PromptFunc is used to satisfy the openpgp package's requirements 56 | func PromptFunc(keys []openpgp.Key, symmetric bool) ([]byte, error) { 57 | panic("secret keys should have been decrypted already") 58 | } 59 | 60 | func getKeyByEmail(keyring openpgp.EntityList, email string) *openpgp.Entity { 61 | for _, entity := range keyring { 62 | for _, ident := range entity.Identities { 63 | if ident.UserId.Email == email { 64 | return entity 65 | } 66 | } 67 | } 68 | 69 | return nil 70 | } 71 | 72 | // LoadPublicRing will open and parse the PGP keyring from the file path provided. 73 | func LoadPublicRing(path string) error { 74 | pubringFile, err := os.Open(path) 75 | if err != nil { 76 | return err 77 | } 78 | pubRing, err = openpgp.ReadArmoredKeyRing(pubringFile) 79 | return err 80 | } 81 | 82 | // LoadPrivateRing will open and parse the PGP keyring from the file path provided. 
83 | func LoadPrivateRing(path string) error { 84 | privringFile, err := os.Open(path) 85 | if err != nil { 86 | return err 87 | } 88 | secRing, err = openpgp.ReadArmoredKeyRing(privringFile) 89 | 90 | return err 91 | } 92 | 93 | // PrintPGPDebugInformation will output a debug log entry listing the keys it has read in from each keyring. 94 | func PrintPGPDebugInformation() { 95 | debugStr := make([]string, 0, 4) 96 | debugStr = append(debugStr, "PGP Debug Info:\nLoaded Private Keys:") 97 | for _, key := range secRing { 98 | debugStr = append(debugStr, fmt.Sprintf("\t%v\n\t%v", key.PrimaryKey.KeyIdString(), key.Identities)) 99 | } 100 | 101 | debugStr = append(debugStr, "\nLoaded Public Keys:") 102 | for _, key := range pubRing { 103 | debugStr = append(debugStr, fmt.Sprintf("\t%v\n\t%v", key.PrimaryKey.KeyIdString(), key.Identities)) 104 | } 105 | 106 | log.AppLogger.Debugf("%s", strings.Join(debugStr, "\n")) 107 | } 108 | -------------------------------------------------------------------------------- /travis-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -ev 2 | 3 | # Setup ZFS Pool 4 | dd if=/dev/zero of=${VDEV} bs=2048 count=1572864 5 | sudo zpool create tank ${VDEV} 6 | sudo zfs set snapdir=visible compression=lz4 atime=off tank 7 | sudo zfs create tank/data 8 | sudo dd if=/dev/urandom of=/tank/data/a bs=1024 count=409600 9 | sudo zfs snapshot tank/data@a 10 | sudo dd if=/dev/urandom of=/tank/data/b bs=256 count=409600 11 | sudo zfs snapshot tank/data@b 12 | sudo dd if=/dev/urandom of=/tank/data/c bs=256 count=409600 13 | sudo zfs snapshot tank/data@c 14 | 15 | # Setup Docker containers 16 | sudo docker pull mcr.microsoft.com/azure-storage/azurite 17 | sudo docker pull minio/minio 18 | sudo docker pull fsouza/fake-gcs-server 19 | sudo docker run -d -p 10000:10000 --rm --name azurite mcr.microsoft.com/azure-storage/azurite azurite-blob --blobHost 0.0.0.0 20 | sudo docker run -d -p 9000:9000 --rm --name 
minio minio/minio server /data 21 | sudo docker run -d -p 4443:4443 --rm --name fake-gcs-server fsouza/fake-gcs-server -public-host localhost 22 | 23 | # PGP Test keys 24 | cat >test <