├── go.sum
├── go.mod
├── go.work
├── b2
├── licenses.csv
├── readerat.go
├── key.go
├── buffer.go
├── monitor.go
├── reader.go
├── iterator.go
├── writer.go
├── baseline.go
├── backend.go
└── b2.go
├── bin
└── b2keys
│ ├── go.sum
│ ├── go.mod
│ └── b2keys.go
├── .travis.yml
├── AUTHORS
├── .gitignore
├── LICENSE
├── internal
├── b2assets
│ ├── gen.go
│ ├── data
│ │ └── status.html
│ └── b2assets.go
├── blog
│ └── blog.go
├── bin
│ └── cleanup
│ │ └── cleanup.go
├── retry
│ ├── retry.go
│ └── options.go
└── b2types
│ └── b2types.go
├── base
├── strings.go
└── strings_test.go
├── x
├── window
│ ├── counter_test.go
│ ├── accum_test.go
│ ├── window_test.go
│ └── window.go
├── consistent
│ ├── consistent_test.go
│ └── consistent.go
└── transport
│ └── transport.go
├── CHANGELOG.md
├── examples
└── simple
│ └── simple.go
├── README.md
└── CONTRIBUTING.md
/go.sum:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/Backblaze/blazer
2 |
3 | go 1.18
4 |
--------------------------------------------------------------------------------
/go.work:
--------------------------------------------------------------------------------
1 | go 1.18
2 |
3 | use (
4 | .
5 | ./bin/b2keys
6 | )
7 |
--------------------------------------------------------------------------------
/b2/licenses.csv:
--------------------------------------------------------------------------------
1 | github.com/Backblaze/blazer,https://github.com/Backblaze/blazer/blob/HEAD/LICENSE,Apache-2.0
2 |
--------------------------------------------------------------------------------
/bin/b2keys/go.sum:
--------------------------------------------------------------------------------
1 | github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE=
2 | github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
3 |
--------------------------------------------------------------------------------
/bin/b2keys/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/Backblaze/blazer/bin/b2keys
2 |
3 | go 1.18
4 |
5 | replace github.com/Backblaze/blazer => ../..
6 |
7 | require (
8 | github.com/Backblaze/blazer v0.7.2
9 | github.com/google/subcommands v1.2.0
10 | )
11 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: go
2 |
3 | go:
4 | - tip
5 |
6 | branches:
7 | only:
8 | - master
9 |
10 | before_script: go run internal/bin/cleanup/cleanup.go
11 | script:
12 | - go test -v ./base ./b2 ./x/...
13 | - go vet -v ./base ./b2 ./x/...
14 |
--------------------------------------------------------------------------------
/AUTHORS:
--------------------------------------------------------------------------------
1 | # This is the list of Blazer authors for copyright purposes.
2 | #
3 | # This does not necessarily list everyone who has contributed code, since in
4 | # some cases, their employer may be the copyright holder. To see the full list
5 | # of contributors, see the revision history in source control.
6 | #
7 | # Tag yourself.
8 | Google LLC
9 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Compiled Object files, Static and Dynamic libs (Shared Objects)
2 | *.o
3 | *.a
4 | *.so
5 |
6 | # Folders
7 | _obj
8 | _test
9 |
10 | # IntelliJ
11 | .idea
12 |
13 | # Architecture specific extensions/prefixes
14 | *.[568vq]
15 | [568vq].out
16 |
17 | *.cgo1.go
18 | *.cgo2.c
19 | _cgo_defun.c
20 | _cgo_gotypes.go
21 | _cgo_export.*
22 |
23 | _testmain.go
24 |
25 | *.exe
26 | *.test
27 | *.prof
28 |
29 | # Binaries
30 | /cleanup
31 | /simple
32 | /b2keys
33 |
34 | .go-version
35 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2016, the Blazer authors
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/internal/b2assets/gen.go:
--------------------------------------------------------------------------------
1 | // Copyright 2018, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // Package b2assets contains data required by other libraries in blazer.
16 | package b2assets
17 |
18 | //go:generate go-bindata -pkg $GOPACKAGE -o b2assets.go data/
19 |
--------------------------------------------------------------------------------
/base/strings.go:
--------------------------------------------------------------------------------
1 | // Copyright 2017, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package base
16 |
17 | import (
18 | "net/url"
19 | "strings"
20 | )
21 |
22 | func escape(s string) string {
23 | return strings.Replace(url.QueryEscape(s), "%2F", "/", -1)
24 | }
25 |
26 | func unescape(s string) (string, error) {
27 | return url.QueryUnescape(s)
28 | }
29 |
--------------------------------------------------------------------------------
/internal/b2assets/data/status.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | b2 client status
5 |
6 |
7 | {{$methods := methods .}}
8 | {{$durations := durations .}}
9 | {{$table := table .}}
10 | count by code
11 |
12 | {{range $method := $methods}}
13 |
14 | | {{$method}} |
15 | {{range $duration := $durations}}
16 | {{index $table $method $duration}} |
17 | {{end}}
18 |
19 | {{end}}
20 |
21 | uploads
22 | {{range $name, $val := .Writers}}
23 | {{ $name }}
24 | {{range $id, $prog := $val.Progress}}
25 | {{inc $id}}
26 | {{end}}
27 | {{end}}
28 | downloads
29 | {{range $name, $val := .Readers}}
30 | {{ $name }}
31 | {{range $id, $prog := $val.Progress}}
32 | {{inc $id}}
33 | {{end}}
34 | {{end}}
35 |
36 |
37 |
--------------------------------------------------------------------------------
/base/strings_test.go:
--------------------------------------------------------------------------------
1 | package base
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func TestEncodeDecode(t *testing.T) {
8 | // crashes identified by go-fuzz
9 | origs := []string{
10 | "&\x020000",
11 | "&\x020000\x9c",
12 | "&\x020\x9c0",
13 | "&\x0230j",
14 | "&\x02\x98000",
15 | "&\x02\x983\xc8j00",
16 | "00\x000",
17 | "00\x0000",
18 | "00\x0000000000000",
19 | "\x11\x030",
20 | }
21 |
22 | for _, orig := range origs {
23 | escaped := escape(orig)
24 | unescaped, err := unescape(escaped)
25 | if err != nil {
26 | t.Errorf("%s: orig: %#v, escaped: %#v, unescaped: %#v\n", err.Error(), orig, escaped, unescaped)
27 | continue
28 | }
29 |
30 | if unescaped != orig {
31 | t.Errorf("expected: %#v, got: %#v", orig, unescaped)
32 | }
33 | }
34 | }
35 |
36 | func Fuzz(f *testing.F) {
37 | f.Fuzz(func(t *testing.T, orig string) {
38 | escaped := escape(orig)
39 |
40 | unescaped, err := unescape(escaped)
41 | if err != nil {
42 | t.Errorf("Can't unescape escaped string %q", escaped)
43 | }
44 |
45 | if unescaped != orig {
46 | t.Errorf("Before: %q, after: %q", orig, unescaped)
47 | }
48 | })
49 | }
50 |
--------------------------------------------------------------------------------
/b2/readerat.go:
--------------------------------------------------------------------------------
1 | // Copyright 2017, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package b2
16 |
17 | import (
18 | "io"
19 | "sync"
20 | )
21 |
22 | type readerAt struct {
23 | rs io.ReadSeeker
24 | mu sync.Mutex
25 | }
26 |
27 | func (r *readerAt) ReadAt(p []byte, off int64) (int, error) {
28 | r.mu.Lock()
29 | defer r.mu.Unlock()
30 |
31 | // ReadAt is supposed to preserve the offset.
32 | cur, err := r.rs.Seek(0, io.SeekCurrent)
33 | if err != nil {
34 | return 0, err
35 | }
36 | defer r.rs.Seek(cur, io.SeekStart)
37 |
38 | if _, err := r.rs.Seek(off, io.SeekStart); err != nil {
39 | return 0, err
40 | }
41 | return io.ReadFull(r.rs, p)
42 | }
43 |
44 | // wraps a ReadSeeker in a mutex to provite a ReaderAt how is this not in the
45 | // io package?
46 | func enReaderAt(rs io.ReadSeeker) io.ReaderAt {
47 | return &readerAt{rs: rs}
48 | }
49 |
--------------------------------------------------------------------------------
/x/window/counter_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2018, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package window_test
16 |
17 | import (
18 | "fmt"
19 | "time"
20 |
21 | "github.com/Backblaze/blazer/x/window"
22 | )
23 |
24 | type Counter struct {
25 | w *window.Window
26 | }
27 |
28 | func (c Counter) Add() {
29 | c.w.Insert(1)
30 | }
31 |
32 | func (c Counter) Count() int {
33 | v := c.w.Reduce()
34 | return v.(int)
35 | }
36 |
37 | func New(size time.Duration) Counter {
38 | r := func(i, j interface{}) interface{} {
39 | a, ok := i.(int)
40 | if !ok {
41 | a = 0
42 | }
43 | b, ok := j.(int)
44 | if !ok {
45 | b = 0
46 | }
47 | return a + b
48 | }
49 | return Counter{w: window.New(size, time.Second, r)}
50 | }
51 |
52 | func Example_counter() {
53 | c := New(time.Minute)
54 | c.Add()
55 | c.Add()
56 | c.Add()
57 | fmt.Printf("total: %d\n", c.Count())
58 | // Output:
59 | // total: 3
60 | }
61 |
--------------------------------------------------------------------------------
/internal/blog/blog.go:
--------------------------------------------------------------------------------
1 | // Copyright 2017, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // Package blog implements a private logger, in the manner of glog, without
16 | // polluting the flag namespace or leaving files all over /tmp.
17 | //
18 | // It has almost no features, and a bunch of global state.
19 | package blog
20 |
21 | import (
22 | "log"
23 | "os"
24 | "strconv"
25 | )
26 |
27 | var level int32
28 |
29 | type Verbose bool
30 |
31 | func init() {
32 | lvl := os.Getenv("B2_LOG_LEVEL")
33 | i, err := strconv.ParseInt(lvl, 10, 32)
34 | if err != nil {
35 | return
36 | }
37 | level = int32(i)
38 | }
39 |
40 | func (v Verbose) Info(a ...interface{}) {
41 | if v {
42 | log.Print(a...)
43 | }
44 | }
45 |
46 | func (v Verbose) Infof(format string, a ...interface{}) {
47 | if v {
48 | log.Printf(format, a...)
49 | }
50 | }
51 |
52 | func V(target int32) Verbose {
53 | return Verbose(target <= level)
54 | }
55 |
--------------------------------------------------------------------------------
/x/window/accum_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2018, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package window_test
16 |
17 | import (
18 | "fmt"
19 | "time"
20 |
21 | "github.com/Backblaze/blazer/x/window"
22 | )
23 |
24 | type Accumulator struct {
25 | w *window.Window
26 | }
27 |
28 | func (a Accumulator) Add(s string) {
29 | a.w.Insert([]string{s})
30 | }
31 |
32 | func (a Accumulator) All() []string {
33 | v := a.w.Reduce()
34 | return v.([]string)
35 | }
36 |
37 | func NewAccum(size time.Duration) Accumulator {
38 | r := func(i, j interface{}) interface{} {
39 | a, ok := i.([]string)
40 | if !ok {
41 | a = nil
42 | }
43 | b, ok := j.([]string)
44 | if !ok {
45 | b = nil
46 | }
47 | for _, s := range b {
48 | a = append(a, s)
49 | }
50 | return a
51 | }
52 | return Accumulator{w: window.New(size, time.Second, r)}
53 | }
54 |
55 | func Example_accumulator() {
56 | a := NewAccum(time.Minute)
57 | a.Add("this")
58 | a.Add("is")
59 | a.Add("that")
60 | fmt.Printf("total: %v\n", a.All())
61 | // Output:
62 | // total: [this is that]
63 | }
64 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | Going forward from v0.6.0 (the first new version after the move to https://github.com/Backblaze/blazer), all notable changes to this project will be documented in this file.
4 |
5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7 |
8 | ## [Unreleased]
9 |
10 | - Nothing at present
11 |
12 | ## [0.7.2] - 2025-01-23
13 |
14 | ### Changed
15 |
16 | - Removed unused `bonfire` and `pyre` code, greatly reducing the number of dependencies and the amount of work required to keep them up to date.
17 | - Restructured the repository as a [Go workspace](https://go.dev/ref/mod#workspaces) and moved the sole remaining third-party dependency into `bin/b2keys`.
18 |
19 | ## [0.7.1] - 2024-10-07
20 |
21 | ### Fixed
22 |
23 | - The `cleanup` utility now deletes the `replication-target` test bucket
24 |
25 | ### Changed
26 |
27 | - Bumped dependencies in response to dependabot alerts
28 |
29 | ## [0.7.0] - 2024-10-04
30 |
31 | ### Added
32 |
33 | - Can now specify bucket type when listing buckets
34 | - Can now get and set default encryption configuration, object lock, CORS rules, etc on bucket ([djenriquez](https://github.com/djenriquez))
35 | - Can now get the S3 API URL from a bucket ([celskeggs](https://github.com/celskeggs))
36 |
37 | ### Fixed
38 |
39 | - The `cleanup` utility now successfully deletes test files and buckets after an interrupted test run
40 |
41 | ### Changed
42 |
43 | - Migrated to Backblaze B2 Native API v3
44 |
45 | ## [0.6.1] - 2023-10-16
46 |
47 | ### Added
48 |
49 | - `go.mod` file, license report ([tzeejay](https://github.com/tzeejay))
50 |
51 | ### Fixed
52 |
53 | - Resolve import errors ([tzeejay](https://github.com/tzeejay))
54 |
55 | ### Changed
56 |
57 | - Reference license report from README ([tzeejay](https://github.com/tzeejay))
58 |
59 | ## [0.6.0] - 2023-09-26
60 |
61 | Tagged initial version at https://github.com/Backblaze/blazer
62 |
63 | [unreleased]: https://github.com/Backblaze/blazer/compare/v0.6.1...HEAD
64 | [0.6.1]: https://github.com/Backblaze/blazer/compare/v0.6.0...v0.6.1
65 | [0.6.0]: https://github.com/Backblaze/blazer/compare/v0.5.3...v0.6.0
66 |
--------------------------------------------------------------------------------
/internal/bin/cleanup/cleanup.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "os"
7 | "strings"
8 | "sync"
9 |
10 | "github.com/Backblaze/blazer/b2"
11 | )
12 |
13 | const (
14 | apiID = "B2_ACCOUNT_ID"
15 | apiKey = "B2_SECRET_KEY"
16 | )
17 |
18 | var bucketNameSuffixes = [...]string{
19 | "consistobucket",
20 | "base-tests",
21 | "replication-target",
22 | }
23 |
24 | func main() {
25 | id := os.Getenv(apiID)
26 | key := os.Getenv(apiKey)
27 | ctx := context.Background()
28 | client, err := b2.NewClient(ctx, id, key)
29 | if err != nil {
30 | fmt.Println(err)
31 | return
32 | }
33 | buckets, err := client.ListBuckets(ctx)
34 | if err != nil {
35 | fmt.Println(err)
36 | return
37 | }
38 | var kill []string
39 | for _, bucket := range buckets {
40 | if strings.HasPrefix(bucket.Name(), fmt.Sprintf("%s-b2-tests-", id)) {
41 | kill = append(kill, bucket.Name())
42 | } else {
43 | for _, suffix := range bucketNameSuffixes {
44 | if bucket.Name() == fmt.Sprintf("%s-%s", id, suffix) {
45 | kill = append(kill, bucket.Name())
46 | break
47 | }
48 | }
49 | }
50 | }
51 | var wg sync.WaitGroup
52 | for _, name := range kill {
53 | wg.Add(1)
54 | go func(name string) {
55 | defer wg.Done()
56 | fmt.Println("removing bucket", name)
57 | if err := killBucket(ctx, client, name); err != nil {
58 | fmt.Println(err)
59 | }
60 | }(name)
61 | }
62 | wg.Wait()
63 | }
64 |
65 | func killBucket(ctx context.Context, client *b2.Client, name string) error {
66 | bucket, err := client.NewBucket(ctx, name, nil)
67 | if b2.IsNotExist(err) {
68 | return nil
69 | }
70 | if err != nil {
71 | return err
72 | }
73 | defer bucket.Delete(ctx)
74 | iter := bucket.List(ctx, b2.ListHidden())
75 | for iter.Next() {
76 | o := iter.Object()
77 | fmt.Println("deleting file", o.Name())
78 | if err := o.Delete(ctx); err != nil {
79 | fmt.Println(err)
80 | }
81 | }
82 | if err = iter.Err(); err != nil {
83 | return err
84 | }
85 | iter = bucket.List(ctx, b2.ListUnfinished())
86 | for iter.Next() {
87 | o := iter.Object()
88 | fmt.Println("canceling file", o.Name())
89 | if err := o.Cancel(ctx); err != nil {
90 | fmt.Println(err)
91 | }
92 | }
93 | return iter.Err()
94 | }
95 |
--------------------------------------------------------------------------------
/bin/b2keys/b2keys.go:
--------------------------------------------------------------------------------
1 | // b2keys is a small utility for managing Backblaze B2 keys.
2 | package main
3 |
4 | import (
5 | "context"
6 | "flag"
7 | "fmt"
8 | "os"
9 | "time"
10 |
11 | "github.com/Backblaze/blazer/b2"
12 | "github.com/google/subcommands"
13 | )
14 |
15 | const (
16 | apiID = "B2_ACCOUNT_ID"
17 | apiKey = "B2_SECRET_KEY"
18 | )
19 |
20 | func main() {
21 | subcommands.Register(&create{}, "")
22 | flag.Parse()
23 | ctx := context.Background()
24 | os.Exit(int(subcommands.Execute(ctx)))
25 | }
26 |
27 | type create struct {
28 | d *time.Duration
29 | bucket *string
30 | pfx *string
31 | }
32 |
33 | func (c *create) Name() string { return "create" }
34 | func (c *create) Synopsis() string { return "create a new application key" }
35 | func (c *create) Usage() string {
36 | return "b2keys create [-bucket bucket] [-duration duration] [-prefix pfx] name capability [capability ...]"
37 | }
38 |
39 | func (c *create) SetFlags(fs *flag.FlagSet) {
40 | c.d = fs.Duration("duration", 0, "the lifetime of the new key")
41 | c.bucket = fs.String("bucket", "", "limit the key to the given bucket")
42 | c.pfx = fs.String("prefix", "", "limit the key to the objects starting with prefix")
43 | }
44 |
45 | func (c *create) Execute(ctx context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
46 | id := os.Getenv(apiID)
47 | key := os.Getenv(apiKey)
48 | if id == "" || key == "" {
49 | fmt.Fprintf(os.Stderr, "both %s and %s must be set in the environment", apiID, apiKey)
50 | return subcommands.ExitUsageError
51 | }
52 |
53 | args := f.Args()
54 | if len(args) < 2 {
55 | fmt.Fprintf(os.Stderr, "%s\n", c.Usage())
56 | return subcommands.ExitUsageError
57 | }
58 | name := args[0]
59 | caps := args[1:]
60 |
61 | var opts []b2.KeyOption
62 | if *c.d > 0 {
63 | opts = append(opts, b2.Lifetime(*c.d))
64 | }
65 | if *c.pfx != "" {
66 | opts = append(opts, b2.Prefix(*c.pfx))
67 | }
68 | opts = append(opts, b2.Capabilities(caps...))
69 |
70 | client, err := b2.NewClient(ctx, id, key, b2.UserAgent("b2keys"))
71 | if err != nil {
72 | fmt.Fprintf(os.Stderr, "%v\n", err)
73 | return subcommands.ExitFailure
74 | }
75 |
76 | var cr creater = client
77 |
78 | if *c.bucket != "" {
79 | bucket, err := client.Bucket(ctx, *c.bucket)
80 | if err != nil {
81 | fmt.Fprintf(os.Stderr, "%v\n", err)
82 | return subcommands.ExitFailure
83 | }
84 | cr = bucket
85 | }
86 |
87 | b2key, err := cr.CreateKey(ctx, name, opts...)
88 | if err != nil {
89 | fmt.Fprintf(os.Stderr, "%v\n", err)
90 | return subcommands.ExitFailure
91 | }
92 | fmt.Printf("key=%s, secret=%s\n", b2key.ID(), b2key.Secret())
93 | return subcommands.ExitSuccess
94 | }
95 |
96 | type creater interface {
97 | CreateKey(context.Context, string, ...b2.KeyOption) (*b2.Key, error)
98 | }
99 |
--------------------------------------------------------------------------------
/internal/retry/retry.go:
--------------------------------------------------------------------------------
1 | // Copyright 2025, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // Simple library for retry mechanism.
16 | //
17 | // Inspired by [retry-go](https://github.com/avast/retry-go)
18 |
19 | package retry
20 |
21 | import (
22 | "context"
23 | "math/rand"
24 | "time"
25 | )
26 |
27 | // Function signature of retryable function
28 | type RetryableFunc func() error
29 |
30 | func Do(ctx context.Context, retryableFunc RetryableFunc, opts ...Option) error {
31 | var n uint
32 |
33 | if err := ctx.Err(); err != nil {
34 | return err
35 | }
36 |
37 | // Set config
38 | config := newDefaultRetryConfig()
39 | for _, opt := range opts {
40 | opt(config)
41 | }
42 |
43 | for {
44 | n++
45 |
46 | err := retryableFunc()
47 | if err == nil {
48 | return nil
49 | }
50 |
51 | config.attempts = config.dynamicAttempts(n, config.attempts, err)
52 | // if this is last attempt or we now have less attempts that tries - return immediately
53 | if config.attempts != 0 && n >= config.attempts {
54 | return err
55 | }
56 |
57 | if !config.retryIf(n, err) {
58 | return err
59 | }
60 | if err := config.onRetry(n, err); err != nil {
61 | return err
62 | }
63 |
64 | config.delay = config.dynamicDelay(n, config.delay, err)
65 | select {
66 | case <-config.after(config.delay):
67 | case <-ctx.Done():
68 | return ctx.Err()
69 | }
70 | }
71 | }
72 |
73 | func Jitter(d time.Duration) time.Duration {
74 | f := float64(d)
75 | f /= 50
76 | f += f * (rand.Float64() - 0.5)
77 | return time.Duration(f)
78 | }
79 |
80 | func Backoff(d time.Duration) time.Duration {
81 | if d > 30*time.Second {
82 | return 30*time.Second + Jitter(d)
83 | }
84 | return d*2 + Jitter(d*2)
85 | }
86 |
87 | func newDefaultRetryConfig() *Config {
88 | return &Config{
89 | attempts: uint(1),
90 | delay: 0,
91 |
92 | dynamicAttempts: func(attempt uint, attempts uint, err error) uint { return attempts },
93 | dynamicDelay: func(attempt uint, delay time.Duration, err error) time.Duration { return delay },
94 |
95 | onRetry: func(attempt uint, err error) error { return nil },
96 | retryIf: func(attempt uint, err error) bool { return true },
97 |
98 | after: time.After,
99 | }
100 | }
101 |
--------------------------------------------------------------------------------
/x/window/window_test.go:
--------------------------------------------------------------------------------
1 | // Copyright 2018, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package window
16 |
17 | import (
18 | "testing"
19 | "time"
20 | )
21 |
22 | type epair struct {
23 | e interface{}
24 | t time.Time
25 | }
26 |
27 | func adder(i, j interface{}) interface{} {
28 | a, ok := i.(int)
29 | if !ok {
30 | a = 0
31 | }
32 | b, ok := j.(int)
33 | if !ok {
34 | b = 0
35 | }
36 | return a + b
37 | }
38 |
39 | func TestWindows(t *testing.T) {
40 | table := []struct {
41 | size, dur time.Duration
42 | incs []epair
43 | look time.Time
44 | reduce Reducer
45 | want interface{}
46 | }{
47 | {
48 | size: time.Minute,
49 | dur: time.Second,
50 | incs: []epair{
51 | // year, month, day, hour, min, sec, nano
52 | {t: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), e: 1},
53 | {t: time.Date(2000, 1, 1, 0, 0, 1, 0, time.UTC), e: 1},
54 | {t: time.Date(2000, 1, 1, 0, 0, 2, 0, time.UTC), e: 1},
55 | {t: time.Date(2000, 1, 1, 0, 0, 3, 0, time.UTC), e: 1},
56 | {t: time.Date(2000, 1, 1, 0, 0, 4, 0, time.UTC), e: 1},
57 | {t: time.Date(2000, 1, 1, 0, 0, 5, 0, time.UTC), e: 1},
58 | },
59 | look: time.Date(2000, 1, 1, 0, 1, 0, 0, time.UTC),
60 | want: 5,
61 | reduce: adder,
62 | },
63 | {
64 | incs: []epair{
65 | // year, month, day, hour, min, sec, nano
66 | {t: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), e: 1},
67 | {t: time.Date(2000, 1, 1, 0, 0, 1, 0, time.UTC), e: 1},
68 | {t: time.Date(2000, 1, 1, 0, 0, 2, 0, time.UTC), e: 1},
69 | {t: time.Date(2000, 1, 1, 0, 0, 3, 0, time.UTC), e: 1},
70 | {t: time.Date(2000, 1, 1, 0, 0, 4, 0, time.UTC), e: 1},
71 | {t: time.Date(2000, 1, 1, 0, 0, 5, 0, time.UTC), e: 1},
72 | },
73 | want: 6,
74 | reduce: adder,
75 | },
76 | { // what happens if time goes backwards?
77 | size: time.Minute,
78 | dur: time.Second,
79 | incs: []epair{
80 | {t: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), e: 1},
81 | {t: time.Date(2000, 1, 1, 0, 0, 1, 0, time.UTC), e: 1},
82 | {t: time.Date(2000, 1, 1, 0, 0, 2, 0, time.UTC), e: 1},
83 | {t: time.Date(2000, 1, 1, 0, 0, 3, 0, time.UTC), e: 1},
84 | {t: time.Date(2000, 1, 1, 0, 0, 4, 0, time.UTC), e: 1},
85 | {t: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), e: 1},
86 | },
87 | look: time.Date(2000, 1, 1, 0, 0, 30, 0, time.UTC),
88 | want: 1,
89 | reduce: adder,
90 | },
91 | }
92 |
93 | for _, e := range table {
94 | w := New(e.size, e.dur, e.reduce)
95 | for _, inc := range e.incs {
96 | w.insertAt(inc.t, inc.e)
97 | }
98 | ct := w.reducedAt(e.look)
99 | if ct != e.want {
100 | t.Errorf("reducedAt(%v) got %v, want %v", e.look, ct, e.want)
101 | }
102 | }
103 | }
104 |
--------------------------------------------------------------------------------
/examples/simple/simple.go:
--------------------------------------------------------------------------------
1 | // Copyright 2017, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // This is a simple program that will copy named files into or out of B2.
16 | //
17 | // To copy a file into B2:
18 | //
19 | // B2_ACCOUNT_ID=foo B2_ACCOUNT_KEY=bar simple /path/to/file b2://bucket/path/to/dst
20 | //
21 | // To copy a file out:
22 | //
23 | // B2_ACCOUNT_ID=foo B2_ACCOUNT_KEY=bar simple b2://bucket/path/to/file /path/to/dst
24 | package main
25 |
26 | import (
27 | "context"
28 | "flag"
29 | "fmt"
30 | "io"
31 | "net/url"
32 | "os"
33 | "strings"
34 |
35 | "github.com/Backblaze/blazer/b2"
36 | )
37 |
38 | func main() {
39 | flag.Parse()
40 | b2id := os.Getenv("B2_ACCOUNT_ID")
41 | b2key := os.Getenv("B2_ACCOUNT_KEY")
42 |
43 | args := flag.Args()
44 | if len(args) != 2 {
45 | fmt.Printf("Usage:\n\nsimple [src] [dst]\n")
46 | return
47 | }
48 | src, dst := args[0], args[1]
49 |
50 | ctx := context.Background()
51 | c, err := b2.NewClient(ctx, b2id, b2key)
52 | if err != nil {
53 | fmt.Println(err)
54 | return
55 | }
56 |
57 | var r io.ReadCloser
58 | var w io.WriteCloser
59 |
60 | if strings.HasPrefix(src, "b2://") {
61 | reader, err := b2Reader(ctx, c, src)
62 | if err != nil {
63 | fmt.Println(err)
64 | return
65 | }
66 | r = reader
67 | } else {
68 | f, err := os.Open(src)
69 | if err != nil {
70 | fmt.Println(err)
71 | return
72 | }
73 | r = f
74 | }
75 | // Readers do not need their errors checked on close. (Also it's a little
76 | // silly to defer this in main(), but.)
77 | defer r.Close()
78 |
79 | if strings.HasPrefix(dst, "b2://") {
80 | writer, err := b2Writer(ctx, c, dst)
81 | if err != nil {
82 | fmt.Println(err)
83 | return
84 | }
85 | w = writer
86 | } else {
87 | f, err := os.Create(dst)
88 | if err != nil {
89 | fmt.Println(err)
90 | return
91 | }
92 | w = f
93 | }
94 |
95 | // Copy and check error.
96 | if _, err := io.Copy(w, r); err != nil {
97 | fmt.Println(err)
98 | return
99 | }
100 |
101 | // It is very important to check the error of the writer.
102 | if err := w.Close(); err != nil {
103 | fmt.Println(err)
104 | }
105 | }
106 |
107 | func b2Reader(ctx context.Context, c *b2.Client, path string) (io.ReadCloser, error) {
108 | o, err := b2Obj(ctx, c, path)
109 | if err != nil {
110 | return nil, err
111 | }
112 | return o.NewReader(ctx), nil
113 | }
114 |
115 | func b2Writer(ctx context.Context, c *b2.Client, path string) (io.WriteCloser, error) {
116 | o, err := b2Obj(ctx, c, path)
117 | if err != nil {
118 | return nil, err
119 | }
120 | return o.NewWriter(ctx), nil
121 | }
122 |
123 | func b2Obj(ctx context.Context, c *b2.Client, path string) (*b2.Object, error) {
124 | uri, err := url.Parse(path)
125 | if err != nil {
126 | return nil, err
127 | }
128 | bucket, err := c.Bucket(ctx, uri.Host)
129 | if err != nil {
130 | return nil, err
131 | }
132 | // B2 paths must not begin with /, so trim it here.
133 | return bucket.Object(strings.TrimPrefix(uri.Path, "/")), nil
134 | }
135 |
--------------------------------------------------------------------------------
/internal/retry/options.go:
--------------------------------------------------------------------------------
1 | // Copyright 2025, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package retry
16 |
17 | import (
18 | "time"
19 | )
20 |
21 | // Function signature of "dynamic attempts" function
22 | type DynamicAttemptsFunc func(attempt uint, attempts uint, err error) uint
23 |
24 | // Function signature of "dynamic delay" function
25 | type DynamicDelayFunc func(attempt uint, delay time.Duration, err error) time.Duration
26 |
27 | // Function signature of "on retry" function
28 | type OnRetryFunc func(attempt uint, err error) error
29 |
30 | // Function signature of "retry if" function
31 | type RetryIfFunc func(attempt uint, err error) bool
32 |
33 | // Function signature of time.After function
34 | type AfterFunc func(time.Duration) <-chan time.Time
35 |
36 | type Config struct {
37 | attempts uint
38 | delay time.Duration
39 |
40 | dynamicAttempts DynamicAttemptsFunc
41 | dynamicDelay DynamicDelayFunc
42 |
43 | onRetry OnRetryFunc
44 | retryIf RetryIfFunc
45 |
46 | after AfterFunc
47 | }
48 |
49 | // Option represents an option for retry.
50 | type Option func(*Config)
51 |
52 | func emptyOption(c *Config) {}
53 |
54 | // Attempts set count of retry. Setting to 0 will retry until the retried function succeeds.
55 | // Default is 1 (one attampt)
56 | // Number of attempts can be override by the dynamicAttempts function
57 | func Attempts(attempts uint) Option {
58 | return func(c *Config) {
59 | c.attempts = attempts
60 | }
61 | }
62 |
63 | // Delay set delay between retry. It can be override by the delayType function
64 | // Default is 0 (no delay)
65 | // Number of attempts can be override by the dynamicDelay function
66 | func Delay(delay time.Duration) Option {
67 | return func(c *Config) {
68 | c.delay = delay
69 | }
70 | }
71 |
72 | // DynamicAttempts dynamically set the number of attempts
73 | func DynamicAttempts(dynamicAttempts DynamicAttemptsFunc) Option {
74 | if dynamicAttempts == nil {
75 | return emptyOption
76 | }
77 | return func(c *Config) {
78 | c.dynamicAttempts = dynamicAttempts
79 | }
80 | }
81 |
82 | // DynamicDelay dynamically set the delay between retries
83 | func DynamicDelay(dynamicDelay DynamicDelayFunc) Option {
84 | if dynamicDelay == nil {
85 | return emptyOption
86 | }
87 | return func(c *Config) {
88 | c.dynamicDelay = dynamicDelay
89 | }
90 | }
91 |
92 | // OnRetry function callback are called each retry
93 | func OnRetry(onRetry OnRetryFunc) Option {
94 | if onRetry == nil {
95 | return emptyOption
96 | }
97 | return func(c *Config) {
98 | c.onRetry = onRetry
99 | }
100 | }
101 |
102 | // RetryIf controls whether a retry should be attempted after an error
103 | // (assuming there are any retry attempts remaining)
104 | func RetryIf(retryIf RetryIfFunc) Option {
105 | if retryIf == nil {
106 | return emptyOption
107 | }
108 | return func(c *Config) {
109 | c.retryIf = retryIf
110 | }
111 | }
112 |
113 | // WithAfter provides a way to swap out time.After implementations.
114 | // This primarily is useful for mocking/testing, where you may not want to explicitly wait for a set duration
115 | // for retries.
116 | func WithAfter(after AfterFunc) Option {
117 | return func(c *Config) {
118 | c.after = after
119 | }
120 | }
121 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Blazer
2 | ====
3 |
4 | [](https://godoc.org/github.com/Backblaze/blazer/b2)
5 |
6 | Blazer is a Golang client library for Backblaze B2 Cloud Object Storage.
7 |
8 | ```go
9 | import "github.com/Backblaze/blazer/b2"
10 | ```
11 |
12 | Blazer targets the Backblaze B2 Native API. Unless you specifically need to access Backblaze B2 via its Native API, you should use the [MinIO Go Client SDK](https://github.com/minio/minio-go) with Backblaze B2's S3 Compatible SDK.
13 |
14 | _Many thanks to Toby Burress ([kurin](https://github.com/kurin)) for creating and maintaining Blazer for its first six years._
15 |
16 | ## Examples
17 |
18 | ### Getting started
19 | ```go
20 | import "os"
21 |
22 | id := os.Getenv("B2_APPLICATION_KEY_ID")
23 | key := os.Getenv("B2_APPLICATION_KEY")
24 |
25 | ctx := context.Background()
26 |
27 | // b2_authorize_account
28 | b2, err := b2.NewClient(ctx, id, key)
29 | if err != nil {
30 | log.Fatalln(err)
31 | }
32 |
33 | buckets, err := b2.ListBuckets(ctx)
34 | if err != nil {
35 | log.Fatalln(err)
36 | }
37 | ```
38 |
39 | ### Copy a file into B2
40 |
41 | ```go
42 | func copyFile(ctx context.Context, bucket *b2.Bucket, src, dst string) error {
43 | f, err := os.Open(src)
44 | if err != nil {
45 | return err
46 | }
47 | defer f.Close()
48 |
49 | obj := bucket.Object(dst)
50 | w := obj.NewWriter(ctx)
51 | if _, err := io.Copy(w, f); err != nil {
52 | w.Close()
53 | return err
54 | }
55 | return w.Close()
56 | }
57 | ```
58 |
59 | If the file is less than 100MB, Blazer will simply buffer the file and use the
60 | `b2_upload_file` API to send the file to Backblaze. If the file is greater
61 | than 100MB, Blazer will use B2's large file support to upload the file in 100MB
62 | chunks.
63 |
64 | ### Copy a file into B2, with multiple concurrent uploads
65 |
66 | Uploading a large file with multiple HTTP connections is simple:
67 |
68 | ```go
69 | func copyFile(ctx context.Context, bucket *b2.Bucket, writers int, src, dst string) error {
70 | f, err := os.Open(src)
71 | if err != nil {
72 | return err
73 | }
74 | defer f.Close()
75 |
76 | w := bucket.Object(dst).NewWriter(ctx)
77 | w.ConcurrentUploads = writers
78 | if _, err := io.Copy(w, f); err != nil {
79 | w.Close()
80 | return err
81 | }
82 | return w.Close()
83 | }
84 | ```
85 |
86 | This will automatically split the file into `writers` chunks of 100MB uploads.
87 | Note that 100MB is the smallest chunk size that B2 supports.
88 |
89 | ### Download a file from B2
90 |
91 | Downloading is as simple as uploading:
92 |
93 | ```go
94 | func downloadFile(ctx context.Context, bucket *b2.Bucket, downloads int, src, dst string) error {
95 | r := bucket.Object(src).NewReader(ctx)
96 | defer r.Close()
97 |
98 | f, err := os.Create(dst)
99 | if err != nil {
100 | return err
101 | }
102 | r.ConcurrentDownloads = downloads
103 | if _, err := io.Copy(f, r); err != nil {
104 | f.Close()
105 | return err
106 | }
107 | return f.Close()
108 | }
109 | ```
110 |
111 | ### List all objects in a bucket
112 |
113 | ```go
114 | func printObjects(ctx context.Context, bucket *b2.Bucket) error {
115 | iterator := bucket.List(ctx)
116 | for iterator.Next() {
117 | fmt.Println(iterator.Object())
118 | }
119 | return iterator.Err()
120 | }
121 | ```
122 |
123 | ### Grant temporary auth to a file
124 |
125 | Say you have a number of files in a private bucket, and you want to allow other
126 | people to download some files. This is possible to do by issuing a temporary
127 | authorization token for the prefix of the files you want to share.
128 |
129 | ```go
130 | token, err := bucket.AuthToken(ctx, "photos", time.Hour)
131 | ```
132 |
133 | If successful, `token` is then an authorization token valid for one hour, which
134 | can be set in HTTP GET requests.
135 |
136 | The hostname to use when downloading files via HTTP is account-specific and can
137 | be found via the BaseURL method:
138 |
139 | ```go
140 | base := bucket.BaseURL()
141 | ```
142 |
143 |
144 | ### Licenses
145 | The b2 package currently does not consume any third party packages and entirely depends on imports of the Go stdlib or from sources provided within the `blazer` repository itself.
146 | A report of used licenses can be found at `./b2/licenses.csv` which was generated with https://github.com/google/go-licenses . Please double check yourself if this is a concern as this may change over time and the licenses report could become stale
--------------------------------------------------------------------------------
/x/window/window.go:
--------------------------------------------------------------------------------
1 | // Copyright 2018, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // Package window provides a type for efficiently recording events as they
16 | // occur over a given span of time. Events added to the window will remain
17 | // until the time expires.
18 | package window
19 |
20 | import (
21 | "sync"
22 | "time"
23 | )
24 |
25 | // A Window efficiently records events that have occurred over a span of time
26 | // extending from some fixed interval ago to now. Events that pass beyond this
27 | // horizon are discarded.
28 | type Window struct {
29 | mu sync.Mutex
30 | events []interface{}
31 | res time.Duration
32 | last time.Time
33 | reduce Reducer
34 | forever bool
35 | e interface{}
36 | }
37 |
38 | // A Reducer should take two values from the window and combine them into a
39 | // third value that will be stored in the window. The values i or j may be
40 | // nil. The underlying types for both arguments and the output should be
41 | // identical.
42 | //
43 | // If the reducer is any kind of slice or list, then data usage will grow
44 | // linearly with the number of events added to the window.
45 | //
46 | // Reducer will be called on its own output: Reducer(Reducer(x, y), z).
47 | type Reducer func(i, j interface{}) interface{}
48 |
49 | // New returns an initialized window for events over the given duration at the
50 | // given resolution. Windows with tight resolution (i.e., small values for
51 | // that argument) will be more accurate, at the cost of some memory.
52 | //
53 | // A size of 0 means "forever"; old events will never be removed.
54 | func New(size, resolution time.Duration, r Reducer) *Window {
55 | if size > 0 {
56 | return &Window{
57 | res: resolution,
58 | events: make([]interface{}, size/resolution),
59 | reduce: r,
60 | }
61 | }
62 | return &Window{
63 | forever: true,
64 | reduce: r,
65 | }
66 | }
67 |
68 | func (w *Window) bucket(now time.Time) int {
69 | nanos := now.UnixNano()
70 | abs := nanos / int64(w.res)
71 | return int(abs) % len(w.events)
72 | }
73 |
74 | // sweep keeps the window valid. It needs to be called from every method that
75 | // views or updates the window, and the caller needs to hold the mutex.
76 | func (w *Window) sweep(now time.Time) {
77 | if w.forever {
78 | return
79 | }
80 | defer func() {
81 | w.last = now
82 | }()
83 |
84 | // This compares now and w.last's monotonic clocks.
85 | diff := now.Sub(w.last)
86 | if diff < 0 {
87 | // time went backwards somehow; zero events and return
88 | for i := range w.events {
89 | w.events[i] = nil
90 | }
91 | return
92 | }
93 | last := now.Add(-diff)
94 |
95 | b := w.bucket(now)
96 | p := w.bucket(last)
97 |
98 | if b == p && diff <= w.res {
99 | // We're in the same bucket as the previous sweep, so all buckets are
100 | // valid.
101 | return
102 | }
103 |
104 | if diff > w.res*time.Duration(len(w.events)) {
105 | // We've gone longer than this window measures since the last sweep, just
106 | // zero the thing and have done.
107 | for i := range w.events {
108 | w.events[i] = nil
109 | }
110 | return
111 | }
112 |
113 | // Expire all invalid buckets. This means buckets not seen since the
114 | // previous sweep and now, including the current bucket but not including the
115 | // previous bucket.
116 | old := int64(last.UnixNano()) / int64(w.res)
117 | new := int64(now.UnixNano()) / int64(w.res)
118 | for i := old + 1; i <= new; i++ {
119 | b := int(i) % len(w.events)
120 | w.events[b] = nil
121 | }
122 | }
123 |
124 | // Insert adds the given event.
125 | func (w *Window) Insert(e interface{}) {
126 | w.insertAt(time.Now(), e)
127 | }
128 |
129 | func (w *Window) insertAt(t time.Time, e interface{}) {
130 | w.mu.Lock()
131 | defer w.mu.Unlock()
132 |
133 | if w.forever {
134 | w.e = w.reduce(w.e, e)
135 | return
136 | }
137 |
138 | w.sweep(t)
139 | w.events[w.bucket(t)] = w.reduce(w.events[w.bucket(t)], e)
140 | }
141 |
142 | // Reduce runs the window's reducer over the valid values and returns the
143 | // result.
144 | func (w *Window) Reduce() interface{} {
145 | return w.reducedAt(time.Now())
146 | }
147 |
148 | func (w *Window) reducedAt(t time.Time) interface{} {
149 | w.mu.Lock()
150 | defer w.mu.Unlock()
151 |
152 | if w.forever {
153 | return w.e
154 | }
155 |
156 | w.sweep(t)
157 | var n interface{}
158 | for i := range w.events {
159 | n = w.reduce(n, w.events[i])
160 | }
161 | return n
162 | }
163 |
--------------------------------------------------------------------------------
/b2/key.go:
--------------------------------------------------------------------------------
1 | // Copyright 2018, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package b2
16 |
17 | import (
18 | "context"
19 | "errors"
20 | "io"
21 | "time"
22 | )
23 |
24 | // Key is a B2 application key. A Key grants limited access on a global or
25 | // per-bucket basis.
26 | type Key struct {
27 | c *Client
28 | k beKeyInterface
29 | }
30 |
31 | // Capabilities returns the list of capabilites granted by this application
32 | // key.
33 | func (k *Key) Capabilities() []string { return k.k.caps() }
34 |
35 | // Name returns the user-supplied name of this application key. Key names are
36 | // useless.
37 | func (k *Key) Name() string { return k.k.name() }
38 |
39 | // Expires returns the expiration date of this application key.
40 | func (k *Key) Expires() time.Time { return k.k.expires() }
41 |
42 | // Delete removes the key from B2.
43 | func (k *Key) Delete(ctx context.Context) error { return k.k.del(ctx) }
44 |
45 | // Secret returns the value that should be passed into NewClient(). It is only
46 | // available on newly created keys; it is not available from ListKey
47 | // operations.
48 | func (k *Key) Secret() string { return k.k.secret() }
49 |
50 | // ID returns the application key ID. This, plus the secret, is necessary to
51 | // authenticate to B2.
52 | func (k *Key) ID() string { return k.k.id() }
53 |
54 | type keyOptions struct {
55 | caps []string
56 | prefix string
57 | lifetime time.Duration
58 | }
59 |
60 | // KeyOption specifies desired properties for application keys.
61 | type KeyOption func(*keyOptions)
62 |
63 | // Lifetime requests a key with the given lifetime.
64 | func Lifetime(d time.Duration) KeyOption {
65 | return func(k *keyOptions) {
66 | k.lifetime = d
67 | }
68 | }
69 |
70 | // Deadline requests a key that expires after the given date.
71 | func Deadline(t time.Time) KeyOption {
72 | d := t.Sub(time.Now())
73 | return Lifetime(d)
74 | }
75 |
76 | // Capabilities requests a key with the given capability.
77 | func Capabilities(caps ...string) KeyOption {
78 | return func(k *keyOptions) {
79 | k.caps = append(k.caps, caps...)
80 | }
81 | }
82 |
83 | // Prefix limits the requested application key to be valid only for objects
84 | // that begin with prefix. This can only be used when requesting an
85 | // application key within a specific bucket.
86 | func Prefix(prefix string) KeyOption {
87 | return func(k *keyOptions) {
88 | k.prefix = prefix
89 | }
90 | }
91 |
92 | // CreateKey creates a global application key that is valid for all buckets in
93 | // this project. The key's secret will only be accessible on the object
94 | // returned from this call.
95 | func (c *Client) CreateKey(ctx context.Context, name string, opts ...KeyOption) (*Key, error) {
96 | var ko keyOptions
97 | for _, o := range opts {
98 | o(&ko)
99 | }
100 | if ko.prefix != "" {
101 | return nil, errors.New("Prefix is not a valid option for global application keys")
102 | }
103 | ki, err := c.backend.createKey(ctx, name, ko.caps, ko.lifetime, "", "")
104 | if err != nil {
105 | return nil, err
106 | }
107 | return &Key{
108 | c: c,
109 | k: ki,
110 | }, nil
111 | }
112 |
113 | // ListKeys lists all the keys associated with this project. It takes the
114 | // maximum number of keys it should return in a call, as well as a cursor
115 | // (which should be empty for the initial call). It will return up to count
116 | // keys, as well as the cursor for the next invocation.
117 | //
118 | // ListKeys returns io.EOF when there are no more keys, although it may do so
119 | // concurrently with the final set of keys.
120 | func (c *Client) ListKeys(ctx context.Context, count int, cursor string) ([]*Key, string, error) {
121 | ks, next, err := c.backend.listKeys(ctx, count, cursor)
122 | if err != nil {
123 | return nil, "", err
124 | }
125 | if len(ks) == 0 {
126 | return nil, "", io.EOF
127 | }
128 | var keys []*Key
129 | for _, k := range ks {
130 | keys = append(keys, &Key{
131 | c: c,
132 | k: k,
133 | })
134 | }
135 | var rerr error
136 | if next == "" {
137 | rerr = io.EOF
138 | }
139 | return keys, next, rerr
140 | }
141 |
142 | // CreateKey creates a scoped application key that is valid only for this bucket.
143 | func (b *Bucket) CreateKey(ctx context.Context, name string, opts ...KeyOption) (*Key, error) {
144 | var ko keyOptions
145 | for _, o := range opts {
146 | o(&ko)
147 | }
148 | ki, err := b.r.createKey(ctx, name, ko.caps, ko.lifetime, b.b.id(), ko.prefix)
149 | if err != nil {
150 | return nil, err
151 | }
152 | return &Key{
153 | c: b.c,
154 | k: ki,
155 | }, nil
156 | }
157 |
--------------------------------------------------------------------------------
/x/consistent/consistent_test.go:
--------------------------------------------------------------------------------
1 | package consistent
2 |
3 | import (
4 | "context"
5 | "io/ioutil"
6 | "log"
7 | "os"
8 | "strconv"
9 | "sync"
10 | "sync/atomic"
11 | "testing"
12 | "time"
13 |
14 | "github.com/Backblaze/blazer/b2"
15 | )
16 |
17 | const (
18 | apiID = "B2_ACCOUNT_ID"
19 | apiKey = "B2_SECRET_KEY"
20 | bucketName = "consistobucket"
21 | )
22 |
23 | // consistent is an experimental package, not currently maintained, with some tests that are flaky.
24 | // Skip all tests in the package unless the env var BLAZER_TEST_CONSISTENT is set.
25 | func TestMain(m *testing.M) {
26 | if os.Getenv("BLAZER_TEST_CONSISTENT") == "" {
27 | log.Print("Skipping x/consistent/consistent_test.go. Set BLAZER_TEST_CONSISTENT to include these tests.")
28 | } else {
29 | m.Run()
30 | }
31 | }
32 |
33 | func TestOperationLive(t *testing.T) {
34 | ctx := context.Background()
35 | bucket, done := startLiveTest(ctx, t)
36 | defer done()
37 |
38 | g := NewGroup(bucket, "tester")
39 | name := "some_kinda_name/thing.txt"
40 |
41 | var wg sync.WaitGroup
42 | for i := 0; i < 10; i++ {
43 | wg.Add(1)
44 | i := i
45 | go func() {
46 | defer wg.Done()
47 | for j := 0; j < 10; j++ {
48 | var n int
49 | if err := g.Operate(ctx, name, func(b []byte) ([]byte, error) {
50 | if len(b) > 0 {
51 | i, err := strconv.Atoi(string(b))
52 | if err != nil {
53 | return nil, err
54 | }
55 | n = i
56 | }
57 | return []byte(strconv.Itoa(n + 1)), nil
58 | }); err != nil {
59 | t.Error(err)
60 | }
61 | t.Logf("thread %d: successful %d++", i, n)
62 | }
63 | }()
64 | }
65 | wg.Wait()
66 |
67 | r, err := g.NewReader(ctx, name)
68 | if err != nil {
69 | t.Fatal(err)
70 | }
71 | defer r.Close()
72 | b, err := ioutil.ReadAll(r)
73 | if err != nil {
74 | t.Fatal(err)
75 | }
76 | n, err := strconv.Atoi(string(b))
77 | if err != nil {
78 | t.Fatal(err)
79 | }
80 | if n != 100 {
81 | t.Errorf("result: got %d, want 100", n)
82 | }
83 | }
84 |
85 | type jsonThing struct {
86 | Boop int `json:"boop_field"`
87 | Thread int `json:"thread_id"`
88 | }
89 |
90 | func TestOperationJSONLive(t *testing.T) {
91 | ctx := context.Background()
92 | bucket, done := startLiveTest(ctx, t)
93 | defer done()
94 |
95 | g := NewGroup(bucket, "tester")
96 | name := "some_kinda_json/thing.json"
97 |
98 | var wg sync.WaitGroup
99 | for i := 0; i < 4; i++ {
100 | wg.Add(1)
101 | i := i
102 | go func() {
103 | var n int
104 | defer wg.Done()
105 | for j := 0; j < 4; j++ {
106 | // Pass both a struct and a pointer to a struct.
107 | var face interface{}
108 | face = jsonThing{}
109 | if j%2 == 0 {
110 | face = &jsonThing{}
111 | }
112 | if err := g.OperateJSON(ctx, name, face, func(j interface{}) (interface{}, error) {
113 | jt := j.(*jsonThing)
114 | n = jt.Boop
115 | return &jsonThing{
116 | Boop: jt.Boop + 1,
117 | Thread: i,
118 | }, nil
119 | }); err != nil {
120 | t.Error(err)
121 | }
122 | t.Logf("thread %d: successful %d++", i, n)
123 | }
124 | }()
125 | }
126 | wg.Wait()
127 |
128 | if err := g.OperateJSON(ctx, name, &jsonThing{}, func(i interface{}) (interface{}, error) {
129 | jt := i.(*jsonThing)
130 | if jt.Boop != 16 {
131 | t.Errorf("got %d boops; want 16", jt.Boop)
132 | }
133 | return nil, nil
134 | }); err != nil {
135 | t.Error(err)
136 | }
137 | }
138 |
139 | func TestMutex(t *testing.T) {
140 | ctx := context.Background()
141 | bucket, done := startLiveTest(ctx, t)
142 | defer done()
143 |
144 | g := NewGroup(bucket, "tester")
145 | m := g.Mutex(ctx, "mootex")
146 | var a int32
147 | var wg sync.WaitGroup
148 | for i := 0; i < 5; i++ {
149 | wg.Add(1)
150 | go func(i int) {
151 | defer wg.Done()
152 | for j := 0; j < 5; j++ {
153 | m.Lock()
154 | new := atomic.AddInt32(&a, 1)
155 | if new != 1 {
156 | t.Fatalf("two threads locked at once")
157 | }
158 | time.Sleep(20 * time.Millisecond)
159 | new = atomic.AddInt32(&a, -1)
160 | if new != 0 {
161 | t.Fatalf("two threads locked at once")
162 | }
163 | t.Logf("thread %d: lock %d", i, j)
164 | m.Unlock()
165 | }
166 | }(i)
167 | }
168 | wg.Wait()
169 | }
170 |
171 | func startLiveTest(ctx context.Context, t *testing.T) (*b2.Bucket, func()) {
172 | id := os.Getenv(apiID)
173 | key := os.Getenv(apiKey)
174 | if id == "" || key == "" {
175 | t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests")
176 | return nil, nil
177 | }
178 | client, err := b2.NewClient(ctx, id, key)
179 | if err != nil {
180 | t.Fatal(err)
181 | return nil, nil
182 | }
183 | bucket, err := client.NewBucket(ctx, id+"-"+bucketName, nil)
184 | if err != nil {
185 | t.Fatal(err)
186 | return nil, nil
187 | }
188 | f := func() {
189 | iter := bucket.List(ctx, b2.ListHidden())
190 | for iter.Next() {
191 | if err := iter.Object().Delete(ctx); err != nil {
192 | t.Error(err)
193 | }
194 | }
195 | if err := iter.Err(); err != nil && !b2.IsNotExist(err) {
196 | t.Error(err)
197 | }
198 | if err := bucket.Delete(ctx); err != nil && !b2.IsNotExist(err) {
199 | t.Error(err)
200 | }
201 | }
202 | return bucket, f
203 | }
204 |
205 | type object struct {
206 | o *b2.Object
207 | err error
208 | }
209 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | Contributions are subject to GitHub’s Terms of Service and You accept and
4 | agree to the following for your present and future Contributions.
5 |
6 | 1. License Grant: You hereby grant Backblaze, and any recipients or users of
7 | the Backblaze open source software (as may be modified by your Contribution),
8 | a non-exclusive, perpetual, irrevocable, worldwide, royalty-free,
9 | sublicensable license to use, reproduce, distribute, modify, create derivative
10 | works of, publicly display, publicly perform, and otherwise use your
11 | Contributions on any terms Backblaze or such users, deem appropriate.
12 |
13 | 3. Representations and Warranties: You represent and warrant that you have the
14 | necessary rights to grant the rights described herein and that your
15 | Contribution does not violate any third-party rights or applicable laws.
16 | Except as stated in the previous sentence, the contribution is submitted
17 | “AS IS” and the Contributor disclaims all warranties with regard to the
18 | contribution.
19 |
20 | 3. Except for the license granted herein to Backblaze and to recipients and
21 | users of the Backblaze open source software, You reserve all right, title, and
22 | interest in and to your Contributions.
23 |
24 | ## Bug Reports & Feature Requests
25 |
26 | Bug reports and feature requests are really helpful. Head over to
27 | [Issues](https://github.com/Backblaze/blazer/issues), and provide
28 | plenty of detail and context.
29 |
30 | ## Development Guidelines
31 |
32 | ### Fork the Repository
33 |
34 | If you are planning to submit a pull request, please begin by [forking this repository in the GitHub UI](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo), then cloning your fork:
35 |
36 | ```shell
37 | git clone git@github.com:/blazer.git
38 | cd blazer
39 | ```
40 |
41 | Create a local branch in which to work on your contribution:
42 |
43 | ```shell
44 | git switch -c my-cool-fix
45 | ```
46 |
47 | When you're ready to submit, see the section below, [Submitting a Pull Request](#submitting-a-pull-request).
48 |
49 | ### Testing
50 |
51 | Automated tests are run with `go test`. Integration tests run against Backblaze B2, and require that you create an application key with "all buckets" access and set the following environment variables:
52 |
53 | ```shell
54 | export B2_ACCOUNT_ID=
55 | export B2_SECRET_KEY=
56 | ```
57 |
58 | To simply run all tests from the `blazer` directory, do:
59 |
60 | ```shell
61 | go test ./...
62 | ```
63 |
64 | The `-v` flag is useful to generate more verbose output, such as passing package tests:
65 |
66 | ```shell
67 | go test -v ./...
68 | ```
69 |
70 | When developing, it's often useful to have `go test` exit as soon as it encounters a failing test; add the `-failfast` flag to do this:
71 |
72 | ```shell
73 | go test -v -failfast ./...
74 | ```
75 |
76 | You can supply a regular expression to limit the tests that are to be run. For example, to run all tests with names
77 | containing `TestResumeWriter`:
78 |
79 | ```shell
80 | go test -v ./... -run 'TestResumeWriter'
81 | ```
82 |
83 | This will run both `TestResumeWriter` and `TestResumeWriterWithoutExtantFile`, and `TestTestResumeWriter`, if such a test existed. To run only `TestResumeWriter`, use the caret, `^`, and dollar sign, `$`, to represent the beginning and end of the test name:
84 |
85 | ```shell
86 | go test -v ./... -run '^TestResumeWriter$'
87 | ```
88 |
89 | By default, `go test` caches test output, rerunning tests only when the code has changed. To avoid using cached results, do:
90 |
91 | ```shell
92 | go test -v -count=1 ./...
93 | ```
94 |
95 | Finally, it's good practice to test for race conditions before submitting your code. Since the [data race detector](https://go.dev/doc/articles/race_detector) may increase memory usage by 5-10x and execution time by 2-20x, we don't recommend you do so all the time!
96 |
97 | To test for race conditions:
98 |
99 | ```shell
100 | go test -v -race ./...
101 | ```
102 |
103 | Automated tests should be developed for cases that clearly improve Blazer's
104 | reliability, user and developer experience. Otherwise, there is no specific
105 | enforcement of test coverage.
106 |
107 | ### Test Cleanup
108 |
109 | The tests should delete all files and buckets that they create. However, if testing fails with an error such as a segmentation fault, test files and buckets may be left in place, causing subsequent test runs to fail. The `cleanup` utility deletes test files and buckets in such a situation:
110 |
111 | ```shell
112 | go build internal/bin/cleanup/cleanup.go
113 | ./cleanup
114 | ```
115 |
116 | ### Submitting a Pull Request
117 |
118 | When you're ready to submit your pull request, add and commit your files with a relevant message, including the issue number, if the PR fixes a specific issue:
119 |
120 | ```shell
121 | git add
122 | git commit -m "Cool update. Fixes #123"
123 | ```
124 |
125 | Now push your changes to a new branch to your GitHub repository:
126 |
127 | ```shell
128 | git push --set-upstream origin my-cool-fix
129 | ```
130 |
131 | The git response will display the pull request URL, or you can go to the branch page in your repo, `https://github.com//blazer/tree/my-cool-fix`, and click the 'Compare & pull request' button.
132 |
133 | After you submit your pull request, a project maintainer will review it and respond within two weeks, likely much less unless we are flooded with contributions!
134 |
--------------------------------------------------------------------------------
/b2/buffer.go:
--------------------------------------------------------------------------------
1 | // Copyright 2017, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package b2
16 |
17 | import (
18 | "bytes"
19 | "crypto/sha1"
20 | "errors"
21 | "fmt"
22 | "hash"
23 | "io"
24 | "io/ioutil"
25 | "os"
26 | "strings"
27 | "sync"
28 | )
29 |
30 | type readResetter interface {
31 | Read([]byte) (int, error)
32 | Reset() error
33 | }
34 |
35 | type resetter struct {
36 | rs io.ReadSeeker
37 | }
38 |
39 | func (r resetter) Read(p []byte) (int, error) { return r.rs.Read(p) }
40 | func (r resetter) Reset() error { _, err := r.rs.Seek(0, 0); return err }
41 |
42 | func newResetter(p []byte) readResetter { return resetter{rs: bytes.NewReader(p)} }
43 |
44 | type writeBuffer interface {
45 | io.Writer
46 | Len() int
47 | Reader() (readResetter, error)
48 | Hash() string // sha1 or whatever it is
49 | Close() error
50 | }
51 |
52 | // nonBuffer doesn't buffer anything, but passes values directly from the
53 | // source readseeker. Many nonBuffers can point at different parts of the same
54 | // underlying source, and be accessed by multiple goroutines simultaneously.
55 | func newNonBuffer(rs io.ReaderAt, offset, size int64) writeBuffer {
56 | return &nonBuffer{
57 | r: io.NewSectionReader(rs, offset, size),
58 | size: int(size),
59 | hsh: sha1.New(),
60 | }
61 | }
62 |
63 | type nonBuffer struct {
64 | r *io.SectionReader
65 | size int
66 | hsh hash.Hash
67 |
68 | isEOF bool
69 | buf *strings.Reader
70 | }
71 |
72 | func (nb *nonBuffer) Len() int { return nb.size + 40 }
73 | func (nb *nonBuffer) Hash() string { return "hex_digits_at_end" }
74 | func (nb *nonBuffer) Close() error { return nil }
75 | func (nb *nonBuffer) Reader() (readResetter, error) { return nb, nil }
76 | func (nb *nonBuffer) Write([]byte) (int, error) { return 0, errors.New("writes not supported") }
77 |
78 | func (nb *nonBuffer) Read(p []byte) (int, error) {
79 | if nb.isEOF {
80 | return nb.buf.Read(p)
81 | }
82 | n, err := io.TeeReader(nb.r, nb.hsh).Read(p)
83 | if err == io.EOF {
84 | err = nil
85 | nb.isEOF = true
86 | nb.buf = strings.NewReader(fmt.Sprintf("%x", nb.hsh.Sum(nil)))
87 | }
88 | return n, err
89 | }
90 |
91 | func (nb *nonBuffer) Reset() error {
92 | nb.hsh.Reset()
93 | nb.isEOF = false
94 | _, err := nb.r.Seek(0, 0)
95 | return err
96 | }
97 |
98 | type memoryBuffer struct {
99 | buf *bytes.Buffer
100 | hsh hash.Hash
101 | w io.Writer
102 | mux sync.RWMutex
103 | }
104 |
105 | var bufpool *sync.Pool
106 |
107 | func init() {
108 | bufpool = &sync.Pool{}
109 | bufpool.New = func() interface{} { return &bytes.Buffer{} }
110 | }
111 |
112 | func newMemoryBuffer() *memoryBuffer {
113 | mb := &memoryBuffer{
114 | hsh: sha1.New(),
115 | }
116 | mb.buf = bufpool.Get().(*bytes.Buffer)
117 | mb.w = io.MultiWriter(mb.hsh, mb.buf)
118 | return mb
119 | }
120 |
121 | func (mb *memoryBuffer) Write(p []byte) (int, error) {
122 | mb.mux.Lock()
123 | defer mb.mux.Unlock()
124 | return mb.w.Write(p)
125 | }
126 |
127 | func (mb *memoryBuffer) Len() int {
128 | mb.mux.RLock()
129 | defer mb.mux.RUnlock()
130 | return mb.buf.Len()
131 | }
132 |
133 | func (mb *memoryBuffer) Reader() (readResetter, error) {
134 | mb.mux.RLock()
135 | defer mb.mux.RUnlock()
136 | return newResetter(mb.buf.Bytes()), nil
137 | }
138 |
139 | func (mb *memoryBuffer) Hash() string {
140 | mb.mux.RLock()
141 | defer mb.mux.RUnlock()
142 | return fmt.Sprintf("%x", mb.hsh.Sum(nil))
143 | }
144 |
145 | func (mb *memoryBuffer) Close() error {
146 | mb.mux.Lock()
147 | defer mb.mux.Unlock()
148 | if mb.buf == nil {
149 | return nil
150 | }
151 | mb.buf.Truncate(0)
152 | bufpool.Put(mb.buf)
153 | mb.buf = nil
154 | return nil
155 | }
156 |
157 | type fileBuffer struct {
158 | f *os.File
159 | hsh hash.Hash
160 | w io.Writer
161 | s int
162 | }
163 |
164 | func newFileBuffer(loc string) (*fileBuffer, error) {
165 | f, err := ioutil.TempFile(loc, "blazer")
166 | if err != nil {
167 | return nil, err
168 | }
169 | fb := &fileBuffer{
170 | f: f,
171 | hsh: sha1.New(),
172 | }
173 | fb.w = io.MultiWriter(fb.f, fb.hsh)
174 | return fb, nil
175 | }
176 |
177 | func (fb *fileBuffer) Write(p []byte) (int, error) {
178 | n, err := fb.w.Write(p)
179 | fb.s += n
180 | return n, err
181 | }
182 |
183 | func (fb *fileBuffer) Len() int { return fb.s }
184 | func (fb *fileBuffer) Hash() string { return fmt.Sprintf("%x", fb.hsh.Sum(nil)) }
185 |
186 | func (fb *fileBuffer) Reader() (readResetter, error) {
187 | if _, err := fb.f.Seek(0, 0); err != nil {
188 | return nil, err
189 | }
190 | return &fr{f: fb.f}, nil
191 | }
192 |
193 | func (fb *fileBuffer) Close() error {
194 | fb.f.Close()
195 | return os.Remove(fb.f.Name())
196 | }
197 |
198 | // wraps *os.File so that the http package doesn't see it as an io.Closer
199 | type fr struct {
200 | f *os.File
201 | }
202 |
203 | func (r *fr) Read(p []byte) (int, error) { return r.f.Read(p) }
204 | func (r *fr) Reset() error { _, err := r.f.Seek(0, 0); return err }
205 |
--------------------------------------------------------------------------------
/x/transport/transport.go:
--------------------------------------------------------------------------------
1 | // Copyright 2017, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // Package transport provides http.RoundTrippers that may be useful to clients
16 | // of Blazer.
17 | package transport
18 |
19 | import (
20 | "context"
21 | "fmt"
22 | "io"
23 | "io/ioutil"
24 | "math/rand"
25 | "net/http"
26 | "strings"
27 | "sync/atomic"
28 | "time"
29 | )
30 |
31 | // WithFailures returns an http.RoundTripper that wraps an existing
32 | // RoundTripper, causing failures according to the options given. If rt is
33 | // nil, the http.DefaultTransport is wrapped.
34 | func WithFailures(rt http.RoundTripper, opts ...FailureOption) http.RoundTripper {
35 | if rt == nil {
36 | rt = http.DefaultTransport
37 | }
38 | o := &options{
39 | rt: rt,
40 | }
41 | for _, opt := range opts {
42 | opt(o)
43 | }
44 | return o
45 | }
46 |
47 | type options struct {
48 | pathSubstrings []string
49 | failureRate float64
50 | status int
51 | stall time.Duration
52 | rt http.RoundTripper
53 | msg string
54 | trg *triggerReaderGroup
55 | }
56 |
57 | func (o *options) doRequest(req *http.Request) (*http.Response, error) {
58 | if o.trg != nil && req.Body != nil {
59 | req.Body = o.trg.new(req.Body)
60 | }
61 | resp, err := o.rt.RoundTrip(req)
62 | if resp != nil && o.trg != nil {
63 | resp.Body = o.trg.new(resp.Body)
64 | }
65 | return resp, err
66 | }
67 |
68 | func (o *options) RoundTrip(req *http.Request) (*http.Response, error) {
69 | // TODO: fix triggering conditions
70 | if rand.Float64() > o.failureRate {
71 | return o.doRequest(req)
72 | }
73 |
74 | var match bool
75 | if len(o.pathSubstrings) == 0 {
76 | match = true
77 | }
78 | for _, ss := range o.pathSubstrings {
79 | if strings.Contains(req.URL.Path, ss) {
80 | match = true
81 | break
82 | }
83 | }
84 | if !match {
85 | return o.doRequest(req)
86 | }
87 |
88 | if o.status > 0 {
89 | resp := &http.Response{
90 | Status: fmt.Sprintf("%d %s", o.status, http.StatusText(o.status)),
91 | StatusCode: o.status,
92 | Body: ioutil.NopCloser(strings.NewReader(o.msg)),
93 | Request: req,
94 | }
95 | return resp, nil
96 | }
97 |
98 | if o.stall > 0 {
99 | ctx := req.Context()
100 | select {
101 | case <-time.After(o.stall):
102 | case <-ctx.Done():
103 | }
104 | }
105 | return o.doRequest(req)
106 | }
107 |
108 | // A FailureOption specifies the kind of failure that the RoundTripper should
109 | // display.
110 | type FailureOption func(*options)
111 |
112 | // MatchPathSubstring restricts the RoundTripper to URLs whose paths contain
113 | // the given string. The default behavior is to match all paths.
114 | func MatchPathSubstring(s string) FailureOption {
115 | return func(o *options) {
116 | o.pathSubstrings = append(o.pathSubstrings, s)
117 | }
118 | }
119 |
120 | // FailureRate causes the RoundTripper to fail a certain percentage of the
121 | // time. rate should be a number between 0 and 1, where 0 will never fail and
122 | // 1 will always fail. The default is never to fail.
123 | func FailureRate(rate float64) FailureOption {
124 | return func(o *options) {
125 | o.failureRate = rate
126 | }
127 | }
128 |
129 | // Response simulates a given status code. The returned http.Response will
130 | // have its Status, StatusCode, and Body (with any predefined message) set.
131 | func Response(status int) FailureOption {
132 | return func(o *options) {
133 | o.status = status
134 | }
135 | }
136 |
137 | // Stall simulates a network connection failure by stalling for the given
138 | // duration.
139 | func Stall(dur time.Duration) FailureOption {
140 | return func(o *options) {
141 | o.stall = dur
142 | }
143 | }
144 |
145 | // If a specific Response is requested, the body will have the given message
146 | // set.
147 | func Body(msg string) FailureOption {
148 | return func(o *options) {
149 | o.msg = msg
150 | }
151 | }
152 |
153 | // Trigger will raise the RoundTripper's failure rate to 100% when the given
154 | // context is closed.
155 | func Trigger(ctx context.Context) FailureOption {
156 | return func(o *options) {
157 | go func() {
158 | <-ctx.Done()
159 | o.failureRate = 1
160 | }()
161 | }
162 | }
163 |
164 | // AfterNBytes will call effect once (roughly) n bytes have gone over the wire.
165 | // Both sent and received bytes are counted against the total. Only bytes in
166 | // the body of an HTTP request are currently counted; this may change in the
167 | // future. effect will only be called once, and it will block (allowing
168 | // callers to simulate connection hangs).
169 | func AfterNBytes(n int, effect func()) FailureOption {
170 | return func(o *options) {
171 | o.trg = &triggerReaderGroup{
172 | bytes: int64(n),
173 | trigger: effect,
174 | }
175 | }
176 | }
177 |
178 | type triggerReaderGroup struct {
179 | bytes int64
180 | trigger func()
181 | triggered int64
182 | }
183 |
184 | func (rg *triggerReaderGroup) new(rc io.ReadCloser) io.ReadCloser {
185 | return &triggerReader{
186 | ReadCloser: rc,
187 | bytes: &rg.bytes,
188 | trigger: rg.trigger,
189 | triggered: &rg.triggered,
190 | }
191 | }
192 |
193 | type triggerReader struct {
194 | io.ReadCloser
195 | bytes *int64
196 | trigger func()
197 | triggered *int64
198 | }
199 |
200 | func (r *triggerReader) Read(p []byte) (int, error) {
201 | n, err := r.ReadCloser.Read(p)
202 | if atomic.AddInt64(r.bytes, -int64(n)) < 0 && atomic.CompareAndSwapInt64(r.triggered, 0, 1) {
203 | // Can't use sync.Once because it blocks for *all* callers until Do returns.
204 | r.trigger()
205 | }
206 | return n, err
207 | }
208 |
--------------------------------------------------------------------------------
/b2/monitor.go:
--------------------------------------------------------------------------------
1 | // Copyright 2017, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package b2
16 |
17 | import (
18 | "fmt"
19 | "html/template"
20 | "math"
21 | "net/http"
22 | "sort"
23 | "time"
24 |
25 | "github.com/Backblaze/blazer/internal/b2assets"
26 | "github.com/Backblaze/blazer/x/window"
27 | )
28 |
29 | // StatusInfo reports information about a client.
30 | type StatusInfo struct {
31 | // Writers contains the status of all current uploads with progress.
32 | Writers map[string]*WriterStatus
33 |
34 | // Readers contains the status of all current downloads with progress.
35 | Readers map[string]*ReaderStatus
36 |
37 | // RPCs contains information about recently made RPC calls over the last
38 | // minute, five minutes, hour, and for all time.
39 | RPCs map[time.Duration]MethodList
40 | }
41 |
42 | // MethodList is an accumulation of RPC calls that have been made over a given
43 | // period of time.
44 | type MethodList []method
45 |
46 | // CountByMethod returns the total RPC calls made per method.
47 | func (ml MethodList) CountByMethod() map[string]int {
48 | r := make(map[string]int)
49 | for i := range ml {
50 | r[ml[i].name]++
51 | }
52 | return r
53 | }
54 |
55 | type method struct {
56 | name string
57 | duration time.Duration
58 | status int
59 | }
60 |
61 | type methodCounter struct {
62 | d time.Duration
63 | w *window.Window
64 | }
65 |
66 | func (mc methodCounter) record(m method) {
67 | mc.w.Insert([]method{m})
68 | }
69 |
70 | func (mc methodCounter) retrieve() MethodList {
71 | ms := mc.w.Reduce()
72 | return MethodList(ms.([]method))
73 | }
74 |
75 | func newMethodCounter(d, res time.Duration) methodCounter {
76 | r := func(i, j interface{}) interface{} {
77 | a, ok := i.([]method)
78 | if !ok {
79 | a = nil
80 | }
81 | b, ok := j.([]method)
82 | if !ok {
83 | b = nil
84 | }
85 | for _, m := range b {
86 | a = append(a, m)
87 | }
88 | return a
89 | }
90 | return methodCounter{
91 | d: d,
92 | w: window.New(d, res, r),
93 | }
94 | }
95 |
96 | // WriterStatus reports the status for each writer.
97 | type WriterStatus struct {
98 | // Progress is a slice of completion ratios. The index of a ratio is its
99 | // chunk id less one.
100 | Progress []float64
101 | }
102 |
103 | // ReaderStatus reports the status for each reader.
104 | type ReaderStatus struct {
105 | // Progress is a slice of completion ratios. The index of a ratio is its
106 | // chunk id less one.
107 | Progress []float64
108 | }
109 |
110 | // Status returns information about the current state of the client.
111 | func (c *Client) Status() *StatusInfo {
112 | c.slock.Lock()
113 | defer c.slock.Unlock()
114 |
115 | si := &StatusInfo{
116 | Writers: make(map[string]*WriterStatus),
117 | Readers: make(map[string]*ReaderStatus),
118 | RPCs: make(map[time.Duration]MethodList),
119 | }
120 |
121 | for name, w := range c.sWriters {
122 | si.Writers[name] = w.status()
123 | }
124 |
125 | for name, r := range c.sReaders {
126 | si.Readers[name] = r.status()
127 | }
128 |
129 | for _, c := range c.sMethods {
130 | si.RPCs[c.d] = c.retrieve()
131 | }
132 |
133 | return si
134 | }
135 |
136 | func (si *StatusInfo) table() map[string]map[string]int {
137 | r := make(map[string]map[string]int)
138 | for d, c := range si.RPCs {
139 | for _, m := range c {
140 | if _, ok := r[m.name]; !ok {
141 | r[m.name] = make(map[string]int)
142 | }
143 | dur := "all time"
144 | if d > 0 {
145 | dur = d.String()
146 | }
147 | r[m.name][dur]++
148 | }
149 | }
150 | return r
151 | }
152 |
153 | func (c *Client) addWriter(w *Writer) {
154 | c.slock.Lock()
155 | defer c.slock.Unlock()
156 |
157 | if c.sWriters == nil {
158 | c.sWriters = make(map[string]*Writer)
159 | }
160 |
161 | c.sWriters[fmt.Sprintf("%s/%s", w.o.b.Name(), w.name)] = w
162 | }
163 |
164 | func (c *Client) removeWriter(w *Writer) {
165 | c.slock.Lock()
166 | defer c.slock.Unlock()
167 |
168 | if c.sWriters == nil {
169 | return
170 | }
171 |
172 | delete(c.sWriters, fmt.Sprintf("%s/%s", w.o.b.Name(), w.name))
173 | }
174 |
175 | func (c *Client) addReader(r *Reader) {
176 | c.slock.Lock()
177 | defer c.slock.Unlock()
178 |
179 | if c.sReaders == nil {
180 | c.sReaders = make(map[string]*Reader)
181 | }
182 |
183 | c.sReaders[fmt.Sprintf("%s/%s", r.o.b.Name(), r.name)] = r
184 | }
185 |
186 | func (c *Client) removeReader(r *Reader) {
187 | c.slock.Lock()
188 | defer c.slock.Unlock()
189 |
190 | if c.sReaders == nil {
191 | return
192 | }
193 |
194 | delete(c.sReaders, fmt.Sprintf("%s/%s", r.o.b.Name(), r.name))
195 | }
196 |
197 | var (
198 | funcMap = template.FuncMap{
199 | "inc": func(i int) int { return i + 1 },
200 | "lookUp": func(m map[string]int, s string) int { return m[s] },
201 | "pRange": func(i int) string {
202 | f := float64(i)
203 | min := int(math.Pow(2, f)) - 1
204 | max := min + int(math.Pow(2, f))
205 | return fmt.Sprintf("%v - %v", time.Duration(min)*time.Millisecond, time.Duration(max)*time.Millisecond)
206 | },
207 | "methods": func(si *StatusInfo) []string {
208 | methods := make(map[string]bool)
209 | for _, ms := range si.RPCs {
210 | for _, m := range ms {
211 | methods[m.name] = true
212 | }
213 | }
214 | var names []string
215 | for name := range methods {
216 | names = append(names, name)
217 | }
218 | sort.Strings(names)
219 | return names
220 | },
221 | "durations": func(si *StatusInfo) []string {
222 | var ds []time.Duration
223 | for d := range si.RPCs {
224 | ds = append(ds, d)
225 | }
226 | sort.Slice(ds, func(i, j int) bool { return ds[i] < ds[j] })
227 | var r []string
228 | for _, d := range ds {
229 | dur := "all time"
230 | if d > 0 {
231 | dur = d.String()
232 | }
233 | r = append(r, dur)
234 | }
235 | return r
236 | },
237 | "table": func(si *StatusInfo) map[string]map[string]int { return si.table() },
238 | }
239 | statusTemplate = template.Must(template.New("status").Funcs(funcMap).Parse(string(b2assets.MustAsset("data/status.html"))))
240 | )
241 |
242 | // ServeHTTP serves diagnostic information about the current state of the
243 | // client; essentially everything available from Client.Status()
244 | //
245 | // ServeHTTP satisfies the http.Handler interface. This means that a Client
246 | // can be passed directly to a path via http.Handle (or on a custom ServeMux or
247 | // a custom http.Server).
248 | func (c *Client) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
249 | info := c.Status()
250 | statusTemplate.Execute(rw, info)
251 | }
252 |
--------------------------------------------------------------------------------
/internal/b2assets/b2assets.go:
--------------------------------------------------------------------------------
1 | // Code generated by go-bindata.
2 | // sources:
3 | // data/status.html
4 | // DO NOT EDIT!
5 |
6 | package b2assets
7 |
8 | import (
9 | "bytes"
10 | "compress/gzip"
11 | "fmt"
12 | "io"
13 | "io/ioutil"
14 | "os"
15 | "path/filepath"
16 | "strings"
17 | "time"
18 | )
19 |
20 | func bindataRead(data []byte, name string) ([]byte, error) {
21 | gz, err := gzip.NewReader(bytes.NewBuffer(data))
22 | if err != nil {
23 | return nil, fmt.Errorf("Read %q: %v", name, err)
24 | }
25 |
26 | var buf bytes.Buffer
27 | _, err = io.Copy(&buf, gz)
28 | clErr := gz.Close()
29 |
30 | if err != nil {
31 | return nil, fmt.Errorf("Read %q: %v", name, err)
32 | }
33 | if clErr != nil {
34 | return nil, err
35 | }
36 |
37 | return buf.Bytes(), nil
38 | }
39 |
40 | type asset struct {
41 | bytes []byte
42 | info os.FileInfo
43 | }
44 |
45 | type bindataFileInfo struct {
46 | name string
47 | size int64
48 | mode os.FileMode
49 | modTime time.Time
50 | }
51 |
52 | func (fi bindataFileInfo) Name() string {
53 | return fi.name
54 | }
55 | func (fi bindataFileInfo) Size() int64 {
56 | return fi.size
57 | }
58 | func (fi bindataFileInfo) Mode() os.FileMode {
59 | return fi.mode
60 | }
61 | func (fi bindataFileInfo) ModTime() time.Time {
62 | return fi.modTime
63 | }
64 | func (fi bindataFileInfo) IsDir() bool {
65 | return false
66 | }
67 | func (fi bindataFileInfo) Sys() interface{} {
68 | return nil
69 | }
70 |
71 | var _dataStatusHtml = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd4\x93\x41\x6f\xe3\x20\x10\x85\xef\xf9\x15\xb3\x56\x8e\x51\x90\x73\x5c\x4d\xb8\xec\xee\x79\xa3\xaa\x52\xd5\x23\x36\xa3\x60\x09\x43\x84\x71\x9a\xc8\xe2\xbf\x57\x18\x83\xa3\xb6\x87\x5e\x7b\xf2\x98\xf7\xe6\xf1\xbe\x03\xf8\xeb\xef\xff\x3f\xcf\xaf\xa7\x7f\xa0\x7c\xaf\xf9\x06\xf3\x87\x84\xe4\x1b\x00\xf4\x9d\xd7\xc4\x9b\x03\xb4\xba\x23\xe3\x61\xf0\xc2\x8f\x03\xb2\x74\xbe\x41\x96\x9c\xd8\x58\x79\x8f\x0b\xd3\xb4\xed\xc9\x2b\x2b\x07\xf8\x7d\x84\x3c\xee\x43\x48\x9a\x1c\x9d\xf0\x9d\x35\xb3\xba\xfe\x14\xdd\x8b\x46\x53\xd4\xd2\x90\xce\x51\xd5\xbc\xb5\xa3\xf1\xd0\xdc\xa1\xb5\x92\x90\xa9\x3a\xb5\x8b\xae\x38\xc5\x65\x27\xcc\x99\x60\xb9\x3e\x66\xe4\x26\x73\x48\x74\xbb\x64\x8d\xa3\xe4\xa5\x69\x08\xc8\xbc\xcc\x52\xc9\xc9\xed\xe6\xa4\x52\x75\xc9\x5a\x43\x3a\x23\xe9\x06\x4b\xf1\x7c\x79\xf1\x7f\xcc\x26\x23\x73\x1b\x96\xeb\xac\xa7\xc8\x0a\x50\x64\x1e\x2f\xda\x0a\x39\x64\xda\x87\x6e\x46\xf4\xb4\x83\xed\x55\xe8\xd8\x6e\xff\xe2\x3a\x4f\xae\x70\xaa\x03\x9f\xa6\x64\x82\x58\x40\x1d\x3e\xc1\x75\x72\x07\xdb\x8b\xb3\xe7\x99\xee\x2a\xf4\xfe\xe4\xec\xd9\xd1\xb0\x02\x46\xb4\x36\x3a\x43\x00\xbc\x2c\x2a\x5c\x85\x1e\xe9\x58\x4d\xd3\xbc\x1d\x42\x05\xbd\xb8\x1d\xab\xba\xe2\xc8\xb2\x89\x63\xe3\x80\x7d\x05\xfd\x80\xaa\x6a\x2e\xed\x9b\xf9\x26\xe1\x13\x09\xf9\xa3\x08\x91\xa5\x17\x81\x2c\xbd\xa8\xf7\x00\x00\x00\xff\xff\xd4\xf0\x90\xb4\x69\x03\x00\x00")
72 |
73 | func dataStatusHtmlBytes() ([]byte, error) {
74 | return bindataRead(
75 | _dataStatusHtml,
76 | "data/status.html",
77 | )
78 | }
79 |
80 | func dataStatusHtml() (*asset, error) {
81 | bytes, err := dataStatusHtmlBytes()
82 | if err != nil {
83 | return nil, err
84 | }
85 |
86 | info := bindataFileInfo{name: "data/status.html", size: 873, mode: os.FileMode(436), modTime: time.Unix(1520578750, 0)}
87 | a := &asset{bytes: bytes, info: info}
88 | return a, nil
89 | }
90 |
91 | // Asset loads and returns the asset for the given name.
92 | // It returns an error if the asset could not be found or
93 | // could not be loaded.
94 | func Asset(name string) ([]byte, error) {
95 | cannonicalName := strings.Replace(name, "\\", "/", -1)
96 | if f, ok := _bindata[cannonicalName]; ok {
97 | a, err := f()
98 | if err != nil {
99 | return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
100 | }
101 | return a.bytes, nil
102 | }
103 | return nil, fmt.Errorf("Asset %s not found", name)
104 | }
105 |
106 | // MustAsset is like Asset but panics when Asset would return an error.
107 | // It simplifies safe initialization of global variables.
108 | func MustAsset(name string) []byte {
109 | a, err := Asset(name)
110 | if err != nil {
111 | panic("asset: Asset(" + name + "): " + err.Error())
112 | }
113 |
114 | return a
115 | }
116 |
117 | // AssetInfo loads and returns the asset info for the given name.
118 | // It returns an error if the asset could not be found or
119 | // could not be loaded.
120 | func AssetInfo(name string) (os.FileInfo, error) {
121 | cannonicalName := strings.Replace(name, "\\", "/", -1)
122 | if f, ok := _bindata[cannonicalName]; ok {
123 | a, err := f()
124 | if err != nil {
125 | return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
126 | }
127 | return a.info, nil
128 | }
129 | return nil, fmt.Errorf("AssetInfo %s not found", name)
130 | }
131 |
132 | // AssetNames returns the names of the assets.
133 | func AssetNames() []string {
134 | names := make([]string, 0, len(_bindata))
135 | for name := range _bindata {
136 | names = append(names, name)
137 | }
138 | return names
139 | }
140 |
141 | // _bindata is a table, holding each asset generator, mapped to its name.
142 | var _bindata = map[string]func() (*asset, error){
143 | "data/status.html": dataStatusHtml,
144 | }
145 |
146 | // AssetDir returns the file names below a certain
147 | // directory embedded in the file by go-bindata.
148 | // For example if you run go-bindata on data/... and data contains the
149 | // following hierarchy:
150 | //
151 | // data/
152 | // foo.txt
153 | // img/
154 | // a.png
155 | // b.png
156 | //
157 | // then AssetDir("data") would return []string{"foo.txt", "img"}
158 | // AssetDir("data/img") would return []string{"a.png", "b.png"}
159 | // AssetDir("foo.txt") and AssetDir("notexist") would return an error
160 | // AssetDir("") will return []string{"data"}.
161 | func AssetDir(name string) ([]string, error) {
162 | node := _bintree
163 | if len(name) != 0 {
164 | cannonicalName := strings.Replace(name, "\\", "/", -1)
165 | pathList := strings.Split(cannonicalName, "/")
166 | for _, p := range pathList {
167 | node = node.Children[p]
168 | if node == nil {
169 | return nil, fmt.Errorf("Asset %s not found", name)
170 | }
171 | }
172 | }
173 | if node.Func != nil {
174 | return nil, fmt.Errorf("Asset %s not found", name)
175 | }
176 | rv := make([]string, 0, len(node.Children))
177 | for childName := range node.Children {
178 | rv = append(rv, childName)
179 | }
180 | return rv, nil
181 | }
182 |
183 | type bintree struct {
184 | Func func() (*asset, error)
185 | Children map[string]*bintree
186 | }
187 |
188 | var _bintree = &bintree{nil, map[string]*bintree{
189 | "data": {nil, map[string]*bintree{
190 | "status.html": {dataStatusHtml, map[string]*bintree{}},
191 | }},
192 | }}
193 |
194 | // RestoreAsset restores an asset under the given directory
195 | func RestoreAsset(dir, name string) error {
196 | data, err := Asset(name)
197 | if err != nil {
198 | return err
199 | }
200 | info, err := AssetInfo(name)
201 | if err != nil {
202 | return err
203 | }
204 | err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
205 | if err != nil {
206 | return err
207 | }
208 | err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
209 | if err != nil {
210 | return err
211 | }
212 | err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
213 | if err != nil {
214 | return err
215 | }
216 | return nil
217 | }
218 |
219 | // RestoreAssets restores an asset under the given directory recursively
220 | func RestoreAssets(dir, name string) error {
221 | children, err := AssetDir(name)
222 | // File
223 | if err != nil {
224 | return RestoreAsset(dir, name)
225 | }
226 | // Dir
227 | for _, child := range children {
228 | err = RestoreAssets(dir, filepath.Join(name, child))
229 | if err != nil {
230 | return err
231 | }
232 | }
233 | return nil
234 | }
235 |
236 | func _filePath(dir, name string) string {
237 | cannonicalName := strings.Replace(name, "\\", "/", -1)
238 | return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
239 | }
240 |
--------------------------------------------------------------------------------
/b2/reader.go:
--------------------------------------------------------------------------------
1 | // Copyright 2016, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package b2
16 |
17 | import (
18 | "bytes"
19 | "context"
20 | "crypto/sha1"
21 | "errors"
22 | "fmt"
23 | "hash"
24 | "io"
25 | "sync"
26 | "time"
27 |
28 | "github.com/Backblaze/blazer/internal/blog"
29 | )
30 |
31 | var errNoMoreContent = errors.New("416: out of content")
32 |
33 | // Reader reads files from B2.
34 | type Reader struct {
35 | // ConcurrentDownloads is the number of simultaneous downloads to pull from
36 | // B2. Values greater than one will cause B2 to make multiple HTTP requests
37 | // for a given file, increasing available bandwidth at the cost of buffering
38 | // the downloads in memory.
39 | ConcurrentDownloads int
40 |
41 | // ChunkSize is the size to fetch per ConcurrentDownload. The default is
42 | // 10MB.
43 | ChunkSize int
44 |
45 | ctx context.Context
46 | cancel context.CancelFunc // cancels ctx
47 | o *Object
48 | name string
49 | offset int64 // the start of the file
50 | length int64 // the length to read, or -1
51 | csize int // chunk size
52 | read int // amount read
53 | chwid int // chunks written
54 | chrid int // chunks read
55 | chbuf chan *rchunk
56 | init sync.Once
57 | chunks map[int]*rchunk
58 | vrfy hash.Hash
59 | readOffEnd bool
60 | sha1 string
61 |
62 | rmux sync.Mutex // guards rcond
63 | rcond *sync.Cond
64 |
65 | emux sync.RWMutex // guards err, believe it or not
66 | err error
67 |
68 | smux sync.Mutex
69 | smap map[int]*meteredReader
70 | }
71 |
72 | type rchunk struct {
73 | bytes.Buffer
74 | final bool
75 | }
76 |
77 | // Close frees resources associated with the download.
78 | func (r *Reader) Close() error {
79 | r.cancel()
80 | r.o.b.c.removeReader(r)
81 | return nil
82 | }
83 |
84 | func (r *Reader) setErr(err error) {
85 | r.emux.Lock()
86 | defer r.emux.Unlock()
87 | if r.err == nil {
88 | r.err = err
89 | r.cancel()
90 | }
91 | }
92 |
93 | func (r *Reader) setErrNoCancel(err error) {
94 | r.emux.Lock()
95 | defer r.emux.Unlock()
96 | if r.err == nil {
97 | r.err = err
98 | }
99 | }
100 |
101 | func (r *Reader) getErr() error {
102 | r.emux.RLock()
103 | defer r.emux.RUnlock()
104 | return r.err
105 | }
106 |
107 | func (r *Reader) thread() {
108 | go func() {
109 | for {
110 | var buf *rchunk
111 | select {
112 | case b, ok := <-r.chbuf:
113 | if !ok {
114 | return
115 | }
116 | buf = b
117 | case <-r.ctx.Done():
118 | return
119 | }
120 | r.rmux.Lock()
121 | chunkID := r.chwid
122 | r.chwid++
123 | r.rmux.Unlock()
124 | offset := int64(chunkID*r.csize) + r.offset
125 | size := int64(r.csize)
126 | if r.length > 0 {
127 | if size > r.length {
128 | buf.final = true
129 | size = r.length
130 | }
131 | r.length -= size
132 | }
133 | var b backoff
134 | redo:
135 | fr, err := r.o.b.b.downloadFileByName(r.ctx, r.name, offset, size, false)
136 | if err == errNoMoreContent {
137 | // this read generated a 416 so we are entirely past the end of the object
138 | r.rmux.Lock()
139 | r.readOffEnd = true
140 | buf.final = true
141 | r.chunks[chunkID] = buf
142 | r.rmux.Unlock()
143 | r.rcond.Broadcast()
144 | return
145 | }
146 | if err != nil {
147 | r.setErr(err)
148 | r.rcond.Broadcast()
149 | return
150 | }
151 | r.rmux.Lock()
152 | rsize, _, sha1, _ := fr.stats()
153 | if len(sha1) == 40 && r.sha1 != sha1 {
154 | r.sha1 = sha1
155 | }
156 | r.rmux.Unlock()
157 | mr := &meteredReader{r: noopResetter{fr}, size: int(rsize)}
158 | r.smux.Lock()
159 | r.smap[chunkID] = mr
160 | r.smux.Unlock()
161 | i, err := copyContext(r.ctx, buf, mr)
162 | fr.Close()
163 | r.smux.Lock()
164 | r.smap[chunkID] = nil
165 | r.smux.Unlock()
166 | if i < int64(rsize) || err == io.ErrUnexpectedEOF {
167 | // Probably the network connection was closed early. Retry.
168 | blog.V(1).Infof("b2 reader %d: got %dB of %dB; retrying after %v", chunkID, i, rsize, b)
169 | if err := b.wait(r.ctx); err != nil {
170 | r.setErr(err)
171 | r.rcond.Broadcast()
172 | return
173 | }
174 | buf.Reset()
175 | goto redo
176 | }
177 | if err != nil {
178 | r.setErr(err)
179 | r.rcond.Broadcast()
180 | return
181 | }
182 | r.rmux.Lock()
183 | r.chunks[chunkID] = buf
184 | r.rmux.Unlock()
185 | r.rcond.Broadcast()
186 | }
187 | }()
188 | }
189 |
190 | func (r *Reader) curChunk() (*rchunk, error) {
191 | ch := make(chan *rchunk)
192 | go func() {
193 | r.rmux.Lock()
194 | defer r.rmux.Unlock()
195 | for r.chunks[r.chrid] == nil && r.getErr() == nil && r.ctx.Err() == nil {
196 | r.rcond.Wait()
197 | }
198 | select {
199 | case ch <- r.chunks[r.chrid]:
200 | case <-r.ctx.Done():
201 | return
202 | }
203 | }()
204 | select {
205 | case buf := <-ch:
206 | return buf, r.getErr()
207 | case <-r.ctx.Done():
208 | if r.getErr() != nil {
209 | return nil, r.getErr()
210 | }
211 | return nil, r.ctx.Err()
212 | }
213 | }
214 |
215 | func (r *Reader) initFunc() {
216 | r.smux.Lock()
217 | r.smap = make(map[int]*meteredReader)
218 | r.smux.Unlock()
219 | r.o.b.c.addReader(r)
220 | r.rcond = sync.NewCond(&r.rmux)
221 | cr := r.ConcurrentDownloads
222 | if cr < 1 {
223 | cr = 1
224 | }
225 | if r.ChunkSize < 1 {
226 | r.ChunkSize = 1e7
227 | }
228 | r.csize = r.ChunkSize
229 | r.chbuf = make(chan *rchunk, cr)
230 | for i := 0; i < cr; i++ {
231 | r.thread()
232 | r.chbuf <- &rchunk{}
233 | }
234 | r.vrfy = sha1.New()
235 | }
236 |
237 | func (r *Reader) Read(p []byte) (int, error) {
238 | if err := r.getErr(); err != nil {
239 | return 0, err
240 | }
241 | r.init.Do(r.initFunc)
242 | chunk, err := r.curChunk()
243 | if err != nil {
244 | r.setErrNoCancel(err)
245 | return 0, err
246 | }
247 | n, err := chunk.Read(p)
248 | r.vrfy.Write(p[:n]) // Hash.Write never returns an error.
249 | r.read += n
250 | if err == io.EOF {
251 | if chunk.final {
252 | close(r.chbuf)
253 | r.setErrNoCancel(err)
254 | return n, err
255 | }
256 | r.chrid++
257 | chunk.Reset()
258 | r.chbuf <- chunk
259 | err = nil
260 | }
261 | r.setErrNoCancel(err)
262 | return n, err
263 | }
264 |
265 | func (r *Reader) status() *ReaderStatus {
266 | r.smux.Lock()
267 | defer r.smux.Unlock()
268 |
269 | rs := &ReaderStatus{
270 | Progress: make([]float64, len(r.smap)),
271 | }
272 |
273 | for i := 1; i <= len(r.smap); i++ {
274 | rs.Progress[i-1] = r.smap[i].done()
275 | }
276 |
277 | return rs
278 | }
279 |
280 | // Verify checks the SHA1 hash on download and compares it to the SHA1 hash
281 | // submitted on upload. If the two differ, this returns an error. If the
282 | // correct hash could not be calculated (if, for example, the entire object was
283 | // not read, or if the object was uploaded as a "large file" and thus the SHA1
284 | // hash was not sent), this returns (nil, false).
285 | func (r *Reader) Verify() (error, bool) {
286 | got := fmt.Sprintf("%x", r.vrfy.Sum(nil))
287 | if r.sha1 == got {
288 | return nil, true
289 | }
290 | // TODO: if the exact length of the file is requested AND the checksum is
291 | // bad, this will return (nil, false) instead of (an error, true). This is
292 | // because there's no good way that I can tell to determine that we've hit
293 | // the end of the file without reading off the end. Consider reading N+1
294 | // bytes at the very end to close this hole.
295 | if r.offset > 0 || !r.readOffEnd || len(r.sha1) != 40 {
296 | return nil, false
297 | }
298 | return fmt.Errorf("bad hash: got %v, want %v", got, r.sha1), true
299 | }
300 |
301 | // strip a writer of any non-Write methods
302 | type onlyWriter struct{ w io.Writer }
303 |
304 | func (ow onlyWriter) Write(p []byte) (int, error) { return ow.w.Write(p) }
305 |
306 | func copyContext(ctx context.Context, w io.Writer, r io.Reader) (int64, error) {
307 | var n int64
308 | var err error
309 | done := make(chan struct{})
310 | go func() {
311 | if _, ok := w.(*Writer); ok {
312 | w = onlyWriter{w}
313 | }
314 | n, err = io.Copy(w, r)
315 | close(done)
316 | }()
317 | select {
318 | case <-done:
319 | return n, err
320 | case <-ctx.Done():
321 | return 0, ctx.Err()
322 | }
323 | }
324 |
325 | type noopResetter struct {
326 | io.Reader
327 | }
328 |
329 | func (noopResetter) Reset() error { return nil }
330 |
331 | type backoff time.Duration
332 |
333 | func (b *backoff) wait(ctx context.Context) error {
334 | if *b == 0 {
335 | *b = backoff(time.Millisecond)
336 | }
337 | select {
338 | case <-time.After(time.Duration(*b)):
339 | if time.Duration(*b) < time.Second*10 {
340 | *b <<= 1
341 | }
342 | return nil
343 | case <-ctx.Done():
344 | return ctx.Err()
345 | }
346 | }
347 |
348 | func (b backoff) String() string {
349 | return time.Duration(b).String()
350 | }
351 |
--------------------------------------------------------------------------------
/b2/iterator.go:
--------------------------------------------------------------------------------
1 | // Copyright 2018, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package b2
16 |
17 | import (
18 | "context"
19 | "io"
20 | "sync"
21 | )
22 |
23 | // List returns an iterator for selecting objects in a bucket. The default
24 | // behavior, with no options, is to list all currently un-hidden objects.
25 | func (b *Bucket) List(ctx context.Context, opts ...ListOption) *ObjectIterator {
26 | o := &ObjectIterator{
27 | bucket: b,
28 | ctx: ctx,
29 | }
30 | for _, opt := range opts {
31 | opt(&o.opts)
32 | }
33 | return o
34 | }
35 |
36 | // ObjectIterator abstracts away the tricky bits of iterating over a bucket's
37 | // contents.
38 | //
39 | // It is intended to be called in a loop:
40 | //
41 | // for iter.Next() {
42 | // obj := iter.Object()
43 | // // act on obj
44 | // }
45 | // if err := iter.Err(); err != nil {
46 | // // handle err
47 | // }
48 | type ObjectIterator struct {
49 | bucket *Bucket
50 | ctx context.Context
51 | final bool
52 | err error
53 | idx int
54 | c *cursor
55 | opts objectIteratorOptions
56 | objs []*Object
57 | init sync.Once
58 | l lister
59 | count int
60 | }
61 |
62 | type lister func(context.Context, int, *cursor) ([]*Object, *cursor, error)
63 |
64 | func (o *ObjectIterator) page(ctx context.Context) error {
65 | if o.opts.locker != nil {
66 | o.opts.locker.Lock()
67 | defer o.opts.locker.Unlock()
68 | }
69 | objs, c, err := o.l(ctx, o.count, o.c)
70 | if err != nil && err != io.EOF {
71 | if bNotExist.MatchString(err.Error()) {
72 | return b2err{
73 | err: err,
74 | notFoundErr: true,
75 | }
76 | }
77 | return err
78 | }
79 | o.c = c
80 | o.objs = objs
81 | o.idx = 0
82 | if err == io.EOF {
83 | o.final = true
84 | }
85 | return nil
86 | }
87 |
88 | // Next advances the iterator to the next object. It should be called before
89 | // any calls to Object(). If Next returns true, then the next call to Object()
90 | // will be valid. Once Next returns false, it is important to check the return
91 | // value of Err().
92 | func (o *ObjectIterator) Next() bool {
93 | o.init.Do(func() {
94 | o.count = o.opts.pageSize
95 | if o.count < 0 || o.count > 1000 {
96 | o.count = 1000
97 | }
98 | switch {
99 | case o.opts.unfinished:
100 | o.l = o.bucket.listUnfinishedLargeFiles
101 | if o.count > 100 {
102 | o.count = 100
103 | }
104 | case o.opts.hidden:
105 | o.l = o.bucket.listObjects
106 | default:
107 | o.l = o.bucket.listCurrentObjects
108 | }
109 | o.c = &cursor{
110 | prefix: o.opts.prefix,
111 | delimiter: o.opts.delimiter,
112 | }
113 | })
114 | if o.err != nil {
115 | return false
116 | }
117 | if o.ctx.Err() != nil {
118 | o.err = o.ctx.Err()
119 | return false
120 | }
121 | if o.idx >= len(o.objs) {
122 | if o.final {
123 | o.err = io.EOF
124 | return false
125 | }
126 | if err := o.page(o.ctx); err != nil {
127 | o.err = err
128 | return false
129 | }
130 | return o.Next()
131 | }
132 | o.idx++
133 | return true
134 | }
135 |
136 | // Object returns the current object.
137 | func (o *ObjectIterator) Object() *Object {
138 | return o.objs[o.idx-1]
139 | }
140 |
141 | // Err returns the current error or nil. If Next() returns false and Err() is
142 | // nil, then all objects have been seen.
143 | func (o *ObjectIterator) Err() error {
144 | if o.err == io.EOF {
145 | return nil
146 | }
147 | return o.err
148 | }
149 |
150 | type objectIteratorOptions struct {
151 | hidden bool
152 | unfinished bool
153 | prefix string
154 | delimiter string
155 | pageSize int
156 | locker sync.Locker
157 | }
158 |
159 | // A ListOption alters the default behavior of List.
160 | type ListOption func(*objectIteratorOptions)
161 |
162 | // ListHidden will include hidden objects in the output.
163 | func ListHidden() ListOption {
164 | return func(o *objectIteratorOptions) {
165 | o.hidden = true
166 | }
167 | }
168 |
169 | // ListUnfinished will list unfinished large file operations instead of
170 | // existing objects.
171 | func ListUnfinished() ListOption {
172 | return func(o *objectIteratorOptions) {
173 | o.unfinished = true
174 | }
175 | }
176 |
177 | // ListPrefix will restrict the output to objects whose names begin with
178 | // prefix.
179 | func ListPrefix(pfx string) ListOption {
180 | return func(o *objectIteratorOptions) {
181 | o.prefix = pfx
182 | }
183 | }
184 |
185 | // ListDelimiter denotes the path separator. If set, object listings will be
186 | // truncated at this character.
187 | //
188 | // For example, if the bucket contains objects foo/bar, foo/baz, and foo,
189 | // then a delimiter of "/" will cause the listing to return "foo" and "foo/".
190 | // Otherwise, the listing would have returned all object names.
191 | //
192 | // Note that objects returned that end in the delimiter may not be actual
193 | // objects, e.g. you cannot read from (or write to, or delete) an object
194 | // "foo/", both because no actual object exists and because B2 disallows object
195 | // names that end with "/". If you want to ensure that all objects returned
196 | // are actual objects, leave this unset.
197 | func ListDelimiter(delimiter string) ListOption {
198 | return func(o *objectIteratorOptions) {
199 | o.delimiter = delimiter
200 | }
201 | }
202 |
203 | // ListPageSize configures the iterator to request the given number of objects
204 | // per network round-trip. The default (and maximum) is 1000 objects, except
205 | // for unfinished large files, which is 100.
206 | func ListPageSize(count int) ListOption {
207 | return func(o *objectIteratorOptions) {
208 | o.pageSize = count
209 | }
210 | }
211 |
212 | // ListLocker passes the iterator a lock which will be held during network
213 | // round-trips.
214 | func ListLocker(l sync.Locker) ListOption {
215 | return func(o *objectIteratorOptions) {
216 | o.locker = l
217 | }
218 | }
219 |
220 | type cursor struct {
221 | // Prefix limits the listed objects to those that begin with this string.
222 | prefix string
223 |
224 | // Delimiter denotes the path separator. If set, object listings will be
225 | // truncated at this character.
226 | //
227 | // For example, if the bucket contains objects foo/bar, foo/baz, and foo,
228 | // then a delimiter of "/" will cause the listing to return "foo" and "foo/".
229 | // Otherwise, the listing would have returned all object names.
230 | //
231 | // Note that objects returned that end in the delimiter may not be actual
232 | // objects, e.g. you cannot read from (or write to, or delete) an object "foo/",
233 | // both because no actual object exists and because B2 disallows object names
234 | // that end with "/". If you want to ensure that all objects returned by
235 | // ListObjects and ListCurrentObjects are actual objects, leave this unset.
236 | delimiter string
237 |
238 | name string
239 | id string
240 | }
241 |
242 | func (b *Bucket) listObjects(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) {
243 | if c == nil {
244 | c = &cursor{}
245 | }
246 | fs, name, id, err := b.b.listFileVersions(ctx, count, c.name, c.id, c.prefix, c.delimiter)
247 | if err != nil {
248 | return nil, nil, err
249 | }
250 | var next *cursor
251 | if name != "" && id != "" {
252 | next = &cursor{
253 | prefix: c.prefix,
254 | delimiter: c.delimiter,
255 | name: name,
256 | id: id,
257 | }
258 | }
259 | var objects []*Object
260 | for _, f := range fs {
261 | // b2_list_file_versions returns unfinished large files ("start"), but we're only interested in
262 | // regular ("upload") and hidden ("hide") files.
263 | if f.status() == "upload" || f.status() == "hide" {
264 | objects = append(objects, &Object{
265 | name: f.name(),
266 | f: f,
267 | b: b,
268 | })
269 | }
270 | }
271 | var rtnErr error
272 | if len(objects) == 0 || next == nil {
273 | rtnErr = io.EOF
274 | }
275 | return objects, next, rtnErr
276 | }
277 |
278 | func (b *Bucket) listCurrentObjects(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) {
279 | if c == nil {
280 | c = &cursor{}
281 | }
282 | fs, name, err := b.b.listFileNames(ctx, count, c.name, c.prefix, c.delimiter)
283 | if err != nil {
284 | return nil, nil, err
285 | }
286 | var next *cursor
287 | if name != "" {
288 | next = &cursor{
289 | prefix: c.prefix,
290 | delimiter: c.delimiter,
291 | name: name,
292 | }
293 | }
294 | var objects []*Object
295 | for _, f := range fs {
296 | objects = append(objects, &Object{
297 | name: f.name(),
298 | f: f,
299 | b: b,
300 | })
301 | }
302 | var rtnErr error
303 | if len(objects) == 0 || next == nil {
304 | rtnErr = io.EOF
305 | }
306 | return objects, next, rtnErr
307 | }
308 |
309 | func (b *Bucket) listUnfinishedLargeFiles(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) {
310 | if c == nil {
311 | c = &cursor{}
312 | }
313 | fs, name, err := b.b.listUnfinishedLargeFiles(ctx, count, c.name)
314 | if err != nil {
315 | return nil, nil, err
316 | }
317 | var next *cursor
318 | if name != "" {
319 | next = &cursor{
320 | name: name,
321 | }
322 | }
323 | var objects []*Object
324 | for _, f := range fs {
325 | objects = append(objects, &Object{
326 | name: f.name(),
327 | f: f,
328 | b: b,
329 | })
330 | }
331 | var rtnErr error
332 | if len(objects) == 0 || next == nil {
333 | rtnErr = io.EOF
334 | }
335 | return objects, next, rtnErr
336 | }
337 |
--------------------------------------------------------------------------------
/x/consistent/consistent.go:
--------------------------------------------------------------------------------
1 | // Copyright 2016, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // Package consistent implements an experimental interface for using B2 as a
16 | // coordination primitive.
17 | package consistent
18 |
19 | import (
20 | "bytes"
21 | "context"
22 | "crypto/rand"
23 | "encoding/base64"
24 | "encoding/json"
25 | "errors"
26 | "fmt"
27 | "io"
28 | "io/ioutil"
29 | "reflect"
30 | "time"
31 |
32 | "github.com/Backblaze/blazer/b2"
33 | )
34 |
35 | const metaKey = "blazer-meta-key-no-touchie"
36 |
37 | var (
38 | errUpdateConflict = errors.New("update conflict")
39 | errNotInGroup = errors.New("not in group")
40 | )
41 |
42 | // NewGroup creates a new consistent Group for the given bucket.
43 | func NewGroup(bucket *b2.Bucket, name string) *Group {
44 | return &Group{
45 | name: name,
46 | b: bucket,
47 | }
48 | }
49 |
50 | // Group represents a collection of B2 objects that can be modified in a
51 | // consistent way. Objects in the same group contend with each other for
52 | // updates, but there can only be so many (maximum of 10; fewer if there are
53 | // other bucket attributes set) groups in a given bucket.
54 | type Group struct {
55 | name string
56 | b *b2.Bucket
57 | ba *b2.BucketAttrs
58 | }
59 |
60 | // Mutex returns a new mutex on the given group. Only one caller can hold the
61 | // lock on a mutex with a given name, for a given group.
62 | func (g *Group) Mutex(ctx context.Context, name string) *Mutex {
63 | return &Mutex{
64 | g: g,
65 | name: name,
66 | ctx: ctx,
67 | }
68 | }
69 |
70 | // Operate calls f with the contents of the group object given by name, and
71 | // updates that object with the output of f if f returns no error. Operate
72 | // guarantees that no other callers have modified the contents of name in the
73 | // meantime (as long as all other callers are using this package). It may call
74 | // f any number of times and, as a result, the potential data transfer is
75 | // unbounded. Callers should have f fail after a given number of attempts if
76 | // this is unacceptable.
77 | //
78 | // The io.Reader that f returns is guaranteed to be read until at least the
79 | // first error. Callers must ensure that this is sufficient for the reader to
80 | // clean up after itself.
81 | func (g *Group) OperateStream(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error {
82 | for {
83 | r, err := g.NewReader(ctx, name)
84 | if err != nil && err != errNotInGroup {
85 | return err
86 | }
87 | out, err := f(r)
88 | r.Close()
89 | if err != nil {
90 | return err
91 | }
92 | defer io.Copy(ioutil.Discard, out) // ensure the reader is read
93 | w, err := g.NewWriter(ctx, r.Key, name)
94 | if err != nil {
95 | return err
96 | }
97 | if _, err := io.Copy(w, out); err != nil {
98 | return err
99 | }
100 | if err := w.Close(); err != nil {
101 | if err == errUpdateConflict {
102 | continue
103 | }
104 | return err
105 | }
106 | return nil
107 | }
108 | }
109 |
110 | // Operate uses OperateStream to act on byte slices.
111 | func (g *Group) Operate(ctx context.Context, name string, f func([]byte) ([]byte, error)) error {
112 | return g.OperateStream(ctx, name, func(r io.Reader) (io.Reader, error) {
113 | b, err := ioutil.ReadAll(r)
114 | if b2.IsNotExist(err) {
115 | b = nil
116 | err = nil
117 | }
118 | if err != nil {
119 | return nil, err
120 | }
121 | bs, err := f(b)
122 | if err != nil {
123 | return nil, err
124 | }
125 | return bytes.NewReader(bs), nil
126 | })
127 | }
128 |
129 | // OperateJSON is a convenience function for transforming JSON data in B2 in a
130 | // consistent way. Callers should pass a function f which accepts a pointer to
131 | // a struct of a given type and transforms it into another struct (ideally but
132 | // not necessarily of the same type). Callers should also pass an example
133 | // struct, t, or a pointer to it, that is the same type. t will not be
134 | // altered. If there is no existing file, f will be called with an pointer to
135 | // an empty struct of type t. Otherwise, it will be called with a pointer to a
136 | // struct filled out with the given JSON.
137 | func (g *Group) OperateJSON(ctx context.Context, name string, t interface{}, f func(interface{}) (interface{}, error)) error {
138 | jsonType := reflect.TypeOf(t)
139 | for jsonType.Kind() == reflect.Ptr {
140 | jsonType = jsonType.Elem()
141 | }
142 | return g.OperateStream(ctx, name, func(r io.Reader) (io.Reader, error) {
143 | in := reflect.New(jsonType).Interface()
144 | if err := json.NewDecoder(r).Decode(in); err != nil && err != io.EOF && !b2.IsNotExist(err) {
145 | return nil, err
146 | }
147 | out, err := f(in)
148 | if err != nil {
149 | return nil, err
150 | }
151 | pr, pw := io.Pipe()
152 | go func() { pw.CloseWithError(json.NewEncoder(pw).Encode(out)) }()
153 | return closeAfterReading{rc: pr}, nil
154 | })
155 | }
156 |
157 | // closeAfterReading closes the underlying reader on the first non-nil error
158 | type closeAfterReading struct {
159 | rc io.ReadCloser
160 | }
161 |
162 | func (car closeAfterReading) Read(p []byte) (int, error) {
163 | n, err := car.rc.Read(p)
164 | if err != nil {
165 | car.rc.Close()
166 | }
167 | return n, err
168 | }
169 |
170 | // Writer is an io.ReadCloser.
171 | type Writer struct {
172 | ctx context.Context
173 | wc io.WriteCloser
174 | name string
175 | suffix string
176 | key string
177 | g *Group
178 | }
179 |
180 | // Write implements io.Write.
181 | func (w Writer) Write(p []byte) (int, error) { return w.wc.Write(p) }
182 |
183 | // Close writes any remaining data into B2 and updates the group to reflect the
184 | // contents of the new object. If the group object has been modified, Close()
185 | // will fail.
186 | func (w Writer) Close() error {
187 | if err := w.wc.Close(); err != nil {
188 | return err
189 | }
190 | // TODO: maybe see if you can cut down on calls to info()
191 | for {
192 | ci, err := w.g.info(w.ctx)
193 | if err != nil {
194 | // Replacement failed; delete the new version.
195 | w.g.b.Object(w.name + "/" + w.suffix).Delete(w.ctx)
196 | return err
197 | }
198 | old, ok := ci.Locations[w.name]
199 | if ok && old != w.key {
200 | w.g.b.Object(w.name + "/" + w.suffix).Delete(w.ctx)
201 | return errUpdateConflict
202 | }
203 | ci.Locations[w.name] = w.suffix
204 | if err := w.g.save(w.ctx, ci); err != nil {
205 | if err == errUpdateConflict {
206 | continue
207 | }
208 | w.g.b.Object(w.name + "/" + w.suffix).Delete(w.ctx)
209 | return err
210 | }
211 | // Replacement successful; delete the old version.
212 | w.g.b.Object(w.name + "/" + w.key).Delete(w.ctx)
213 | return nil
214 | }
215 | }
216 |
217 | // Reader is an io.ReadCloser. Key must be passed to NewWriter.
218 | type Reader struct {
219 | r io.ReadCloser
220 | Key string
221 | }
222 |
223 | func (r Reader) Read(p []byte) (int, error) {
224 | if r.r == nil {
225 | return 0, io.EOF
226 | }
227 | return r.r.Read(p)
228 | }
229 |
230 | func (r Reader) Close() error {
231 | if r.r == nil {
232 | return nil
233 | }
234 | return r.r.Close()
235 | }
236 |
237 | // NewWriter creates a Writer and prepares it to be updated. The key argument
238 | // should come from the Key field of a Reader; if Writer.Close() returns with
239 | // no error, then the underlying group object was successfully updated from the
240 | // data available from the Reader with no intervening writes. New objects can
241 | // be created with an empty key.
242 | func (g *Group) NewWriter(ctx context.Context, key, name string) (Writer, error) {
243 | suffix, err := random()
244 | if err != nil {
245 | return Writer{}, err
246 | }
247 | return Writer{
248 | ctx: ctx,
249 | wc: g.b.Object(name + "/" + suffix).NewWriter(ctx),
250 | name: name,
251 | suffix: suffix,
252 | key: key,
253 | g: g,
254 | }, nil
255 | }
256 |
257 | // NewReader creates a Reader with the current version of the object, as well
258 | // as that object's update key.
259 | func (g *Group) NewReader(ctx context.Context, name string) (Reader, error) {
260 | ci, err := g.info(ctx)
261 | if err != nil {
262 | return Reader{}, err
263 | }
264 | suffix, ok := ci.Locations[name]
265 | if !ok {
266 | return Reader{}, errNotInGroup
267 | }
268 | return Reader{
269 | r: g.b.Object(name + "/" + suffix).NewReader(ctx),
270 | Key: suffix,
271 | }, nil
272 | }
273 |
274 | func (g *Group) info(ctx context.Context) (*consistentInfo, error) {
275 | attrs, err := g.b.Attrs(ctx)
276 | if err != nil {
277 | return nil, err
278 | }
279 | g.ba = attrs
280 | imap := attrs.Info
281 | if imap == nil {
282 | return nil, nil
283 | }
284 | enc, ok := imap[metaKey+"-"+g.name]
285 | if !ok {
286 | return &consistentInfo{
287 | Version: 1,
288 | Locations: make(map[string]string),
289 | }, nil
290 | }
291 | b, err := base64.StdEncoding.DecodeString(enc)
292 | if err != nil {
293 | return nil, err
294 | }
295 | ci := &consistentInfo{}
296 | if err := json.Unmarshal(b, ci); err != nil {
297 | return nil, err
298 | }
299 | if ci.Locations == nil {
300 | ci.Locations = make(map[string]string)
301 | }
302 | return ci, nil
303 | }
304 |
305 | func (g *Group) save(ctx context.Context, ci *consistentInfo) error {
306 | ci.Serial++
307 | b, err := json.Marshal(ci)
308 | if err != nil {
309 | return err
310 | }
311 | s := base64.StdEncoding.EncodeToString(b)
312 |
313 | for {
314 | oldAI, err := g.info(ctx)
315 | if err != nil {
316 | return err
317 | }
318 | if oldAI.Serial != ci.Serial-1 {
319 | return errUpdateConflict
320 | }
321 | if g.ba.Info == nil {
322 | g.ba.Info = make(map[string]string)
323 | }
324 | g.ba.Info[metaKey+"-"+g.name] = s
325 | err = g.b.Update(ctx, g.ba)
326 | if err == nil {
327 | return nil
328 | }
329 | if !b2.IsUpdateConflict(err) {
330 | return err
331 | }
332 | // Bucket update conflict; try again.
333 | }
334 | }
335 |
336 | // List returns a list of all the group objects.
337 | func (g *Group) List(ctx context.Context) ([]string, error) {
338 | ci, err := g.info(ctx)
339 | if err != nil {
340 | return nil, err
341 | }
342 | var l []string
343 | for name := range ci.Locations {
344 | l = append(l, name)
345 | }
346 | return l, nil
347 | }
348 |
349 | // A Mutex is a sync.Locker that is backed by data in B2.
350 | type Mutex struct {
351 | g *Group
352 | name string
353 | ctx context.Context
354 | }
355 |
356 | // Lock locks the mutex. If the mutex is already locked, lock will wait,
357 | // polling at 1 second intervals, until it can acquire the lock.
358 | func (m *Mutex) Lock() {
359 | cont := errors.New("continue")
360 | for {
361 | err := m.g.Operate(m.ctx, m.name, func(b []byte) ([]byte, error) {
362 | if len(b) != 0 {
363 | return nil, cont
364 | }
365 | return []byte{1}, nil
366 | })
367 | if err == nil {
368 | return
369 | }
370 | if err != cont {
371 | panic(err)
372 | }
373 | time.Sleep(time.Second)
374 | }
375 | }
376 |
377 | // Unlock unconditionally unlocks the mutex. This allows programs to clear
378 | // stale locks.
379 | func (m *Mutex) Unlock() {
380 | if err := m.g.Operate(m.ctx, m.name, func([]byte) ([]byte, error) {
381 | return nil, nil
382 | }); err != nil {
383 | panic(err)
384 | }
385 | }
386 |
387 | type consistentInfo struct {
388 | Version int
389 |
390 | // Serial is incremented for every version saved. If we ensure that
391 | // current.Serial = 1 + previous.Serial, and that the bucket metadata is
392 | // updated cleanly, then we know that the version we saved is the direct
393 | // successor to the version we had. If the bucket metadata doesn't update
394 | // cleanly, but the serial relation holds true for the new AI struct, then we
395 | // can retry without bothering the user. However, if the serial relation no
396 | // longer holds true, it means someone else has updated AI and we have to ask
397 | // the user to redo everything they've done.
398 | //
399 | // However, it is still necessary for higher level constructs to confirm that
400 | // the serial number they expect is good. The writer does this, for example,
401 | // by comparing the "key" of the file it is replacing.
402 | Serial int
403 | Locations map[string]string
404 | }
405 |
406 | func random() (string, error) {
407 | b := make([]byte, 20)
408 | if _, err := rand.Read(b); err != nil {
409 | return "", err
410 | }
411 | return fmt.Sprintf("%x", b), nil
412 | }
413 |
--------------------------------------------------------------------------------
/internal/b2types/b2types.go:
--------------------------------------------------------------------------------
1 | // Copyright 2016, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // Package b2types implements internal types common to the B2 API.
16 | package b2types
17 |
18 | // You know what would be amazing? If I could autogen this from like a JSON
19 | // file. Wouldn't that be amazing? That would be amazing.
20 |
21 | const (
22 | V3api = "/b2api/v3/"
23 | )
24 |
25 | type ErrorMessage struct {
26 | Status int `json:"status"`
27 | Code string `json:"code"`
28 | Msg string `json:"message"`
29 | }
30 |
31 | type StorageAPIInfo struct {
32 | AbsMinPartSize int `json:"absoluteMinimumPartSize"`
33 | URI string `json:"apiUrl"`
34 | Bucket string `json:"bucketId"`
35 | Name string `json:"bucketName"`
36 | Capabilities []string `json:"capabilities"`
37 | DownloadURI string `json:"downloadUrl"`
38 | Type string `json:"storageApi"`
39 | Prefix string `json:"namePrefix"`
40 | PartSize int `json:"recommendedPartSize"`
41 | S3URI string `json:"s3ApiUrl"`
42 | }
43 |
44 | type GroupsAPIInfo struct {
45 | Capabilities []string `json:"capabilities"`
46 | URI string `json:"groupsApiUrl"`
47 | Type string `json:"storageApi"`
48 | }
49 |
50 | type APIInfo struct {
51 | StorageAPIInfo *StorageAPIInfo `json:"storageApi,omitempty"`
52 | GroupsAPIInfo *GroupsAPIInfo `json:"groupsApi,omitempty"`
53 | }
54 |
55 | type AuthorizeAccountResponse struct {
56 | AccountID string `json:"accountId"`
57 | KeyExpiration int64 `json:"applicationKeyExpirationTimestamp"`
58 | APIInfo *APIInfo `json:"apiInfo"`
59 | AuthToken string `json:"authorizationToken"`
60 | }
61 |
62 | type Allowance struct {
63 | }
64 |
65 | type LifecycleRule struct {
66 | DaysHiddenUntilDeleted int `json:"daysFromHidingToDeleting,omitempty"`
67 | DaysNewUntilHidden int `json:"daysFromUploadingToHiding,omitempty"`
68 | Prefix string `json:"fileNamePrefix"`
69 | }
70 |
71 | type CreateBucketRequest struct {
72 | AccountID string `json:"accountId"`
73 | Name string `json:"bucketName"`
74 | Type string `json:"bucketType"`
75 | Info map[string]string `json:"bucketInfo"`
76 | LifecycleRules []LifecycleRule `json:"lifecycleRules"`
77 | }
78 |
79 | type CreateBucketResponse struct {
80 | BucketID string `json:"bucketId"`
81 | Name string `json:"bucketName"`
82 | Type string `json:"bucketType"`
83 | Info map[string]string `json:"bucketInfo"`
84 | LifecycleRules []LifecycleRule `json:"lifecycleRules"`
85 | Revision int `json:"revision"`
86 |
87 | CORSRules []CORSRule `json:"corsRules,omitempty"`
88 | DefaultRetention string `json:"defaultRetention,omitempty"`
89 | DefaultServerSideEncryption *ServerSideEncryption `json:"defaultServerSideEncryption,omitempty"`
90 | FileLockConfig *FileLockConfiguration `json:"fileLockConfiguration,omitempty"`
91 | ReplicationConfiguration *ReplicationConfigurationResponse `json:"replicationConfiguration,omitempty"`
92 | }
93 |
94 | type FileLockConfiguration struct {
95 | IsClientAuthorizedToRead bool `json:"isClientAuthorizedToRead"`
96 | Val struct {
97 | DefaultRetention struct {
98 | Mode *string `json:"mode"`
99 | Period struct {
100 | Duration int `json:"duration"`
101 | Unit *string `json:"unit"`
102 | } `json:"period"`
103 | } `json:"defaultRetention"`
104 | IsFileLockEnabled bool `json:"isFileLockEnabled"`
105 | } `json:"value"`
106 | }
107 |
108 | type DeleteBucketRequest struct {
109 | AccountID string `json:"accountId"`
110 | BucketID string `json:"bucketId"`
111 | }
112 |
113 | type ListBucketsRequest struct {
114 | AccountID string `json:"accountId"`
115 | Bucket string `json:"bucketId,omitempty"`
116 | Name string `json:"bucketName,omitempty"`
117 | BucketTypes []string `json:"bucketTypes,omitempty"`
118 | }
119 |
120 | type ListBucketsResponse struct {
121 | Buckets []CreateBucketResponse `json:"buckets"`
122 | }
123 |
124 | type UpdateBucketRequest struct {
125 | AccountID string `json:"accountId"`
126 | BucketID string `json:"bucketId"`
127 | Type string `json:"bucketType,omitempty"`
128 | Info map[string]string `json:"bucketInfo,omitempty"`
129 | LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
130 | IfRevisionIs int `json:"ifRevisionIs,omitempty"`
131 |
132 | CORSRules []CORSRule `json:"corsRules,omitempty"`
133 | DefaultRetention *Retention `json:"defaultRetention,omitempty"`
134 | DefaultServerSideEncryption *ServerSideEncryption `json:"defaultServerSideEncryption,omitempty"`
135 | FileLockEnabled bool `json:"fileLockEnabled,omitempty"`
136 | ReplicationConfiguration *ReplicationConfiguration `json:"replicationConfiguration,omitempty"`
137 | }
138 |
139 | type UpdateBucketResponse CreateBucketResponse
140 |
141 | type GetUploadURLRequest struct {
142 | BucketID string `json:"bucketId"`
143 | }
144 |
145 | type GetUploadURLResponse struct {
146 | URI string `json:"uploadUrl"`
147 | Token string `json:"authorizationToken"`
148 | }
149 |
150 | type UploadFileResponse GetFileInfoResponse
151 |
152 | type DeleteFileVersionRequest struct {
153 | Name string `json:"fileName"`
154 | FileID string `json:"fileId"`
155 | }
156 |
157 | type StartLargeFileRequest struct {
158 | BucketID string `json:"bucketId"`
159 | Name string `json:"fileName"`
160 | ContentType string `json:"contentType"`
161 | Info map[string]string `json:"fileInfo,omitempty"`
162 | }
163 |
164 | type StartLargeFileResponse struct {
165 | ID string `json:"fileId"`
166 | }
167 |
168 | type CancelLargeFileRequest struct {
169 | ID string `json:"fileId"`
170 | }
171 |
172 | type ListPartsRequest struct {
173 | ID string `json:"fileId"`
174 | Start int `json:"startPartNumber"`
175 | Count int `json:"maxPartCount"`
176 | }
177 |
178 | type ListPartsResponse struct {
179 | Next int `json:"nextPartNumber"`
180 | Parts []struct {
181 | ID string `json:"fileId"`
182 | Number int `json:"partNumber"`
183 | SHA1 string `json:"contentSha1"`
184 | Size int64 `json:"contentLength"`
185 | } `json:"parts"`
186 | }
187 |
188 | type getUploadPartURLRequest struct {
189 | ID string `json:"fileId"`
190 | }
191 |
192 | type getUploadPartURLResponse struct {
193 | URL string `json:"uploadUrl"`
194 | Token string `json:"authorizationToken"`
195 | }
196 |
197 | type FinishLargeFileRequest struct {
198 | ID string `json:"fileId"`
199 | Hashes []string `json:"partSha1Array"`
200 | }
201 |
202 | type FinishLargeFileResponse struct {
203 | Name string `json:"fileName"`
204 | FileID string `json:"fileId"`
205 | Timestamp int64 `json:"uploadTimestamp"`
206 | Action string `json:"action"`
207 | }
208 |
209 | type ListFileNamesRequest struct {
210 | BucketID string `json:"bucketId"`
211 | Count int `json:"maxFileCount"`
212 | Continuation string `json:"startFileName,omitempty"`
213 | Prefix string `json:"prefix,omitempty"`
214 | Delimiter string `json:"delimiter,omitempty"`
215 | }
216 |
217 | type ListFileNamesResponse struct {
218 | Continuation string `json:"nextFileName"`
219 | Files []GetFileInfoResponse `json:"files"`
220 | }
221 |
222 | type ListFileVersionsRequest struct {
223 | BucketID string `json:"bucketId"`
224 | Count int `json:"maxFileCount"`
225 | StartName string `json:"startFileName,omitempty"`
226 | StartID string `json:"startFileId,omitempty"`
227 | Prefix string `json:"prefix,omitempty"`
228 | Delimiter string `json:"delimiter,omitempty"`
229 | }
230 |
231 | type ListFileVersionsResponse struct {
232 | NextName string `json:"nextFileName"`
233 | NextID string `json:"nextFileId"`
234 | Files []GetFileInfoResponse `json:"files"`
235 | }
236 |
237 | type HideFileRequest struct {
238 | BucketID string `json:"bucketId"`
239 | File string `json:"fileName"`
240 | }
241 |
242 | type HideFileResponse struct {
243 | ID string `json:"fileId"`
244 | Timestamp int64 `json:"uploadTimestamp"`
245 | Action string `json:"action"`
246 | }
247 |
248 | type GetFileInfoRequest struct {
249 | ID string `json:"fileId"`
250 | }
251 |
252 | type GetFileInfoResponse struct {
253 | FileID string `json:"fileId,omitempty"`
254 | Name string `json:"fileName,omitempty"`
255 | AccountID string `json:"accountId,omitempty"`
256 | BucketID string `json:"bucketId,omitempty"`
257 | Size int64 `json:"contentLength,omitempty"`
258 | SHA1 string `json:"contentSha1,omitempty"`
259 | MD5 string `json:"contentMd5,omitempty"`
260 | ContentType string `json:"contentType,omitempty"`
261 | Info map[string]string `json:"fileInfo,omitempty"`
262 | Action string `json:"action,omitempty"`
263 | Timestamp int64 `json:"uploadTimestamp,omitempty"`
264 | }
265 |
266 | type GetDownloadAuthorizationRequest struct {
267 | BucketID string `json:"bucketId"`
268 | Prefix string `json:"fileNamePrefix"`
269 | Valid int `json:"validDurationInSeconds"`
270 | ContentDisposition string `json:"b2ContentDisposition,omitempty"`
271 | }
272 |
273 | type GetDownloadAuthorizationResponse struct {
274 | BucketID string `json:"bucketId"`
275 | Prefix string `json:"fileNamePrefix"`
276 | Token string `json:"authorizationToken"`
277 | }
278 |
279 | type ListUnfinishedLargeFilesRequest struct {
280 | BucketID string `json:"bucketId"`
281 | Continuation string `json:"startFileId,omitempty"`
282 | Count int `json:"maxFileCount,omitempty"`
283 | }
284 |
285 | type ListUnfinishedLargeFilesResponse struct {
286 | Files []GetFileInfoResponse `json:"files"`
287 | Continuation string `json:"nextFileId"`
288 | }
289 |
290 | type CreateKeyRequest struct {
291 | AccountID string `json:"accountId"`
292 | Capabilities []string `json:"capabilities"`
293 | Name string `json:"keyName"`
294 | Valid int `json:"validDurationInSeconds,omitempty"`
295 | BucketID string `json:"bucketId,omitempty"`
296 | Prefix string `json:"namePrefix,omitempty"`
297 | }
298 |
299 | type Key struct {
300 | ID string `json:"applicationKeyId"`
301 | Secret string `json:"applicationKey"`
302 | AccountID string `json:"accountId"`
303 | Capabilities []string `json:"capabilities"`
304 | Name string `json:"keyName"`
305 | Expires int64 `json:"expirationTimestamp"`
306 | BucketID string `json:"bucketId"`
307 | Prefix string `json:"namePrefix"`
308 | }
309 |
310 | type CreateKeyResponse Key
311 |
312 | type DeleteKeyRequest struct {
313 | KeyID string `json:"applicationKeyId"`
314 | }
315 |
316 | type DeleteKeyResponse Key
317 |
318 | type ListKeysRequest struct {
319 | AccountID string `json:"accountId"`
320 | Max int `json:"maxKeyCount,omitempty"`
321 | Next string `json:"startApplicationKeyId,omitempty"`
322 | }
323 |
324 | type ListKeysResponse struct {
325 | Keys []Key `json:"keys"`
326 | Next string `json:"nextApplicationKeyId"`
327 | }
328 |
329 | type ServerSideEncryption struct {
330 | Mode string `json:"mode"`
331 | Algorithm string `json:"algorithm"`
332 | }
333 |
334 | type Retention struct {
335 | Mode string `json:"mode,omitempty"`
336 | Period *RetentionPeriod `json:"period,omitempty"`
337 | }
338 |
339 | type RetentionPeriod struct {
340 | Duration int `json:"duration,omitempty"`
341 | Unit string `json:"unit,omitempty"`
342 | }
343 |
344 | type CORSRule struct {
345 | Name string `json:"corsRuleName,omitempty"`
346 | AllowedOrigins []string `json:"allowedOrigins,omitempty"`
347 | AllowedHeaders []string `json:"allowedHeaders,omitempty"`
348 | AllowedOperations []string `json:"allowedOperations,omitempty"`
349 | ExposeHeaders []string `json:"exposeHeaders,omitempty"`
350 | MaxAgeSeconds int `json:"maxAgeSeconds,omitempty"`
351 | }
352 |
353 | type ReplicationConfigurationResponse struct {
354 | IsClientAuthorizedToRead bool `json:"isClientAuthorizedToRead,omitempty"`
355 | Value *ReplicationConfiguration `json:"value,omitempty"`
356 | }
357 |
358 | type ReplicationConfiguration struct {
359 | AsReplicationSource *AsReplicationSource `json:"asReplicationSource,omitempty"`
360 | AsReplicationDestination *AsReplicationDestination `json:"asReplicationDestination,omitempty"`
361 | }
362 |
363 | type AsReplicationSource struct {
364 | ReplicationRules []ReplicationRules `json:"replicationRules,omitempty"`
365 | KeyID string `json:"sourceApplicationKeyId,omitempty"`
366 | }
367 |
368 | type AsReplicationDestination struct {
369 | SourceToDestinationKeyMapping map[string]string `json:"sourceToDestinationKeyMapping,omitempty"`
370 | }
371 |
372 | type ReplicationRules struct {
373 | DestinationBucketID string `json:"destinationBucketId"`
374 | FileNamePrefix string `json:"fileNamePrefix"`
375 | IncludeExistingFiles bool `json:"includeExistingFiles"`
376 | IsEnabled bool `json:"isEnabled"`
377 | Priority int `json:"priority"`
378 | ReplicationRuleName string `json:"replicationRuleName"`
379 | }
380 |
--------------------------------------------------------------------------------
/b2/writer.go:
--------------------------------------------------------------------------------
1 | // Copyright 2016, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package b2
16 |
17 | import (
18 | "context"
19 | "errors"
20 | "fmt"
21 | "io"
22 | "sync"
23 | "sync/atomic"
24 | "time"
25 |
26 | "github.com/Backblaze/blazer/internal/blog"
27 | "github.com/Backblaze/blazer/internal/retry"
28 | )
29 |
30 | var ErrClosed = errors.New("file already closed")
31 |
32 | // Writer writes data into Backblaze. It automatically switches to the large
33 | // file API if the file exceeds ChunkSize bytes. Due to that and other
34 | // Backblaze API details, there is a large buffer.
35 | //
36 | // Changes to public Writer attributes must be made before the first call to
37 | // Write.
38 | type Writer struct {
39 | // ConcurrentUploads is number of different threads sending data concurrently
40 | // to Backblaze for large files. This can increase performance greatly, as
41 | // each thread will hit a different endpoint. However, there is a ChunkSize
42 | // buffer for each thread. Values less than 1 are equivalent to 1.
43 | ConcurrentUploads int
44 |
45 | // Resume an upload. If true, and the upload is a large file, and a file of
46 | // the same name was started but not finished, then assume that we are
47 | // resuming that file, and don't upload duplicate chunks.
48 | Resume bool
49 |
50 | // ChunkSize is the size, in bytes, of each individual part, when writing
51 | // large files, and also when determining whether to upload a file normally
52 | // or when to split it into parts. The default is 100M (1e8) The minimum is
53 | // 5M (5e6); values less than this are not an error, but will fail. The
54 | // maximum is 5GB (5e9).
55 | ChunkSize int
56 |
57 | // UseFileBuffer controls whether to use an in-memory buffer (the default) or
58 | // scratch space on the file system. If this is true, b2 will save chunks in
59 | // FileBufferDir.
60 | UseFileBuffer bool
61 |
62 | // FileBufferDir specifies the directory where scratch files are kept. If
63 | // blank, os.TempDir() is used.
64 | FileBufferDir string
65 |
66 | contentType string
67 | info map[string]string
68 |
69 | csize int
70 | ctx context.Context
71 | cancel context.CancelFunc // cancels ctx
72 | ctxf func() context.Context
73 | errf func(error)
74 | ready chan chunk
75 | cdone chan struct{}
76 | wg sync.WaitGroup
77 | start sync.Once
78 | once sync.Once
79 | done sync.Once
80 | file beLargeFileInterface
81 | seen map[int]string
82 | everStarted bool
83 | newBuffer func() (writeBuffer, error)
84 |
85 | closed bool
86 | closeWrite sync.RWMutex
87 |
88 | o *Object
89 | name string
90 |
91 | wmux sync.RWMutex
92 | cidx int
93 | w writeBuffer
94 |
95 | emux sync.RWMutex
96 | err error
97 |
98 | smux sync.RWMutex
99 | smap map[int]*meteredReader
100 | }
101 |
102 | type chunk struct {
103 | id int
104 | buf writeBuffer
105 | }
106 |
107 | func (w *Writer) setErr(err error) {
108 | if err == nil || err == io.EOF {
109 | return
110 | }
111 | w.emux.Lock()
112 | defer w.emux.Unlock()
113 | if w.err != nil {
114 | return
115 | }
116 | blog.V(1).Infof("error writing %s: %v", w.name, err)
117 | w.err = err
118 | w.cancel()
119 | if w.ctxf == nil {
120 | return
121 | }
122 | if w.errf == nil {
123 | w.errf = func(error) {}
124 | }
125 | w.errf(w.file.cancel(w.ctxf()))
126 | }
127 |
128 | func (w *Writer) getErr() error {
129 | w.emux.RLock()
130 | defer w.emux.RUnlock()
131 | return w.err
132 | }
133 |
134 | func (w *Writer) registerChunk(id int, r *meteredReader) {
135 | w.smux.Lock()
136 | w.smap[id] = r
137 | w.smux.Unlock()
138 | }
139 |
140 | func (w *Writer) completeChunk(id int) {
141 | w.smux.Lock()
142 | w.smap[id] = nil
143 | w.smux.Unlock()
144 | }
145 |
146 | var gid int32
147 |
148 | func (w *Writer) thread() {
149 | w.wg.Add(1)
150 | go func() {
151 | defer w.wg.Done()
152 | id := atomic.AddInt32(&gid, 1)
153 | fc, err := w.file.getUploadPartURL(w.ctx)
154 | if err != nil {
155 | w.setErr(err)
156 | return
157 | }
158 | for {
159 | var cnk chunk
160 | select {
161 | case cnk = <-w.ready:
162 | case <-w.cdone:
163 | return
164 | }
165 | if sha, ok := w.seen[cnk.id]; ok {
166 | if sha != cnk.buf.Hash() {
167 | w.setErr(errors.New("resumable upload was requested, but chunks don't match"))
168 | return
169 | }
170 | cnk.buf.Close()
171 | w.completeChunk(cnk.id)
172 | blog.V(2).Infof("skipping chunk %d", cnk.id)
173 | continue
174 | }
175 | blog.V(2).Infof("thread %d handling chunk %d", id, cnk.id)
176 | r, err := cnk.buf.Reader()
177 | if err != nil {
178 | w.setErr(err)
179 | return
180 | }
181 | mr := &meteredReader{r: r, size: cnk.buf.Len()}
182 | w.registerChunk(cnk.id, mr)
183 | err = retry.Do(
184 | w.ctx,
185 | func() error {
186 | n, err := fc.uploadPart(w.ctx, mr, cnk.buf.Hash(), cnk.buf.Len(), cnk.id)
187 | if err != nil {
188 | return err
189 | }
190 | if n != cnk.buf.Len() {
191 | return fmt.Errorf("wrote %d of %d", n, cnk.buf.Len())
192 | }
193 | return nil
194 | },
195 | retry.DynamicAttempts(func(attempt uint, attempts uint, err error) uint {
196 | if attempt == 1 {
197 | return w.o.b.r.maxReuploads(err) + 1
198 | }
199 | return attempts
200 | }),
201 | retry.DynamicDelay(func(attempt uint, delay time.Duration, err error) time.Duration {
202 | return retry.Backoff(delay)
203 | }),
204 | retry.RetryIf(func(attempt uint, err error) bool {
205 | return w.o.b.r.reupload(err)
206 | }),
207 | retry.OnRetry(func(attempt uint, err error) error {
208 | blog.V(1).Infof("b2 writer: error: %v; retrying", err)
209 | f, err := w.file.getUploadPartURL(w.ctx)
210 | if err != nil {
211 | return err
212 | }
213 | fc = f
214 | return nil
215 | }),
216 | retry.WithAfter(after),
217 | )
218 | if err != nil {
219 | w.setErr(err)
220 | w.completeChunk(cnk.id)
221 | cnk.buf.Close()
222 | return
223 | }
224 | w.completeChunk(cnk.id)
225 | cnk.buf.Close()
226 | blog.V(2).Infof("chunk %d handled", cnk.id)
227 | }
228 | }()
229 | }
230 |
231 | func (w *Writer) init() {
232 | w.start.Do(func() {
233 | w.everStarted = true
234 | w.smux.Lock()
235 | w.smap = make(map[int]*meteredReader)
236 | w.smux.Unlock()
237 | w.o.b.c.addWriter(w)
238 | w.csize = w.ChunkSize
239 | if w.csize == 0 {
240 | w.csize = 1e8
241 | }
242 | if w.newBuffer == nil {
243 | w.newBuffer = func() (writeBuffer, error) { return newMemoryBuffer(), nil }
244 | if w.UseFileBuffer {
245 | w.newBuffer = func() (writeBuffer, error) { return newFileBuffer(w.FileBufferDir) }
246 | }
247 | }
248 | v, err := w.newBuffer()
249 | if err != nil {
250 | w.setErr(err)
251 | return
252 | }
253 | w.w = v
254 | })
255 | }
256 |
257 | // Write satisfies the io.Writer interface.
258 | func (w *Writer) Write(p []byte) (int, error) {
259 | w.closeWrite.RLock()
260 | defer w.closeWrite.RUnlock()
261 | if w.closed {
262 | return 0, ErrClosed
263 | }
264 | if len(p) == 0 {
265 | return 0, nil
266 | }
267 | w.init()
268 | if err := w.getErr(); err != nil {
269 | return 0, err
270 | }
271 | left := w.csize - w.w.Len()
272 | if len(p) < left {
273 | return w.w.Write(p)
274 | }
275 | i, err := w.w.Write(p[:left])
276 | if err != nil {
277 | w.setErr(err)
278 | return i, err
279 | }
280 | if err := w.sendChunk(); err != nil {
281 | w.setErr(err)
282 | return i, w.getErr()
283 | }
284 | k, err := w.Write(p[left:])
285 | if err != nil {
286 | w.setErr(err)
287 | }
288 | return i + k, err
289 | }
290 |
291 | func (w *Writer) getUploadURL(ctx context.Context) (beURLInterface, error) {
292 | u := w.o.b.urlPool.get()
293 | if u == nil {
294 | return w.o.b.b.getUploadURL(w.ctx)
295 | }
296 |
297 | return u, nil
298 | }
299 |
300 | func (w *Writer) simpleWriteFile() error {
301 | ue, err := w.getUploadURL(w.ctx)
302 | if err != nil {
303 | return err
304 | }
305 | // This defer needs to be in a func() so that we put whatever the value of ue
306 | // is at function exit.
307 | defer func() { w.o.b.urlPool.put(ue) }()
308 | sha1 := w.w.Hash()
309 | ctype := w.contentType
310 | if ctype == "" {
311 | ctype = "application/octet-stream"
312 | }
313 | r, err := w.w.Reader()
314 | if err != nil {
315 | return err
316 | }
317 | mr := &meteredReader{r: r, size: w.w.Len()}
318 | w.registerChunk(1, mr)
319 | defer w.completeChunk(1)
320 |
321 | err = retry.Do(
322 | w.ctx,
323 | func() error {
324 | f, err := ue.uploadFile(w.ctx, mr, int(w.w.Len()), w.name, ctype, sha1, w.info)
325 | if err != nil {
326 | return err
327 | }
328 | w.o.f = f
329 | return nil
330 | },
331 | retry.DynamicAttempts(func(attempt uint, attempts uint, err error) uint {
332 | if attempt == 1 {
333 | return w.o.b.r.maxReuploads(err) + 1
334 | }
335 | return attempts
336 | }),
337 | retry.DynamicDelay(func(attempt uint, delay time.Duration, err error) time.Duration {
338 | return retry.Backoff(delay)
339 | }),
340 | retry.RetryIf(func(attempt uint, err error) bool {
341 | return w.o.b.r.reupload(err)
342 | }),
343 | retry.OnRetry(func(attempt uint, err error) error {
344 | blog.V(2).Infof("b2 writer: %v; retrying", err)
345 | u, err := w.o.b.b.getUploadURL(w.ctx)
346 | if err != nil {
347 | return err
348 | }
349 | ue = u
350 | return nil
351 | }),
352 | retry.WithAfter(after),
353 | )
354 | if err != nil {
355 | return err
356 | }
357 |
358 | return nil
359 | }
360 |
361 | func (w *Writer) getLargeFile() (beLargeFileInterface, error) {
362 | if !w.Resume {
363 | ctype := w.contentType
364 | if ctype == "" {
365 | ctype = "application/octet-stream"
366 | }
367 | return w.o.b.b.startLargeFile(w.ctx, w.name, ctype, w.info)
368 | }
369 | var got bool
370 | iter := w.o.b.List(w.ctx, ListPrefix(w.name), ListUnfinished())
371 | var fi beFileInterface
372 | for iter.Next() {
373 | obj := iter.Object()
374 | if obj.Name() == w.name {
375 | got = true
376 | fi = obj.f
377 | }
378 | }
379 | if iter.Err() != nil {
380 | return nil, iter.Err()
381 | }
382 | if !got {
383 | w.Resume = false
384 | return w.getLargeFile()
385 | }
386 |
387 | next := 1
388 | seen := make(map[int]string)
389 | var size int64
390 | for {
391 | parts, n, err := fi.listParts(w.ctx, next, 100)
392 | if err != nil {
393 | return nil, err
394 | }
395 | next = n
396 | for _, p := range parts {
397 | seen[p.number()] = p.sha1()
398 | size += p.size()
399 | }
400 | if len(parts) == 0 {
401 | break
402 | }
403 | if next == 0 {
404 | break
405 | }
406 | }
407 | w.seen = make(map[int]string) // copy the map
408 | for id, sha := range seen {
409 | w.seen[id] = sha
410 | }
411 | return fi.compileParts(size, seen), nil
412 | }
413 |
414 | func (w *Writer) sendChunk() error {
415 | var err error
416 | w.once.Do(func() {
417 | lf, e := w.getLargeFile()
418 | if e != nil {
419 | err = e
420 | return
421 | }
422 | w.file = lf
423 | w.ready = make(chan chunk)
424 | w.cdone = make(chan struct{})
425 | if w.ConcurrentUploads < 1 {
426 | w.ConcurrentUploads = 1
427 | }
428 | for i := 0; i < w.ConcurrentUploads; i++ {
429 | w.thread()
430 | }
431 | })
432 | if err != nil {
433 | return err
434 | }
435 |
436 | var cidx = -1
437 | var ww writeBuffer = nil
438 | w.emux.RLock()
439 | defer w.emux.RUnlock()
440 | if w.ctx.Err() == nil {
441 | // Only claim the read lock if we need it
442 | w.wmux.RLock()
443 | cidx = w.cidx + 1
444 | ww = w.w
445 | w.wmux.RUnlock()
446 | } else {
447 | return w.ctx.Err()
448 | }
449 | select {
450 | case <-w.cdone:
451 | return nil
452 | case w.ready <- chunk{
453 | id: cidx,
454 | buf: ww,
455 | }:
456 | case <-w.ctx.Done():
457 | return w.ctx.Err()
458 | }
459 | w.wmux.Lock()
460 | defer w.wmux.Unlock()
461 | w.cidx++
462 | v, err := w.newBuffer()
463 | if err != nil {
464 | return err
465 | }
466 | w.w = v
467 | return nil
468 | }
469 |
470 | // ReadFrom reads all of r into w, returning the first error or no error if r
471 | // returns io.EOF. If r is also an io.Seeker, ReadFrom will stream r directly
472 | // over the wire instead of buffering it locally. This reduces memory usage.
473 | //
474 | // Do not issue multiple calls to ReadFrom, or mix ReadFrom and Write. If you
475 | // have multiple readers you want to concatenate into the same B2 object, use
476 | // an io.MultiReader.
477 | //
478 | // Note that io.Copy will automatically choose to use ReadFrom.
479 | //
480 | // ReadFrom currently doesn't handle w.Resume; if w.Resume is true, ReadFrom
481 | // will act as if r is not an io.Seeker.
482 | func (w *Writer) ReadFrom(r io.Reader) (int64, error) {
483 | rs, ok := r.(io.ReadSeeker)
484 | if !ok || w.Resume {
485 | return copyContext(w.ctx, w, r)
486 | }
487 | blog.V(2).Info("streaming without buffer")
488 | size, err := rs.Seek(0, io.SeekEnd)
489 | if err != nil {
490 | return 0, err
491 | }
492 | var ra io.ReaderAt
493 | if rat, ok := r.(io.ReaderAt); ok {
494 | ra = rat
495 | } else {
496 | ra = enReaderAt(rs)
497 | }
498 | var offset int64
499 | var wrote int64
500 | w.newBuffer = func() (writeBuffer, error) {
501 | left := size - offset
502 | if left <= 0 {
503 | // We're done sending real chunks; send empty chunks from now on so that
504 | // Close() works.
505 | w.newBuffer = func() (writeBuffer, error) { return newMemoryBuffer(), nil }
506 | w.w = newMemoryBuffer()
507 | return nil, io.EOF
508 | }
509 | csize := int64(w.csize)
510 | if left < csize {
511 | csize = left
512 | }
513 | nb := newNonBuffer(ra, offset, csize)
514 | wrote += csize // TODO: this is kind of a total lie
515 | offset += csize
516 | return nb, nil
517 | }
518 | w.init()
519 | if size < int64(w.csize) {
520 | // the magic happens on w.Close()
521 | return size, nil
522 | }
523 | for {
524 | if err := w.sendChunk(); err != nil {
525 | if err != io.EOF {
526 | return wrote, err
527 | }
528 | return wrote, nil
529 | }
530 | }
531 | }
532 |
533 | // Close satisfies the io.Closer interface. It is critical to check the return
534 | // value of Close for all writers.
535 | func (w *Writer) Close() error {
536 | w.done.Do(func() {
537 | w.closeWrite.Lock()
538 | defer w.closeWrite.Unlock()
539 | if !w.everStarted {
540 | w.init()
541 | w.setErr(w.simpleWriteFile())
542 | return
543 | }
544 | defer w.o.b.c.removeWriter(w)
545 | defer func() {
546 | w.wmux.Lock()
547 | defer w.wmux.Unlock()
548 | if err := w.w.Close(); err != nil {
549 | // this is non-fatal, but alarming
550 | blog.V(1).Infof("close %s: %v", w.name, err)
551 | }
552 | }()
553 | // We need the lock to dereference w.cidx and w.w.Len()
554 | w.wmux.RLock()
555 | // Don't defer the RUnlock, since we don't want to be RLocked when we call sendChunk
556 | if w.cidx == 0 {
557 | w.wmux.RUnlock()
558 | w.setErr(w.simpleWriteFile())
559 | return
560 | }
561 | if w.w.Len() > 0 {
562 | w.wmux.RUnlock()
563 | if err := w.sendChunk(); err != nil {
564 | w.setErr(err)
565 | return
566 | }
567 | // Get the lock back, so all code paths have it
568 | w.wmux.RLock()
569 | }
570 | defer w.wmux.RUnlock()
571 | // See https://github.com/Backblaze/blazer/issues/60 for why we use a special
572 | // channel for this.
573 | close(w.cdone)
574 | w.wg.Wait()
575 | err := w.ctx.Err()
576 | var f beFileInterface = nil
577 | if err == nil {
578 | f, err = w.file.finishLargeFile(w.ctx)
579 | }
580 | if err != nil {
581 | w.setErr(err)
582 | return
583 | }
584 | w.o.f = f
585 | w.closed = true
586 | })
587 | return w.getErr()
588 | }
589 |
590 | func (w *Writer) withAttrs(attrs *Attrs) *Writer {
591 | w.contentType = attrs.ContentType
592 | w.info = make(map[string]string)
593 | for k, v := range attrs.Info {
594 | w.info[k] = v
595 | }
596 | if len(w.info) < 10 && attrs.SHA1 != "" {
597 | w.info["large_file_sha1"] = attrs.SHA1
598 | }
599 | if len(w.info) < 10 && !attrs.LastModified.IsZero() {
600 | w.info["src_last_modified_millis"] = fmt.Sprintf("%d", attrs.LastModified.UnixNano()/1e6)
601 | }
602 | return w
603 | }
604 |
605 | // A WriterOption sets Writer-specific behavior.
606 | type WriterOption func(*Writer)
607 |
608 | // WithAttrs attaches the given Attrs to the writer.
609 | func WithAttrsOption(attrs *Attrs) WriterOption {
610 | return func(w *Writer) {
611 | w.withAttrs(attrs)
612 | }
613 | }
614 |
615 | // WithCancelOnError requests the writer, if it has started a large file
616 | // upload, to call b2_cancel_large_file on any permanent error. It calls ctxf
617 | // to obtain a context with which to cancel the file; this is to allow callers
618 | // to set specific timeouts. If errf is non-nil, then it is called with the
619 | // (possibly nil) output of b2_cancel_large_file.
620 | func WithCancelOnError(ctxf func() context.Context, errf func(error)) WriterOption {
621 | return func(w *Writer) {
622 | w.ctxf = ctxf
623 | w.errf = errf
624 | }
625 | }
626 |
627 | // DefaultWriterOptions returns a ClientOption that will apply the given
628 | // WriterOptions to every Writer. These options can be overridden by passing
629 | // new options to NewWriter.
630 | func DefaultWriterOptions(opts ...WriterOption) ClientOption {
631 | return func(c *clientOptions) {
632 | c.writerOpts = opts
633 | }
634 | }
635 |
636 | func (w *Writer) status() *WriterStatus {
637 | w.smux.RLock()
638 | defer w.smux.RUnlock()
639 |
640 | ws := &WriterStatus{
641 | Progress: make([]float64, len(w.smap)),
642 | }
643 |
644 | for i := 1; i <= len(w.smap); i++ {
645 | ws.Progress[i-1] = w.smap[i].done()
646 | }
647 |
648 | return ws
649 | }
650 |
651 | type meteredReader struct {
652 | read int64
653 | size int
654 | r readResetter
655 | mux sync.Mutex
656 | }
657 |
658 | func (mr *meteredReader) Read(p []byte) (int, error) {
659 | mr.mux.Lock()
660 | defer mr.mux.Unlock()
661 | n, err := mr.r.Read(p)
662 | mr.read += int64(n)
663 | return n, err
664 | }
665 |
666 | func (mr *meteredReader) Reset() error {
667 | mr.mux.Lock()
668 | defer mr.mux.Unlock()
669 | mr.read = 0
670 | return mr.r.Reset()
671 | }
672 |
673 | func (mr *meteredReader) done() float64 {
674 | if mr == nil {
675 | return 1
676 | }
677 | read := float64(atomic.LoadInt64(&mr.read))
678 | return read / float64(mr.size)
679 | }
680 |
--------------------------------------------------------------------------------
/b2/baseline.go:
--------------------------------------------------------------------------------
1 | // Copyright 2016, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package b2
16 |
17 | import (
18 | "context"
19 | "io"
20 | "net/http"
21 | "time"
22 |
23 | "github.com/Backblaze/blazer/base"
24 | "github.com/Backblaze/blazer/internal/b2types"
25 | )
26 |
27 | // This file wraps the base package in a thin layer, for testing. It should be
28 | // the only file in b2 that imports base.
29 |
30 | type b2RootInterface interface {
31 | authorizeAccount(context.Context, string, string, clientOptions) error
32 | backoff(error) time.Duration
33 | maxRetries(error) uint
34 | maxReuploads(error) uint
35 | retry(error) bool
36 | reauth(error) bool
37 | reupload(error) bool
38 | createBucket(context.Context, string, string, map[string]string, []LifecycleRule) (b2BucketInterface, error)
39 | listBuckets(context.Context, string, ...string) ([]b2BucketInterface, error)
40 | createKey(context.Context, string, []string, time.Duration, string, string) (b2KeyInterface, error)
41 | listKeys(context.Context, int, string) ([]b2KeyInterface, string, error)
42 | }
43 |
44 | type b2BucketInterface interface {
45 | name() string
46 | btype() string
47 | attrs() *BucketAttrs
48 | id() string
49 | updateBucket(context.Context, *BucketAttrs) error
50 | deleteBucket(context.Context) error
51 | getUploadURL(context.Context) (b2URLInterface, error)
52 | startLargeFile(ctx context.Context, name, contentType string, info map[string]string) (b2LargeFileInterface, error)
53 | listFileNames(context.Context, int, string, string, string) ([]b2FileInterface, string, error)
54 | listFileVersions(context.Context, int, string, string, string, string) ([]b2FileInterface, string, string, error)
55 | listUnfinishedLargeFiles(context.Context, int, string) ([]b2FileInterface, string, error)
56 | downloadFileByName(context.Context, string, int64, int64, bool) (b2FileReaderInterface, error)
57 | hideFile(context.Context, string) (b2FileInterface, error)
58 | getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error)
59 | baseURL() string
60 | s3URL() string
61 | file(string, string) b2FileInterface
62 | }
63 |
64 | type b2URLInterface interface {
65 | reload(context.Context) error
66 | uploadFile(context.Context, io.Reader, int, string, string, string, map[string]string) (b2FileInterface, error)
67 | }
68 |
69 | type b2FileInterface interface {
70 | name() string
71 | id() string
72 | size() int64
73 | timestamp() time.Time
74 | status() string
75 | deleteFileVersion(context.Context) error
76 | getFileInfo(context.Context) (b2FileInfoInterface, error)
77 | listParts(context.Context, int, int) ([]b2FilePartInterface, int, error)
78 | compileParts(int64, map[int]string) b2LargeFileInterface
79 | }
80 |
81 | type b2LargeFileInterface interface {
82 | finishLargeFile(context.Context) (b2FileInterface, error)
83 | getUploadPartURL(context.Context) (b2FileChunkInterface, error)
84 | cancel(context.Context) error
85 | }
86 |
87 | type b2FileChunkInterface interface {
88 | reload(context.Context) error
89 | uploadPart(context.Context, io.Reader, string, int, int) (int, error)
90 | }
91 |
92 | type b2FileReaderInterface interface {
93 | io.ReadCloser
94 | stats() (int, string, string, map[string]string)
95 | id() string
96 | }
97 |
98 | type b2FileInfoInterface interface {
99 | stats() (string, string, int64, string, map[string]string, string, time.Time) // bleck
100 | }
101 |
102 | type b2FilePartInterface interface {
103 | number() int
104 | sha1() string
105 | size() int64
106 | }
107 |
108 | type b2KeyInterface interface {
109 | del(context.Context) error
110 | caps() []string
111 | name() string
112 | expires() time.Time
113 | secret() string
114 | id() string
115 | }
116 |
117 | type b2Root struct {
118 | b *base.B2
119 | }
120 |
121 | type b2Bucket struct {
122 | b *base.Bucket
123 | }
124 |
125 | type b2URL struct {
126 | b *base.URL
127 | }
128 |
129 | type b2File struct {
130 | b *base.File
131 | }
132 |
133 | type b2LargeFile struct {
134 | b *base.LargeFile
135 | }
136 |
137 | type b2FileChunk struct {
138 | b *base.FileChunk
139 | }
140 |
141 | type b2FileReader struct {
142 | b *base.FileReader
143 | }
144 |
145 | type b2FileInfo struct {
146 | b *base.FileInfo
147 | }
148 |
149 | type b2FilePart struct {
150 | b *base.FilePart
151 | }
152 |
153 | type b2Key struct {
154 | b *base.Key
155 | }
156 |
157 | func (b *b2Root) authorizeAccount(ctx context.Context, account, key string, c clientOptions) error {
158 | var aopts []base.AuthOption
159 | ct := &clientTransport{client: c.client}
160 | if c.transport != nil {
161 | ct.rt = c.transport
162 | }
163 | aopts = append(aopts, base.Transport(ct))
164 | if c.failSomeUploads {
165 | aopts = append(aopts, base.FailSomeUploads())
166 | }
167 | if c.expireTokens {
168 | aopts = append(aopts, base.ExpireSomeAuthTokens())
169 | }
170 | if c.capExceeded {
171 | aopts = append(aopts, base.ForceCapExceeded())
172 | }
173 | if c.apiBase != "" {
174 | aopts = append(aopts, base.SetAPIBase(c.apiBase))
175 | }
176 | for _, agent := range c.userAgents {
177 | aopts = append(aopts, base.UserAgent(agent))
178 | }
179 | nb, err := base.AuthorizeAccount(ctx, account, key, aopts...)
180 | if err != nil {
181 | return err
182 | }
183 | if b.b == nil {
184 | b.b = nb
185 | return nil
186 | }
187 | b.b.Update(nb)
188 | return nil
189 | }
190 |
191 | func (b *b2Root) backoff(err error) time.Duration {
192 | if !b.retry(err) {
193 | return 0
194 | }
195 | return base.Backoff(err)
196 | }
197 |
198 | func (b *b2Root) maxRetries(err error) uint {
199 | return base.MaxRetries(err)
200 | }
201 |
202 | func (b *b2Root) maxReuploads(err error) uint {
203 | return base.MaxReuploads(err)
204 | }
205 |
206 | func (*b2Root) retry(err error) bool {
207 | return base.Action(err) == base.Retry
208 | }
209 |
210 | func (*b2Root) reauth(err error) bool {
211 | return base.Action(err) == base.ReAuthenticate
212 | }
213 |
214 | func (*b2Root) reupload(err error) bool {
215 | return base.Action(err) == base.AttemptNewUpload
216 | }
217 |
218 | func (b *b2Root) createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (b2BucketInterface, error) {
219 | var baseRules []base.LifecycleRule
220 | for _, rule := range rules {
221 | baseRules = append(baseRules, base.LifecycleRule{
222 | DaysNewUntilHidden: rule.DaysNewUntilHidden,
223 | DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted,
224 | Prefix: rule.Prefix,
225 | })
226 | }
227 | bucket, err := b.b.CreateBucket(ctx, name, btype, info, baseRules)
228 | if err != nil {
229 | return nil, err
230 | }
231 | return &b2Bucket{bucket}, nil
232 | }
233 |
234 | func (b *b2Root) listBuckets(ctx context.Context, name string, bucketTypes ...string) ([]b2BucketInterface, error) {
235 | buckets, err := b.b.ListBuckets(ctx, name, bucketTypes...)
236 | if err != nil {
237 | return nil, err
238 | }
239 | var rtn []b2BucketInterface
240 | for _, bucket := range buckets {
241 | rtn = append(rtn, &b2Bucket{bucket})
242 | }
243 | return rtn, err
244 | }
245 |
246 | func (b *b2Bucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error {
247 | if attrs == nil {
248 | return nil
249 | }
250 | if attrs.Type != UnknownType {
251 | b.b.Type = string(attrs.Type)
252 | }
253 | if attrs.Info != nil {
254 | b.b.Info = attrs.Info
255 | }
256 | if attrs.LifecycleRules != nil {
257 | rules := []base.LifecycleRule{}
258 | for _, rule := range attrs.LifecycleRules {
259 | rules = append(rules, base.LifecycleRule{
260 | DaysNewUntilHidden: rule.DaysNewUntilHidden,
261 | DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted,
262 | Prefix: rule.Prefix,
263 | })
264 | }
265 | b.b.LifecycleRules = rules
266 | }
267 | if len(attrs.CORSRules) > 0 {
268 | rules := []b2types.CORSRule{}
269 | for _, rule := range attrs.CORSRules {
270 | rules = append(rules, b2types.CORSRule{
271 | AllowedOrigins: rule.AllowedOrigins,
272 | AllowedHeaders: rule.AllowedHeaders,
273 | AllowedOperations: rule.AllowedOperations,
274 | ExposeHeaders: rule.ExposeHeaders,
275 | MaxAgeSeconds: rule.MaxAgeSeconds,
276 | })
277 | }
278 | b.b.CORSRules = rules
279 | }
280 |
281 | if attrs.DefaultRetention != nil {
282 | b.b.DefaultRetention = &b2types.Retention{
283 | Mode: attrs.DefaultRetention.Mode,
284 | Period: &b2types.RetentionPeriod{
285 | Duration: attrs.DefaultRetention.Period.Duration,
286 | Unit: attrs.DefaultRetention.Period.Unit,
287 | },
288 | }
289 | }
290 |
291 | if attrs.DefaultServerSideEncryption != nil {
292 | b.b.DefaultServerSideEncryption = &b2types.ServerSideEncryption{
293 | Algorithm: attrs.DefaultServerSideEncryption.Algorithm,
294 | Mode: attrs.DefaultServerSideEncryption.Mode,
295 | }
296 | }
297 |
298 | b.b.FileLockEnabled = attrs.FileLockEnabled
299 |
300 | if b.b.ReplicationConfiguration != nil {
301 | asRepSource := b2types.AsReplicationSource{
302 | KeyID: attrs.ReplicationConfig.AsReplicationSource.SourceApplicationKeyID,
303 | ReplicationRules: make([]b2types.ReplicationRules, len(attrs.ReplicationConfig.AsReplicationSource.ReplicationRules)),
304 | }
305 |
306 | for i, rule := range attrs.ReplicationConfig.AsReplicationSource.ReplicationRules {
307 | asRepSource.ReplicationRules[i] = b2types.ReplicationRules{
308 | ReplicationRuleName: rule.ReplicationRuleName,
309 | DestinationBucketID: rule.DestinationBucketID,
310 | FileNamePrefix: rule.FileNamePrefix,
311 | IncludeExistingFiles: rule.IncludeExistingFiles,
312 | IsEnabled: rule.IsEnabled,
313 | Priority: rule.Priority,
314 | }
315 | }
316 |
317 | b.b.ReplicationConfiguration = &b2types.ReplicationConfiguration{
318 | AsReplicationSource: &asRepSource,
319 | }
320 | }
321 |
322 | newBucket, err := b.b.Update(ctx)
323 | if err == nil {
324 | b.b = newBucket
325 | }
326 | code, _ := base.Code(err)
327 | if code == 409 {
328 | return b2err{
329 | err: err,
330 | isUpdateConflict: true,
331 | }
332 | }
333 | return err
334 | }
335 |
336 | func (b *b2Root) createKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (b2KeyInterface, error) {
337 | k, err := b.b.CreateKey(ctx, name, caps, valid, bucketID, prefix)
338 | if err != nil {
339 | return nil, err
340 | }
341 | return &b2Key{k}, nil
342 | }
343 |
344 | func (b *b2Root) listKeys(ctx context.Context, max int, next string) ([]b2KeyInterface, string, error) {
345 | keys, next, err := b.b.ListKeys(ctx, max, next)
346 | if err != nil {
347 | return nil, "", err
348 | }
349 | var k []b2KeyInterface
350 | for _, key := range keys {
351 | k = append(k, &b2Key{key})
352 | }
353 | return k, next, nil
354 | }
355 |
356 | func (b *b2Bucket) deleteBucket(ctx context.Context) error {
357 | return b.b.DeleteBucket(ctx)
358 | }
359 |
360 | func (b *b2Bucket) name() string {
361 | return b.b.Name
362 | }
363 |
364 | func (b *b2Bucket) btype() string {
365 | return b.b.Type
366 | }
367 |
368 | func (b *b2Bucket) attrs() *BucketAttrs {
369 | var rules []LifecycleRule
370 | for _, rule := range b.b.LifecycleRules {
371 | rules = append(rules, LifecycleRule{
372 | DaysNewUntilHidden: rule.DaysNewUntilHidden,
373 | DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted,
374 | Prefix: rule.Prefix,
375 | })
376 | }
377 | return &BucketAttrs{
378 | LifecycleRules: rules,
379 | Info: b.b.Info,
380 | Type: BucketType(b.b.Type),
381 | }
382 | }
383 |
384 | func (b *b2Bucket) id() string { return b.b.ID }
385 |
386 | func (b *b2Bucket) getUploadURL(ctx context.Context) (b2URLInterface, error) {
387 | url, err := b.b.GetUploadURL(ctx)
388 | if err != nil {
389 | return nil, err
390 | }
391 | return &b2URL{url}, nil
392 | }
393 |
394 | func (b *b2Bucket) startLargeFile(ctx context.Context, name, ct string, info map[string]string) (b2LargeFileInterface, error) {
395 | lf, err := b.b.StartLargeFile(ctx, name, ct, info)
396 | if err != nil {
397 | return nil, err
398 | }
399 | return &b2LargeFile{lf}, nil
400 | }
401 |
402 | func (b *b2Bucket) listFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]b2FileInterface, string, error) {
403 | fs, c, err := b.b.ListFileNames(ctx, count, continuation, prefix, delimiter)
404 | if err != nil {
405 | return nil, "", err
406 | }
407 | var files []b2FileInterface
408 | for _, f := range fs {
409 | files = append(files, &b2File{f})
410 | }
411 | return files, c, nil
412 | }
413 |
414 | func (b *b2Bucket) listFileVersions(ctx context.Context, count int, nextName, nextID, prefix, delimiter string) ([]b2FileInterface, string, string, error) {
415 | fs, name, id, err := b.b.ListFileVersions(ctx, count, nextName, nextID, prefix, delimiter)
416 | if err != nil {
417 | return nil, "", "", err
418 | }
419 | var files []b2FileInterface
420 | for _, f := range fs {
421 | files = append(files, &b2File{f})
422 | }
423 | return files, name, id, nil
424 | }
425 |
426 | func (b *b2Bucket) listUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]b2FileInterface, string, error) {
427 | fs, cont, err := b.b.ListUnfinishedLargeFiles(ctx, count, continuation)
428 | if err != nil {
429 | return nil, "", err
430 | }
431 | var files []b2FileInterface
432 | for _, f := range fs {
433 | files = append(files, &b2File{f})
434 | }
435 | return files, cont, nil
436 | }
437 |
438 | func (b *b2Bucket) downloadFileByName(ctx context.Context, name string, offset, size int64, header bool) (b2FileReaderInterface, error) {
439 | fr, err := b.b.DownloadFileByName(ctx, name, offset, size, header)
440 | if err != nil {
441 | code, _ := base.Code(err)
442 | switch code {
443 | case http.StatusRequestedRangeNotSatisfiable:
444 | return nil, errNoMoreContent
445 | case http.StatusNotFound:
446 | return nil, b2err{err: err, notFoundErr: true}
447 | }
448 | return nil, err
449 | }
450 | return &b2FileReader{fr}, nil
451 | }
452 |
453 | func (b *b2Bucket) hideFile(ctx context.Context, name string) (b2FileInterface, error) {
454 | f, err := b.b.HideFile(ctx, name)
455 | if err != nil {
456 | return nil, err
457 | }
458 | return &b2File{f}, nil
459 | }
460 |
461 | func (b *b2Bucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration, s string) (string, error) {
462 | return b.b.GetDownloadAuthorization(ctx, p, v, s)
463 | }
464 |
465 | func (b *b2Bucket) baseURL() string {
466 | return b.b.BaseURL()
467 | }
468 |
469 | func (b *b2Bucket) s3URL() string {
470 | return b.b.S3URL()
471 | }
472 |
473 | func (b *b2Bucket) file(id, name string) b2FileInterface { return &b2File{b.b.File(id, name)} }
474 |
475 | func (b *b2URL) uploadFile(ctx context.Context, r io.Reader, size int, name, contentType, sha1 string, info map[string]string) (b2FileInterface, error) {
476 | file, err := b.b.UploadFile(ctx, r, size, name, contentType, sha1, info)
477 | if err != nil {
478 | return nil, err
479 | }
480 | return &b2File{file}, nil
481 | }
482 |
483 | func (b *b2URL) reload(ctx context.Context) error {
484 | return b.b.Reload(ctx)
485 | }
486 |
487 | func (b *b2File) deleteFileVersion(ctx context.Context) error {
488 | return b.b.DeleteFileVersion(ctx)
489 | }
490 |
491 | func (b *b2File) name() string {
492 | return b.b.Name
493 | }
494 |
495 | func (b *b2File) id() string {
496 | return b.b.ID
497 | }
498 |
499 | func (b *b2File) size() int64 {
500 | return b.b.Size
501 | }
502 |
503 | func (b *b2File) timestamp() time.Time {
504 | return b.b.Timestamp
505 | }
506 |
507 | func (b *b2File) status() string {
508 | return b.b.Status
509 | }
510 |
511 | func (b *b2File) getFileInfo(ctx context.Context) (b2FileInfoInterface, error) {
512 | if b.b.Info != nil {
513 | return &b2FileInfo{b.b.Info}, nil
514 | }
515 | fi, err := b.b.GetFileInfo(ctx)
516 | if err != nil {
517 | return nil, err
518 | }
519 | return &b2FileInfo{fi}, nil
520 | }
521 |
522 | func (b *b2File) listParts(ctx context.Context, next, count int) ([]b2FilePartInterface, int, error) {
523 | parts, n, err := b.b.ListParts(ctx, next, count)
524 | if err != nil {
525 | return nil, 0, err
526 | }
527 | var rtn []b2FilePartInterface
528 | for _, part := range parts {
529 | rtn = append(rtn, &b2FilePart{part})
530 | }
531 | return rtn, n, nil
532 | }
533 |
534 | func (b *b2File) compileParts(size int64, seen map[int]string) b2LargeFileInterface {
535 | return &b2LargeFile{b.b.CompileParts(size, seen)}
536 | }
537 |
538 | func (b *b2LargeFile) finishLargeFile(ctx context.Context) (b2FileInterface, error) {
539 | f, err := b.b.FinishLargeFile(ctx)
540 | if err != nil {
541 | return nil, err
542 | }
543 | return &b2File{f}, nil
544 | }
545 |
546 | func (b *b2LargeFile) getUploadPartURL(ctx context.Context) (b2FileChunkInterface, error) {
547 | c, err := b.b.GetUploadPartURL(ctx)
548 | if err != nil {
549 | return nil, err
550 | }
551 | return &b2FileChunk{c}, nil
552 | }
553 |
554 | func (b *b2LargeFile) cancel(ctx context.Context) error {
555 | return b.b.CancelLargeFile(ctx)
556 | }
557 |
558 | func (b *b2FileChunk) reload(ctx context.Context) error {
559 | return b.b.Reload(ctx)
560 | }
561 |
562 | func (b *b2FileChunk) uploadPart(ctx context.Context, r io.Reader, sha1 string, size, index int) (int, error) {
563 | return b.b.UploadPart(ctx, r, sha1, size, index)
564 | }
565 |
566 | func (b *b2FileReader) Read(p []byte) (int, error) {
567 | return b.b.Read(p)
568 | }
569 |
570 | func (b *b2FileReader) Close() error {
571 | return b.b.Close()
572 | }
573 |
574 | func (b *b2FileReader) stats() (int, string, string, map[string]string) {
575 | return b.b.ContentLength, b.b.ContentType, b.b.SHA1, b.b.Info
576 | }
577 |
578 | func (b *b2FileReader) id() string { return b.b.ID }
579 |
580 | func (b *b2FileInfo) stats() (string, string, int64, string, map[string]string, string, time.Time) {
581 | return b.b.Name, b.b.SHA1, b.b.Size, b.b.ContentType, b.b.Info, b.b.Status, b.b.Timestamp
582 | }
583 |
584 | func (b *b2FilePart) number() int { return b.b.Number }
585 | func (b *b2FilePart) sha1() string { return b.b.SHA1 }
586 | func (b *b2FilePart) size() int64 { return b.b.Size }
587 |
588 | func (b *b2Key) del(ctx context.Context) error { return b.b.Delete(ctx) }
589 | func (b *b2Key) caps() []string { return b.b.Capabilities }
590 | func (b *b2Key) name() string { return b.b.Name }
591 | func (b *b2Key) expires() time.Time { return b.b.Expires }
592 | func (b *b2Key) secret() string { return b.b.Secret }
593 | func (b *b2Key) id() string { return b.b.ID }
594 |
--------------------------------------------------------------------------------
/b2/backend.go:
--------------------------------------------------------------------------------
1 | // Copyright 2016, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package b2
16 |
17 | import (
18 | "context"
19 | "io"
20 | "time"
21 |
22 | "github.com/Backblaze/blazer/internal/retry"
23 | )
24 |
25 | // This file wraps the baseline interfaces with backoff and retry semantics.
26 |
27 | type beRootInterface interface {
28 | backoff(error) time.Duration
29 | maxRetries(error) uint
30 | maxReuploads(error) uint
31 | retry(error) bool
32 | reauth(error) bool
33 | reupload(error) bool
34 | authorizeAccount(context.Context, string, string, clientOptions) error
35 | reauthorizeAccount(context.Context) error
36 | createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error)
37 | listBuckets(context.Context, string, ...string) ([]beBucketInterface, error)
38 | createKey(context.Context, string, []string, time.Duration, string, string) (beKeyInterface, error)
39 | listKeys(context.Context, int, string) ([]beKeyInterface, string, error)
40 | }
41 |
42 | type beRoot struct {
43 | account, key string
44 | b2i b2RootInterface
45 | options clientOptions
46 | }
47 |
48 | type beBucketInterface interface {
49 | name() string
50 | btype() BucketType
51 | attrs() *BucketAttrs
52 | id() string
53 | updateBucket(context.Context, *BucketAttrs) error
54 | deleteBucket(context.Context) error
55 | getUploadURL(context.Context) (beURLInterface, error)
56 | startLargeFile(ctx context.Context, name, contentType string, info map[string]string) (beLargeFileInterface, error)
57 | listFileNames(context.Context, int, string, string, string) ([]beFileInterface, string, error)
58 | listFileVersions(context.Context, int, string, string, string, string) ([]beFileInterface, string, string, error)
59 | listUnfinishedLargeFiles(context.Context, int, string) ([]beFileInterface, string, error)
60 | downloadFileByName(context.Context, string, int64, int64, bool) (beFileReaderInterface, error)
61 | hideFile(context.Context, string) (beFileInterface, error)
62 | getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error)
63 | baseURL() string
64 | s3URL() string
65 | file(string, string) beFileInterface
66 | }
67 |
68 | type beBucket struct {
69 | b2bucket b2BucketInterface
70 | ri beRootInterface
71 | }
72 |
73 | type beURLInterface interface {
74 | uploadFile(context.Context, readResetter, int, string, string, string, map[string]string) (beFileInterface, error)
75 | }
76 |
77 | type beURL struct {
78 | b2url b2URLInterface
79 | ri beRootInterface
80 | }
81 |
82 | type beFileInterface interface {
83 | name() string
84 | id() string
85 | size() int64
86 | timestamp() time.Time
87 | status() string
88 | deleteFileVersion(context.Context) error
89 | getFileInfo(context.Context) (beFileInfoInterface, error)
90 | listParts(context.Context, int, int) ([]beFilePartInterface, int, error)
91 | compileParts(int64, map[int]string) beLargeFileInterface
92 | AsLargeFile() beLargeFileInterface
93 | }
94 |
95 | type beFile struct {
96 | b2file b2FileInterface
97 | url beURLInterface
98 | ri beRootInterface
99 | }
100 |
101 | type beLargeFileInterface interface {
102 | finishLargeFile(context.Context) (beFileInterface, error)
103 | getUploadPartURL(context.Context) (beFileChunkInterface, error)
104 | cancel(context.Context) error
105 | }
106 |
107 | type beLargeFile struct {
108 | b2largeFile b2LargeFileInterface
109 | ri beRootInterface
110 | }
111 |
112 | type beFileChunkInterface interface {
113 | reload(context.Context) error
114 | uploadPart(context.Context, readResetter, string, int, int) (int, error)
115 | }
116 |
117 | type beFileChunk struct {
118 | b2fileChunk b2FileChunkInterface
119 | ri beRootInterface
120 | }
121 |
122 | type beFileReaderInterface interface {
123 | io.ReadCloser
124 | stats() (int, string, string, map[string]string)
125 | id() string
126 | }
127 |
128 | type beFileReader struct {
129 | b2fileReader b2FileReaderInterface
130 | ri beRootInterface
131 | }
132 |
133 | type beFileInfoInterface interface {
134 | stats() (string, string, int64, string, map[string]string, string, time.Time)
135 | }
136 |
137 | type beFilePartInterface interface {
138 | number() int
139 | sha1() string
140 | size() int64
141 | }
142 |
143 | type beFilePart struct {
144 | b2filePart b2FilePartInterface
145 | ri beRootInterface
146 | }
147 |
148 | type beFileInfo struct {
149 | name string
150 | sha string
151 | size int64
152 | ct string
153 | info map[string]string
154 | status string
155 | stamp time.Time
156 | }
157 |
158 | type beKeyInterface interface {
159 | del(context.Context) error
160 | caps() []string
161 | name() string
162 | expires() time.Time
163 | secret() string
164 | id() string
165 | }
166 |
167 | type beKey struct {
168 | b2i beRootInterface
169 | k b2KeyInterface
170 | }
171 |
172 | func (r *beRoot) backoff(err error) time.Duration { return r.b2i.backoff(err) }
173 | func (r *beRoot) maxRetries(err error) uint { return r.b2i.maxRetries(err) }
174 | func (r *beRoot) maxReuploads(err error) uint { return r.b2i.maxReuploads(err) }
175 | func (r *beRoot) retry(err error) bool { return r.b2i.retry(err) }
176 | func (r *beRoot) reauth(err error) bool { return r.b2i.reauth(err) }
177 | func (r *beRoot) reupload(err error) bool { return r.b2i.reupload(err) }
178 |
179 | func (r *beRoot) authorizeAccount(ctx context.Context, account, key string, c clientOptions) error {
180 | f := func() error {
181 | if err := r.b2i.authorizeAccount(ctx, account, key, c); err != nil {
182 | return err
183 | }
184 | r.account = account
185 | r.key = key
186 | r.options = c
187 | return nil
188 | }
189 | return withBackoff(ctx, r, f)
190 | }
191 |
192 | func (r *beRoot) reauthorizeAccount(ctx context.Context) error {
193 | return r.authorizeAccount(ctx, r.account, r.key, r.options)
194 | }
195 |
196 | func (r *beRoot) createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error) {
197 | var bi beBucketInterface
198 | f := func() error {
199 | g := func() error {
200 | bucket, err := r.b2i.createBucket(ctx, name, btype, info, rules)
201 | if err != nil {
202 | return err
203 | }
204 | bi = &beBucket{
205 | b2bucket: bucket,
206 | ri: r,
207 | }
208 | return nil
209 | }
210 | return withReauth(ctx, r, g)
211 | }
212 | if err := withBackoff(ctx, r, f); err != nil {
213 | return nil, err
214 | }
215 | return bi, nil
216 | }
217 |
218 | func (r *beRoot) listBuckets(ctx context.Context, name string, bucketTypes ...string) ([]beBucketInterface, error) {
219 | var buckets []beBucketInterface
220 | f := func() error {
221 | g := func() error {
222 | bs, err := r.b2i.listBuckets(ctx, name, bucketTypes...)
223 | if err != nil {
224 | return err
225 | }
226 | for _, b := range bs {
227 | buckets = append(buckets, &beBucket{
228 | b2bucket: b,
229 | ri: r,
230 | })
231 | }
232 | return nil
233 | }
234 | return withReauth(ctx, r, g)
235 | }
236 | if err := withBackoff(ctx, r, f); err != nil {
237 | return nil, err
238 | }
239 | return buckets, nil
240 | }
241 |
242 | func (r *beRoot) createKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (beKeyInterface, error) {
243 | var k *beKey
244 | f := func() error {
245 | g := func() error {
246 | got, err := r.b2i.createKey(ctx, name, caps, valid, bucketID, prefix)
247 | if err != nil {
248 | return err
249 | }
250 | k = &beKey{
251 | b2i: r,
252 | k: got,
253 | }
254 | return nil
255 | }
256 | return withReauth(ctx, r, g)
257 | }
258 | if err := withBackoff(ctx, r, f); err != nil {
259 | return nil, err
260 | }
261 | return k, nil
262 | }
263 |
264 | func (r *beRoot) listKeys(ctx context.Context, max int, next string) ([]beKeyInterface, string, error) {
265 | var keys []beKeyInterface
266 | var cur string
267 | f := func() error {
268 | g := func() error {
269 | got, n, err := r.b2i.listKeys(ctx, max, next)
270 | if err != nil {
271 | return err
272 | }
273 | cur = n
274 | for _, g := range got {
275 | keys = append(keys, &beKey{
276 | b2i: r,
277 | k: g,
278 | })
279 | }
280 | return nil
281 | }
282 | return withReauth(ctx, r, g)
283 | }
284 | if err := withBackoff(ctx, r, f); err != nil {
285 | return nil, "", err
286 | }
287 | return keys, cur, nil
288 | }
289 |
290 | func (b *beBucket) name() string { return b.b2bucket.name() }
291 | func (b *beBucket) btype() BucketType { return BucketType(b.b2bucket.btype()) }
292 | func (b *beBucket) attrs() *BucketAttrs { return b.b2bucket.attrs() }
293 | func (b *beBucket) id() string { return b.b2bucket.id() }
294 |
295 | func (b *beBucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error {
296 | f := func() error {
297 | g := func() error {
298 | return b.b2bucket.updateBucket(ctx, attrs)
299 | }
300 | return withReauth(ctx, b.ri, g)
301 | }
302 | return withBackoff(ctx, b.ri, f)
303 | }
304 |
305 | func (b *beBucket) deleteBucket(ctx context.Context) error {
306 | f := func() error {
307 | g := func() error {
308 | return b.b2bucket.deleteBucket(ctx)
309 | }
310 | return withReauth(ctx, b.ri, g)
311 | }
312 | return withBackoff(ctx, b.ri, f)
313 | }
314 |
315 | func (b *beBucket) getUploadURL(ctx context.Context) (beURLInterface, error) {
316 | var url beURLInterface
317 | f := func() error {
318 | g := func() error {
319 | u, err := b.b2bucket.getUploadURL(ctx)
320 | if err != nil {
321 | return err
322 | }
323 | url = &beURL{
324 | b2url: u,
325 | ri: b.ri,
326 | }
327 | return nil
328 | }
329 | return withReauth(ctx, b.ri, g)
330 | }
331 | if err := withBackoff(ctx, b.ri, f); err != nil {
332 | return nil, err
333 | }
334 | return url, nil
335 | }
336 |
337 | func (b *beBucket) startLargeFile(ctx context.Context, name, ct string, info map[string]string) (beLargeFileInterface, error) {
338 | var file beLargeFileInterface
339 | f := func() error {
340 | g := func() error {
341 | f, err := b.b2bucket.startLargeFile(ctx, name, ct, info)
342 | if err != nil {
343 | return err
344 | }
345 | file = &beLargeFile{
346 | b2largeFile: f,
347 | ri: b.ri,
348 | }
349 | return nil
350 | }
351 | return withReauth(ctx, b.ri, g)
352 | }
353 | if err := withBackoff(ctx, b.ri, f); err != nil {
354 | return nil, err
355 | }
356 | return file, nil
357 | }
358 |
359 | func (b *beBucket) listFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]beFileInterface, string, error) {
360 | var cont string
361 | var files []beFileInterface
362 | f := func() error {
363 | g := func() error {
364 | fs, c, err := b.b2bucket.listFileNames(ctx, count, continuation, prefix, delimiter)
365 | if err != nil {
366 | return err
367 | }
368 | cont = c
369 | for _, f := range fs {
370 | files = append(files, &beFile{
371 | b2file: f,
372 | ri: b.ri,
373 | })
374 | }
375 | return nil
376 | }
377 | return withReauth(ctx, b.ri, g)
378 | }
379 | if err := withBackoff(ctx, b.ri, f); err != nil {
380 | return nil, "", err
381 | }
382 | return files, cont, nil
383 | }
384 |
385 | func (b *beBucket) listFileVersions(ctx context.Context, count int, nextName, nextID, prefix, delimiter string) ([]beFileInterface, string, string, error) {
386 | var name, id string
387 | var files []beFileInterface
388 | f := func() error {
389 | g := func() error {
390 | fs, n, d, err := b.b2bucket.listFileVersions(ctx, count, nextName, nextID, prefix, delimiter)
391 | if err != nil {
392 | return err
393 | }
394 | name = n
395 | id = d
396 | for _, f := range fs {
397 | files = append(files, &beFile{
398 | b2file: f,
399 | ri: b.ri,
400 | })
401 | }
402 | return nil
403 | }
404 | return withReauth(ctx, b.ri, g)
405 | }
406 | if err := withBackoff(ctx, b.ri, f); err != nil {
407 | return nil, "", "", err
408 | }
409 | return files, name, id, nil
410 | }
411 |
412 | func (b *beBucket) listUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]beFileInterface, string, error) {
413 | var cont string
414 | var files []beFileInterface
415 | f := func() error {
416 | g := func() error {
417 | fs, c, err := b.b2bucket.listUnfinishedLargeFiles(ctx, count, continuation)
418 | if err != nil {
419 | return err
420 | }
421 | cont = c
422 | for _, f := range fs {
423 | files = append(files, &beFile{
424 | b2file: f,
425 | ri: b.ri,
426 | })
427 | }
428 | return nil
429 | }
430 | return withReauth(ctx, b.ri, g)
431 | }
432 | if err := withBackoff(ctx, b.ri, f); err != nil {
433 | return nil, "", err
434 | }
435 | return files, cont, nil
436 | }
437 |
438 | func (b *beBucket) downloadFileByName(ctx context.Context, name string, offset, size int64, header bool) (beFileReaderInterface, error) {
439 | var reader beFileReaderInterface
440 | f := func() error {
441 | g := func() error {
442 | fr, err := b.b2bucket.downloadFileByName(ctx, name, offset, size, header)
443 | if err != nil {
444 | return err
445 | }
446 | reader = &beFileReader{
447 | b2fileReader: fr,
448 | ri: b.ri,
449 | }
450 | return nil
451 | }
452 | return withReauth(ctx, b.ri, g)
453 | }
454 | if err := withBackoff(ctx, b.ri, f); err != nil {
455 | return nil, err
456 | }
457 | return reader, nil
458 | }
459 |
460 | func (b *beBucket) hideFile(ctx context.Context, name string) (beFileInterface, error) {
461 | var file beFileInterface
462 | f := func() error {
463 | g := func() error {
464 | f, err := b.b2bucket.hideFile(ctx, name)
465 | if err != nil {
466 | return err
467 | }
468 | file = &beFile{
469 | b2file: f,
470 | ri: b.ri,
471 | }
472 | return nil
473 | }
474 | return withReauth(ctx, b.ri, g)
475 | }
476 | if err := withBackoff(ctx, b.ri, f); err != nil {
477 | return nil, err
478 | }
479 | return file, nil
480 | }
481 |
482 | func (b *beBucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration, s string) (string, error) {
483 | var tok string
484 | f := func() error {
485 | g := func() error {
486 | t, err := b.b2bucket.getDownloadAuthorization(ctx, p, v, s)
487 | if err != nil {
488 | return err
489 | }
490 | tok = t
491 | return nil
492 | }
493 | return withReauth(ctx, b.ri, g)
494 | }
495 | if err := withBackoff(ctx, b.ri, f); err != nil {
496 | return "", err
497 | }
498 | return tok, nil
499 | }
500 |
501 | func (b *beBucket) baseURL() string {
502 | return b.b2bucket.baseURL()
503 | }
504 |
505 | func (b *beBucket) s3URL() string {
506 | return b.b2bucket.s3URL()
507 | }
508 |
509 | func (b *beBucket) file(id, name string) beFileInterface {
510 | return &beFile{
511 | b2file: b.b2bucket.file(id, name),
512 | ri: b.ri,
513 | }
514 | }
515 |
516 | func (b *beURL) uploadFile(ctx context.Context, r readResetter, size int, name, ct, sha1 string, info map[string]string) (beFileInterface, error) {
517 | var file beFileInterface
518 | f := func() error {
519 | if err := r.Reset(); err != nil {
520 | return err
521 | }
522 | f, err := b.b2url.uploadFile(ctx, r, size, name, ct, sha1, info)
523 | if err != nil {
524 | return err
525 | }
526 | file = &beFile{
527 | b2file: f,
528 | url: b,
529 | ri: b.ri,
530 | }
531 | return nil
532 | }
533 | if err := withBackoff(ctx, b.ri, f); err != nil {
534 | return nil, err
535 | }
536 | return file, nil
537 | }
538 |
539 | func (b *beFile) deleteFileVersion(ctx context.Context) error {
540 | f := func() error {
541 | g := func() error {
542 | return b.b2file.deleteFileVersion(ctx)
543 | }
544 | return withReauth(ctx, b.ri, g)
545 | }
546 | return withBackoff(ctx, b.ri, f)
547 | }
548 |
549 | func (b *beFile) size() int64 {
550 | return b.b2file.size()
551 | }
552 |
553 | func (b *beFile) name() string {
554 | return b.b2file.name()
555 | }
556 |
557 | func (b *beFile) id() string {
558 | return b.b2file.id()
559 | }
560 |
561 | func (b *beFile) timestamp() time.Time {
562 | return b.b2file.timestamp()
563 | }
564 |
565 | func (b *beFile) status() string {
566 | return b.b2file.status()
567 | }
568 |
569 | func (b *beFile) getFileInfo(ctx context.Context) (beFileInfoInterface, error) {
570 | var fileInfo beFileInfoInterface
571 | f := func() error {
572 | g := func() error {
573 | fi, err := b.b2file.getFileInfo(ctx)
574 | if err != nil {
575 | return err
576 | }
577 | name, sha, size, ct, info, status, stamp := fi.stats()
578 | fileInfo = &beFileInfo{
579 | name: name,
580 | sha: sha,
581 | size: size,
582 | ct: ct,
583 | info: info,
584 | status: status,
585 | stamp: stamp,
586 | }
587 | return nil
588 | }
589 | return withReauth(ctx, b.ri, g)
590 | }
591 | if err := withBackoff(ctx, b.ri, f); err != nil {
592 | return nil, err
593 | }
594 | return fileInfo, nil
595 | }
596 |
597 | func (b *beFile) listParts(ctx context.Context, next, count int) ([]beFilePartInterface, int, error) {
598 | var fpi []beFilePartInterface
599 | var rnxt int
600 | f := func() error {
601 | g := func() error {
602 | ps, n, err := b.b2file.listParts(ctx, next, count)
603 | if err != nil {
604 | return err
605 | }
606 | rnxt = n
607 | for _, p := range ps {
608 | fpi = append(fpi, &beFilePart{
609 | b2filePart: p,
610 | ri: b.ri,
611 | })
612 | }
613 | return nil
614 | }
615 | return withReauth(ctx, b.ri, g)
616 | }
617 | if err := withBackoff(ctx, b.ri, f); err != nil {
618 | return nil, 0, err
619 | }
620 | return fpi, rnxt, nil
621 | }
622 |
623 | func (b *beFile) compileParts(size int64, seen map[int]string) beLargeFileInterface {
624 | return &beLargeFile{
625 | b2largeFile: b.b2file.compileParts(size, seen),
626 | ri: b.ri,
627 | }
628 | }
629 |
630 | // AsLargeFile returns a beLargeFileInterface with the same fields as this beFile
631 | func (b *beFile) AsLargeFile() beLargeFileInterface {
632 | b2file := b.b2file.(*b2File)
633 | return &beLargeFile{
634 | b2largeFile: &b2LargeFile{
635 | b: b2file.b.AsLargeFile(),
636 | },
637 | ri: b.ri,
638 | }
639 | }
640 |
641 | func (b *beLargeFile) getUploadPartURL(ctx context.Context) (beFileChunkInterface, error) {
642 | var chunk beFileChunkInterface
643 | f := func() error {
644 | g := func() error {
645 | fc, err := b.b2largeFile.getUploadPartURL(ctx)
646 | if err != nil {
647 | return err
648 | }
649 | chunk = &beFileChunk{
650 | b2fileChunk: fc,
651 | ri: b.ri,
652 | }
653 | return nil
654 | }
655 | return withReauth(ctx, b.ri, g)
656 | }
657 | if err := withBackoff(ctx, b.ri, f); err != nil {
658 | return nil, err
659 | }
660 | return chunk, nil
661 | }
662 |
663 | func (b *beLargeFile) finishLargeFile(ctx context.Context) (beFileInterface, error) {
664 | var file beFileInterface
665 | f := func() error {
666 | g := func() error {
667 | f, err := b.b2largeFile.finishLargeFile(ctx)
668 | if err != nil {
669 | return err
670 | }
671 | file = &beFile{
672 | b2file: f,
673 | ri: b.ri,
674 | }
675 | return nil
676 | }
677 | return withReauth(ctx, b.ri, g)
678 | }
679 | if err := withBackoff(ctx, b.ri, f); err != nil {
680 | return nil, err
681 | }
682 | return file, nil
683 | }
684 |
685 | func (b *beLargeFile) cancel(ctx context.Context) error {
686 | f := func() error {
687 | g := func() error {
688 | return b.b2largeFile.cancel(ctx)
689 | }
690 | return withReauth(ctx, b.ri, g)
691 | }
692 | return withBackoff(ctx, b.ri, f)
693 | }
694 |
695 | func (b *beFileChunk) reload(ctx context.Context) error {
696 | f := func() error {
697 | g := func() error {
698 | return b.b2fileChunk.reload(ctx)
699 | }
700 | return withReauth(ctx, b.ri, g)
701 | }
702 | return withBackoff(ctx, b.ri, f)
703 | }
704 |
705 | func (b *beFileChunk) uploadPart(ctx context.Context, r readResetter, sha1 string, size, index int) (int, error) {
706 | // no re-auth; pass it back up to the caller so they can get an new upload URI and token
707 | // TODO: we should handle that here probably
708 | var i int
709 | f := func() error {
710 | if err := r.Reset(); err != nil {
711 | return err
712 | }
713 | j, err := b.b2fileChunk.uploadPart(ctx, r, sha1, size, index)
714 | if err != nil {
715 | return err
716 | }
717 | i = j
718 | return nil
719 | }
720 | if err := withBackoff(ctx, b.ri, f); err != nil {
721 | return 0, err
722 | }
723 | return i, nil
724 | }
725 |
726 | func (b *beFileReader) Read(p []byte) (int, error) {
727 | return b.b2fileReader.Read(p)
728 | }
729 |
730 | func (b *beFileReader) Close() error {
731 | return b.b2fileReader.Close()
732 | }
733 |
734 | func (b *beFileReader) stats() (int, string, string, map[string]string) {
735 | return b.b2fileReader.stats()
736 | }
737 |
738 | func (b *beFileReader) id() string { return b.b2fileReader.id() }
739 |
740 | func (b *beFileInfo) stats() (string, string, int64, string, map[string]string, string, time.Time) {
741 | return b.name, b.sha, b.size, b.ct, b.info, b.status, b.stamp
742 | }
743 |
744 | func (b *beFilePart) number() int { return b.b2filePart.number() }
745 | func (b *beFilePart) sha1() string { return b.b2filePart.sha1() }
746 | func (b *beFilePart) size() int64 { return b.b2filePart.size() }
747 |
748 | func (b *beKey) del(ctx context.Context) error {
749 | f := func() error {
750 | return b.k.del(ctx)
751 | }
752 | return withBackoff(ctx, b.b2i, f)
753 | }
754 |
755 | func (b *beKey) caps() []string { return b.k.caps() }
756 | func (b *beKey) name() string { return b.k.name() }
757 | func (b *beKey) expires() time.Time { return b.k.expires() }
758 | func (b *beKey) secret() string { return b.k.secret() }
759 | func (b *beKey) id() string { return b.k.id() }
760 |
761 | var after = time.After
762 |
763 | func withBackoff(ctx context.Context, ri beRootInterface, f func() error) error {
764 | return retry.Do(
765 | ctx,
766 | f,
767 | retry.DynamicAttempts(func(attempt uint, attempts uint, err error) uint {
768 | if attempt == 1 {
769 | return ri.maxRetries(err) + 1
770 | }
771 | return attempts
772 | }),
773 | retry.DynamicDelay(func(attempt uint, delay time.Duration, err error) time.Duration {
774 | bo := ri.backoff(err)
775 | if bo > 0 {
776 | return bo
777 | } else {
778 | if attempt == 1 {
779 | return retry.Backoff(500 * time.Millisecond)
780 | }
781 | return retry.Backoff(delay)
782 | }
783 | }),
784 | retry.RetryIf(func(attempt uint, err error) bool {
785 | return ri.retry(err)
786 | }),
787 | retry.WithAfter(after),
788 | )
789 | }
790 |
791 | func withReauth(ctx context.Context, ri beRootInterface, f func() error) error {
792 | err := f()
793 | if ri.reauth(err) {
794 | if err := ri.reauthorizeAccount(ctx); err != nil {
795 | return err
796 | }
797 | err = f()
798 | }
799 | return err
800 | }
801 |
--------------------------------------------------------------------------------
/b2/b2.go:
--------------------------------------------------------------------------------
1 | // Copyright 2016, the Blazer authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | // Package b2 provides a high-level interface to Backblaze's B2 cloud storage
16 | // service.
17 | //
18 | // It is specifically designed to abstract away the Backblaze API details by
19 | // providing familiar Go interfaces, specifically an io.Writer for object
20 | // storage, and an io.Reader for object download. Handling of transient
21 | // errors, including network and authentication timeouts, is transparent.
22 | //
23 | // Methods that perform network requests accept a context.Context argument.
24 | // Callers should use the context's cancellation abilities to end requests
25 | // early, or to provide timeout or deadline guarantees.
26 | //
27 | // This package is in development and may make API changes.
28 | package b2
29 |
30 | import (
31 | "context"
32 | "fmt"
33 | "io"
34 | "net/http"
35 | "net/url"
36 | "regexp"
37 | "strconv"
38 | "sync"
39 | "time"
40 | )
41 |
42 | // Client is a Backblaze B2 client.
43 | type Client struct {
44 | backend beRootInterface
45 |
46 | slock sync.Mutex
47 | sWriters map[string]*Writer
48 | sReaders map[string]*Reader
49 | sMethods []methodCounter
50 | opts clientOptions
51 | }
52 |
53 | // NewClient creates and returns a new Client with valid B2 service account
54 | // tokens.
55 | func NewClient(ctx context.Context, account, key string, opts ...ClientOption) (*Client, error) {
56 | c := &Client{
57 | backend: &beRoot{
58 | b2i: &b2Root{},
59 | },
60 | sMethods: []methodCounter{
61 | newMethodCounter(time.Minute, time.Second),
62 | newMethodCounter(time.Minute*5, time.Second),
63 | newMethodCounter(time.Hour, time.Minute),
64 | newMethodCounter(0, 0), // forever
65 | },
66 | }
67 | opts = append(opts, client(c))
68 | for _, f := range opts {
69 | f(&c.opts)
70 | }
71 | if err := c.backend.authorizeAccount(ctx, account, key, c.opts); err != nil {
72 | return nil, err
73 | }
74 | return c, nil
75 | }
76 |
77 | type clientOptions struct {
78 | client *Client
79 | transport http.RoundTripper
80 | failSomeUploads bool
81 | expireTokens bool
82 | capExceeded bool
83 | apiBase string
84 | userAgents []string
85 | writerOpts []WriterOption
86 | }
87 |
88 | // A ClientOption allows callers to adjust various per-client settings.
89 | type ClientOption func(*clientOptions)
90 |
91 | // UserAgent sets the User-Agent HTTP header. The default header is
92 | // "blazer/"; the value set here will be prepended to that. This can
93 | // be set multiple times.
94 | //
95 | // A user agent is generally of the form "/ ()".
96 | func UserAgent(agent string) ClientOption {
97 | return func(o *clientOptions) {
98 | o.userAgents = append(o.userAgents, agent)
99 | }
100 | }
101 |
102 | // APIBase returns a ClientOption specifying the URL root of API requests.
103 | func APIBase(url string) ClientOption {
104 | return func(o *clientOptions) {
105 | o.apiBase = url
106 | }
107 | }
108 |
109 | // Transport sets the underlying HTTP transport mechanism. If unset,
110 | // http.DefaultTransport is used.
111 | func Transport(rt http.RoundTripper) ClientOption {
112 | return func(c *clientOptions) {
113 | c.transport = rt
114 | }
115 | }
116 |
117 | // FailSomeUploads requests intermittent upload failures from the B2 service.
118 | // This is mostly useful for testing.
119 | func FailSomeUploads() ClientOption {
120 | return func(c *clientOptions) {
121 | c.failSomeUploads = true
122 | }
123 | }
124 |
125 | // ExpireSomeAuthTokens requests intermittent authentication failures from the
126 | // B2 service.
127 | func ExpireSomeAuthTokens() ClientOption {
128 | return func(c *clientOptions) {
129 | c.expireTokens = true
130 | }
131 | }
132 |
133 | // ForceCapExceeded requests a cap limit from the B2 service. This causes all
134 | // uploads to be treated as if they would exceed the configure B2 capacity.
135 | func ForceCapExceeded() ClientOption {
136 | return func(c *clientOptions) {
137 | c.capExceeded = true
138 | }
139 | }
140 |
141 | func client(cl *Client) ClientOption {
142 | return func(c *clientOptions) {
143 | c.client = cl
144 | }
145 | }
146 |
147 | type clientTransport struct {
148 | client *Client
149 | rt http.RoundTripper
150 | }
151 |
152 | func (ct *clientTransport) RoundTrip(r *http.Request) (*http.Response, error) {
153 | m := r.Header.Get("X-Blazer-Method")
154 | t := ct.rt
155 | if t == nil {
156 | t = http.DefaultTransport
157 | }
158 | b := time.Now()
159 | resp, err := t.RoundTrip(r)
160 | e := time.Now()
161 | if err != nil {
162 | return resp, err
163 | }
164 | if m != "" && ct.client != nil {
165 | ct.client.slock.Lock()
166 | m := method{
167 | name: m,
168 | duration: e.Sub(b),
169 | status: resp.StatusCode,
170 | }
171 | for _, counter := range ct.client.sMethods {
172 | counter.record(m)
173 | }
174 | ct.client.slock.Unlock()
175 | }
176 | return resp, nil
177 | }
178 |
179 | // Bucket is a reference to a B2 bucket.
180 | type Bucket struct {
181 | b beBucketInterface
182 | r beRootInterface
183 |
184 | c *Client
185 | urlPool *urlPool
186 | }
187 |
188 | type BucketType string
189 |
190 | const (
191 | UnknownType BucketType = ""
192 | Private = "allPrivate"
193 | Public = "allPublic"
194 | Snapshot = "snapshot"
195 | )
196 |
197 | // BucketAttrs holds a bucket's metadata attributes.
198 | type BucketAttrs struct {
199 | // Type lists or sets the new bucket type. If Type is UnknownType during a
200 | // bucket.Update, the type is not changed.
201 | Type BucketType
202 |
203 | // Info records user data, limited to ten keys. If nil during a
204 | // bucket.Update, the existing bucket info is not modified. A bucket's
205 | // metadata can be removed by updating with an empty map.
206 | Info map[string]string
207 |
208 | // Reports or sets bucket lifecycle rules. If nil during a bucket.Update,
209 | // the rules are not modified. A bucket's rules can be removed by updating
210 | // with an empty slice.
211 | LifecycleRules []LifecycleRule
212 |
213 | // The initial list (a JSON array) of CORS rules for this bucket.
214 | // See CORS Rules (https://www.backblaze.com/docs/cloud-storage-cross-origin-resource-sharing-rules) for an overview and the rule structure.
215 | CORSRules []CORSRule
216 |
217 | // The default Object Lock retention settings for this bucket. See Object Lock for an overview and the parameter structure.
218 | // If specified, the existing default bucket retention settings will be replaced with the new settings. If not specified,
219 | // the setting will remain unchanged. Setting the value requires the writeBucketRetentions capability and that the bucket is Object Lock-enabled.
220 | // Object Lock: https://www.backblaze.com/docs/cloud-storage-enable-object-lock-with-the-native-api.
221 | DefaultRetention *Retention
222 | // The default server-side encryption settings for this bucket. See Server-Side Encryption settings for new files uploaded to this bucket.
223 | // This field is filtered based on application key capabilities; readBucketEncryption capability is required to access the value.
224 | // See Server-Side Encryption for an overview and the parameter structure. If specified, the existing default bucket encryption settings will
225 | // be replaced with the new settings. If not specified, the setting will remain unchanged. Setting the value requires the writeBucketEncryption
226 | // capability. Server-Side Encryption settings: https://www.backblaze.com/docs/cloud-storage-enable-server-side-encryption-with-the-native-api.
227 | DefaultServerSideEncryption *ServerSideEncryption
228 |
229 | // If present, the Boolean value specifies whether the bucket has Object Lock enabled. Once Object Lock is enabled on a bucket, it cannot be disabled.
230 | // A value of true will be accepted if you have writeBucketRetentions capability. But you cannot enable Object Lock on a restricted bucket
231 | // (e.g. share buckets, snapshot) or on a bucket that contains source replication configuration.
232 | // A value of false will only be accepted if the bucket does not have Object Lock enabled. After enabling Object Lock on a bucket containing files with
233 | // metadata over the lower 2,048 byte limit, API requests to b2_update_file_retention and b2_update_file_legal_hold will be rejected. This is because
234 | // setting file retention or legal hold on a file adds additional headers when the file is downloaded, for example, with b2_download_file_by_name.
235 | // In such cases, you can use b2_copy_file with a REPLACE metadataDirective to copy the file, give it less metadata, and also specify the fileRetention
236 | // and legalHold parameters. The original file can then be deleted with b2_delete_file_version.
237 | // Object Lock: https://www.backblaze.com/docs/cloud-storage-enable-object-lock-with-the-native-api.
238 | FileLockEnabled bool
239 |
240 | // The configuration to create a Replication Rule. See Cloud Replication Rules. At least one of the asReplicationSource or asReplicationDestination
241 | // parameters is required, but they can also both be present.
242 | // NOTE: The first time that you configure Cloud Replication, complete the following tasks to ensure that you have the correct permission:
243 | // 1. Verify your email address.
244 | // 2. Have a payment history on file or make a payment.
245 | ReplicationConfig *ReplicationConfiguration
246 | }
247 |
248 | // DefaultServerSideEncryption sets the bucket defaultServerSideEncryption to { "mode": "SSE-B2", "algorithm": "AES256" }
249 | // Must call Bucket.Update() to apply the change.
250 | func DefaultServerSideEncryption() *ServerSideEncryption {
251 | return &ServerSideEncryption{
252 | Mode: "SSE-B2",
253 | Algorithm: "AES256",
254 | }
255 | }
256 |
257 | type ServerSideEncryption struct {
258 | Mode string
259 | Algorithm string
260 | }
261 |
262 | type CORSRule struct {
263 | Name string
264 | AllowedOrigins []string
265 | AllowedHeaders []string
266 | AllowedOperations []string
267 | ExposeHeaders []string
268 | MaxAgeSeconds int
269 | }
270 |
271 | type Retention struct {
272 | Mode string
273 | Period *RetentionPeriod
274 | }
275 |
276 | type RetentionPeriod struct {
277 | Duration int
278 | Unit string
279 | }
280 |
281 | type ReplicationConfiguration struct {
282 | AsReplicationSource AsReplicationSource
283 | }
284 |
285 | type AsReplicationSource struct {
286 | ReplicationRules []ReplicationRules
287 | SourceApplicationKeyID string
288 | }
289 |
290 | type ReplicationRules struct {
291 | DestinationBucketID string
292 | FileNamePrefix string
293 | IncludeExistingFiles bool
294 | IsEnabled bool
295 | Priority int
296 | ReplicationRuleName string
297 | }
298 |
299 | // A LifecycleRule describes an object's life cycle, namely how many days after
300 | // uploading an object should be hidden, and after how many days hidden an
301 | // object should be deleted. Multiple rules may not apply to the same file or
302 | // set of files. Be careful when using this feature; it can (is designed to)
303 | // delete your data.
304 | type LifecycleRule struct {
305 | // Prefix specifies all the files in the bucket to which this rule applies.
306 | Prefix string
307 |
308 | // DaysUploadedUntilHidden specifies the number of days after which a file
309 | // will automatically be hidden. 0 means "do not automatically hide new
310 | // files".
311 | DaysNewUntilHidden int
312 |
313 | // DaysHiddenUntilDeleted specifies the number of days after which a hidden
314 | // file is deleted. 0 means "do not automatically delete hidden files".
315 | DaysHiddenUntilDeleted int
316 | }
317 |
318 | type b2err struct {
319 | err error
320 | notFoundErr bool
321 | isUpdateConflict bool
322 | }
323 |
324 | func (e b2err) Error() string {
325 | return e.err.Error()
326 | }
327 |
328 | // IsNotExist reports whether a given error indicates that an object or bucket
329 | // does not exist.
330 | func IsNotExist(err error) bool {
331 | berr, ok := err.(b2err)
332 | if !ok {
333 | return false
334 | }
335 | return berr.notFoundErr
336 | }
337 |
338 | const uploadURLPoolSize = 100
339 |
340 | type urlPool struct {
341 | ch chan beURLInterface
342 | }
343 |
344 | func newURLPool() *urlPool {
345 | return &urlPool{ch: make(chan beURLInterface, uploadURLPoolSize)}
346 | }
347 |
348 | func (p *urlPool) get() beURLInterface {
349 | select {
350 | case ue := <-p.ch:
351 | // if the channel has an upload URL available, use that
352 | return ue
353 | default:
354 | // otherwise return nil, a new upload URL needs to be generated
355 | return nil
356 | }
357 | }
358 |
359 | func (p *urlPool) put(u beURLInterface) {
360 | select {
361 | case p.ch <- u:
362 | // put the URL back if possible
363 | default:
364 | // if the channel is full, throw it away
365 | }
366 | }
367 |
368 | // Bucket returns a bucket if it exists.
369 | func (c *Client) Bucket(ctx context.Context, name string) (*Bucket, error) {
370 | buckets, err := c.backend.listBuckets(ctx, name)
371 | if err != nil {
372 | return nil, err
373 | }
374 | for _, bucket := range buckets {
375 | if bucket.name() == name {
376 | return &Bucket{
377 | b: bucket,
378 | r: c.backend,
379 | c: c,
380 | urlPool: newURLPool(),
381 | }, nil
382 | }
383 | }
384 | return nil, b2err{
385 | err: fmt.Errorf("%s: bucket not found", name),
386 | notFoundErr: true,
387 | }
388 | }
389 |
390 | // NewBucket returns a bucket. The bucket is created with the given attributes
391 | // if it does not already exist. If attrs is nil, it is created as a private
392 | // bucket with no info metadata and no lifecycle rules.
393 | func (c *Client) NewBucket(ctx context.Context, name string, attrs *BucketAttrs) (*Bucket, error) {
394 | buckets, err := c.backend.listBuckets(ctx, name)
395 | if err != nil {
396 | return nil, err
397 | }
398 | for _, bucket := range buckets {
399 | if bucket.name() == name {
400 | return &Bucket{
401 | b: bucket,
402 | r: c.backend,
403 | c: c,
404 | urlPool: newURLPool(),
405 | }, nil
406 | }
407 | }
408 | if attrs == nil {
409 | attrs = &BucketAttrs{Type: Private}
410 | }
411 | b, err := c.backend.createBucket(ctx, name, string(attrs.Type), attrs.Info, attrs.LifecycleRules)
412 | if err != nil {
413 | return nil, err
414 | }
415 | return &Bucket{
416 | b: b,
417 | r: c.backend,
418 | c: c,
419 | urlPool: newURLPool(),
420 | }, err
421 | }
422 |
423 | // ListBuckets returns all the available buckets.
424 | func (c *Client) ListBuckets(ctx context.Context, bucketTypes ...string) ([]*Bucket, error) {
425 | bs, err := c.backend.listBuckets(ctx, "", bucketTypes...)
426 | if err != nil {
427 | return nil, err
428 | }
429 | var buckets []*Bucket
430 | for _, b := range bs {
431 | buckets = append(buckets, &Bucket{
432 | b: b,
433 | r: c.backend,
434 | c: c,
435 | urlPool: newURLPool(),
436 | })
437 | }
438 | return buckets, nil
439 | }
440 |
441 | // IsUpdateConflict reports whether a given error is the result of a bucket
442 | // update conflict.
443 | func IsUpdateConflict(err error) bool {
444 | e, ok := err.(b2err)
445 | if !ok {
446 | return false
447 | }
448 | return e.isUpdateConflict
449 | }
450 |
451 | // Update modifies the given bucket with new attributes. It is possible that
452 | // this method could fail with an update conflict, in which case you should
453 | // retrieve the latest bucket attributes with Attrs and try again.
454 | func (b *Bucket) Update(ctx context.Context, attrs *BucketAttrs) error {
455 | return b.b.updateBucket(ctx, attrs)
456 | }
457 |
458 | // Attrs retrieves and returns the current bucket's attributes.
459 | func (b *Bucket) Attrs(ctx context.Context) (*BucketAttrs, error) {
460 | bucket, err := b.c.Bucket(ctx, b.Name())
461 | if err != nil {
462 | return nil, err
463 | }
464 | b.b = bucket.b
465 | return b.b.attrs(), nil
466 | }
467 |
468 | var bNotExist = regexp.MustCompile("Bucket.*does not exist")
469 |
470 | // Delete removes a bucket. The bucket must be empty.
471 | func (b *Bucket) Delete(ctx context.Context) error {
472 | err := b.b.deleteBucket(ctx)
473 | if err == nil {
474 | return err
475 | }
476 | // So, the B2 documentation disagrees with the implementation here, and the
477 | // error code is not really helpful. If the bucket doesn't exist, the error is
478 | // 400, not 404, and the string is "Bucket does not exist". However, the
479 | // documentation says it will be "Bucket id does not exist". In case
480 | // they update the implementation to match the documentation, we're just going
481 | // to regexp over the error message and hope it's okay.
482 | if bNotExist.MatchString(err.Error()) {
483 | return b2err{
484 | err: err,
485 | notFoundErr: true,
486 | }
487 | }
488 | return err
489 | }
490 |
491 | // BaseURL returns the base URL to use for all files uploaded to this bucket.
492 | func (b *Bucket) BaseURL() string {
493 | return b.b.baseURL()
494 | }
495 |
496 | func (b *Bucket) S3URL() string {
497 | return b.b.s3URL()
498 | }
499 |
500 | // Name returns the bucket's name.
501 | func (b *Bucket) Name() string {
502 | return b.b.name()
503 | }
504 |
505 | // Object represents a B2 object.
506 | type Object struct {
507 | attrs *Attrs
508 | name string
509 | f beFileInterface
510 | b *Bucket
511 | }
512 |
513 | // Attrs holds an object's metadata.
514 | type Attrs struct {
515 | Name string // Not used on upload.
516 | Size int64 // Not used on upload.
517 | ContentType string // Used on upload, default is "application/octet-stream".
518 | Status ObjectState // Not used on upload.
519 | UploadTimestamp time.Time // Not used on upload.
520 | SHA1 string // Can be "none" for large files. If set on upload, will be used for large files.
521 | LastModified time.Time // If present, and there are fewer than 10 keys in the Info field, this is saved on upload.
522 | Info map[string]string // Save arbitrary metadata on upload, but limited to 10 keys.
523 | }
524 |
525 | // Name returns an object's name
526 | func (o *Object) Name() string {
527 | return o.name
528 | }
529 |
530 | // ID returns an object's id
531 | func (o *Object) ID() string {
532 | return o.f.id()
533 | }
534 |
535 | // Attrs returns an object's attributes.
536 | func (o *Object) Attrs(ctx context.Context) (*Attrs, error) {
537 | if err := o.ensure(ctx); err != nil {
538 | return nil, err
539 | }
540 | fi, err := o.f.getFileInfo(ctx)
541 | if err != nil {
542 | return nil, err
543 | }
544 | name, sha, size, ct, info, st, stamp := fi.stats()
545 | var state ObjectState
546 | switch st {
547 | case "upload":
548 | state = Uploaded
549 | case "start":
550 | state = Started
551 | case "hide":
552 | state = Hider
553 | case "folder":
554 | state = Folder
555 | }
556 | var mtime time.Time
557 | if v, ok := info["src_last_modified_millis"]; ok {
558 | ms, err := strconv.ParseInt(v, 10, 64)
559 | if err != nil {
560 | return nil, err
561 | }
562 | mtime = time.Unix(ms/1e3, (ms%1e3)*1e6)
563 | delete(info, "src_last_modified_millis")
564 | }
565 | if v, ok := info["large_file_sha1"]; ok {
566 | sha = v
567 | }
568 | return &Attrs{
569 | Name: name,
570 | Size: size,
571 | ContentType: ct,
572 | UploadTimestamp: stamp,
573 | SHA1: sha,
574 | Info: info,
575 | Status: state,
576 | LastModified: mtime,
577 | }, nil
578 | }
579 |
580 | // ObjectState represents the various states an object can be in.
581 | type ObjectState int
582 |
583 | const (
584 | Unknown ObjectState = iota
585 | // Started represents a large upload that has been started but not finished
586 | // or canceled.
587 | Started
588 | // Uploaded represents an object that has finished uploading and is complete.
589 | Uploaded
590 | // Hider represents an object that exists only to hide another object. It
591 | // cannot in itself be downloaded and, in particular, is not a hidden object.
592 | Hider
593 |
594 | // Folder is a special state given to non-objects that are returned during a
595 | // List call with a ListDelimiter option.
596 | Folder
597 | )
598 |
599 | // Object returns a reference to the named object in the bucket. Hidden
600 | // objects cannot be referenced in this manner; they can only be found by
601 | // finding the appropriate reference in ListObjects.
602 | func (b *Bucket) Object(name string) *Object {
603 | return &Object{
604 | name: name,
605 | b: b,
606 | }
607 | }
608 |
609 | // URL returns the full URL to the given object.
610 | func (o *Object) URL() string {
611 | return fmt.Sprintf("%s/file/%s/%s", o.b.BaseURL(), o.b.Name(), o.name)
612 | }
613 |
614 | // NewWriter returns a new writer for the given object. Objects that are
615 | // overwritten are not deleted, but are "hidden".
616 | //
617 | // Callers must close the writer when finished and check the error status.
618 | func (o *Object) NewWriter(ctx context.Context, opts ...WriterOption) *Writer {
619 | ctx, cancel := context.WithCancel(ctx)
620 | w := &Writer{
621 | o: o,
622 | name: o.name,
623 | ctx: ctx,
624 | cancel: cancel,
625 | }
626 | for _, f := range o.b.c.opts.writerOpts {
627 | f(w)
628 | }
629 | for _, f := range opts {
630 | f(w)
631 | }
632 | return w
633 | }
634 |
635 | // NewRangeReader returns a reader for the given object, reading up to length
636 | // bytes. If length is negative, the rest of the object is read.
637 | func (o *Object) NewRangeReader(ctx context.Context, offset, length int64) *Reader {
638 | ctx, cancel := context.WithCancel(ctx)
639 | return &Reader{
640 | ctx: ctx,
641 | cancel: cancel,
642 | o: o,
643 | name: o.name,
644 | chunks: make(map[int]*rchunk),
645 | length: length,
646 | offset: offset,
647 | }
648 | }
649 |
650 | // NewReader returns a reader for the given object.
651 | func (o *Object) NewReader(ctx context.Context) *Reader {
652 | return o.NewRangeReader(ctx, 0, -1)
653 | }
654 |
655 | func (o *Object) ensure(ctx context.Context) error {
656 | if o.f == nil {
657 | f, err := o.b.getObject(ctx, o.name)
658 | if err != nil {
659 | return err
660 | }
661 | o.f = f.f
662 | }
663 | return nil
664 | }
665 |
666 | // Delete removes the given object, if it is a regular file or hide marker
667 | func (o *Object) Delete(ctx context.Context) error {
668 | if err := o.ensure(ctx); err != nil {
669 | return err
670 | }
671 | status := o.f.status()
672 | if status == "upload" || status == "hide" {
673 | return o.f.deleteFileVersion(ctx)
674 | } else {
675 | return fmt.Errorf("%s is not a regular file or hide marker: %s", o.name, status)
676 | }
677 | }
678 |
679 | // Cancel cancels a large file upload
680 | func (o *Object) Cancel(ctx context.Context) error {
681 | if err := o.ensure(ctx); err != nil {
682 | return err
683 | }
684 | status := o.f.status()
685 | if status == "start" {
686 | // b2.List allows you to iterate through b2.Object structs, which each contain a beFileInterface.
687 | // However, we need a beLargeFileInterface to call b2.cancel, so just create a new beLargeFileInterface
688 | // that contains all the same fields as the beFileInterface.
689 | // This isn't great, but the alternative, making large files a kind of file, or having files and
690 | // large files have a common parent, seems to introduce unworkable complexity
691 | lf := o.f.AsLargeFile()
692 | return lf.cancel(ctx)
693 | } else {
694 | return fmt.Errorf("%s is not an unfinished large file: %s", o.name, status)
695 | }
696 | }
697 |
698 | // Hide hides the object from name-based listing.
699 | func (o *Object) Hide(ctx context.Context) error {
700 | if err := o.ensure(ctx); err != nil {
701 | return err
702 | }
703 | _, err := o.b.b.hideFile(ctx, o.name)
704 | return err
705 | }
706 |
707 | // Reveal unhides (if hidden) the named object. If there are multiple objects
708 | // of a given name, it will reveal the most recent.
709 | func (b *Bucket) Reveal(ctx context.Context, name string) error {
710 | iter := b.List(ctx, ListPrefix(name), ListHidden())
711 | for iter.Next() {
712 | obj := iter.Object()
713 | if obj.Name() == name {
714 | if obj.f.status() == "hide" {
715 | return obj.Delete(ctx)
716 | }
717 | return nil
718 | }
719 | if obj.Name() > name {
720 | break
721 | }
722 | }
723 | return b2err{err: fmt.Errorf("%s: not found", name), notFoundErr: true}
724 | }
725 |
726 | // I don't want to import all of ioutil for this.
727 | type discard struct{}
728 |
729 | func (discard) Write(p []byte) (int, error) {
730 | return len(p), nil
731 | }
732 |
733 | func (b *Bucket) getObject(ctx context.Context, name string) (*Object, error) {
734 | fr, err := b.b.downloadFileByName(ctx, name, 0, 0, true)
735 | if err != nil {
736 | return nil, err
737 | }
738 | io.Copy(discard{}, fr)
739 | fr.Close()
740 | return &Object{
741 | name: name,
742 | f: b.b.file(fr.id(), name),
743 | b: b,
744 | }, nil
745 | }
746 |
747 | // AuthToken returns an authorization token that can be used to access objects
748 | // in a private bucket. Only objects that begin with prefix can be accessed.
749 | // The token expires after the given duration.
750 | func (b *Bucket) AuthToken(ctx context.Context, prefix string, valid time.Duration) (string, error) {
751 | return b.b.getDownloadAuthorization(ctx, prefix, valid, "")
752 | }
753 |
754 | // AuthURL returns a URL for the given object with embedded token and,
755 | // possibly, b2ContentDisposition arguments. Leave b2cd blank for no content
756 | // disposition.
757 | func (o *Object) AuthURL(ctx context.Context, valid time.Duration, b2cd string) (*url.URL, error) {
758 | token, err := o.b.b.getDownloadAuthorization(ctx, o.name, valid, b2cd)
759 | if err != nil {
760 | return nil, err
761 | }
762 | urlString := fmt.Sprintf("%s?Authorization=%s", o.URL(), url.QueryEscape(token))
763 | if b2cd != "" {
764 | urlString = fmt.Sprintf("%s&b2ContentDisposition=%s", urlString, url.QueryEscape(b2cd))
765 | }
766 | u, err := url.Parse(urlString)
767 | if err != nil {
768 | return nil, err
769 | }
770 | return u, nil
771 | }
772 |
--------------------------------------------------------------------------------