├── .github ├── dependabot.yml └── workflows │ └── ci.yml ├── LICENSE.md ├── README.md ├── SECURITY.md ├── backend.go ├── backend_nix.go ├── backend_test.go ├── backend_windows.go ├── blob.go ├── blob_test.go ├── commit.go ├── commit_test.go ├── errors.go ├── errors ├── errors.go └── errors_test.go ├── errors_test.go ├── file_storer.go ├── go.mod ├── go.sum ├── memory_storer.go ├── memory_storer_test.go ├── object.go ├── object_db.go ├── object_db_test.go ├── object_reader.go ├── object_reader_test.go ├── object_type.go ├── object_type_test.go ├── object_writer.go ├── object_writer_test.go ├── pack ├── bounds.go ├── bounds_test.go ├── chain.go ├── chain_base.go ├── chain_base_test.go ├── chain_delta.go ├── chain_delta_test.go ├── chain_test.go ├── delayed_object.go ├── errors.go ├── errors_test.go ├── index.go ├── index_decode.go ├── index_decode_test.go ├── index_entry.go ├── index_test.go ├── index_v1.go ├── index_v1_test.go ├── index_v2.go ├── index_v2_test.go ├── index_version.go ├── io.go ├── io_test.go ├── object.go ├── object_test.go ├── packfile.go ├── packfile_decode.go ├── packfile_decode_test.go ├── packfile_test.go ├── set.go ├── set_test.go ├── storage.go ├── type.go └── type_test.go ├── script └── cibuild ├── storage ├── backend.go ├── decompressing_readcloser.go ├── multi_storage.go └── storage.go ├── storer.go ├── tag.go ├── tag_test.go ├── tree.go ├── tree_test.go └── vendor ├── github.com ├── davecgh │ └── go-spew │ │ ├── LICENSE │ │ └── spew │ │ ├── bypass.go │ │ ├── bypasssafe.go │ │ ├── common.go │ │ ├── config.go │ │ ├── doc.go │ │ ├── dump.go │ │ ├── format.go │ │ └── spew.go ├── pmezard │ └── go-difflib │ │ ├── LICENSE │ │ └── difflib │ │ └── difflib.go └── stretchr │ └── testify │ ├── LICENSE │ ├── assert │ ├── assertion_format.go │ ├── assertion_format.go.tmpl │ ├── assertion_forward.go │ ├── assertion_forward.go.tmpl │ ├── assertions.go │ ├── doc.go │ ├── errors.go │ ├── forward_assertions.go │ └── http_assertions.go │ └── require │ ├── 
doc.go │ ├── forward_requirements.go │ ├── require.go │ ├── require.go.tmpl │ ├── require_forward.go │ ├── require_forward.go.tmpl │ └── requirements.go └── modules.txt /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "monthly" 8 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: [push, pull_request] 3 | env: 4 | GOTOOLCHAIN: local 5 | 6 | jobs: 7 | build-go: 8 | name: Default build 9 | strategy: 10 | matrix: 11 | go: ['1.20.x', '1.21.x'] 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v4 15 | - uses: actions/setup-go@v5 16 | with: 17 | go-version: ${{ matrix.go }} 18 | - run: script/cibuild 19 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017- GitHub, Inc. and Git LFS contributors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # gitobj 2 | 3 | [![CI status][ci_badge]][ci_url] 4 | 5 | [ci_badge]: https://github.com/git-lfs/gitobj/workflows/CI/badge.svg 6 | [ci_url]: https://github.com/git-lfs/gitobj/actions?query=workflow%3ACI 7 | 8 | Package `gitobj` reads and writes loose and packed Git objects. 9 | 10 | ## Getting Started 11 | 12 | To access a repository's objects, begin by "opening" that repository for use: 13 | 14 | ```go 15 | package main 16 | 17 | import ( 18 | "github.com/git-lfs/gitobj" 19 | ) 20 | 21 | func main() { 22 | repo, err := gitobj.FromFilesystem("/path/to/repo.git", "") 23 | if err != nil { 24 | panic(err) 25 | } 26 | defer repo.Close() 27 | } 28 | ``` 29 | 30 | You can then open objects for inspection with the [`Blob()`][blob], 31 | [`Commit()`][commit], [`Tag()`][tag], or [`Tree()`][tree] functions: 32 | 33 | [blob]: https://godoc.org/github.com/git-lfs/gitobj#ObjectDatabase.Blob 34 | [commit]: https://godoc.org/github.com/git-lfs/gitobj#ObjectDatabase.Commit 35 | [tag]: https://godoc.org/github.com/git-lfs/gitobj#ObjectDatabase.Tag 36 | [tree]: https://godoc.org/github.com/git-lfs/gitobj#ObjectDatabase.Tree 37 | 38 | ```go 39 | func main() { 40 | repo, err := gitobj.FromFilesystem("/path/to/repo.git", "") 41 | if err != nil { 42 | panic(err) 43 | } 44 | defer repo.Close() 45 | 46 | commit, err 
:= repo.Commit([]byte{...}) 47 | if err != nil { 48 | panic(err) 49 | } 50 | } 51 | ``` 52 | 53 | Once an object is opened or an instance is held, it can be saved to the object 54 | database using the [`WriteBlob()`][wblob], [`WriteCommit()`][wcommit], 55 | [`WriteTag()`][wtag], or [`WriteTree()`][wtree] functions: 56 | 57 | [wblob]: https://godoc.org/github.com/git-lfs/gitobj#ObjectDatabase.WriteBlob 58 | [wcommit]: https://godoc.org/github.com/git-lfs/gitobj#ObjectDatabase.WriteCommit 59 | [wtag]: https://godoc.org/github.com/git-lfs/gitobj#ObjectDatabase.WriteTag 60 | [wtree]: https://godoc.org/github.com/git-lfs/gitobj#ObjectDatabase.WriteTree 61 | 62 | ```go 63 | func main() { 64 | repo, err := gitobj.FromFilesystem("/path/to/repo.git", "") 65 | if err != nil { 66 | panic(err) 67 | } 68 | defer repo.Close() 69 | 70 | commit, err := repo.Commit([]byte{...}) 71 | if err != nil { 72 | panic(err) 73 | } 74 | 75 | commit.Message = "Hello from gitobj!" 76 | commit.ExtraHeaders = append(commit.ExtraHeaders, &gitobj.ExtraHeader{ 77 | K: "Signed-off-by", 78 | V: "Jane Doe <jane@example.com>", 79 | }) 80 | 81 | if _, err := repo.WriteCommit(commit); err != nil { 82 | panic(err) 83 | } 84 | } 85 | ``` 86 | 87 | ### Packed Objects 88 | 89 | Package `gitobj` has support for reading "packed" objects (i.e., objects found 90 | in [packfiles][1]) via package `github.com/git-lfs/gitobj/pack`. Package `pack` 91 | implements searching pack index (`.idx`) files and locating the corresponding 92 | delta-base chain in the appropriate `pack` file. It understands both version 93 | 1 and version 2 of the packfile specification. 94 | 95 | `gitobj` will always try to locate a loose object first. If a loose object 96 | cannot be found with the appropriate SHA-1, the repository's packfile(s) will 97 | be searched. If an object is located in a packfile, that object will be 98 | reconstructed along its delta-base chain and then returned transparently. 
99 | 100 | ### More information 101 | 102 | For more: https://godoc.org/github.com/git-lfs/gitobj. 103 | 104 | ## License 105 | 106 | MIT. 107 | 108 | [1]: https://git-scm.com/book/en/v2/Git-Internals-Packfiles 109 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | Please see 2 | [SECURITY.md](https://github.com/git-lfs/git-lfs/blob/main/SECURITY.md) 3 | in the main Git LFS repository for information on how to report security 4 | vulnerabilities in this package. 5 | -------------------------------------------------------------------------------- /backend.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "bufio" 5 | "hash" 6 | "io" 7 | "os" 8 | "path" 9 | "regexp" 10 | "strconv" 11 | "strings" 12 | 13 | "github.com/git-lfs/gitobj/v2/pack" 14 | "github.com/git-lfs/gitobj/v2/storage" 15 | ) 16 | 17 | // NewFilesystemBackend initializes a new filesystem-based backend, 18 | // optionally with additional alternates as specified in the 19 | // `alternates` variable. The syntax is that of the Git environment variable 20 | // GIT_ALTERNATE_OBJECT_DIRECTORIES. The hash algorithm used is specified by 21 | // the algo parameter. 
22 | func NewFilesystemBackend(root, tmp, alternates string, algo hash.Hash) (storage.Backend, error) { 23 | fsobj := newFileStorer(root, tmp) 24 | packs, err := pack.NewStorage(root, algo) 25 | if err != nil { 26 | return nil, err 27 | } 28 | 29 | storage, err := findAllBackends(fsobj, packs, root, algo) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | storage, err = addAlternatesFromEnvironment(storage, alternates, algo) 35 | if err != nil { 36 | return nil, err 37 | } 38 | 39 | return &filesystemBackend{ 40 | fs: fsobj, 41 | backends: storage, 42 | }, nil 43 | } 44 | 45 | func findAllBackends(mainLoose *fileStorer, mainPacked *pack.Storage, root string, algo hash.Hash) ([]storage.Storage, error) { 46 | storage := make([]storage.Storage, 2) 47 | storage[0] = mainLoose 48 | storage[1] = mainPacked 49 | f, err := os.Open(path.Join(root, "info", "alternates")) 50 | if err != nil { 51 | // No alternates file, no problem. 52 | if err != os.ErrNotExist { 53 | return storage, nil 54 | } 55 | return nil, err 56 | } 57 | defer f.Close() 58 | 59 | scanner := bufio.NewScanner(f) 60 | for scanner.Scan() { 61 | storage, err = addAlternateDirectory(storage, scanner.Text(), algo) 62 | if err != nil { 63 | return nil, err 64 | } 65 | } 66 | 67 | if err := scanner.Err(); err != nil { 68 | return nil, err 69 | } 70 | 71 | return storage, nil 72 | } 73 | 74 | func addAlternateDirectory(s []storage.Storage, dir string, algo hash.Hash) ([]storage.Storage, error) { 75 | s = append(s, newFileStorer(dir, "")) 76 | pack, err := pack.NewStorage(dir, algo) 77 | if err != nil { 78 | return s, err 79 | } 80 | s = append(s, pack) 81 | return s, nil 82 | } 83 | 84 | func addAlternatesFromEnvironment(s []storage.Storage, env string, algo hash.Hash) ([]storage.Storage, error) { 85 | if len(env) == 0 { 86 | return s, nil 87 | } 88 | 89 | for _, dir := range splitAlternateString(env, alternatesSeparator) { 90 | var err error 91 | s, err = addAlternateDirectory(s, dir, algo) 92 | if err 
!= nil { 93 | return nil, err 94 | } 95 | } 96 | return s, nil 97 | } 98 | 99 | var ( 100 | octalEscape = regexp.MustCompile("\\\\[0-7]{1,3}") 101 | hexEscape = regexp.MustCompile("\\\\x[0-9a-fA-F]{2}") 102 | replacements = []struct { 103 | olds string 104 | news string 105 | }{ 106 | {`\a`, "\a"}, 107 | {`\b`, "\b"}, 108 | {`\t`, "\t"}, 109 | {`\n`, "\n"}, 110 | {`\v`, "\v"}, 111 | {`\f`, "\f"}, 112 | {`\r`, "\r"}, 113 | {`\\`, "\\"}, 114 | {`\"`, "\""}, 115 | {`\'`, "'"}, 116 | } 117 | ) 118 | 119 | func splitAlternateString(env string, separator string) []string { 120 | dirs := strings.Split(env, separator) 121 | for i, s := range dirs { 122 | if !strings.HasPrefix(s, `"`) || !strings.HasSuffix(s, `"`) { 123 | continue 124 | } 125 | 126 | // Strip leading and trailing quotation marks 127 | s = s[1 : len(s)-1] 128 | for _, repl := range replacements { 129 | s = strings.Replace(s, repl.olds, repl.news, -1) 130 | } 131 | s = octalEscape.ReplaceAllStringFunc(s, func(inp string) string { 132 | val, _ := strconv.ParseUint(inp[1:], 8, 64) 133 | return string([]byte{byte(val)}) 134 | }) 135 | s = hexEscape.ReplaceAllStringFunc(s, func(inp string) string { 136 | val, _ := strconv.ParseUint(inp[2:], 16, 64) 137 | return string([]byte{byte(val)}) 138 | }) 139 | dirs[i] = s 140 | } 141 | return dirs 142 | } 143 | 144 | // NewMemoryBackend initializes a new memory-based backend. 145 | // 146 | // A value of "nil" is acceptable and indicates that no entries should be added 147 | // to the memory backend at construction time. 
148 | func NewMemoryBackend(m map[string]io.ReadWriter) (storage.Backend, error) { 149 | return &memoryBackend{ms: newMemoryStorer(m)}, nil 150 | } 151 | 152 | type filesystemBackend struct { 153 | fs *fileStorer 154 | backends []storage.Storage 155 | } 156 | 157 | func (b *filesystemBackend) Storage() (storage.Storage, storage.WritableStorage) { 158 | return storage.MultiStorage(b.backends...), b.fs 159 | } 160 | 161 | type memoryBackend struct { 162 | ms *memoryStorer 163 | } 164 | 165 | func (b *memoryBackend) Storage() (storage.Storage, storage.WritableStorage) { 166 | return b.ms, b.ms 167 | } 168 | -------------------------------------------------------------------------------- /backend_nix.go: -------------------------------------------------------------------------------- 1 | // +build !windows 2 | 3 | package gitobj 4 | 5 | const alternatesSeparator = ":" 6 | -------------------------------------------------------------------------------- /backend_test.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "bytes" 5 | "encoding/hex" 6 | "io" 7 | "io/ioutil" 8 | "reflect" 9 | "testing" 10 | 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func TestNewMemoryBackend(t *testing.T) { 15 | backend, err := NewMemoryBackend(nil) 16 | assert.NoError(t, err) 17 | 18 | ro, rw := backend.Storage() 19 | assert.Equal(t, ro, rw) 20 | assert.NotNil(t, ro.(*memoryStorer)) 21 | } 22 | 23 | func TestNewMemoryBackendWithReadOnlyData(t *testing.T) { 24 | sha := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" 25 | oid, err := hex.DecodeString(sha) 26 | 27 | assert.Nil(t, err) 28 | 29 | m := map[string]io.ReadWriter{ 30 | sha: bytes.NewBuffer([]byte{0x1}), 31 | } 32 | 33 | backend, err := NewMemoryBackend(m) 34 | assert.NoError(t, err) 35 | 36 | ro, _ := backend.Storage() 37 | reader, err := ro.Open(oid) 38 | assert.NoError(t, err) 39 | 40 | contents, err := ioutil.ReadAll(reader) 41 | assert.NoError(t, 
err) 42 | assert.Equal(t, []byte{0x1}, contents) 43 | } 44 | 45 | func TestNewMemoryBackendWithWritableData(t *testing.T) { 46 | sha := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" 47 | oid, err := hex.DecodeString(sha) 48 | 49 | assert.Nil(t, err) 50 | 51 | backend, err := NewMemoryBackend(make(map[string]io.ReadWriter)) 52 | assert.NoError(t, err) 53 | 54 | buf := bytes.NewBuffer([]byte{0x1}) 55 | 56 | ro, rw := backend.Storage() 57 | rw.Store(oid, buf) 58 | 59 | reader, err := ro.Open(oid) 60 | assert.NoError(t, err) 61 | 62 | contents, err := ioutil.ReadAll(reader) 63 | assert.NoError(t, err) 64 | assert.Equal(t, []byte{0x1}, contents) 65 | } 66 | 67 | func TestSplitAlternatesString(t *testing.T) { 68 | testCases := []struct { 69 | input string 70 | expected []string 71 | }{ 72 | {"abc", []string{"abc"}}, 73 | {"abc:def", []string{"abc", "def"}}, 74 | {`"abc":def`, []string{"abc", "def"}}, 75 | {`"i\alike\bcomplicated\tstrings":def`, []string{"i\alike\bcomplicated\tstrings", "def"}}, 76 | {`abc:"i\nlike\vcomplicated\fstrings\r":def`, []string{"abc", "i\nlike\vcomplicated\fstrings\r", "def"}}, 77 | {`abc:"uni\xc2\xa9ode":def`, []string{"abc", "uni©ode", "def"}}, 78 | {`abc:"uni\302\251ode\10\0":def`, []string{"abc", "uni©ode\x08\x00", "def"}}, 79 | {`abc:"cookie\\monster\"":def`, []string{"abc", "cookie\\monster\"", "def"}}, 80 | } 81 | 82 | for _, test := range testCases { 83 | actual := splitAlternateString(test.input, ":") 84 | if !reflect.DeepEqual(actual, test.expected) { 85 | t.Errorf("unexpected output for %q: got %v, expected %v", test.input, actual, test.expected) 86 | } 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /backend_windows.go: -------------------------------------------------------------------------------- 1 | // +build windows 2 | 3 | package gitobj 4 | 5 | const alternatesSeparator = ";" 6 | -------------------------------------------------------------------------------- /blob.go: 
-------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "hash" 7 | "io" 8 | "os" 9 | ) 10 | 11 | // Blob represents a Git object of type "blob". 12 | type Blob struct { 13 | // Size is the total uncompressed size of the blob's contents. 14 | Size int64 15 | // Contents is a reader that yields the uncompressed blob contents. It 16 | // may only be read once. It may or may not implement io.ReadSeeker. 17 | Contents io.Reader 18 | 19 | // closeFn is a function that is called to free any resources held by 20 | // the Blob. In particular, this will close a file, if the Blob is 21 | // being read from a file on disk. 22 | closeFn func() error 23 | } 24 | 25 | // NewBlobFromBytes returns a new *Blob that yields the data given. 26 | func NewBlobFromBytes(contents []byte) *Blob { 27 | return &Blob{ 28 | Contents: bytes.NewReader(contents), 29 | Size: int64(len(contents)), 30 | } 31 | } 32 | 33 | // NewBlobFromFile returns a new *Blob that contains the contents of the file 34 | // at location "path" on disk. NewBlobFromFile does not read the file ahead of 35 | // time, and instead defers this task until encoding the blob to the object 36 | // database. 37 | // 38 | // If the file cannot be opened or stat(1)-ed, an error will be returned. 39 | // 40 | // When the blob receives a function call Close(), the file will also be closed, 41 | // and any error encountered in doing so will be returned from Close(). 
42 | func NewBlobFromFile(path string) (*Blob, error) { 43 | f, err := os.Open(path) 44 | if err != nil { 45 | return nil, fmt.Errorf("gitobj: could not open: %s: %s", path, 46 | err) 47 | } 48 | 49 | stat, err := f.Stat() 50 | if err != nil { 51 | return nil, fmt.Errorf("gitobj: could not stat %s: %s", path, 52 | err) 53 | } 54 | 55 | return &Blob{ 56 | Contents: f, 57 | Size: stat.Size(), 58 | 59 | closeFn: func() error { 60 | if err := f.Close(); err != nil { 61 | return fmt.Errorf( 62 | "gitobj: could not close %s: %s", 63 | path, err) 64 | } 65 | return nil 66 | }, 67 | }, nil 68 | } 69 | 70 | // Type implements Object.ObjectType by returning the correct object type for 71 | // Blobs, BlobObjectType. 72 | func (b *Blob) Type() ObjectType { return BlobObjectType } 73 | 74 | // Decode implements Object.Decode and decodes the uncompressed blob contents 75 | // being read. It returns the number of bytes that it consumed off of the 76 | // stream, which is always zero. 77 | // 78 | // If any errors are encountered while reading the blob, they will be returned. 79 | func (b *Blob) Decode(hash hash.Hash, r io.Reader, size int64) (n int, err error) { 80 | b.Size = size 81 | b.Contents = io.LimitReader(r, size) 82 | 83 | b.closeFn = func() error { 84 | if closer, ok := r.(io.Closer); ok { 85 | return closer.Close() 86 | } 87 | return nil 88 | } 89 | 90 | return 0, nil 91 | } 92 | 93 | // Encode encodes the blob's contents to the given io.Writer, "w". If there was 94 | // any error copying the blob's contents, that error will be returned. 95 | // 96 | // Otherwise, the number of bytes written will be returned. 97 | func (b *Blob) Encode(to io.Writer) (n int, err error) { 98 | nn, err := io.Copy(to, b.Contents) 99 | 100 | return int(nn), err 101 | } 102 | 103 | // Closes closes any resources held by the open Blob, or returns nil if there 104 | // were no errors. 
105 | func (b *Blob) Close() error { 106 | if b.closeFn == nil { 107 | return nil 108 | } 109 | return b.closeFn() 110 | } 111 | 112 | // Equal returns whether the receiving and given blobs are equal, or in other 113 | // words, whether they are represented by the same SHA-1 when saved to the 114 | // object database. 115 | func (b *Blob) Equal(other *Blob) bool { 116 | if (b == nil) != (other == nil) { 117 | return false 118 | } 119 | 120 | if b != nil { 121 | return b.Contents == other.Contents && 122 | b.Size == other.Size 123 | } 124 | return true 125 | } 126 | -------------------------------------------------------------------------------- /blob_test.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha1" 6 | "errors" 7 | "io/ioutil" 8 | "strings" 9 | "sync/atomic" 10 | "testing" 11 | 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | func TestBlobReturnsCorrectObjectType(t *testing.T) { 16 | assert.Equal(t, BlobObjectType, new(Blob).Type()) 17 | } 18 | 19 | func TestBlobFromString(t *testing.T) { 20 | given := []byte("example") 21 | glen := len(given) 22 | 23 | b := NewBlobFromBytes(given) 24 | 25 | assert.EqualValues(t, glen, b.Size) 26 | 27 | contents, err := ioutil.ReadAll(b.Contents) 28 | assert.NoError(t, err) 29 | assert.Equal(t, given, contents) 30 | } 31 | 32 | func TestBlobEncoding(t *testing.T) { 33 | const contents = "Hello, world!\n" 34 | 35 | b := &Blob{ 36 | Size: int64(len(contents)), 37 | Contents: strings.NewReader(contents), 38 | } 39 | 40 | var buf bytes.Buffer 41 | if _, err := b.Encode(&buf); err != nil { 42 | t.Fatal(err.Error()) 43 | } 44 | assert.Equal(t, contents, (&buf).String()) 45 | } 46 | 47 | func TestBlobDecoding(t *testing.T) { 48 | const contents = "Hello, world!\n" 49 | from := strings.NewReader(contents) 50 | 51 | b := new(Blob) 52 | n, err := b.Decode(sha1.New(), from, int64(len(contents))) 53 | 54 | assert.Equal(t, 0, n) 
55 | assert.Nil(t, err) 56 | 57 | assert.EqualValues(t, len(contents), b.Size) 58 | 59 | got, err := ioutil.ReadAll(b.Contents) 60 | assert.Nil(t, err) 61 | assert.Equal(t, []byte(contents), got) 62 | } 63 | 64 | func TestBlobCallCloseFn(t *testing.T) { 65 | var calls uint32 66 | 67 | expected := errors.New("some close error") 68 | 69 | b := &Blob{ 70 | closeFn: func() error { 71 | atomic.AddUint32(&calls, 1) 72 | return expected 73 | }, 74 | } 75 | 76 | got := b.Close() 77 | 78 | assert.Equal(t, expected, got) 79 | assert.EqualValues(t, 1, calls) 80 | } 81 | 82 | func TestBlobCanCloseWithoutCloseFn(t *testing.T) { 83 | b := &Blob{ 84 | closeFn: nil, 85 | } 86 | 87 | assert.Nil(t, b.Close()) 88 | } 89 | 90 | func TestBlobEqualReturnsTrueWithUnchangedContents(t *testing.T) { 91 | c := strings.NewReader("Hello, world!") 92 | 93 | b1 := &Blob{Size: int64(c.Len()), Contents: c} 94 | b2 := &Blob{Size: int64(c.Len()), Contents: c} 95 | 96 | assert.True(t, b1.Equal(b2)) 97 | } 98 | 99 | func TestBlobEqualReturnsFalseWithChangedContents(t *testing.T) { 100 | c1 := strings.NewReader("Hello, world!") 101 | c2 := strings.NewReader("Goodbye, world!") 102 | 103 | b1 := &Blob{Size: int64(c1.Len()), Contents: c1} 104 | b2 := &Blob{Size: int64(c2.Len()), Contents: c2} 105 | 106 | assert.False(t, b1.Equal(b2)) 107 | } 108 | 109 | func TestBlobEqualReturnsTrueWhenOneBlobIsNil(t *testing.T) { 110 | b1 := &Blob{Size: 1, Contents: bytes.NewReader([]byte{0xa})} 111 | b2 := (*Blob)(nil) 112 | 113 | assert.False(t, b1.Equal(b2)) 114 | assert.False(t, b2.Equal(b1)) 115 | } 116 | 117 | func TestBlobEqualReturnsTrueWhenBothBlobsAreNil(t *testing.T) { 118 | b1 := (*Blob)(nil) 119 | b2 := (*Blob)(nil) 120 | 121 | assert.True(t, b1.Equal(b2)) 122 | } 123 | -------------------------------------------------------------------------------- /commit.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | 
"encoding/hex" 7 | "fmt" 8 | "hash" 9 | "io" 10 | "strings" 11 | "time" 12 | ) 13 | 14 | // Signature represents a commit signature, which can represent either 15 | // committership or authorship of the commit that this signature belongs to. It 16 | // specifies a name, email, and time that the signature was created. 17 | // 18 | // NOTE: this type is _not_ used by the `*Commit` instance, as it does not 19 | // preserve cruft bytes. It is kept as a convenience type to test with. 20 | type Signature struct { 21 | // Name is the first and last name of the individual holding this 22 | // signature. 23 | Name string 24 | // Email is the email address of the individual holding this signature. 25 | Email string 26 | // When is the instant in time when the signature was created. 27 | When time.Time 28 | } 29 | 30 | const ( 31 | formatTimeZoneOnly = "-0700" 32 | ) 33 | 34 | // String implements the fmt.Stringer interface and formats a Signature as 35 | // expected in the Git commit internal object format. For instance: 36 | // 37 | // Taylor Blau 1494258422 -0600 38 | func (s *Signature) String() string { 39 | at := s.When.Unix() 40 | zone := s.When.Format(formatTimeZoneOnly) 41 | 42 | return fmt.Sprintf("%s <%s> %d %s", s.Name, s.Email, at, zone) 43 | } 44 | 45 | // ExtraHeader encapsulates a key-value pairing of header key to header value. 46 | // It is stored as a struct{string, string} in memory as opposed to a 47 | // map[string]string to maintain ordering in a byte-for-byte encode/decode round 48 | // trip. 49 | type ExtraHeader struct { 50 | // K is the header key, or the first run of bytes up until a ' ' (\x20) 51 | // character. 52 | K string 53 | // V is the header value, or the remaining run of bytes in the line, 54 | // stripping off the above "K" field as a prefix. 55 | V string 56 | } 57 | 58 | // Commit encapsulates a Git commit entry. 59 | type Commit struct { 60 | // Author is the Author this commit, or the original writer of the 61 | // contents. 
62 | // 63 | // NOTE: this field is stored as a string to ensure any extra "cruft" 64 | // bytes are preserved through migration. 65 | Author string 66 | // Committer is the individual or entity that added this commit to the 67 | // history. 68 | // 69 | // NOTE: this field is stored as a string to ensure any extra "cruft" 70 | // bytes are preserved through migration. 71 | Committer string 72 | // ParentIDs are the IDs of all parents for which this commit is a 73 | // linear child. 74 | ParentIDs [][]byte 75 | // TreeID is the root Tree associated with this commit. 76 | TreeID []byte 77 | // ExtraHeaders stores headers not listed above, for instance 78 | // "encoding", "gpgsig", or "mergetag" (among others). 79 | ExtraHeaders []*ExtraHeader 80 | // Message is the commit message, including any signing information 81 | // associated with this commit. 82 | Message string 83 | } 84 | 85 | // Type implements Object.ObjectType by returning the correct object type for 86 | // Commits, CommitObjectType. 87 | func (c *Commit) Type() ObjectType { return CommitObjectType } 88 | 89 | // Decode implements Object.Decode and decodes the uncompressed commit being 90 | // read. It returns the number of uncompressed bytes being consumed off of the 91 | // stream, which should be strictly equal to the size given. 92 | // 93 | // If any error was encountered along the way, that will be returned, along with 94 | // the number of bytes read up to that point. 
95 | func (c *Commit) Decode(hash hash.Hash, from io.Reader, size int64) (n int, err error) { 96 | var finishedHeaders bool 97 | var messageParts []string 98 | 99 | s := bufio.NewScanner(from) 100 | s.Buffer(nil, 10*1024*1024) 101 | for s.Scan() { 102 | text := s.Text() 103 | n = n + len(text+"\n") 104 | 105 | if len(s.Text()) == 0 && !finishedHeaders { 106 | finishedHeaders = true 107 | continue 108 | } 109 | 110 | if fields := strings.Split(text, " "); !finishedHeaders { 111 | if len(fields) == 0 { 112 | // Executing in this block means that we got a 113 | // whitespace-only line, while parsing a header. 114 | // 115 | // Append it to the last-parsed header, and 116 | // continue. 117 | c.ExtraHeaders[len(c.ExtraHeaders)-1].V += 118 | fmt.Sprintf("\n%s", text[1:]) 119 | continue 120 | } 121 | 122 | switch fields[0] { 123 | case "tree": 124 | id, err := hex.DecodeString(fields[1]) 125 | if err != nil { 126 | return n, fmt.Errorf("error parsing tree: %s", err) 127 | } 128 | c.TreeID = id 129 | case "parent": 130 | id, err := hex.DecodeString(fields[1]) 131 | if err != nil { 132 | return n, fmt.Errorf("error parsing parent: %s", err) 133 | } 134 | c.ParentIDs = append(c.ParentIDs, id) 135 | case "author": 136 | if len(text) >= 7 { 137 | c.Author = text[7:] 138 | } else { 139 | c.Author = "" 140 | } 141 | case "committer": 142 | if len(text) >= 10 { 143 | c.Committer = text[10:] 144 | } else { 145 | c.Committer = "" 146 | } 147 | default: 148 | if strings.HasPrefix(text, " ") && len(c.ExtraHeaders) > 0 { 149 | idx := len(c.ExtraHeaders) - 1 150 | hdr := c.ExtraHeaders[idx] 151 | 152 | // Append the line of text (removing the 153 | // leading space) to the last header 154 | // that we parsed, adding a newline 155 | // between the two. 
156 | hdr.V = strings.Join(append( 157 | []string{hdr.V}, s.Text()[1:], 158 | ), "\n") 159 | } else { 160 | c.ExtraHeaders = append(c.ExtraHeaders, &ExtraHeader{ 161 | K: fields[0], 162 | V: strings.Join(fields[1:], " "), 163 | }) 164 | } 165 | } 166 | } else { 167 | messageParts = append(messageParts, s.Text()) 168 | } 169 | } 170 | 171 | c.Message = strings.Join(messageParts, "\n") 172 | 173 | if err = s.Err(); err != nil { 174 | return n, fmt.Errorf("failed to parse commit buffer: %s", err) 175 | } 176 | return n, err 177 | } 178 | 179 | // Encode encodes the commit's contents to the given io.Writer, "w". If there was 180 | // any error copying the commit's contents, that error will be returned. 181 | // 182 | // Otherwise, the number of bytes written will be returned. 183 | func (c *Commit) Encode(to io.Writer) (n int, err error) { 184 | n, err = fmt.Fprintf(to, "tree %s\n", hex.EncodeToString(c.TreeID)) 185 | if err != nil { 186 | return n, err 187 | } 188 | 189 | for _, pid := range c.ParentIDs { 190 | n1, err := fmt.Fprintf(to, "parent %s\n", hex.EncodeToString(pid)) 191 | if err != nil { 192 | return n, err 193 | } 194 | 195 | n = n + n1 196 | } 197 | 198 | n2, err := fmt.Fprintf(to, "author %s\ncommitter %s\n", c.Author, c.Committer) 199 | if err != nil { 200 | return n, err 201 | } 202 | 203 | n = n + n2 204 | 205 | for _, hdr := range c.ExtraHeaders { 206 | n3, err := fmt.Fprintf(to, "%s %s\n", 207 | hdr.K, strings.Replace(hdr.V, "\n", "\n ", -1)) 208 | if err != nil { 209 | return n, err 210 | } 211 | 212 | n = n + n3 213 | } 214 | 215 | // c.Message is built from messageParts in the Decode() function. 216 | // 217 | // Since each entry in messageParts _does not_ contain its trailing LF, 218 | // append an empty string to capture the final newline. 
219 | n4, err := fmt.Fprintf(to, "\n%s\n", c.Message) 220 | if err != nil { 221 | return n, err 222 | } 223 | 224 | return n + n4, err 225 | } 226 | 227 | // Equal returns whether the receiving and given commits are equal, or in other 228 | // words, whether they are represented by the same SHA-1 when saved to the 229 | // object database. 230 | func (c *Commit) Equal(other *Commit) bool { 231 | if (c == nil) != (other == nil) { 232 | return false 233 | } 234 | 235 | if c != nil { 236 | if len(c.ParentIDs) != len(other.ParentIDs) { 237 | return false 238 | } 239 | for i := 0; i < len(c.ParentIDs); i++ { 240 | p1 := c.ParentIDs[i] 241 | p2 := other.ParentIDs[i] 242 | 243 | if !bytes.Equal(p1, p2) { 244 | return false 245 | } 246 | } 247 | 248 | if len(c.ExtraHeaders) != len(other.ExtraHeaders) { 249 | return false 250 | } 251 | for i := 0; i < len(c.ExtraHeaders); i++ { 252 | e1 := c.ExtraHeaders[i] 253 | e2 := other.ExtraHeaders[i] 254 | 255 | if e1.K != e2.K || e1.V != e2.V { 256 | return false 257 | } 258 | } 259 | 260 | return c.Author == other.Author && 261 | c.Committer == other.Committer && 262 | c.Message == other.Message && 263 | bytes.Equal(c.TreeID, other.TreeID) 264 | } 265 | return true 266 | } 267 | -------------------------------------------------------------------------------- /errors.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import "fmt" 4 | 5 | // UnexpectedObjectType is an error type that represents a scenario where an 6 | // object was requested of a given type "Wanted", and received as a different 7 | // _other_ type, "Wanted". 8 | type UnexpectedObjectType struct { 9 | // Got was the object type requested. 10 | Got ObjectType 11 | // Wanted was the object type received. 12 | Wanted ObjectType 13 | } 14 | 15 | // Error implements the error.Error() function. 
func (e *UnexpectedObjectType) Error() string {
	return fmt.Sprintf("gitobj: unexpected object type, got: %q, wanted: %q", e.Got, e.Wanted)
}
-------------------------------------------------------------------------------- /errors/errors.go: --------------------------------------------------------------------------------
package errors

import (
	"fmt"
)

// noSuchObject is an error type that occurs when no object with a given object
// ID is available.
type noSuchObject struct {
	// oid is the object ID that could not be found; Error() renders it in
	// lowercase hexadecimal.
	oid []byte
}

// Error implements the error.Error() function.
func (e *noSuchObject) Error() string {
	return fmt.Sprintf("gitobj: no such object: %x", e.oid)
}

// NoSuchObject creates a new error representing a missing object with a given
// object ID.
func NoSuchObject(oid []byte) error {
	return &noSuchObject{oid: oid}
}

// IsNoSuchObject indicates whether an error is a noSuchObject and is non-nil.
25 | func IsNoSuchObject(e error) bool { 26 | err, ok := e.(*noSuchObject) 27 | return ok && err != nil 28 | } 29 | -------------------------------------------------------------------------------- /errors/errors_test.go: -------------------------------------------------------------------------------- 1 | package errors 2 | 3 | import ( 4 | "encoding/hex" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestNoSuchObjectTypeErrFormatting(t *testing.T) { 11 | sha := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" 12 | oid, err := hex.DecodeString(sha) 13 | assert.NoError(t, err) 14 | 15 | err = NoSuchObject(oid) 16 | 17 | assert.Equal(t, "gitobj: no such object: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", err.Error()) 18 | assert.Equal(t, IsNoSuchObject(err), true) 19 | } 20 | 21 | func TestIsNoSuchObjectNilHandling(t *testing.T) { 22 | assert.Equal(t, IsNoSuchObject((*noSuchObject)(nil)), false) 23 | assert.Equal(t, IsNoSuchObject(nil), false) 24 | } 25 | -------------------------------------------------------------------------------- /errors_test.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestUnexpectedObjectTypeErrFormatting(t *testing.T) { 10 | err := &UnexpectedObjectType{ 11 | Got: TreeObjectType, Wanted: BlobObjectType, 12 | } 13 | 14 | assert.Equal(t, "gitobj: unexpected object type, got: \"tree\", wanted: \"blob\"", err.Error()) 15 | } 16 | -------------------------------------------------------------------------------- /file_storer.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | "io" 7 | "io/ioutil" 8 | "os" 9 | "path/filepath" 10 | 11 | "github.com/git-lfs/gitobj/v2/errors" 12 | ) 13 | 14 | // fileStorer implements the storer interface by writing to the .git/objects 15 | // 
directory on disc. 16 | type fileStorer struct { 17 | // root is the top level /objects directory's path on disc. 18 | root string 19 | 20 | // temp directory, defaults to os.TempDir 21 | tmp string 22 | } 23 | 24 | // NewFileStorer returns a new fileStorer instance with the given root. 25 | func newFileStorer(root, tmp string) *fileStorer { 26 | return &fileStorer{ 27 | root: root, 28 | tmp: tmp, 29 | } 30 | } 31 | 32 | // Open implements the storer.Open function, and returns a io.ReadCloser 33 | // for the given SHA. If the file does not exist, or if there was any other 34 | // error in opening the file, an error will be returned. 35 | // 36 | // It is the caller's responsibility to close the given file "f" after its use 37 | // is complete. 38 | func (fs *fileStorer) Open(sha []byte) (f io.ReadCloser, err error) { 39 | f, err = fs.open(fs.path(sha), os.O_RDONLY) 40 | if os.IsNotExist(err) { 41 | return nil, errors.NoSuchObject(sha) 42 | } 43 | return f, err 44 | } 45 | 46 | // Store implements the storer.Store function and returns the number of bytes 47 | // written, along with any error encountered in copying the given io.Reader, "r" 48 | // into the object database on disk at a path given by "sha". 49 | // 50 | // If the file could not be created, or opened, an error will be returned. 51 | func (fs *fileStorer) Store(sha []byte, r io.Reader) (n int64, err error) { 52 | path := fs.path(sha) 53 | dir := filepath.Dir(path) 54 | 55 | if stat, err := os.Stat(path); stat != nil || os.IsExist(err) { 56 | // If the file already exists, there is no work left for us to 57 | // do, since the object already exists (or there is a SHA1 58 | // collision). 
59 | _, err = io.Copy(ioutil.Discard, r) 60 | if err != nil { 61 | return 0, fmt.Errorf("discard pre-existing object data: %s", err) 62 | } 63 | 64 | return 0, nil 65 | } 66 | 67 | tmp, err := ioutil.TempFile(fs.tmp, "") 68 | if err != nil { 69 | return 0, err 70 | } 71 | 72 | n, err = io.Copy(tmp, r) 73 | if err = tmp.Close(); err != nil { 74 | return n, err 75 | } 76 | if err != nil { 77 | return n, err 78 | } 79 | 80 | // Since .git/objects partitions objects based on the first two 81 | // characters of their ASCII-encoded SHA1 object ID, ensure that 82 | // the directory exists before copying a file into it. 83 | if err = os.MkdirAll(dir, 0755); err != nil { 84 | return n, err 85 | } 86 | 87 | if err = os.Rename(tmp.Name(), path); err != nil { 88 | return n, err 89 | } 90 | 91 | return n, nil 92 | } 93 | 94 | // Root gives the absolute (fully-qualified) path to the file storer on disk. 95 | func (fs *fileStorer) Root() string { 96 | return fs.root 97 | } 98 | 99 | // Close closes the file storer. 100 | func (fs *fileStorer) Close() error { 101 | return nil 102 | } 103 | 104 | // IsCompressed returns true, because the file storer returns compressed data. 105 | func (fs *fileStorer) IsCompressed() bool { 106 | return true 107 | } 108 | 109 | // open opens a given file. 110 | func (fs *fileStorer) open(path string, flag int) (*os.File, error) { 111 | return os.OpenFile(path, flag, 0) 112 | } 113 | 114 | // path returns an absolute path on disk to the object given by the OID "sha". 
115 | func (fs *fileStorer) path(sha []byte) string { 116 | encoded := hex.EncodeToString(sha) 117 | 118 | return filepath.Join(fs.root, encoded[:2], encoded[2:]) 119 | } 120 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/git-lfs/gitobj/v2 2 | 3 | require ( 4 | github.com/davecgh/go-spew v1.1.1 // indirect 5 | github.com/pmezard/go-difflib v1.0.0 // indirect 6 | github.com/stretchr/testify v1.2.2 7 | ) 8 | 9 | go 1.11 10 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 2 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 4 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 5 | github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= 6 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 7 | -------------------------------------------------------------------------------- /memory_storer.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | "sync" 8 | 9 | "github.com/git-lfs/gitobj/v2/errors" 10 | ) 11 | 12 | // memoryStorer is an implementation of the storer interface that holds data for 13 | // the object database in memory. 14 | type memoryStorer struct { 15 | // mu guards reads and writes to the map "fs" below. 16 | mu *sync.Mutex 17 | // fs maps a hex-encoded SHA to a bytes.Buffer wrapped in a no-op closer 18 | // type. 
	fs map[string]*bufCloser
}

// newMemoryStorer initializes a new memoryStorer instance with the given
// initial set.
//
// A value of "nil" is acceptable and indicates that no entries shall be added
// to the memory storer at/during construction time.
func newMemoryStorer(m map[string]io.ReadWriter) *memoryStorer {
	fs := make(map[string]*bufCloser, len(m))
	for n, rw := range m {
		fs[n] = &bufCloser{rw}
	}

	return &memoryStorer{
		mu: new(sync.Mutex),
		fs: fs,
	}
}

// Store implements the storer.Store function and copies the data given in "r"
// into an object entry in the memory. If an object given by that SHA "sha" is
// already indexed in the database, it is silently replaced with the new
// contents: the entry is unconditionally reassigned to a fresh buffer before
// the copy.
func (ms *memoryStorer) Store(sha []byte, r io.Reader) (n int64, err error) {
	ms.mu.Lock()
	defer ms.mu.Unlock()

	// Entries are keyed by the hex-encoded SHA.
	key := fmt.Sprintf("%x", sha)

	ms.fs[key] = &bufCloser{new(bytes.Buffer)}
	return io.Copy(ms.fs[key], r)
}

// Open implements the storer.Open function, and returns a io.ReadWriteCloser
// for the given SHA. If a reader for the given SHA does not exist an error will
// be returned.
//
// Note that the stored buffer itself is returned (not a copy), so successive
// opens of the same SHA share read state.
func (ms *memoryStorer) Open(sha []byte) (f io.ReadCloser, err error) {
	ms.mu.Lock()
	defer ms.mu.Unlock()

	key := fmt.Sprintf("%x", sha)
	if _, ok := ms.fs[key]; !ok {
		return nil, errors.NoSuchObject(sha)
	}
	return ms.fs[key], nil
}

// Close closes the memory storer. It never fails, as there are no resources to
// release.
func (ms *memoryStorer) Close() error {
	return nil
}

// IsCompressed returns true, because the memory storer returns compressed data.
func (ms *memoryStorer) IsCompressed() bool {
	return true
}

// bufCloser wraps a type satisfying the io.ReadWriter interface with a no-op
// Close() function, thus implementing the io.ReadWriteCloser composite
// interface.
79 | type bufCloser struct { 80 | io.ReadWriter 81 | } 82 | 83 | // Close implements io.Closer, and returns nothing. 84 | func (b *bufCloser) Close() error { return nil } 85 | -------------------------------------------------------------------------------- /memory_storer_test.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "bytes" 5 | "encoding/hex" 6 | "io" 7 | "io/ioutil" 8 | "strings" 9 | "testing" 10 | 11 | "github.com/git-lfs/gitobj/v2/errors" 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | func TestMemoryStorerIncludesGivenEntries(t *testing.T) { 16 | sha := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" 17 | hex, err := hex.DecodeString(sha) 18 | 19 | assert.Nil(t, err) 20 | 21 | ms := newMemoryStorer(map[string]io.ReadWriter{ 22 | sha: bytes.NewBuffer([]byte{0x1}), 23 | }) 24 | 25 | buf, err := ms.Open(hex) 26 | assert.Nil(t, err) 27 | 28 | contents, err := ioutil.ReadAll(buf) 29 | assert.Nil(t, err) 30 | assert.Equal(t, []byte{0x1}, contents) 31 | } 32 | 33 | func TestMemoryStorerAcceptsNilEntries(t *testing.T) { 34 | ms := newMemoryStorer(nil) 35 | 36 | assert.NotNil(t, ms) 37 | assert.Equal(t, 0, len(ms.fs)) 38 | assert.NoError(t, ms.Close()) 39 | } 40 | 41 | func TestMemoryStorerDoesntOpenMissingEntries(t *testing.T) { 42 | sha := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" 43 | 44 | hex, err := hex.DecodeString(sha) 45 | assert.Nil(t, err) 46 | 47 | ms := newMemoryStorer(nil) 48 | 49 | f, err := ms.Open(hex) 50 | assert.Equal(t, errors.NoSuchObject(hex), err) 51 | assert.Nil(t, f) 52 | } 53 | 54 | func TestMemoryStorerStoresNewEntries(t *testing.T) { 55 | hex, err := hex.DecodeString("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") 56 | assert.Nil(t, err) 57 | 58 | ms := newMemoryStorer(nil) 59 | 60 | assert.Equal(t, 0, len(ms.fs)) 61 | 62 | _, err = ms.Store(hex, strings.NewReader("hello")) 63 | assert.Nil(t, err) 64 | assert.Equal(t, 1, len(ms.fs)) 65 | 66 | got, err := 
ms.Open(hex) 67 | assert.Nil(t, err) 68 | 69 | contents, err := ioutil.ReadAll(got) 70 | assert.Nil(t, err) 71 | assert.Equal(t, "hello", string(contents)) 72 | } 73 | 74 | func TestMemoryStorerStoresExistingEntries(t *testing.T) { 75 | hex, err := hex.DecodeString("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") 76 | assert.Nil(t, err) 77 | 78 | ms := newMemoryStorer(nil) 79 | 80 | assert.Equal(t, 0, len(ms.fs)) 81 | 82 | _, err = ms.Store(hex, new(bytes.Buffer)) 83 | assert.Nil(t, err) 84 | assert.Equal(t, 1, len(ms.fs)) 85 | 86 | n, err := ms.Store(hex, new(bytes.Buffer)) 87 | assert.Nil(t, err) 88 | assert.EqualValues(t, 0, n) 89 | } 90 | -------------------------------------------------------------------------------- /object.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "hash" 5 | "io" 6 | ) 7 | 8 | // Object is an interface satisfied by any concrete type that represents a loose 9 | // Git object. 10 | type Object interface { 11 | // Encode takes an io.Writer, "to", and encodes an uncompressed 12 | // Git-compatible representation of itself to that stream. 13 | // 14 | // It must return "n", the number of uncompressed bytes written to that 15 | // stream, along with "err", any error that was encountered during the 16 | // write. 17 | // 18 | // Any error that was encountered should be treated as "fatal-local", 19 | // meaning that a particular invocation of Encode() cannot progress, and 20 | // an accurate number "n" of bytes written up that point should be 21 | // returned. 22 | Encode(to io.Writer) (n int, err error) 23 | 24 | // Decode takes an io.Reader, "from" as well as a size "size" (the 25 | // number of uncompressed bytes on the stream that represent the object 26 | // trying to be decoded) and decodes the encoded object onto itself, 27 | // as a mutative transaction. 
	//
	// It returns the number of uncompressed bytes "n" that an invocation
	// of this function has advanced the io.Reader, "from", as well as any
	// error that was encountered along the way.
	//
	// If an(y) error was encountered, it should be returned immediately,
	// along with the number of bytes read up to that point.
	Decode(hash hash.Hash, from io.Reader, size int64) (n int, err error)

	// Type returns the ObjectType constant that represents an instance of
	// the implementing type.
	Type() ObjectType
}
-------------------------------------------------------------------------------- /object_reader.go: --------------------------------------------------------------------------------
package gitobj

import (
	"bufio"
	"compress/zlib"
	"fmt"
	"io"
	"io/ioutil"
	"strconv"
	"strings"
)

// ObjectReader provides an io.Reader implementation that can read Git object
// headers, as well as provide an uncompressed view into the object contents
// itself.
type ObjectReader struct {
	// header is the parsed object header; it is nil until Header() has
	// been called at least once, after which it caches the result.
	header *struct {
		// typ is the ObjectType encoded in the header pointed at by
		// this reader.
		typ ObjectType
		// size is the number of uncompressed bytes following the header
		// that encodes the object.
		size int64
	}
	// r is the underlying uncompressed reader.
	r *bufio.Reader

	// closeFn supplies an optional function that, when called, frees any
	// resources (open files, memory, etc) held by this instance of the
	// *ObjectReader.
	//
	// closeFn returns any error encountered when closing/freeing resources
	// held.
	//
	// It is allowed to be nil.
	closeFn func() error
}

// NewObjectReader takes a given io.Reader that yields zlib-compressed data, and
// returns an *ObjectReader wrapping it, or an error if one occurred during
// construction time.
func NewObjectReader(r io.Reader) (*ObjectReader, error) {
	return NewObjectReadCloser(ioutil.NopCloser(r))
}

// NewUncompressedObjectReader takes a given io.Reader that yields uncompressed
// data and returns an *ObjectReader wrapping it, or an error if one occurred
// during construction time.
func NewUncompressedObjectReader(r io.Reader) (*ObjectReader, error) {
	return NewUncompressedObjectReadCloser(ioutil.NopCloser(r))
}

// NewObjectReadCloser takes a given io.Reader that yields zlib-compressed data, and
// returns an *ObjectReader wrapping it, or an error if one occurred during
// construction time.
//
// It also calls the Close() function given by the implementation "r" of the
// type io.Closer.
func NewObjectReadCloser(r io.ReadCloser) (*ObjectReader, error) {
	zr, err := zlib.NewReader(r)
	if err != nil {
		return nil, err
	}

	return &ObjectReader{
		r: bufio.NewReader(zr),
		closeFn: func() error {
			// Close the zlib stream first, then the underlying
			// reader, stopping at the first error encountered.
			if err := zr.Close(); err != nil {
				return err
			}
			if err := r.Close(); err != nil {
				return err
			}
			return nil
		},
	}, nil
}

// NewUncompressedObjectReadCloser takes a given io.Reader that yields
// uncompressed data, and returns an *ObjectReader wrapping it, or an error if
// one occurred during construction time.
//
// It also calls the Close() function given by the implementation "r" of the
// type io.Closer.
86 | func NewUncompressedObjectReadCloser(r io.ReadCloser) (*ObjectReader, error) { 87 | return &ObjectReader{ 88 | r: bufio.NewReader(r), 89 | closeFn: r.Close, 90 | }, nil 91 | } 92 | 93 | // Header returns information about the Object's header, or an error if one 94 | // occurred while reading the data. 95 | // 96 | // Header information is cached, so this function is safe to call at any point 97 | // during the object read, and can be called more than once. 98 | func (r *ObjectReader) Header() (typ ObjectType, size int64, err error) { 99 | if r.header != nil { 100 | return r.header.typ, r.header.size, nil 101 | } 102 | 103 | typs, err := r.r.ReadString(' ') 104 | if err != nil { 105 | return UnknownObjectType, 0, err 106 | } 107 | if len(typs) == 0 { 108 | return UnknownObjectType, 0, fmt.Errorf( 109 | "gitobj: object type must not be empty", 110 | ) 111 | } 112 | typs = strings.TrimSuffix(typs, " ") 113 | 114 | sizeStr, err := r.r.ReadString('\x00') 115 | if err != nil { 116 | return UnknownObjectType, 0, err 117 | } 118 | sizeStr = strings.TrimSuffix(sizeStr, "\x00") 119 | 120 | size, err = strconv.ParseInt(sizeStr, 10, 64) 121 | if err != nil { 122 | return UnknownObjectType, 0, err 123 | } 124 | 125 | r.header = &struct { 126 | typ ObjectType 127 | size int64 128 | }{ 129 | ObjectTypeFromString(typs), 130 | size, 131 | } 132 | 133 | return r.header.typ, r.header.size, nil 134 | } 135 | 136 | // Read reads uncompressed bytes into the buffer "p", and returns the number of 137 | // uncompressed bytes read. Otherwise, it returns any error encountered along 138 | // the way. 139 | // 140 | // This function is safe to call before reading the Header information, as any 141 | // call to Read() will ensure that read has been called at least once. 
142 | func (r *ObjectReader) Read(p []byte) (n int, err error) { 143 | if _, _, err = r.Header(); err != nil { 144 | return 0, err 145 | } 146 | return r.r.Read(p) 147 | } 148 | 149 | // Close frees any resources held by the ObjectReader and must be called before 150 | // disposing of this instance. 151 | // 152 | // It returns any error encountered by the *ObjectReader during close. 153 | func (r *ObjectReader) Close() error { 154 | if r.closeFn == nil { 155 | return nil 156 | } 157 | return r.closeFn() 158 | } 159 | -------------------------------------------------------------------------------- /object_reader_test.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "bytes" 5 | "compress/zlib" 6 | "errors" 7 | "io" 8 | "sync/atomic" 9 | "testing" 10 | 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func TestObjectReaderReadsHeaders(t *testing.T) { 15 | var compressed bytes.Buffer 16 | 17 | zw := zlib.NewWriter(&compressed) 18 | zw.Write([]byte("blob 1\x00")) 19 | zw.Close() 20 | 21 | or, err := NewObjectReader(&compressed) 22 | assert.Nil(t, err) 23 | 24 | typ, size, err := or.Header() 25 | 26 | assert.Nil(t, err) 27 | assert.EqualValues(t, 1, size) 28 | assert.Equal(t, BlobObjectType, typ) 29 | } 30 | 31 | func TestObjectReaderConsumesHeaderBeforeReads(t *testing.T) { 32 | var compressed bytes.Buffer 33 | 34 | zw := zlib.NewWriter(&compressed) 35 | zw.Write([]byte("blob 1\x00asdf")) 36 | zw.Close() 37 | 38 | or, err := NewObjectReader(&compressed) 39 | assert.Nil(t, err) 40 | 41 | var buf [4]byte 42 | n, err := or.Read(buf[:]) 43 | 44 | assert.Equal(t, 4, n) 45 | assert.Equal(t, []byte{'a', 's', 'd', 'f'}, buf[:]) 46 | assert.Nil(t, err) 47 | } 48 | 49 | type ReadCloserFn struct { 50 | io.Reader 51 | closeFn func() error 52 | } 53 | 54 | func (r *ReadCloserFn) Close() error { 55 | return r.closeFn() 56 | } 57 | 58 | func TestObjectReaderCallsClose(t *testing.T) { 59 | var calls 
uint32
	expected := errors.New("expected")

	or, err := NewObjectReadCloser(&ReadCloserFn{
		Reader: bytes.NewBuffer([]byte{0x78, 0x01}),
		closeFn: func() error {
			atomic.AddUint32(&calls, 1)
			return expected
		},
	})
	assert.Nil(t, err)

	got := or.Close()

	assert.Equal(t, expected, got)
	assert.EqualValues(t, 1, atomic.LoadUint32(&calls))

}
-------------------------------------------------------------------------------- /object_type.go: --------------------------------------------------------------------------------
package gitobj

import "strings"

// ObjectType is a constant enumeration type for identifying the kind of object
// type an implementing instance of the Object interface is.
type ObjectType uint8

const (
	// UnknownObjectType is the zero value, used when a type cannot be
	// determined.
	UnknownObjectType ObjectType = iota
	// BlobObjectType identifies a Git blob object.
	BlobObjectType
	// TreeObjectType identifies a Git tree object.
	TreeObjectType
	// CommitObjectType identifies a Git commit object.
	CommitObjectType
	// TagObjectType identifies a Git annotated tag object.
	TagObjectType
)

// ObjectTypeFromString converts from a given string to an ObjectType
// enumeration instance. The comparison is case-insensitive; unrecognized
// names yield UnknownObjectType.
func ObjectTypeFromString(s string) ObjectType {
	switch strings.ToLower(s) {
	case "blob":
		return BlobObjectType
	case "tree":
		return TreeObjectType
	case "commit":
		return CommitObjectType
	case "tag":
		return TagObjectType
	default:
		return UnknownObjectType
	}
}

// String implements the fmt.Stringer interface and returns a string
// representation of the ObjectType enumeration instance.
36 | func (t ObjectType) String() string { 37 | switch t { 38 | case UnknownObjectType: 39 | return "unknown" 40 | case BlobObjectType: 41 | return "blob" 42 | case TreeObjectType: 43 | return "tree" 44 | case CommitObjectType: 45 | return "commit" 46 | case TagObjectType: 47 | return "tag" 48 | } 49 | return "" 50 | } 51 | -------------------------------------------------------------------------------- /object_type_test.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "math" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestObjectTypeFromString(t *testing.T) { 11 | for str, typ := range map[string]ObjectType{ 12 | "blob": BlobObjectType, 13 | "tree": TreeObjectType, 14 | "commit": CommitObjectType, 15 | "tag": TagObjectType, 16 | "something else": UnknownObjectType, 17 | } { 18 | t.Run(str, func(t *testing.T) { 19 | assert.Equal(t, typ, ObjectTypeFromString(str)) 20 | }) 21 | } 22 | } 23 | 24 | func TestObjectTypeToString(t *testing.T) { 25 | for typ, str := range map[ObjectType]string{ 26 | BlobObjectType: "blob", 27 | TreeObjectType: "tree", 28 | CommitObjectType: "commit", 29 | TagObjectType: "tag", 30 | UnknownObjectType: "unknown", 31 | ObjectType(math.MaxUint8): "", 32 | } { 33 | t.Run(str, func(t *testing.T) { 34 | assert.Equal(t, str, typ.String()) 35 | }) 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /object_writer.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "compress/zlib" 5 | "fmt" 6 | "hash" 7 | "io" 8 | "sync/atomic" 9 | ) 10 | 11 | // ObjectWriter provides an implementation of io.Writer that compresses and 12 | // writes data given to it, and keeps track of the SHA1 hash of the data as it 13 | // is written. 
type ObjectWriter struct {
	// members managed via sync/atomic must be aligned at the top of this
	// structure (see: https://github.com/git-lfs/git-lfs/pull/2880).

	// wroteHeader is a uint32 managed by the sync/atomic package. It is 1
	// if the header was written, and 0 otherwise.
	wroteHeader uint32

	// w is the underlying writer that this ObjectWriter is writing to.
	w io.Writer
	// sum is the in-progress hash calculation.
	sum hash.Hash

	// closeFn supplies an optional function that, when called, frees any
	// resources (open files, memory, etc) held by this instance of the
	// *ObjectWriter.
	//
	// closeFn returns any error encountered when closing/freeing resources
	// held.
	//
	// It is allowed to be nil.
	closeFn func() error
}

// nopCloser provides a no-op implementation of the io.WriteCloser interface by
// taking an io.Writer and wrapping it with a Close() method that returns nil.
type nopCloser struct {
	// Writer is an embedded io.Writer that receives the Write() method
	// call.
	io.Writer
}

// Close implements the io.Closer interface by returning nil.
func (n *nopCloser) Close() error {
	return nil
}

// NewObjectWriter returns a new *ObjectWriter instance that drains incoming
// writes into the io.Writer given, "w". "hash" is a hash instance from the
// ObjectDatabase's Hash method.
func NewObjectWriter(w io.Writer, hash hash.Hash) *ObjectWriter {
	return NewObjectWriteCloser(&nopCloser{w}, hash)
}

// NewObjectWriteCloser returns a new *ObjectWriter instance that drains
// incoming writes into the io.WriteCloser given, "w". "sum" is a hash instance
// from the ObjectDatabase's Hash method.
//
// Upon closing, it calls the given Close() function of the io.WriteCloser.
func NewObjectWriteCloser(w io.WriteCloser, sum hash.Hash) *ObjectWriter {
	zw := zlib.NewWriter(w)
	sum.Reset()

	return &ObjectWriter{
		// Writes fan out to both the zlib stream and the hash, so the
		// digest covers the uncompressed bytes.
		w:   io.MultiWriter(zw, sum),
		sum: sum,

		closeFn: func() error {
			// Close the zlib stream (flushing it) before closing
			// the underlying writer.
			if err := zw.Close(); err != nil {
				return err
			}
			if err := w.Close(); err != nil {
				return err
			}
			return nil
		},
	}
}

// WriteHeader writes object header information and returns the number of
// uncompressed bytes written, or any error that was encountered along the way.
//
// WriteHeader MUST be called only once, or a panic() will occur.
func (w *ObjectWriter) WriteHeader(typ ObjectType, len int64) (n int, err error) {
	if !atomic.CompareAndSwapUint32(&w.wroteHeader, 0, 1) {
		panic("gitobj: cannot write headers more than once")
	}
	return fmt.Fprintf(w, "%s %d\x00", typ, len)
}

// Write writes the given buffer "p" of uncompressed bytes into the underlying
// data-stream, returning the number of uncompressed bytes written, along with
// any error encountered along the way.
//
// A call to WriteHeader MUST occur before calling Write, or a panic() will
// occur.
func (w *ObjectWriter) Write(p []byte) (n int, err error) {
	if atomic.LoadUint32(&w.wroteHeader) != 1 {
		panic("gitobj: cannot write data without header")
	}
	return w.w.Write(p)
}

// Sha returns the in-progress digest of the uncompressed object data (header
// and contents) written so far. Despite the name, the digest is produced by
// whichever hash.Hash was supplied at construction time (e.g. SHA-1 or
// SHA-256), not the compressed stream.
func (w *ObjectWriter) Sha() []byte {
	return w.sum.Sum(nil)
}

// Close closes the ObjectWriter and frees any resources held by it, including
// flushing the zlib-compressed content to the underlying writer. It must be
// called before discarding of the Writer instance.
//
// If any error occurred while calling close, it will be returned immediately,
// otherwise nil.
118 | func (w *ObjectWriter) Close() error { 119 | if w.closeFn == nil { 120 | return nil 121 | } 122 | return w.closeFn() 123 | } 124 | -------------------------------------------------------------------------------- /object_writer_test.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "bytes" 5 | "compress/zlib" 6 | "crypto/sha1" 7 | "crypto/sha256" 8 | "encoding/hex" 9 | "errors" 10 | "hash" 11 | "io" 12 | "io/ioutil" 13 | "sync/atomic" 14 | "testing" 15 | 16 | "github.com/stretchr/testify/assert" 17 | ) 18 | 19 | func TestObjectWriterWritesHeaders(t *testing.T) { 20 | var buf bytes.Buffer 21 | 22 | w := NewObjectWriter(&buf, sha1.New()) 23 | 24 | n, err := w.WriteHeader(BlobObjectType, 1) 25 | assert.Equal(t, 7, n) 26 | assert.Nil(t, err) 27 | 28 | assert.Nil(t, w.Close()) 29 | 30 | r, err := zlib.NewReader(&buf) 31 | assert.Nil(t, err) 32 | 33 | all, err := ioutil.ReadAll(r) 34 | assert.Nil(t, err) 35 | assert.Equal(t, []byte("blob 1\x00"), all) 36 | 37 | assert.Nil(t, r.Close()) 38 | } 39 | 40 | func TestObjectWriterWritesData(t *testing.T) { 41 | testCases := []struct { 42 | h hash.Hash 43 | sha string 44 | }{ 45 | { 46 | sha1.New(), "56a6051ca2b02b04ef92d5150c9ef600403cb1de", 47 | }, 48 | { 49 | sha256.New(), "36456d9b87f21fc54ed5babf1222a9ab0fbbd0c4ad239a7933522d5e4447049c", 50 | }, 51 | } 52 | 53 | for _, test := range testCases { 54 | var buf bytes.Buffer 55 | 56 | w := NewObjectWriter(&buf, test.h) 57 | w.WriteHeader(BlobObjectType, 1) 58 | 59 | n, err := w.Write([]byte{0x31}) 60 | assert.Equal(t, 1, n) 61 | assert.Nil(t, err) 62 | 63 | assert.Nil(t, w.Close()) 64 | 65 | r, err := zlib.NewReader(&buf) 66 | assert.Nil(t, err) 67 | 68 | all, err := ioutil.ReadAll(r) 69 | assert.Nil(t, err) 70 | assert.Equal(t, []byte("blob 1\x001"), all) 71 | 72 | assert.Nil(t, r.Close()) 73 | assert.Equal(t, test.sha, hex.EncodeToString(w.Sha())) 74 | } 75 | } 76 | 77 | func 
TestObjectWriterPanicsOnWritesWithoutHeader(t *testing.T) { 78 | defer func() { 79 | err := recover() 80 | 81 | assert.NotNil(t, err) 82 | assert.Equal(t, "gitobj: cannot write data without header", err) 83 | }() 84 | 85 | w := NewObjectWriter(new(bytes.Buffer), sha1.New()) 86 | w.Write(nil) 87 | } 88 | 89 | func TestObjectWriterPanicsOnMultipleHeaderWrites(t *testing.T) { 90 | defer func() { 91 | err := recover() 92 | 93 | assert.NotNil(t, err) 94 | assert.Equal(t, "gitobj: cannot write headers more than once", err) 95 | }() 96 | 97 | w := NewObjectWriter(new(bytes.Buffer), sha1.New()) 98 | w.WriteHeader(BlobObjectType, 1) 99 | w.WriteHeader(TreeObjectType, 2) 100 | } 101 | 102 | func TestObjectWriterKeepsTrackOfHash(t *testing.T) { 103 | w := NewObjectWriter(new(bytes.Buffer), sha1.New()) 104 | n, err := w.WriteHeader(BlobObjectType, 1) 105 | 106 | assert.Nil(t, err) 107 | assert.Equal(t, 7, n) 108 | 109 | assert.Equal(t, "bb6ca78b66403a67c6281df142de5ef472186283", hex.EncodeToString(w.Sha())) 110 | 111 | w = NewObjectWriter(new(bytes.Buffer), sha256.New()) 112 | n, err = w.WriteHeader(BlobObjectType, 1) 113 | 114 | assert.Nil(t, err) 115 | assert.Equal(t, 7, n) 116 | 117 | assert.Equal(t, "3a68c454a6eb75cc55bda147a53756f0f581497eb80b9b67156fb8a8d3931cd7", hex.EncodeToString(w.Sha())) 118 | } 119 | 120 | type WriteCloserFn struct { 121 | io.Writer 122 | closeFn func() error 123 | } 124 | 125 | func (r *WriteCloserFn) Close() error { return r.closeFn() } 126 | 127 | func TestObjectWriterCallsClose(t *testing.T) { 128 | var calls uint32 129 | 130 | expected := errors.New("close error") 131 | 132 | w := NewObjectWriteCloser(&WriteCloserFn{ 133 | Writer: new(bytes.Buffer), 134 | closeFn: func() error { 135 | atomic.AddUint32(&calls, 1) 136 | return expected 137 | }, 138 | }, sha1.New()) 139 | 140 | got := w.Close() 141 | 142 | assert.EqualValues(t, 1, calls) 143 | assert.Equal(t, expected, got) 144 | } 145 | 
// bounds encapsulates the window of search for a single iteration of binary
// search.
//
// Callers may choose to treat the return values from Left() and Right() as
// inclusive or exclusive. *bounds makes no assumptions on the inclusivity of
// those values.
//
// See: *gitobj/pack.Index for more.
type bounds struct {
	// left is the left or lower bound of the bounds.
	left int64
	// right is the rightmost or upper bound of the bounds.
	right int64
}

// newBounds returns a new *bounds instance with the given left and right
// values.
func newBounds(left, right int64) *bounds {
	return &bounds{
		left:  left,
		right: right,
	}
}

// Left returns the leftmost value or lower bound of this *bounds instance.
func (b *bounds) Left() int64 {
	return b.left
}

// Right returns the rightmost value or upper bound of this *bounds instance.
func (b *bounds) Right() int64 {
	return b.right
}

// WithLeft returns a new copy of this *bounds instance, replacing the left
// value with the given argument.
func (b *bounds) WithLeft(left int64) *bounds {
	return &bounds{
		left:  left,
		right: b.right,
	}
}

// WithRight returns a new copy of this *bounds instance, replacing the right
// value with the given argument.
func (b *bounds) WithRight(right int64) *bounds {
	return &bounds{
		left:  b.left,
		right: right,
	}
}

// Equal returns whether or not the receiving *bounds instance is equal to the
// given one:
//
//   - If both the argument and receiver are nil, they are given to be equal.
//   - If both are non-nil and share the same Left() and Right() values, they
//     are equal; otherwise they are not.
//   - If exactly one of the argument and receiver is nil, they are not equal.
func (b *bounds) Equal(other *bounds) bool {
	if b == nil || other == nil {
		// Equal only when both sides are nil.
		return b == nil && other == nil
	}
	return b.left == other.left && b.right == other.right
}

// String returns a string representation of this bounds instance, given as:
//
//	[<left>,<right>]
func (b *bounds) String() string {
	return fmt.Sprintf("[%d,%d]", b.Left(), b.Right())
}
TestBoundsEqualWithIdenticalBounds(t *testing.T) { 40 | b1 := newBounds(1, 2) 41 | b2 := newBounds(1, 2) 42 | 43 | assert.True(t, b1.Equal(b2)) 44 | } 45 | 46 | func TestBoundsEqualWithDifferentBounds(t *testing.T) { 47 | b1 := newBounds(1, 2) 48 | b2 := newBounds(3, 4) 49 | 50 | assert.False(t, b1.Equal(b2)) 51 | } 52 | 53 | func TestBoundsEqualWithNilReceiver(t *testing.T) { 54 | bnil := (*bounds)(nil) 55 | b2 := newBounds(1, 2) 56 | 57 | assert.False(t, bnil.Equal(b2)) 58 | } 59 | 60 | func TestBoundsEqualWithNilArgument(t *testing.T) { 61 | b1 := newBounds(1, 2) 62 | bnil := (*bounds)(nil) 63 | 64 | assert.False(t, b1.Equal(bnil)) 65 | } 66 | 67 | func TestBoundsEqualWithNilArgumentAndReceiver(t *testing.T) { 68 | b1 := (*bounds)(nil) 69 | b2 := (*bounds)(nil) 70 | 71 | assert.True(t, b1.Equal(b2)) 72 | } 73 | 74 | func TestBoundsString(t *testing.T) { 75 | b1 := newBounds(1, 2) 76 | 77 | assert.Equal(t, "[1,2]", b1.String()) 78 | } 79 | -------------------------------------------------------------------------------- /pack/chain.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | // Chain represents an element in the delta-base chain corresponding to a packed 4 | // object. 5 | type Chain interface { 6 | // Unpack unpacks the data encoded in the delta-base chain up to and 7 | // including the receiving Chain implementation by applying the 8 | // delta-base chain successively to itself. 9 | // 10 | // If there was an error in the delta-base resolution, i.e., the chain 11 | // is malformed, has a bad instruction, or there was a file read error, this 12 | // function is expected to return that error. 13 | // 14 | // In the event that a non-nil error is returned, it is assumed that the 15 | // unpacked data this function returns is malformed, or otherwise 16 | // corrupt. 17 | Unpack() ([]byte, error) 18 | 19 | // Type returns the type of the receiving chain element. 
20 | Type() PackedObjectType 21 | } 22 | -------------------------------------------------------------------------------- /pack/chain_base.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "compress/zlib" 5 | "io" 6 | ) 7 | 8 | // ChainBase represents the "base" component of a delta-base chain. 9 | type ChainBase struct { 10 | // offset returns the offset into the given io.ReaderAt where the read 11 | // will begin. 12 | offset int64 13 | // size is the total uncompressed size of the data in the base chain. 14 | size int64 15 | // typ is the type of data that this *ChainBase encodes. 16 | typ PackedObjectType 17 | 18 | // r is the io.ReaderAt yielding a stream of zlib-compressed data. 19 | r io.ReaderAt 20 | } 21 | 22 | // Unpack inflates and returns the uncompressed data encoded in the base 23 | // element. 24 | // 25 | // If there was any error in reading the compressed data (invalid headers, 26 | // etc.), it will be returned immediately. 27 | func (b *ChainBase) Unpack() ([]byte, error) { 28 | zr, err := zlib.NewReader(&OffsetReaderAt{ 29 | r: b.r, 30 | o: b.offset, 31 | }) 32 | 33 | if err != nil { 34 | return nil, err 35 | } 36 | 37 | defer zr.Close() 38 | 39 | buf := make([]byte, b.size) 40 | if _, err := io.ReadFull(zr, buf); err != nil { 41 | return nil, err 42 | } 43 | return buf, nil 44 | } 45 | 46 | // ChainBase returns the type of the object it encodes. 
47 | func (b *ChainBase) Type() PackedObjectType { 48 | return b.typ 49 | } 50 | -------------------------------------------------------------------------------- /pack/chain_base_test.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "bytes" 5 | "compress/zlib" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestChainBaseDecompressesData(t *testing.T) { 12 | const contents = "Hello, world!\n" 13 | 14 | compressed, err := compress(contents) 15 | assert.NoError(t, err) 16 | 17 | var buf bytes.Buffer 18 | 19 | _, err = buf.Write([]byte{0x0, 0x0, 0x0, 0x0}) 20 | assert.NoError(t, err) 21 | 22 | _, err = buf.Write(compressed) 23 | assert.NoError(t, err) 24 | 25 | _, err = buf.Write([]byte{0x0, 0x0, 0x0, 0x0}) 26 | assert.NoError(t, err) 27 | 28 | base := &ChainBase{ 29 | offset: 4, 30 | size: int64(len(contents)), 31 | 32 | r: bytes.NewReader(buf.Bytes()), 33 | } 34 | 35 | unpacked, err := base.Unpack() 36 | assert.NoError(t, err) 37 | assert.Equal(t, contents, string(unpacked)) 38 | } 39 | 40 | func TestChainBaseTypeReturnsType(t *testing.T) { 41 | b := &ChainBase{ 42 | typ: TypeCommit, 43 | } 44 | 45 | assert.Equal(t, TypeCommit, b.Type()) 46 | } 47 | 48 | func compress(base string) ([]byte, error) { 49 | var buf bytes.Buffer 50 | 51 | zw := zlib.NewWriter(&buf) 52 | if _, err := zw.Write([]byte(base)); err != nil { 53 | return nil, err 54 | } 55 | 56 | if err := zw.Close(); err != nil { 57 | return nil, err 58 | } 59 | return buf.Bytes(), nil 60 | } 61 | -------------------------------------------------------------------------------- /pack/chain_delta.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import "fmt" 4 | 5 | // ChainDelta represents a "delta" component of a delta-base chain. 6 | type ChainDelta struct { 7 | // Base is the base delta-base chain that this delta should be applied 8 | // to. 
// patch applies the delta instructions in "delta" to the base given as "base".
// It returns the result of applying those patch instructions to base, but does
// not modify base itself.
//
// If any of the delta instructions were malformed, or otherwise could not be
// applied to the given base, an error will be returned, along with an empty
// set of data.
func patch(base, delta []byte) ([]byte, error) {
	srcSize, pos := patchDeltaHeader(delta, 0)
	if srcSize != int64(len(base)) {
		// The header of the delta gives the size of the source contents
		// that it is a patch over.
		//
		// If this does not match with the srcSize, return an error
		// early so as to avoid a possible bounds error below.
		return nil, fmt.Errorf("gitobj/pack: invalid delta data")
	}

	// The remainder of the delta header contains the destination size, and
	// moves the "pos" offset to the correct position to begin the set of
	// delta instructions.
	destSize, pos := patchDeltaHeader(delta, pos)

	dest := make([]byte, 0, destSize)

	for pos < len(delta) {
		c := int(delta[pos])
		pos++

		if c&0x80 != 0 {
			// If the most significant bit (MSB, at position 0x80)
			// is set, this is a copy instruction. Advance the
			// position one byte backwards, and initialize variables
			// for the copy offset and size instructions.
			pos--

			var co, cs int

			// The lower-half of "c" (0000 1111) defines a "bitmask"
			// for the copy offset.
			if c&0x1 != 0 {
				pos++
				co = int(delta[pos])
			}
			if c&0x2 != 0 {
				pos++
				co |= int(delta[pos]) << 8
			}
			if c&0x4 != 0 {
				pos++
				co |= int(delta[pos]) << 16
			}
			if c&0x8 != 0 {
				pos++
				co |= int(delta[pos]) << 24
			}

			// The next three bits of "c" (0111 0000) define a
			// "bitmask" for the size of the copy instruction.
			if c&0x10 != 0 {
				pos++
				cs = int(delta[pos])
			}
			if c&0x20 != 0 {
				pos++
				cs |= int(delta[pos]) << 8
			}
			if c&0x40 != 0 {
				pos++
				cs |= int(delta[pos]) << 16
			}

			if cs == 0 {
				// A copy size of zero encodes the default size,
				// 0x10000 (one more than the largest value
				// representable in 16 bits), matching Git's
				// delta format.
				cs = 0x10000
			}
			pos++

			// Once we have the copy offset and length defined, copy
			// that number of bytes from the base into the
			// destination. Since we are copying from the base and
			// not the delta, the position into the delta ("pos")
			// need not be updated.
			dest = append(dest, base[co:co+cs]...)
		} else if c != 0 {
			// If the most significant bit (MSB) is _not_ set, this
			// is an add (insert) instruction, where "c" is the
			// number of successive bytes in the delta patch to add
			// to the output.
			//
			// Copy the bytes and increment the read pointer
			// forward.
			dest = append(dest, delta[pos:pos+c]...)

			pos += c
		} else {
			// Otherwise, "c" is 0, and is an invalid delta
			// instruction.
			//
			// Return immediately.
			return nil, fmt.Errorf(
				"gitobj/pack: invalid delta data")
		}
	}

	if destSize != int64(len(dest)) {
		// If after patching the delta against the base, the destination
		// size is different than the expected destination size, we have
		// an invalid set of patch instructions.
		//
		// Return immediately.
		return nil, fmt.Errorf("gitobj/pack: invalid delta data")
	}
	return dest, nil
}

// patchDeltaHeader examines the header within delta at the given offset, and
// returns the size encoded within it, as well as the ending offset where begins
// the next header, or the patch instructions.
func patchDeltaHeader(delta []byte, pos int) (size int64, end int) {
	var shift uint
	var c int64

	// Each byte contributes its low seven bits, least-significant group
	// first; a set high bit marks a continuation byte.
	for shift == 0 || c&0x80 != 0 {
		if len(delta) <= pos {
			panic("gitobj/pack: invalid delta header")
		}

		c = int64(delta[pos])

		pos++
		size |= (c & 0x7f) << shift
		shift += 7
	}

	return size, pos
}
20 | 0x3, // Size: 3. 21 | }, 22 | } 23 | 24 | data, err := c.Unpack() 25 | assert.NoError(t, err) 26 | assert.Equal(t, []byte{0x1, 0x2, 0x3}, data) 27 | } 28 | 29 | func TestChainDeltaUnpackAddsToBase(t *testing.T) { 30 | c := &ChainDelta{ 31 | base: &ChainSimple{ 32 | X: make([]byte, 0), 33 | }, 34 | delta: []byte{ 35 | 0x0, // Source size: 0. 36 | 0x3, // Destination size: 3. 37 | 38 | 0x3, // Add, size=3. 39 | 40 | 0x1, 0x2, 0x3, // Contents: ... 41 | }, 42 | } 43 | 44 | data, err := c.Unpack() 45 | assert.NoError(t, err) 46 | assert.Equal(t, []byte{0x1, 0x2, 0x3}, data) 47 | } 48 | 49 | func TestChainDeltaWithMultipleInstructions(t *testing.T) { 50 | c := &ChainDelta{ 51 | base: &ChainSimple{ 52 | X: []byte{'H', 'e', 'l', 'l', 'o', '!', '\n'}, 53 | }, 54 | delta: []byte{ 55 | 0x07, // Source size: 7. 56 | 0x0e, // Destination size: 14. 57 | 58 | 0x80 | 0x01 | 0x10, // Copy, omask=0001, smask=0001. 59 | 0x0, // Offset: 1. 60 | 0x5, // Size: 5. 61 | 62 | 0x7, // Add, size=7. 63 | ',', ' ', 'w', 'o', 'r', 'l', 'd', // Contents: ... 64 | 65 | 0x80 | 0x01 | 0x10, // Copy, omask=0001, smask=0001. 66 | 0x05, // Offset: 5. 67 | 0x02, // Size: 2. 68 | }, 69 | } 70 | 71 | data, err := c.Unpack() 72 | assert.NoError(t, err) 73 | assert.Equal(t, []byte("Hello, world!\n"), data) 74 | } 75 | 76 | func TestChainDeltaWithInvalidDeltaInstruction(t *testing.T) { 77 | c := &ChainDelta{ 78 | base: &ChainSimple{ 79 | X: make([]byte, 0), 80 | }, 81 | delta: []byte{ 82 | 0x0, // Source size: 0. 83 | 0x1, // Destination size: 3. 84 | 85 | 0x0, // Invalid instruction. 86 | }, 87 | } 88 | 89 | data, err := c.Unpack() 90 | assert.EqualError(t, err, "gitobj/pack: invalid delta data") 91 | assert.Nil(t, data) 92 | } 93 | 94 | func TestChainDeltaWithExtraInstructions(t *testing.T) { 95 | c := &ChainDelta{ 96 | base: &ChainSimple{ 97 | X: make([]byte, 0), 98 | }, 99 | delta: []byte{ 100 | 0x0, // Source size: 0. 101 | 0x3, // Destination size: 3. 102 | 103 | 0x4, // Add, size=4 (invalid). 
104 | 105 | 0x1, 0x2, 0x3, 0x4, // Contents: ... 106 | }, 107 | } 108 | 109 | data, err := c.Unpack() 110 | assert.EqualError(t, err, "gitobj/pack: invalid delta data") 111 | assert.Nil(t, data) 112 | } 113 | -------------------------------------------------------------------------------- /pack/chain_test.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | type ChainSimple struct { 4 | X []byte 5 | Err error 6 | } 7 | 8 | func (c *ChainSimple) Unpack() ([]byte, error) { 9 | return c.X, c.Err 10 | } 11 | 12 | func (c *ChainSimple) Type() PackedObjectType { return TypeNone } 13 | -------------------------------------------------------------------------------- /pack/delayed_object.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | "strings" 8 | ) 9 | 10 | // delayedObjectReader provides an interface for reading from an Object while 11 | // loading object data into memory only on demand. It implements io.ReadCloser. 12 | type delayedObjectReader struct { 13 | obj *Object 14 | mr io.Reader 15 | } 16 | 17 | // Read implements the io.Reader method by instantiating a new underlying reader 18 | // only on demand. 19 | func (d *delayedObjectReader) Read(b []byte) (int, error) { 20 | if d.mr == nil { 21 | data, err := d.obj.Unpack() 22 | if err != nil { 23 | return 0, err 24 | } 25 | d.mr = io.MultiReader( 26 | // Git object header: 27 | strings.NewReader(fmt.Sprintf("%s %d\x00", 28 | d.obj.Type(), len(data), 29 | )), 30 | 31 | // Git object (uncompressed) contents: 32 | bytes.NewReader(data), 33 | ) 34 | } 35 | return d.mr.Read(b) 36 | } 37 | 38 | // Close implements the io.Closer interface. 
// UnsupportedVersionErr is a type implementing 'error' which indicates
// the presence of an unsupported packfile version.
For a given index 20 | // "i" into the array, the value stored at that index specifies the 21 | // number of objects in the packfile/index that are lexicographically 22 | // less than or equal to that index. 23 | // 24 | // See: https://github.com/git/git/blob/v2.13.0/Documentation/technical/pack-format.txt#L41-L45 25 | fanout []uint32 26 | 27 | // r is the underlying set of encoded data comprising this index file. 28 | r io.ReaderAt 29 | } 30 | 31 | // Count returns the number of objects in the packfile. 32 | func (i *Index) Count() int { 33 | return int(i.fanout[255]) 34 | } 35 | 36 | // Close closes the packfile index if the underlying data stream is closeable. 37 | // If so, it returns any error involved in closing. 38 | func (i *Index) Close() error { 39 | if close, ok := i.r.(io.Closer); ok { 40 | return close.Close() 41 | } 42 | return nil 43 | } 44 | 45 | var ( 46 | // errNotFound is an error returned by Index.Entry() (see: below) when 47 | // an object cannot be found in the index. 48 | errNotFound = fmt.Errorf("gitobj/pack: object not found in index") 49 | ) 50 | 51 | // IsNotFound returns whether a given error represents a missing object in the 52 | // index. 53 | func IsNotFound(err error) bool { 54 | return err == errNotFound 55 | } 56 | 57 | // Entry returns an entry containing the offset of a given SHA1 "name". 58 | // 59 | // Entry operates in O(log(n))-time in the worst case, where "n" is the number 60 | // of objects that begin with the first byte of "name". 61 | // 62 | // If the entry cannot be found, (nil, ErrNotFound) will be returned. If there 63 | // was an error searching for or parsing an entry, it will be returned as (nil, 64 | // err). 65 | // 66 | // Otherwise, (entry, nil) will be returned. 
// Entry returns an entry containing the offset of a given SHA1 "name".
//
// Entry operates in O(log(n))-time in the worst case, where "n" is the number
// of objects that begin with the first byte of "name".
//
// If the entry cannot be found, (nil, ErrNotFound) will be returned. If there
// was an error searching for or parsing an entry, it will be returned as (nil,
// err).
//
// Otherwise, (entry, nil) will be returned.
func (i *Index) Entry(name []byte) (*IndexEntry, error) {
	// last remembers the window from the previous iteration so that a
	// stalled search (the same window twice in a row) can be detected.
	var last *bounds
	bounds := i.bounds(name)

	for bounds.Left() < bounds.Right() {
		if last.Equal(bounds) {
			// If the bounds are unchanged, that means either that
			// the object does not exist in the packfile, or the
			// fanout table is corrupt.
			//
			// Either way, we won't be able to find the object.
			// Return immediately to prevent infinite looping.
			return nil, errNotFound
		}
		last = bounds

		// Find the midpoint between the upper and lower bounds.
		mid := bounds.Left() + ((bounds.Right() - bounds.Left()) / 2)

		// Ask the version-specific layout for the object name stored
		// at index "mid".
		got, err := i.version.Name(i, mid)
		if err != nil {
			return nil, err
		}

		if cmp := bytes.Compare(name, got); cmp == 0 {
			// If "cmp" is zero, that means the object at that index
			// "at" had a SHA equal to the one given by name, and we
			// are done.
			return i.version.Entry(i, mid)
		} else if cmp < 0 {
			// If the comparison is less than 0, we searched past
			// the desired object, so limit the upper bound of the
			// search to the midpoint.
			bounds = bounds.WithRight(mid)
		} else if cmp > 0 {
			// Likewise, if the comparison is greater than 0, we
			// searched below the desired object. Modify the bounds
			// accordingly.
			//
			// NOTE(review): the left bound becomes "mid" rather
			// than "mid"+1; the stall check above is what prevents
			// the resulting potential infinite loop.
			bounds = bounds.WithLeft(mid)
		}

	}

	return nil, errNotFound
}

// readAt is a convenience method that allows reading into the underlying data
// source from other callers within this package.
func (i *Index) readAt(p []byte, at int64) (n int, err error) {
	return i.r.ReadAt(p, at)
}
// bounds returns the initial bounds for a given name using the fanout table to
// limit search results.
//
// The fanout table records, for each possible first byte, the number of
// objects whose names begin with a byte less than or equal to it; the search
// window for name[0] is therefore derived from neighboring fanout entries.
func (i *Index) bounds(name []byte) *bounds {
	var left, right int64

	if name[0] == 0 {
		// If the lower bound is 0, there are no objects before it,
		// start at the beginning of the index file.
		left = 0
	} else {
		// Otherwise, make the lower bound the slot before the given
		// object.
		left = int64(i.fanout[name[0]-1])
	}

	if name[0] == 255 {
		// As above, if the upper bound is the max byte value, make the
		// upper bound the last object in the list.
		right = int64(i.Count())
	} else {
		// Otherwise, make the upper bound the first object which is not
		// within the given slot.
		//
		// NOTE(review): this uses fanout[name[0]+1], one slot wider
		// than strictly necessary (fanout[name[0]] would suffice); the
		// result is still a correct, if slightly larger, window.
		right = int64(i.fanout[name[0]+1])
	}

	return newBounds(left, right)
}
// DecodeIndex decodes an index whose underlying data is supplied by "r".
//
// DecodeIndex reads only the header and fanout table, and does not eagerly
// parse index entries; later lookups read from "r" on demand, so "r" must
// remain readable for the lifetime of the returned *Index.
//
// If there was an error parsing, it will be returned immediately.
func DecodeIndex(r io.ReaderAt, hash hash.Hash) (*Index, error) {
	// Sniff the header to determine which index version (v1 or v2) this
	// data encodes.
	version, err := decodeIndexHeader(r, hash)
	if err != nil {
		return nil, err
	}

	// Read the 256-entry fanout table, which begins immediately after the
	// version-specific header.
	fanout, err := decodeIndexFanout(r, version.Width())
	if err != nil {
		return nil, err
	}

	return &Index{
		version: version,
		fanout:  fanout,

		r: r,
	}, nil
}
84 | func decodeIndexHeader(r io.ReaderAt, hash hash.Hash) (IndexVersion, error) { 85 | hdr := make([]byte, 4) 86 | if _, err := r.ReadAt(hdr, 0); err != nil { 87 | return nil, err 88 | } 89 | 90 | if bytes.Equal(hdr, indexHeader) { 91 | vb := make([]byte, 4) 92 | if _, err := r.ReadAt(vb, 4); err != nil { 93 | return nil, err 94 | } 95 | 96 | version := binary.BigEndian.Uint32(vb) 97 | switch version { 98 | case 1: 99 | return &V1{hash: hash}, nil 100 | case 2: 101 | return &V2{hash: hash}, nil 102 | } 103 | return nil, &UnsupportedVersionErr{uint32(version)} 104 | } 105 | return &V1{hash: hash}, nil 106 | } 107 | 108 | // decodeIndexFanout decodes the fanout table given by "r" and beginning at the 109 | // given offset. 110 | func decodeIndexFanout(r io.ReaderAt, offset int64) ([]uint32, error) { 111 | b := make([]byte, 256*4) 112 | if _, err := r.ReadAt(b, offset); err != nil { 113 | if err == io.EOF { 114 | return nil, ErrShortFanout 115 | } 116 | return nil, err 117 | } 118 | 119 | fanout := make([]uint32, 256) 120 | for i, _ := range fanout { 121 | fanout[i] = binary.BigEndian.Uint32(b[(i * 4):]) 122 | } 123 | 124 | return fanout, nil 125 | } 126 | -------------------------------------------------------------------------------- /pack/index_decode_test.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha1" 6 | "encoding/binary" 7 | "io" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestDecodeIndexV2(t *testing.T) { 14 | buf := make([]byte, 0, indexV2Width+indexFanoutWidth) 15 | buf = append(buf, 0xff, 0x74, 0x4f, 0x63) 16 | buf = append(buf, 0x0, 0x0, 0x0, 0x2) 17 | for i := 0; i < indexFanoutEntries; i++ { 18 | x := make([]byte, 4) 19 | 20 | binary.BigEndian.PutUint32(x, uint32(3)) 21 | 22 | buf = append(buf, x...) 
23 | } 24 | 25 | idx, err := DecodeIndex(bytes.NewReader(buf), sha1.New()) 26 | 27 | assert.NoError(t, err) 28 | assert.EqualValues(t, 3, idx.Count()) 29 | } 30 | 31 | func TestDecodeIndexV2InvalidFanout(t *testing.T) { 32 | buf := make([]byte, 0, indexV2Width+indexFanoutWidth-indexFanoutEntryWidth) 33 | buf = append(buf, 0xff, 0x74, 0x4f, 0x63) 34 | buf = append(buf, 0x0, 0x0, 0x0, 0x2) 35 | buf = append(buf, make([]byte, indexFanoutWidth-1)...) 36 | 37 | idx, err := DecodeIndex(bytes.NewReader(buf), sha1.New()) 38 | 39 | assert.Equal(t, ErrShortFanout, err) 40 | assert.Nil(t, idx) 41 | } 42 | 43 | func TestDecodeIndexV1(t *testing.T) { 44 | idx, err := DecodeIndex(bytes.NewReader(make([]byte, indexFanoutWidth)), sha1.New()) 45 | 46 | assert.NoError(t, err) 47 | assert.EqualValues(t, 0, idx.Count()) 48 | } 49 | 50 | func TestDecodeIndexV1InvalidFanout(t *testing.T) { 51 | idx, err := DecodeIndex(bytes.NewReader(make([]byte, indexFanoutWidth-1)), sha1.New()) 52 | 53 | assert.Equal(t, ErrShortFanout, err) 54 | assert.Nil(t, idx) 55 | } 56 | 57 | func TestDecodeIndexUnsupportedVersion(t *testing.T) { 58 | buf := make([]byte, 0, 4+4) 59 | buf = append(buf, 0xff, 0x74, 0x4f, 0x63) 60 | buf = append(buf, 0x0, 0x0, 0x0, 0x3) 61 | 62 | idx, err := DecodeIndex(bytes.NewReader(buf), sha1.New()) 63 | 64 | assert.EqualError(t, err, "gitobj/pack: unsupported version: 3") 65 | assert.Nil(t, idx) 66 | } 67 | 68 | func TestDecodeIndexEmptyContents(t *testing.T) { 69 | idx, err := DecodeIndex(bytes.NewReader(make([]byte, 0)), sha1.New()) 70 | 71 | assert.Equal(t, io.EOF, err) 72 | assert.Nil(t, idx) 73 | } 74 | -------------------------------------------------------------------------------- /pack/index_entry.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | // IndexEntry specifies data encoded into an entry in the pack index. 
4 | type IndexEntry struct { 5 | // PackOffset is the number of bytes before the associated object in a 6 | // packfile. 7 | PackOffset uint64 8 | } 9 | -------------------------------------------------------------------------------- /pack/index_test.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha1" 6 | "encoding/binary" 7 | "fmt" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | var ( 14 | idx *Index 15 | ) 16 | 17 | func TestIndexEntrySearch(t *testing.T) { 18 | e, err := idx.Entry([]byte{ 19 | 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 20 | 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 21 | }) 22 | 23 | assert.NoError(t, err) 24 | assert.EqualValues(t, 6, e.PackOffset) 25 | } 26 | 27 | func TestIndexEntrySearchClampLeft(t *testing.T) { 28 | e, err := idx.Entry([]byte{ 29 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 30 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 31 | }) 32 | 33 | assert.NoError(t, err) 34 | assert.EqualValues(t, 0, e.PackOffset) 35 | } 36 | 37 | func TestIndexEntrySearchClampRight(t *testing.T) { 38 | e, err := idx.Entry([]byte{ 39 | 0xff, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 40 | 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 41 | }) 42 | 43 | assert.NoError(t, err) 44 | assert.EqualValues(t, 0x4ff, e.PackOffset) 45 | } 46 | 47 | func TestIndexSearchOutOfBounds(t *testing.T) { 48 | e, err := idx.Entry([]byte{ 49 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 50 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 51 | }) 52 | 53 | assert.True(t, IsNotFound(err), "expected err to be 'not found'") 54 | assert.Nil(t, e) 55 | } 56 | 57 | func TestIndexEntryNotFound(t *testing.T) { 58 | e, err := idx.Entry([]byte{ 59 | 0x1, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 60 | 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 0x6, 61 | }) 62 | 63 | 
assert.True(t, IsNotFound(err), "expected err to be 'not found'") 64 | assert.Nil(t, e) 65 | } 66 | 67 | func TestIndexCount(t *testing.T) { 68 | fanout := make([]uint32, 256) 69 | for i := 0; i < len(fanout); i++ { 70 | fanout[i] = uint32(i) 71 | } 72 | 73 | idx := &Index{fanout: fanout} 74 | 75 | assert.EqualValues(t, 255, idx.Count()) 76 | } 77 | 78 | func TestIndexIsNotFound(t *testing.T) { 79 | assert.True(t, IsNotFound(errNotFound), 80 | "expected 'errNotFound' to satisfy 'IsNotFound()'") 81 | } 82 | 83 | func TestIndexIsNotFoundForOtherErrors(t *testing.T) { 84 | assert.False(t, IsNotFound(fmt.Errorf("gitobj/pack: misc")), 85 | "expected 'err' not to satisfy 'IsNotFound()'") 86 | } 87 | 88 | // init generates some fixture data and then constructs an *Index instance using 89 | // it. 90 | func init() { 91 | // eps is the number of SHA1 names generated under each 0x slot. 92 | const eps = 5 93 | 94 | hdr := []byte{ 95 | 0xff, 0x74, 0x4f, 0x63, // Index file v2+ magic header 96 | 0x00, 0x00, 0x00, 0x02, // 4-byte version indicator 97 | } 98 | 99 | // Create a fanout table using uint32s (later marshalled using 100 | // binary.BigEndian). 101 | // 102 | // Since we have an even distribution of SHA1s in the generated index, 103 | // each entry will increase by the number of entries per slot (see: eps 104 | // above). 105 | fanout := make([]uint32, indexFanoutEntries) 106 | for i := 0; i < len(fanout); i++ { 107 | // Begin the index at (i+1), since the fanout table mandates 108 | // objects less than the value at index "i". 109 | fanout[i] = uint32((i + 1) * eps) 110 | } 111 | 112 | offs := make([]uint32, 0, 256*eps) 113 | crcs := make([]uint32, 0, 256*eps) 114 | 115 | names := make([][]byte, 0, 256*eps) 116 | for i := 0; i < 256; i++ { 117 | // For each name, generate a unique SHA using the prefix "i", 118 | // and then suffix "j". 
119 | // 120 | // In other words, when i=1, we will generate: 121 | // []byte{0x1 0x0 0x0 0x0 ...} 122 | // []byte{0x1 0x1 0x1 0x1 ...} 123 | // []byte{0x1 0x2 0x2 0x2 ...} 124 | // 125 | // and etc. 126 | for j := 0; j < eps; j++ { 127 | var sha [20]byte 128 | 129 | sha[0] = byte(i) 130 | for r := 1; r < len(sha); r++ { 131 | sha[r] = byte(j) 132 | } 133 | 134 | cpy := make([]byte, len(sha)) 135 | copy(cpy, sha[:]) 136 | 137 | names = append(names, cpy) 138 | offs = append(offs, uint32((i*eps)+j)) 139 | crcs = append(crcs, 0) 140 | } 141 | } 142 | 143 | // Create a buffer to hold the index contents: 144 | buf := bytes.NewBuffer(hdr) 145 | 146 | // Write each value in the fanout table using a 32bit network byte-order 147 | // integer. 148 | for _, f := range fanout { 149 | binary.Write(buf, binary.BigEndian, f) 150 | } 151 | // Write each SHA1 name to the table next. 152 | for _, name := range names { 153 | buf.Write(name) 154 | } 155 | // Then write each of the CRC values in network byte-order as a 32bit 156 | // unsigned integer. 157 | for _, crc := range crcs { 158 | binary.Write(buf, binary.BigEndian, crc) 159 | } 160 | // Do the same with the offsets. 161 | for _, off := range offs { 162 | binary.Write(buf, binary.BigEndian, off) 163 | } 164 | 165 | idx = &Index{ 166 | fanout: fanout, 167 | // version is unimportant here, use V2 since it's more common in 168 | // the wild. 169 | version: &V2{hash: sha1.New()}, 170 | 171 | // *bytes.Buffer does not implement io.ReaderAt, but 172 | // *bytes.Reader does. 173 | // 174 | // Call (*bytes.Buffer).Bytes() to get the data, and then 175 | // construct a new *bytes.Reader with it to implement 176 | // io.ReaderAt. 
177 | r: bytes.NewReader(buf.Bytes()), 178 | } 179 | } 180 | -------------------------------------------------------------------------------- /pack/index_v1.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "encoding/binary" 5 | "hash" 6 | ) 7 | 8 | // V1 implements IndexVersion for v1 packfiles. 9 | type V1 struct { 10 | hash hash.Hash 11 | } 12 | 13 | // Name implements IndexVersion.Name by returning the 20 byte SHA-1 object name 14 | // for the given entry at offset "at" in the v1 index file "idx". 15 | func (v *V1) Name(idx *Index, at int64) ([]byte, error) { 16 | var sha [MaxHashSize]byte 17 | 18 | hashlen := v.hash.Size() 19 | 20 | if _, err := idx.readAt(sha[:hashlen], v1ShaOffset(at, int64(hashlen))); err != nil { 21 | return nil, err 22 | } 23 | 24 | return sha[:hashlen], nil 25 | } 26 | 27 | // Entry implements IndexVersion.Entry for v1 packfiles by parsing and returning 28 | // the IndexEntry specified at the offset "at" in the given index file. 29 | func (v *V1) Entry(idx *Index, at int64) (*IndexEntry, error) { 30 | var offs [4]byte 31 | if _, err := idx.readAt(offs[:], v1EntryOffset(at, int64(v.hash.Size()))); err != nil { 32 | return nil, err 33 | } 34 | 35 | return &IndexEntry{ 36 | PackOffset: uint64(binary.BigEndian.Uint32(offs[:])), 37 | }, nil 38 | } 39 | 40 | // Width implements IndexVersion.Width() by returning the number of bytes that 41 | // v1 packfile index header occupy. 42 | func (v *V1) Width() int64 { 43 | return indexV1Width 44 | } 45 | 46 | // v1ShaOffset returns the location of the SHA1 of an object given at "at". 47 | func v1ShaOffset(at int64, hashlen int64) int64 { 48 | // Skip forward until the desired entry. 49 | return v1EntryOffset(at, hashlen) + 50 | // Skip past the 4-byte object offset in the desired entry to 51 | // the SHA1. 
52 | indexObjectSmallOffsetWidth 53 | } 54 | 55 | // v1EntryOffset returns the location of the packfile offset for the object 56 | // given at "at". 57 | func v1EntryOffset(at int64, hashlen int64) int64 { 58 | // Skip the L1 fanout table 59 | return indexOffsetV1Start + 60 | // Skip the object entries before the one located at "at" 61 | ((hashlen + indexObjectSmallOffsetWidth) * at) 62 | } 63 | -------------------------------------------------------------------------------- /pack/index_v1_test.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha1" 6 | "crypto/sha256" 7 | "encoding/binary" 8 | "hash" 9 | "testing" 10 | 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | var ( 15 | V1IndexFanout = make([]uint32, indexFanoutEntries) 16 | ) 17 | 18 | func TestIndexV1SearchExact(t *testing.T) { 19 | for _, algo := range []hash.Hash{sha1.New(), sha256.New()} { 20 | index := newV1Index(algo) 21 | v := &V1{hash: algo} 22 | e, err := v.Entry(index, 1) 23 | 24 | assert.NoError(t, err) 25 | assert.EqualValues(t, 2, e.PackOffset) 26 | } 27 | } 28 | 29 | func TestIndexVersionWidthV1(t *testing.T) { 30 | for _, algo := range []hash.Hash{sha1.New(), sha256.New()} { 31 | v := &V1{hash: algo} 32 | assert.EqualValues(t, 0, v.Width()) 33 | } 34 | } 35 | 36 | func newV1Index(hash hash.Hash) *Index { 37 | V1IndexFanout[1] = 1 38 | V1IndexFanout[2] = 2 39 | V1IndexFanout[3] = 3 40 | 41 | for i := 3; i < len(V1IndexFanout); i++ { 42 | V1IndexFanout[i] = 3 43 | } 44 | 45 | fanout := make([]byte, indexFanoutWidth) 46 | for i, n := range V1IndexFanout { 47 | binary.BigEndian.PutUint32(fanout[i*indexFanoutEntryWidth:], n) 48 | } 49 | 50 | hashlen := hash.Size() 51 | entrylen := hashlen + indexObjectCRCWidth 52 | entries := make([]byte, entrylen*3) 53 | 54 | for i := 0; i < 3; i++ { 55 | // For each entry, set the first three bytes to 0 and the 56 | // remainder to the same value. 
That creates an initial 4-byte 57 | // CRC field with the value of i+1, followed by a series of data 58 | // bytes which all have that same value. 59 | for j := entrylen*i + 3; j < entrylen*(i+1); j++ { 60 | entries[j] = byte(i + 1) 61 | } 62 | } 63 | 64 | buf := make([]byte, 0, indexOffsetV1Start) 65 | 66 | buf = append(buf, fanout...) 67 | buf = append(buf, entries...) 68 | 69 | return &Index{ 70 | fanout: V1IndexFanout, 71 | version: &V1{hash: hash}, 72 | r: bytes.NewReader(buf), 73 | } 74 | 75 | } 76 | -------------------------------------------------------------------------------- /pack/index_v2.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "encoding/binary" 5 | "hash" 6 | ) 7 | 8 | // V2 implements IndexVersion for v2 packfiles. 9 | type V2 struct { 10 | hash hash.Hash 11 | } 12 | 13 | // Name implements IndexVersion.Name by returning the 20 byte SHA-1 object name 14 | // for the given entry at offset "at" in the v2 index file "idx". 15 | func (v *V2) Name(idx *Index, at int64) ([]byte, error) { 16 | var sha [MaxHashSize]byte 17 | 18 | hashlen := v.hash.Size() 19 | 20 | if _, err := idx.readAt(sha[:hashlen], v2ShaOffset(at, int64(hashlen))); err != nil { 21 | return nil, err 22 | } 23 | 24 | return sha[:hashlen], nil 25 | } 26 | 27 | // Entry implements IndexVersion.Entry for v2 packfiles by parsing and returning 28 | // the IndexEntry specified at the offset "at" in the given index file. 29 | func (v *V2) Entry(idx *Index, at int64) (*IndexEntry, error) { 30 | var offs [4]byte 31 | 32 | hashlen := v.hash.Size() 33 | 34 | if _, err := idx.readAt(offs[:], v2SmallOffsetOffset(at, int64(idx.Count()), int64(hashlen))); err != nil { 35 | return nil, err 36 | } 37 | 38 | loc := uint64(binary.BigEndian.Uint32(offs[:])) 39 | if loc&0x80000000 > 0 { 40 | // If the most significant bit (MSB) of the offset is set, then 41 | // the offset encodes the indexed location for an 8-byte offset. 
42 | // 43 | // Mask away (offs&0x7fffffff) the MSB to use as an index to 44 | // find the offset of the 8-byte pack offset. 45 | lo := v2LargeOffsetOffset(int64(loc&0x7fffffff), int64(idx.Count()), int64(hashlen)) 46 | 47 | var offs [8]byte 48 | if _, err := idx.readAt(offs[:], lo); err != nil { 49 | return nil, err 50 | } 51 | 52 | loc = binary.BigEndian.Uint64(offs[:]) 53 | } 54 | return &IndexEntry{PackOffset: loc}, nil 55 | } 56 | 57 | // Width implements IndexVersion.Width() by returning the number of bytes that 58 | // v2 packfile index header occupy. 59 | func (v *V2) Width() int64 { 60 | return indexV2Width 61 | } 62 | 63 | // v2ShaOffset returns the offset of a SHA1 given at "at" in the V2 index file. 64 | func v2ShaOffset(at int64, hashlen int64) int64 { 65 | // Skip the packfile index header and the L1 fanout table. 66 | return indexOffsetV2Start + 67 | // Skip until the desired name in the sorted names table. 68 | (hashlen * at) 69 | } 70 | 71 | // v2SmallOffsetOffset returns the offset of an object's small (4-byte) offset 72 | // given by "at". 73 | func v2SmallOffsetOffset(at, total, hashlen int64) int64 { 74 | // Skip the packfile index header and the L1 fanout table. 75 | return indexOffsetV2Start + 76 | // Skip the name table. 77 | (hashlen * total) + 78 | // Skip the CRC table. 79 | (indexObjectCRCWidth * total) + 80 | // Skip until the desired index in the small offsets table. 81 | (indexObjectSmallOffsetWidth * at) 82 | } 83 | 84 | // v2LargeOffsetOffset returns the offset of an object's large (8-byte) offset, 85 | // given by the index "at". 86 | func v2LargeOffsetOffset(at, total, hashlen int64) int64 { 87 | // Skip the packfile index header and the L1 fanout table. 88 | return indexOffsetV2Start + 89 | // Skip the name table. 90 | (hashlen * total) + 91 | // Skip the CRC table. 92 | (indexObjectCRCWidth * total) + 93 | // Skip the small offsets table. 
94 | (indexObjectSmallOffsetWidth * total) + 95 | // Seek to the large offset within the large offset(s) table. 96 | (indexObjectLargeOffsetWidth * at) 97 | } 98 | -------------------------------------------------------------------------------- /pack/index_v2_test.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha1" 6 | "crypto/sha256" 7 | "encoding/binary" 8 | "hash" 9 | "testing" 10 | 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | var ( 15 | V2IndexHeader = []byte{ 16 | 0xff, 0x74, 0x4f, 0x63, 17 | 0x00, 0x00, 0x00, 0x02, 18 | } 19 | V2IndexFanout = make([]uint32, indexFanoutEntries) 20 | 21 | V2IndexCRCs = []byte{ 22 | 0x0, 0x0, 0x0, 0x0, 23 | 0x1, 0x1, 0x1, 0x1, 24 | 0x2, 0x2, 0x2, 0x2, 25 | } 26 | 27 | V2IndexOffsets = []byte{ 28 | 0x00, 0x00, 0x00, 0x01, 29 | 0x00, 0x00, 0x00, 0x02, 30 | 0x80, 0x00, 0x00, 0x01, // use the second large offset 31 | 32 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // filler data 33 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, // large offset 34 | } 35 | ) 36 | 37 | func TestIndexV2EntryExact(t *testing.T) { 38 | for _, algo := range []hash.Hash{sha1.New(), sha256.New()} { 39 | index := newV2Index(algo) 40 | v := &V2{hash: algo} 41 | e, err := v.Entry(index, 1) 42 | 43 | assert.NoError(t, err) 44 | assert.EqualValues(t, 2, e.PackOffset) 45 | } 46 | } 47 | 48 | func TestIndexV2EntryExtendedOffset(t *testing.T) { 49 | for _, algo := range []hash.Hash{sha1.New(), sha256.New()} { 50 | index := newV2Index(algo) 51 | v := &V2{hash: algo} 52 | e, err := v.Entry(index, 2) 53 | 54 | assert.NoError(t, err) 55 | assert.EqualValues(t, 3, e.PackOffset) 56 | } 57 | } 58 | 59 | func TestIndexVersionWidthV2(t *testing.T) { 60 | for _, algo := range []hash.Hash{sha1.New(), sha256.New()} { 61 | v := &V2{hash: algo} 62 | assert.EqualValues(t, 8, v.Width()) 63 | } 64 | } 65 | 66 | func newV2Index(hash hash.Hash) *Index { 67 | 
V2IndexFanout[1] = 1 68 | V2IndexFanout[2] = 2 69 | V2IndexFanout[3] = 3 70 | 71 | for i := 3; i < len(V2IndexFanout); i++ { 72 | V2IndexFanout[i] = 3 73 | } 74 | 75 | fanout := make([]byte, indexFanoutWidth) 76 | for i, n := range V2IndexFanout { 77 | binary.BigEndian.PutUint32(fanout[i*indexFanoutEntryWidth:], n) 78 | } 79 | 80 | hashlen := hash.Size() 81 | names := make([]byte, hashlen*3) 82 | 83 | for i := range names { 84 | names[i] = byte((i / hashlen) + 1) 85 | } 86 | 87 | buf := make([]byte, 0, indexOffsetV2Start+3) 88 | buf = append(buf, V2IndexHeader...) 89 | buf = append(buf, fanout...) 90 | buf = append(buf, names...) 91 | buf = append(buf, V2IndexCRCs...) 92 | buf = append(buf, V2IndexOffsets...) 93 | 94 | return &Index{ 95 | fanout: V2IndexFanout, 96 | version: &V2{hash: hash}, 97 | r: bytes.NewReader(buf), 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /pack/index_version.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | type IndexVersion interface { 4 | // Name returns the name of the object located at the given offset "at", 5 | // in the Index file "idx". 6 | // 7 | // It returns an error if the object at that location could not be 8 | // parsed. 9 | Name(idx *Index, at int64) ([]byte, error) 10 | 11 | // Entry parses and returns the full *IndexEntry located at the offset 12 | // "at" in the Index file "idx". 13 | // 14 | // If there was an error parsing the IndexEntry at that location, it 15 | // will be returned. 16 | Entry(idx *Index, at int64) (*IndexEntry, error) 17 | 18 | // Width returns the number of bytes occupied by the header of a 19 | // particular index version. 
20 | Width() int64 21 | } 22 | -------------------------------------------------------------------------------- /pack/io.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import "io" 4 | 5 | // OffsetReaderAt transforms an io.ReaderAt into an io.Reader by beginning and 6 | // advancing all reads at the given offset. 7 | type OffsetReaderAt struct { 8 | // r is the data source for this instance of *OffsetReaderAt. 9 | r io.ReaderAt 10 | 11 | // o is the number of bytes read from the underlying data source, "r". 12 | // It is incremented upon reads. 13 | o int64 14 | } 15 | 16 | // Read implements io.Reader.Read by reading into the given []byte, "p" from the 17 | // last known offset provided to the OffsetReaderAt. 18 | // 19 | // It returns any error encountered from the underlying data stream, and 20 | // advances the reader forward by "n", the number of bytes read from the 21 | // underlying data stream. 22 | func (r *OffsetReaderAt) Read(p []byte) (n int, err error) { 23 | n, err = r.r.ReadAt(p, r.o) 24 | r.o += int64(n) 25 | 26 | return n, err 27 | } 28 | -------------------------------------------------------------------------------- /pack/io_test.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestOffsetReaderAtReadsAtOffset(t *testing.T) { 12 | bo := &OffsetReaderAt{ 13 | r: bytes.NewReader([]byte{0x0, 0x1, 0x2, 0x3}), 14 | o: 1, 15 | } 16 | 17 | var x1 [1]byte 18 | n1, e1 := bo.Read(x1[:]) 19 | 20 | assert.NoError(t, e1) 21 | assert.Equal(t, 1, n1) 22 | 23 | assert.EqualValues(t, 0x1, x1[0]) 24 | 25 | var x2 [1]byte 26 | n2, e2 := bo.Read(x2[:]) 27 | 28 | assert.NoError(t, e2) 29 | assert.Equal(t, 1, n2) 30 | assert.EqualValues(t, 0x2, x2[0]) 31 | } 32 | 33 | func TestOffsetReaderPropogatesErrors(t *testing.T) { 34 | expected := 
fmt.Errorf("gitobj/pack: testing") 35 | bo := &OffsetReaderAt{ 36 | r: &ErrReaderAt{Err: expected}, 37 | o: 1, 38 | } 39 | 40 | n, err := bo.Read(make([]byte, 1)) 41 | 42 | assert.Equal(t, expected, err) 43 | assert.Equal(t, 0, n) 44 | } 45 | 46 | type ErrReaderAt struct { 47 | Err error 48 | } 49 | 50 | func (e *ErrReaderAt) ReadAt(p []byte, at int64) (n int, err error) { 51 | return 0, e.Err 52 | } 53 | -------------------------------------------------------------------------------- /pack/object.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | // Object is an encapsulation of an object found in a packfile, or a packed 4 | // object. 5 | type Object struct { 6 | // data is the front-most element of the delta-base chain, and when 7 | // resolved, yields the uncompressed data of this object. 8 | data Chain 9 | // typ is the underlying object's type. It is not the type of the 10 | // front-most chain element, rather, the type of the actual object. 11 | typ PackedObjectType 12 | } 13 | 14 | // Unpack resolves the delta-base chain and returns an uncompressed, unpacked, 15 | // and full representation of the data encoded by this object. 16 | // 17 | // If there was any error in unpacking this object, it is returned immediately, 18 | // and the object's data can be assumed to be corrupt. 19 | func (o *Object) Unpack() ([]byte, error) { 20 | return o.data.Unpack() 21 | } 22 | 23 | // Type returns the underlying object's type. Rather than the type of the 24 | // front-most delta-base component, it is the type of the object itself. 
25 | func (o *Object) Type() PackedObjectType { 26 | return o.typ 27 | } 28 | -------------------------------------------------------------------------------- /pack/object_test.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestObjectTypeReturnsObjectType(t *testing.T) { 11 | o := &Object{ 12 | typ: TypeCommit, 13 | } 14 | 15 | assert.Equal(t, TypeCommit, o.Type()) 16 | } 17 | 18 | func TestObjectUnpackUnpacksData(t *testing.T) { 19 | expected := []byte{0x1, 0x2, 0x3, 0x4} 20 | 21 | o := &Object{ 22 | data: &ChainSimple{ 23 | X: expected, 24 | }, 25 | } 26 | 27 | data, err := o.Unpack() 28 | 29 | assert.Equal(t, expected, data) 30 | assert.NoError(t, err) 31 | } 32 | 33 | func TestObjectUnpackPropogatesErrors(t *testing.T) { 34 | expected := fmt.Errorf("gitobj/pack: testing") 35 | 36 | o := &Object{ 37 | data: &ChainSimple{ 38 | Err: expected, 39 | }, 40 | } 41 | 42 | data, err := o.Unpack() 43 | 44 | assert.Nil(t, data) 45 | assert.Equal(t, expected, err) 46 | } 47 | -------------------------------------------------------------------------------- /pack/packfile.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "compress/zlib" 5 | "fmt" 6 | "hash" 7 | "io" 8 | "io/ioutil" 9 | ) 10 | 11 | // Packfile encapsulates the behavior of accessing an unpacked representation of 12 | // all of the objects encoded in a single packfile. 13 | type Packfile struct { 14 | // Version is the version of the packfile. 15 | Version uint32 16 | // Objects is the total number of objects in the packfile. 17 | Objects uint32 18 | // idx is the corresponding "pack-*.idx" file giving the positions of 19 | // objects in this packfile. 20 | idx *Index 21 | 22 | // hash is the hash algorithm used in this pack. 
23 | hash hash.Hash 24 | 25 | // r is an io.ReaderAt that allows read access to the packfile itself. 26 | r io.ReaderAt 27 | } 28 | 29 | // Close closes the packfile if the underlying data stream is closeable. If so, 30 | // it returns any error involved in closing. 31 | func (p *Packfile) Close() error { 32 | var iErr error 33 | if p.idx != nil { 34 | iErr = p.idx.Close() 35 | } 36 | 37 | if close, ok := p.r.(io.Closer); ok { 38 | return close.Close() 39 | } 40 | return iErr 41 | } 42 | 43 | // Object returns a reference to an object packed in the receiving *Packfile. It 44 | // does not attempt to unpack the packfile, rather, that is accomplished by 45 | // calling Unpack() on the returned *Object. 46 | // 47 | // If there was an error loading or buffering the base, it will be returned 48 | // without an object. 49 | // 50 | // If the object given by the SHA-1 name, "name", could not be found, 51 | // (nil, errNotFound) will be returned. 52 | // 53 | // If the object was able to be loaded successfully, it will be returned without 54 | // any error. 55 | func (p *Packfile) Object(name []byte) (*Object, error) { 56 | // First, try and determine the offset of the last entry in the 57 | // delta-base chain by loading it from the corresponding pack index. 58 | entry, err := p.idx.Entry(name) 59 | if err != nil { 60 | if !IsNotFound(err) { 61 | // If the error was not an errNotFound, re-wrap it with 62 | // additional context. 63 | err = fmt.Errorf("gitobj/pack: could not load index: %s", err) 64 | } 65 | return nil, err 66 | } 67 | 68 | // If all goes well, then unpack the object at that given offset. 69 | r, err := p.find(int64(entry.PackOffset)) 70 | if err != nil { 71 | return nil, err 72 | } 73 | 74 | return &Object{ 75 | data: r, 76 | typ: r.Type(), 77 | }, nil 78 | } 79 | 80 | // find finds and returns a Chain element corresponding to the offset of its 81 | // last element as given by the "offset" argument. 
82 | // 83 | // If find returns a ChainBase, it loads that data into memory, but does not 84 | // zlib-flate it. Otherwise, if find returns a ChainDelta, it loads all of the 85 | // leading elements in the chain recursively, but does not apply one delta to 86 | // another. 87 | func (p *Packfile) find(offset int64) (Chain, error) { 88 | // Read the first byte in the chain element. 89 | buf := make([]byte, 1) 90 | if _, err := p.r.ReadAt(buf, offset); err != nil { 91 | return nil, err 92 | } 93 | 94 | // Store the original offset; this will be compared to when loading 95 | // chain elements of type OBJ_OFS_DELTA. 96 | objectOffset := offset 97 | 98 | // Of the first byte, (0123 4567): 99 | // - Bit 0 is the M.S.B., and indicates whether there is more data 100 | // encoded in the length. 101 | // - Bits 1-3 ((buf[0] >> 4) & 0x7) are the object type. 102 | // - Bits 4-7 (buf[0] & 0xf) are the first 4 bits of the variable 103 | // length size of the encoded delta or base. 104 | typ := PackedObjectType((buf[0] >> 4) & 0x7) 105 | size := uint64(buf[0] & 0xf) 106 | shift := uint(4) 107 | offset += 1 108 | 109 | for buf[0]&0x80 != 0 { 110 | // If there is more data to be read, read it. 111 | if _, err := p.r.ReadAt(buf, offset); err != nil { 112 | return nil, err 113 | } 114 | 115 | // And update the size, bitshift, and offset accordingly. 116 | size |= (uint64(buf[0]&0x7f) << shift) 117 | shift += 7 118 | offset += 1 119 | } 120 | 121 | switch typ { 122 | case TypeObjectOffsetDelta, TypeObjectReferenceDelta: 123 | // If the type of delta-base element is a delta, (either 124 | // OBJ_OFS_DELTA, or OBJ_REFS_DELTA), we must load the base, 125 | // which itself could be either of the two above, or a 126 | // OBJ_COMMIT, OBJ_BLOB, etc. 127 | // 128 | // Recursively load the base, and keep track of the updated 129 | // offset. 
130 | base, offset, err := p.findBase(typ, offset, objectOffset) 131 | if err != nil { 132 | return nil, err 133 | } 134 | 135 | // Now load the delta to apply to the base, given at the offset 136 | // "offset" and for length "size". 137 | // 138 | // NB: The delta instructions are zlib compressed, so ensure 139 | // that we uncompress the instructions first. 140 | zr, err := zlib.NewReader(&OffsetReaderAt{ 141 | o: offset, 142 | r: p.r, 143 | }) 144 | if err != nil { 145 | return nil, err 146 | } 147 | 148 | delta, err := ioutil.ReadAll(zr) 149 | if err != nil { 150 | return nil, err 151 | } 152 | 153 | // Then compose the two and return it as a *ChainDelta. 154 | return &ChainDelta{ 155 | base: base, 156 | delta: delta, 157 | }, nil 158 | case TypeCommit, TypeTree, TypeBlob, TypeTag: 159 | // Otherwise, the object's contents are given to be the 160 | // following zlib-compressed data. 161 | // 162 | // The length of the compressed data itself is not known, 163 | // rather, "size" determines the length of the data after 164 | // inflation. 165 | return &ChainBase{ 166 | offset: offset, 167 | size: int64(size), 168 | typ: typ, 169 | 170 | r: p.r, 171 | }, nil 172 | } 173 | // Otherwise, we received an invalid object type. 174 | return nil, errUnrecognizedObjectType 175 | } 176 | 177 | // findBase finds the base (an object, or another delta) for a given 178 | // OBJ_OFS_DELTA or OBJ_REFS_DELTA at the given offset. 179 | // 180 | // It returns the preceding Chain, as well as an updated read offset into the 181 | // underlying packfile data. 182 | // 183 | // If any of the above could not be completed successfully, findBase returns an 184 | // error. 
185 | func (p *Packfile) findBase(typ PackedObjectType, offset, objOffset int64) (Chain, int64, error) { 186 | var baseOffset int64 187 | 188 | hashlen := p.hash.Size() 189 | 190 | // We assume that we have to read at least an object ID's worth (the 191 | // hash length in the case of a OBJ_REF_DELTA, or greater than the 192 | // length of the base offset encoded in an OBJ_OFS_DELTA). 193 | var sha [MaxHashSize]byte 194 | if _, err := p.r.ReadAt(sha[:hashlen], offset); err != nil { 195 | return nil, baseOffset, err 196 | } 197 | 198 | switch typ { 199 | case TypeObjectOffsetDelta: 200 | // If the object is of type OBJ_OFS_DELTA, read a 201 | // variable-length integer, and find the object at that 202 | // location. 203 | i := 0 204 | c := int64(sha[i]) 205 | baseOffset = c & 0x7f 206 | 207 | for c&0x80 != 0 { 208 | i += 1 209 | c = int64(sha[i]) 210 | 211 | baseOffset += 1 212 | baseOffset <<= 7 213 | baseOffset |= c & 0x7f 214 | } 215 | 216 | baseOffset = objOffset - baseOffset 217 | offset += int64(i) + 1 218 | case TypeObjectReferenceDelta: 219 | // If the delta is an OBJ_REFS_DELTA, find the location of its 220 | // base by reading the SHA-1 name and looking it up in the 221 | // corresponding pack index file. 222 | e, err := p.idx.Entry(sha[:hashlen]) 223 | if err != nil { 224 | return nil, baseOffset, err 225 | } 226 | 227 | baseOffset = int64(e.PackOffset) 228 | offset += int64(hashlen) 229 | default: 230 | // If we did not receive an OBJ_OFS_DELTA, or OBJ_REF_DELTA, the 231 | // type given is not a delta-fied type. Return an error. 232 | return nil, baseOffset, fmt.Errorf( 233 | "gitobj/pack: type %s is not deltafied", typ) 234 | } 235 | 236 | // Once we have determined the base offset of the object's chain base, 237 | // read the delta-base chain beginning at that offset. 
238 | r, err := p.find(baseOffset) 239 | return r, offset, err 240 | } 241 | -------------------------------------------------------------------------------- /pack/packfile_decode.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "errors" 7 | "hash" 8 | "io" 9 | ) 10 | 11 | var ( 12 | // packHeader is the expected header that begins all valid packfiles. 13 | packHeader = []byte{'P', 'A', 'C', 'K'} 14 | 15 | // errBadPackHeader is a sentinel error value returned when the given 16 | // pack header does not match the expected one. 17 | errBadPackHeader = errors.New("gitobj/pack: bad pack header") 18 | ) 19 | 20 | // DecodePackfile opens the packfile given by the io.ReaderAt "r" for reading. 21 | // It does not apply any delta-base chains, nor does it do reading otherwise 22 | // beyond the header. 23 | // 24 | // If the header is malformed, or otherwise cannot be read, an error will be 25 | // returned without a corresponding packfile. 
26 | func DecodePackfile(r io.ReaderAt, hash hash.Hash) (*Packfile, error) { 27 | header := make([]byte, 12) 28 | if _, err := r.ReadAt(header[:], 0); err != nil { 29 | return nil, err 30 | } 31 | 32 | if !bytes.HasPrefix(header, packHeader) { 33 | return nil, errBadPackHeader 34 | } 35 | 36 | version := binary.BigEndian.Uint32(header[4:]) 37 | objects := binary.BigEndian.Uint32(header[8:]) 38 | 39 | return &Packfile{ 40 | Version: version, 41 | Objects: objects, 42 | 43 | r: r, 44 | hash: hash, 45 | }, nil 46 | } 47 | -------------------------------------------------------------------------------- /pack/packfile_decode_test.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha1" 6 | "crypto/sha256" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestDecodePackfileDecodesIntegerVersion(t *testing.T) { 13 | p, err := DecodePackfile(bytes.NewReader([]byte{ 14 | 'P', 'A', 'C', 'K', // Pack header. 15 | 0x0, 0x0, 0x0, 0x2, // Pack version. 16 | 0x0, 0x0, 0x0, 0x0, // Number of packed objects. 17 | }), sha1.New()) 18 | 19 | assert.NoError(t, err) 20 | assert.EqualValues(t, 2, p.Version) 21 | } 22 | 23 | func TestDecodePackfileDecodesIntegerCount(t *testing.T) { 24 | p, err := DecodePackfile(bytes.NewReader([]byte{ 25 | 'P', 'A', 'C', 'K', // Pack header. 26 | 0x0, 0x0, 0x0, 0x2, // Pack version. 27 | 0x0, 0x0, 0x1, 0x2, // Number of packed objects. 28 | }), sha256.New()) 29 | 30 | assert.NoError(t, err) 31 | assert.EqualValues(t, 258, p.Objects) 32 | } 33 | 34 | func TestDecodePackfileReportsBadHeaders(t *testing.T) { 35 | p, err := DecodePackfile(bytes.NewReader([]byte{ 36 | 'W', 'R', 'O', 'N', 'G', // Malformed pack header. 37 | 0x0, 0x0, 0x0, 0x0, // Pack version. 38 | 0x0, 0x0, 0x0, 0x0, // Number of packed objects. 
39 | }), sha1.New()) 40 | 41 | assert.Equal(t, errBadPackHeader, err) 42 | assert.Nil(t, p) 43 | } 44 | -------------------------------------------------------------------------------- /pack/packfile_test.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha1" 6 | "encoding/binary" 7 | "encoding/hex" 8 | "fmt" 9 | "sort" 10 | "strings" 11 | "sync/atomic" 12 | "testing" 13 | 14 | "github.com/stretchr/testify/assert" 15 | ) 16 | 17 | func TestPackObjectReturnsObjectWithSingleBaseAtLowOffset(t *testing.T) { 18 | const original = "Hello, world!\n" 19 | compressed, _ := compress(original) 20 | 21 | p := &Packfile{ 22 | idx: IndexWith(map[string]uint32{ 23 | "cccccccccccccccccccccccccccccccccccccccc": 32, 24 | }), 25 | r: bytes.NewReader(append([]byte{ 26 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 27 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 28 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 29 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 30 | 31 | // (0001 1000) (msb=0, type=commit, size=14) 32 | 0x1e}, compressed...), 33 | ), 34 | hash: sha1.New(), 35 | } 36 | 37 | o, err := p.Object(DecodeHex(t, "cccccccccccccccccccccccccccccccccccccccc")) 38 | assert.NoError(t, err) 39 | 40 | assert.Equal(t, TypeCommit, o.Type()) 41 | 42 | unpacked, err := o.Unpack() 43 | assert.Equal(t, []byte(original), unpacked) 44 | assert.NoError(t, err) 45 | } 46 | 47 | func TestPackObjectReturnsObjectWithSingleBaseAtHighOffset(t *testing.T) { 48 | original := strings.Repeat("four", 64) 49 | compressed, _ := compress(original) 50 | 51 | p := &Packfile{ 52 | idx: IndexWith(map[string]uint32{ 53 | "cccccccccccccccccccccccccccccccccccccccc": 32, 54 | }), 55 | r: bytes.NewReader(append([]byte{ 56 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 57 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 58 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 59 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 60 | 61 | // (1001 0000) (msb=1, type=commit, 
size=0) 62 | 0x90, 63 | // (1000 0000) (msb=0, size=1 -> size=256) 64 | 0x10}, 65 | 66 | compressed..., 67 | )), 68 | hash: sha1.New(), 69 | } 70 | 71 | o, err := p.Object(DecodeHex(t, "cccccccccccccccccccccccccccccccccccccccc")) 72 | assert.NoError(t, err) 73 | 74 | assert.Equal(t, TypeCommit, o.Type()) 75 | 76 | unpacked, err := o.Unpack() 77 | assert.Equal(t, []byte(original), unpacked) 78 | assert.NoError(t, err) 79 | } 80 | 81 | func TestPackObjectReturnsObjectWithDeltaBaseOffset(t *testing.T) { 82 | const original = "Hello" 83 | compressed, _ := compress(original) 84 | 85 | delta, err := compress(string([]byte{ 86 | 0x05, // Source size: 5. 87 | 0x0e, // Destination size: 14. 88 | 89 | 0x91, // (1000 0001) (instruction=copy, bitmask=0001) 90 | 0x00, // (0000 0000) (offset=0) 91 | 0x05, // (0000 0101) (size=5) 92 | 93 | 0x09, // (0000 0111) (instruction=add, size=7) 94 | 95 | // Contents: ... 96 | ',', ' ', 'w', 'o', 'r', 'l', 'd', '!', '\n', 97 | })) 98 | 99 | p := &Packfile{ 100 | idx: IndexWith(map[string]uint32{ 101 | "cccccccccccccccccccccccccccccccccccccccc": uint32(32 + 1 + len(compressed)), 102 | }), 103 | r: bytes.NewReader(append(append([]byte{ 104 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 105 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 106 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 107 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 108 | 109 | 0x35, // (0011 0101) (msb=0, type=blob, size=5) 110 | }, compressed...), append([]byte{ 111 | 0x6e, // (0110 1010) (msb=0, type=obj_ofs_delta, size=10) 112 | 0x12, // (0001 0001) (ofs_delta=-17, len(compressed)) 113 | }, delta...)...)), 114 | hash: sha1.New(), 115 | } 116 | 117 | o, err := p.Object(DecodeHex(t, "cccccccccccccccccccccccccccccccccccccccc")) 118 | assert.NoError(t, err) 119 | 120 | assert.Equal(t, TypeBlob, o.Type()) 121 | 122 | unpacked, err := o.Unpack() 123 | assert.Equal(t, []byte(original+", world!\n"), unpacked) 124 | assert.NoError(t, err) 125 | } 126 | 127 | func 
TestPackfileObjectReturnsObjectWithDeltaBaseReference(t *testing.T) { 128 | const original = "Hello!\n" 129 | compressed, _ := compress(original) 130 | 131 | delta, _ := compress(string([]byte{ 132 | 0x07, // Source size: 7. 133 | 0x0e, // Destination size: 14. 134 | 135 | 0x91, // (1001 0001) (copy, smask=0001, omask=0001) 136 | 0x00, // (0000 0000) (offset=0) 137 | 0x05, // (0000 0101) (size=5) 138 | 139 | 0x7, // (0000 0111) (add, length=6) 140 | ',', ' ', 'w', 'o', 'r', 'l', 'd', // (data ...) 141 | 142 | 0x91, // (1001 0001) (copy, smask=0001, omask=0001) 143 | 0x05, // (0000 0101) (offset=5) 144 | 0x02, // (0000 0010) (size=2) 145 | })) 146 | 147 | p := &Packfile{ 148 | idx: IndexWith(map[string]uint32{ 149 | "cccccccccccccccccccccccccccccccccccccccc": 32, 150 | "dddddddddddddddddddddddddddddddddddddddd": 52, 151 | }), 152 | r: bytes.NewReader(append(append([]byte{ 153 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 154 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 155 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 156 | 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 157 | 158 | 0x37, // (0011 0101) (msb=0, type=blob, size=7) 159 | }, compressed...), append([]byte{ 160 | 0x7f, // (0111 1111) (msb=0, type=obj_ref_delta, size=15) 161 | 162 | // SHA-1 "cccccccccccccccccccccccccccccccccccccccc", 163 | // original blob contents is "Hello!\n" 164 | 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 165 | 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 166 | }, delta...)...)), 167 | hash: sha1.New(), 168 | } 169 | 170 | o, err := p.Object(DecodeHex(t, "dddddddddddddddddddddddddddddddddddddddd")) 171 | assert.NoError(t, err) 172 | 173 | assert.Equal(t, TypeBlob, o.Type()) 174 | 175 | unpacked, err := o.Unpack() 176 | assert.Equal(t, []byte("Hello, world!\n"), unpacked) 177 | assert.NoError(t, err) 178 | } 179 | 180 | func TestPackfileClosesReadClosers(t *testing.T) { 181 | r := new(ReaderAtCloser) 182 | p := &Packfile{ 183 | r: r, 184 | } 185 | 186 | assert.NoError(t, 
p.Close()) 187 | assert.EqualValues(t, 1, r.N) 188 | } 189 | 190 | func TestPackfileClosePropogatesCloseErrors(t *testing.T) { 191 | e := fmt.Errorf("gitobj/pack: testing") 192 | p := &Packfile{ 193 | r: &ReaderAtCloser{E: e}, 194 | } 195 | 196 | assert.Equal(t, e, p.Close()) 197 | } 198 | 199 | type ReaderAtCloser struct { 200 | E error 201 | N uint64 202 | } 203 | 204 | func (r *ReaderAtCloser) ReadAt(p []byte, at int64) (int, error) { 205 | return 0, nil 206 | } 207 | 208 | func (r *ReaderAtCloser) Close() error { 209 | atomic.AddUint64(&r.N, 1) 210 | return r.E 211 | } 212 | 213 | func IndexWith(offsets map[string]uint32) *Index { 214 | header := []byte{ 215 | 0xff, 0x74, 0x4f, 0x63, 216 | 0x00, 0x00, 0x00, 0x02, 217 | } 218 | 219 | ns := make([][]byte, 0, len(offsets)) 220 | for name, _ := range offsets { 221 | x, _ := hex.DecodeString(name) 222 | ns = append(ns, x) 223 | } 224 | sort.Slice(ns, func(i, j int) bool { 225 | return bytes.Compare(ns[i], ns[j]) < 0 226 | }) 227 | 228 | fanout := make([]uint32, 256) 229 | for i := 0; i < len(fanout); i++ { 230 | var n uint32 231 | 232 | for _, name := range ns { 233 | if name[0] <= byte(i) { 234 | n++ 235 | } 236 | } 237 | 238 | fanout[i] = n 239 | } 240 | 241 | crcs := make([]byte, 4*len(offsets)) 242 | for i, _ := range ns { 243 | binary.BigEndian.PutUint32(crcs[i*4:], 0) 244 | } 245 | 246 | offs := make([]byte, 4*len(offsets)) 247 | for i, name := range ns { 248 | binary.BigEndian.PutUint32(offs[i*4:], offsets[hex.EncodeToString(name)]) 249 | } 250 | 251 | buf := make([]byte, 0) 252 | buf = append(buf, header...) 253 | for _, f := range fanout { 254 | x := make([]byte, 4) 255 | binary.BigEndian.PutUint32(x, f) 256 | 257 | buf = append(buf, x...) 258 | } 259 | for _, n := range ns { 260 | buf = append(buf, n...) 261 | } 262 | buf = append(buf, crcs...) 263 | buf = append(buf, offs...) 
264 | 265 | return &Index{ 266 | fanout: fanout, 267 | r: bytes.NewReader(buf), 268 | 269 | version: &V2{hash: sha1.New()}, 270 | } 271 | } 272 | 273 | func DecodeHex(t *testing.T, str string) []byte { 274 | b, err := hex.DecodeString(str) 275 | if err != nil { 276 | t.Fatalf("gitobj/pack: unexpected hex.DecodeString error: %s", err) 277 | } 278 | 279 | return b 280 | } 281 | -------------------------------------------------------------------------------- /pack/set.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "fmt" 5 | "hash" 6 | "os" 7 | "path/filepath" 8 | "regexp" 9 | "sort" 10 | "strings" 11 | 12 | "github.com/git-lfs/gitobj/v2/errors" 13 | ) 14 | 15 | // Set allows access of objects stored across a set of packfiles. 16 | type Set struct { 17 | // m maps the leading byte of a SHA-1 object name to a set of packfiles 18 | // that might contain that object, in order of which packfile is most 19 | // likely to contain that object. 20 | m map[byte][]*Packfile 21 | 22 | // closeFn is a function that is run by Close(), designated to free 23 | // resources held by the *Set, like open packfiles. 24 | closeFn func() error 25 | } 26 | 27 | var ( 28 | // nameRe is a regular expression that matches the basename of a 29 | // filepath that is a packfile. 30 | // 31 | // It includes one matchgroup, which is the SHA-1 name of the pack. 32 | nameRe = regexp.MustCompile(`^(.*)\.pack$`) 33 | ) 34 | 35 | // NewSet creates a new *Set of all packfiles found in a given object database's 36 | // root (i.e., "/path/to/repo/.git/objects"). 37 | // 38 | // It finds all packfiles in the "pack" subdirectory, and instantiates a *Set 39 | // containing them. If there was an error parsing the packfiles in that 40 | // directory, or the directory was otherwise unable to be observed, NewSet 41 | // returns that error. 
42 | func NewSet(db string, algo hash.Hash) (*Set, error) { 43 | pd := filepath.Join(db, "pack") 44 | 45 | paths, err := filepath.Glob(filepath.Join(escapeGlobPattern(pd), "*.pack")) 46 | if err != nil { 47 | return nil, err 48 | } 49 | 50 | packs := make([]*Packfile, 0, len(paths)) 51 | 52 | for _, path := range paths { 53 | submatch := nameRe.FindStringSubmatch(filepath.Base(path)) 54 | if len(submatch) != 2 { 55 | continue 56 | } 57 | 58 | name := submatch[1] 59 | 60 | idxf, err := os.Open(filepath.Join(pd, fmt.Sprintf("%s.idx", name))) 61 | if err != nil { 62 | // We have a pack (since it matched the regex), but the 63 | // index is missing or unusable. Skip this pack and 64 | // continue on with the next one, as Git does. 65 | if idxf != nil { 66 | // In the unlikely event that we did open a 67 | // file, close it, but discard any error in 68 | // doing so. 69 | idxf.Close() 70 | } 71 | continue 72 | } 73 | 74 | packf, err := os.Open(filepath.Join(pd, fmt.Sprintf("%s.pack", name))) 75 | if err != nil { 76 | return nil, err 77 | } 78 | 79 | pack, err := DecodePackfile(packf, algo) 80 | if err != nil { 81 | return nil, err 82 | } 83 | 84 | idx, err := DecodeIndex(idxf, algo) 85 | if err != nil { 86 | return nil, err 87 | } 88 | 89 | pack.idx = idx 90 | 91 | packs = append(packs, pack) 92 | } 93 | return NewSetPacks(packs...), nil 94 | } 95 | 96 | // globEscapes uses these escapes because filepath.Glob does not understand 97 | // backslash escapes on Windows. 98 | var globEscapes = map[string]string{ 99 | "*": "[*]", 100 | "?": "[?]", 101 | "[": "[[]", 102 | } 103 | 104 | func escapeGlobPattern(s string) string { 105 | for char, escape := range globEscapes { 106 | s = strings.Replace(s, char, escape, -1) 107 | } 108 | return s 109 | } 110 | 111 | // NewSetPacks creates a new *Set from the given packfiles. 
func NewSetPacks(packs ...*Packfile) *Set {
	m := make(map[byte][]*Packfile)

	for i := 0; i < 256; i++ {
		n := byte(i)

		for j := 0; j < len(packs); j++ {
			pack := packs[j]

			// The index fanout table is cumulative: entry n holds
			// the number of objects whose name's first byte is
			// <= n. The count of objects beginning with exactly n
			// is therefore the difference from the previous entry
			// (or the entry itself when n == 0).
			var count uint32
			if n == 0 {
				count = pack.idx.fanout[n]
			} else {
				count = pack.idx.fanout[n] - pack.idx.fanout[n-1]
			}

			if count > 0 {
				m[n] = append(m[n], pack)
			}
		}

		// Rank the candidate packs for this leading byte in
		// descending order of their cumulative fanout value at n, so
		// the pack most likely to hold a matching object is searched
		// first.
		sort.Slice(m[n], func(i, j int) bool {
			ni := m[n][i].idx.fanout[n]
			nj := m[n][j].idx.fanout[n]

			return ni > nj
		})
	}

	return &Set{
		m: m,
		// Closing the set closes every packfile it was built from,
		// stopping at the first error.
		closeFn: func() error {
			for _, pack := range packs {
				if err := pack.Close(); err != nil {
					return err
				}
			}
			return nil
		},
	}
}

// Close closes all open packfiles, returning an error if one was encountered.
func (s *Set) Close() error {
	if s.closeFn == nil {
		return nil
	}
	return s.closeFn()
}

// Object opens (but does not unpack, or, apply the delta-base chain) a given
// object in the first packfile that matches it.
//
// Object searches packfiles contained in the set in order of how many objects
// they have that begin with the first byte of the given SHA-1 "name", in
// descending order.
//
// If the object was unable to be found in any of the packfiles, (nil,
// ErrNotFound) will be returned.
//
// If there was otherwise an error opening the object for reading from any of
// the packfiles, it will be returned, and no other packfiles will be searched.
//
// Otherwise, the object will be returned without error.
176 | func (s *Set) Object(name []byte) (*Object, error) { 177 | return s.each(name, func(p *Packfile) (*Object, error) { 178 | return p.Object(name) 179 | }) 180 | } 181 | 182 | // iterFn is a function that takes a given packfile and opens an object from it. 183 | type iterFn func(p *Packfile) (o *Object, err error) 184 | 185 | // each executes the given iterFn "fn" on each Packfile that has any objects 186 | // beginning with a prefix of the SHA-1 "name", in order of which packfiles have 187 | // the most objects beginning with that prefix. 188 | // 189 | // If any invocation of "fn" returns a non-nil error, it will either be a) 190 | // returned immediately, if the error is not ErrIsNotFound, or b) continued 191 | // immediately, if the error is ErrNotFound. 192 | // 193 | // If no packfiles match the given file, return errors.NoSuchObject, along with 194 | // no object. 195 | func (s *Set) each(name []byte, fn iterFn) (*Object, error) { 196 | var key byte 197 | if len(name) > 0 { 198 | key = name[0] 199 | } 200 | 201 | for _, pack := range s.m[key] { 202 | o, err := fn(pack) 203 | if err != nil { 204 | if IsNotFound(err) { 205 | continue 206 | } 207 | return nil, err 208 | } 209 | return o, nil 210 | } 211 | 212 | return nil, errors.NoSuchObject(name) 213 | } 214 | -------------------------------------------------------------------------------- /pack/set_test.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestSetOpenOpensAPackedObject(t *testing.T) { 12 | const sha = "decafdecafdecafdecafdecafdecafdecafdecaf" 13 | const data = "Hello, world!\n" 14 | compressed, _ := compress(data) 15 | 16 | set := NewSetPacks(&Packfile{ 17 | idx: IndexWith(map[string]uint32{ 18 | sha: 0, 19 | }), 20 | r: bytes.NewReader(append([]byte{0x3e}, compressed...)), 21 | }) 22 | 23 | 
o, err := set.Object(DecodeHex(t, sha)) 24 | 25 | assert.NoError(t, err) 26 | assert.Equal(t, TypeBlob, o.Type()) 27 | 28 | unpacked, err := o.Unpack() 29 | assert.NoError(t, err) 30 | assert.Equal(t, []byte(data), unpacked) 31 | } 32 | 33 | func TestSetOpenOpensPackedObjectsInPackOrder(t *testing.T) { 34 | p1 := &Packfile{ 35 | Objects: 1, 36 | 37 | idx: IndexWith(map[string]uint32{ 38 | "aa00000000000000000000000000000000000000": 1, 39 | }), 40 | r: bytes.NewReader(nil), 41 | } 42 | p2 := &Packfile{ 43 | Objects: 2, 44 | 45 | idx: IndexWith(map[string]uint32{ 46 | "aa11111111111111111111111111111111111111": 1, 47 | "aa22222222222222222222222222222222222222": 2, 48 | }), 49 | r: bytes.NewReader(nil), 50 | } 51 | p3 := &Packfile{ 52 | Objects: 3, 53 | 54 | idx: IndexWith(map[string]uint32{ 55 | "aa33333333333333333333333333333333333333": 3, 56 | "aa44444444444444444444444444444444444444": 4, 57 | "aa55555555555555555555555555555555555555": 5, 58 | }), 59 | r: bytes.NewReader(nil), 60 | } 61 | 62 | set := NewSetPacks(p1, p2, p3) 63 | 64 | var visited []*Packfile 65 | 66 | set.each( 67 | DecodeHex(t, "aa55555555555555555555555555555555555555"), 68 | func(p *Packfile) (*Object, error) { 69 | visited = append(visited, p) 70 | return nil, errNotFound 71 | }, 72 | ) 73 | 74 | require.Len(t, visited, 3) 75 | assert.EqualValues(t, visited[0].Objects, 3) 76 | assert.EqualValues(t, visited[1].Objects, 2) 77 | assert.EqualValues(t, visited[2].Objects, 1) 78 | } 79 | -------------------------------------------------------------------------------- /pack/storage.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "hash" 5 | "io" 6 | ) 7 | 8 | // Storage implements the storage.Storage interface. 9 | type Storage struct { 10 | packs *Set 11 | } 12 | 13 | // NewStorage returns a new storage object based on a pack set. 
14 | func NewStorage(root string, algo hash.Hash) (*Storage, error) { 15 | packs, err := NewSet(root, algo) 16 | if err != nil { 17 | return nil, err 18 | } 19 | return &Storage{packs: packs}, nil 20 | } 21 | 22 | // Open implements the storage.Storage.Open interface. 23 | func (f *Storage) Open(oid []byte) (r io.ReadCloser, err error) { 24 | obj, err := f.packs.Object(oid) 25 | if err != nil { 26 | return nil, err 27 | } 28 | return &delayedObjectReader{obj: obj}, nil 29 | } 30 | 31 | // Open implements the storage.Storage.Open interface. 32 | func (f *Storage) Close() error { 33 | return f.packs.Close() 34 | } 35 | 36 | // IsCompressed returns false, because data returned is already decompressed. 37 | func (f *Storage) IsCompressed() bool { 38 | return false 39 | } 40 | -------------------------------------------------------------------------------- /pack/type.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | ) 7 | 8 | // PackedObjectType is a constant type that is defined for all valid object 9 | // types that a packed object can represent. 10 | type PackedObjectType uint8 11 | 12 | const ( 13 | // TypeNone is the zero-value for PackedObjectType, and represents the 14 | // absence of a type. 15 | TypeNone PackedObjectType = iota 16 | // TypeCommit is the PackedObjectType for commit objects. 17 | TypeCommit 18 | // TypeTree is the PackedObjectType for tree objects. 19 | TypeTree 20 | // Typeblob is the PackedObjectType for blob objects. 21 | TypeBlob 22 | // TypeTag is the PackedObjectType for tag objects. 23 | TypeTag 24 | 25 | // TypeObjectOffsetDelta is the type for OBJ_OFS_DELTA-typed objects. 26 | TypeObjectOffsetDelta PackedObjectType = 6 27 | // TypeObjectReferenceDelta is the type for OBJ_REF_DELTA-typed objects. 
28 | TypeObjectReferenceDelta PackedObjectType = 7 29 | ) 30 | 31 | // String implements fmt.Stringer and returns an encoding of the type valid for 32 | // use in the loose object format protocol (see: package 'gitobj' for more). 33 | // 34 | // If the receiving instance is not defined, String() will panic(). 35 | func (t PackedObjectType) String() string { 36 | switch t { 37 | case TypeNone: 38 | return "" 39 | case TypeCommit: 40 | return "commit" 41 | case TypeTree: 42 | return "tree" 43 | case TypeBlob: 44 | return "blob" 45 | case TypeTag: 46 | return "tag" 47 | case TypeObjectOffsetDelta: 48 | return "obj_ofs_delta" 49 | case TypeObjectReferenceDelta: 50 | return "obj_ref_delta" 51 | } 52 | 53 | panic(fmt.Sprintf("gitobj/pack: unknown object type: %d", t)) 54 | } 55 | 56 | var ( 57 | errUnrecognizedObjectType = errors.New("gitobj/pack: unrecognized object type") 58 | ) 59 | -------------------------------------------------------------------------------- /pack/type_test.go: -------------------------------------------------------------------------------- 1 | package pack 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | type PackedObjectStringTestCase struct { 11 | T PackedObjectType 12 | 13 | Expected string 14 | Panic bool 15 | } 16 | 17 | func (c *PackedObjectStringTestCase) Assert(t *testing.T) { 18 | if c.Panic { 19 | defer func() { 20 | err := recover() 21 | 22 | if err == nil { 23 | t.Fatalf("gitobj/pack: expected panic()") 24 | } 25 | 26 | assert.Equal(t, c.Expected, fmt.Sprintf("%s", err)) 27 | }() 28 | } 29 | 30 | assert.Equal(t, c.Expected, c.T.String()) 31 | } 32 | 33 | func TestPackedObjectTypeString(t *testing.T) { 34 | for desc, c := range map[string]*PackedObjectStringTestCase{ 35 | "TypeNone": {T: TypeNone, Expected: ""}, 36 | 37 | "TypeCommit": {T: TypeCommit, Expected: "commit"}, 38 | "TypeTree": {T: TypeTree, Expected: "tree"}, 39 | "TypeBlob": {T: TypeBlob, Expected: "blob"}, 40 | 
"TypeTag": {T: TypeTag, Expected: "tag"}, 41 | 42 | "TypeObjectOffsetDelta": {T: TypeObjectOffsetDelta, 43 | Expected: "obj_ofs_delta"}, 44 | "TypeObjectReferenceDelta": {T: TypeObjectReferenceDelta, 45 | Expected: "obj_ref_delta"}, 46 | 47 | "unknown type": {T: PackedObjectType(5), Panic: true, 48 | Expected: "gitobj/pack: unknown object type: 5"}, 49 | } { 50 | t.Run(desc, c.Assert) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /script/cibuild: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | go test ./... 4 | -------------------------------------------------------------------------------- /storage/backend.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | // Backend is an encapsulation of a set of read-only and read-write interfaces 4 | // for reading and writing objects. 5 | type Backend interface { 6 | // Storage returns a read source and optionally a write source. 7 | // Generally, the write location, if present, should also be a read 8 | // location. 9 | Storage() (Storage, WritableStorage) 10 | } 11 | -------------------------------------------------------------------------------- /storage/decompressing_readcloser.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "compress/zlib" 5 | "io" 6 | ) 7 | 8 | // decompressingReadCloser wraps zlib.NewReader to ensure that both the zlib 9 | // reader and its underlying type are closed. 
type decompressingReadCloser struct {
	// r is the underlying, compressed reader.
	r io.ReadCloser
	// zr is the zlib reader layered on top of r.
	zr io.ReadCloser
}

// newDecompressingReadCloser wraps "r" in a zlib reader. It returns an error
// if a valid zlib stream could not be started on "r".
func newDecompressingReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
	zr, err := zlib.NewReader(r)
	if err != nil {
		return nil, err
	}
	return &decompressingReadCloser{r: r, zr: zr}, nil
}

// Read implements io.ReadCloser by reading decompressed data from the zlib
// reader.
func (d *decompressingReadCloser) Read(b []byte) (int, error) {
	return d.zr.Read(b)
}

// Close implements io.ReadCloser by closing both the zlib reader and the
// underlying reader. The zlib reader is closed first; if that fails, the
// underlying reader is left open and the error is returned.
func (d *decompressingReadCloser) Close() error {
	if err := d.zr.Close(); err != nil {
		return err
	}
	return d.r.Close()
}
--------------------------------------------------------------------------------
/storage/multi_storage.go:
--------------------------------------------------------------------------------
package storage

import (
	"io"

	"github.com/git-lfs/gitobj/v2/errors"
)

// multiStorage is a read-only Storage backed by an ordered list of other
// Storage implementations, consulted in order.
type multiStorage struct {
	impls []Storage
}

// MultiStorage composes the given Storage implementations into a single
// read-only Storage that searches each of them in the order given.
func MultiStorage(args ...Storage) Storage {
	return &multiStorage{impls: args}
}

// Open returns a handle on an existing object keyed by the given object
// ID. It returns an error if that file does not already exist.
func (m *multiStorage) Open(oid []byte) (f io.ReadCloser, err error) {
	for _, s := range m.impls {
		// Note: these f and err shadow the named return values, which
		// are never assigned directly.
		f, err := s.Open(oid)
		if err != nil {
			// A miss in one backend is not fatal; fall through
			// and try the next one.
			if errors.IsNoSuchObject(err) {
				continue
			}
			return nil, err
		}
		// Normalize the result: callers of a multiStorage always
		// receive already-decompressed data (see IsCompressed).
		if s.IsCompressed() {
			return newDecompressingReadCloser(f)
		}
		return f, nil
	}
	return nil, errors.NoSuchObject(oid)
}

// Close closes the filesystem, after which no more operations are
// allowed.
40 | func (m *multiStorage) Close() error { 41 | for _, s := range m.impls { 42 | if err := s.Close(); err != nil { 43 | return err 44 | } 45 | } 46 | return nil 47 | } 48 | 49 | // Compressed indicates whether data read from this storage source will 50 | // be zlib-compressed. 51 | func (m *multiStorage) IsCompressed() bool { 52 | // To ensure we can read from any Storage type, we automatically 53 | // decompress items if they need it. 54 | return false 55 | } 56 | -------------------------------------------------------------------------------- /storage/storage.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import "io" 4 | 5 | // Storage implements an interface for reading, but not writing, objects in an 6 | // object database. 7 | type Storage interface { 8 | // Open returns a handle on an existing object keyed by the given object 9 | // ID. It returns an error if that file does not already exist. 10 | Open(oid []byte) (f io.ReadCloser, err error) 11 | 12 | // Close closes the filesystem, after which no more operations are 13 | // allowed. 14 | Close() error 15 | 16 | // Compressed indicates whether data read from this storage source will 17 | // be zlib-compressed. 18 | IsCompressed() bool 19 | } 20 | 21 | // WritableStorage implements an interface for reading and writing objects in 22 | // an object database. 23 | type WritableStorage interface { 24 | Storage 25 | 26 | // Store copies the data given in "r" to the unique object path given by 27 | // "oid". It returns an error if that file already exists (acting as if 28 | // the `os.O_EXCL` mode is given in a bitmask to os.Open). 
29 | Store(oid []byte, r io.Reader) (n int64, err error) 30 | } 31 | -------------------------------------------------------------------------------- /storer.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import "io" 4 | 5 | // storer implements a storage engine for reading, writing, and creating 6 | // io.ReadWriters that can store information about loose objects 7 | type storer interface { 8 | // Open returns a handle on an existing object keyed by the given SHA. 9 | // It returns an error if that file does not already exist. 10 | Open(sha []byte) (f io.ReadCloser, err error) 11 | 12 | // Store copies the data given in "r" to the unique object path given by 13 | // "sha". It returns an error if that file already exists (acting as if 14 | // the `os.O_EXCL` mode is given in a bitmask to os.Open). 15 | Store(sha []byte, r io.Reader) (n int64, err error) 16 | } 17 | -------------------------------------------------------------------------------- /tag.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "encoding/hex" 7 | "fmt" 8 | "hash" 9 | "io" 10 | "strings" 11 | ) 12 | 13 | type Tag struct { 14 | Object []byte 15 | ObjectType ObjectType 16 | Name string 17 | Tagger string 18 | 19 | Message string 20 | } 21 | 22 | // Decode implements Object.Decode and decodes the uncompressed tag being 23 | // read. It returns the number of uncompressed bytes being consumed off of the 24 | // stream, which should be strictly equal to the size given. 25 | // 26 | // If any error was encountered along the way it will be returned, and the 27 | // receiving *Tag is considered invalid. 
func (t *Tag) Decode(hash hash.Hash, r io.Reader, size int64) (int, error) {
	// Read at most "size" bytes, line by line. The "hash" parameter is
	// unused in this method; presumably it exists to satisfy a shared
	// Object.Decode signature — confirm against the other Object types.
	//
	// NOTE(review): bufio.Scanner enforces a maximum token length
	// (bufio.MaxScanTokenSize, 64KiB by default), so an extremely long
	// header or message line would surface as scanner.Err() — verify that
	// is acceptable for tag objects.
	scanner := bufio.NewScanner(io.LimitReader(r, size))

	var (
		// finishedHeaders flips to true at the first blank line; every
		// subsequent line belongs to the tag message.
		finishedHeaders bool
		message         []string
	)

	for scanner.Scan() {
		if finishedHeaders {
			message = append(message, scanner.Text())
		} else {
			if len(scanner.Bytes()) == 0 {
				finishedHeaders = true
				continue
			}

			// Headers have the form "<key> <value>"; split only on
			// the first space so values may contain spaces.
			parts := strings.SplitN(scanner.Text(), " ", 2)
			if len(parts) < 2 {
				return 0, fmt.Errorf("gitobj: invalid tag header: %s", scanner.Text())
			}

			switch parts[0] {
			case "object":
				// The "object" header carries the hex-encoded
				// name of the tagged object; store it in
				// binary form.
				sha, err := hex.DecodeString(parts[1])
				if err != nil {
					return 0, fmt.Errorf("gitobj: unable to decode SHA-1: %s", err)
				}

				t.Object = sha
			case "type":
				t.ObjectType = ObjectTypeFromString(parts[1])
			case "tag":
				t.Name = parts[1]
			case "tagger":
				t.Tagger = parts[1]
			default:
				return 0, fmt.Errorf("gitobj: unknown tag header: %s", parts[0])
			}
		}
	}

	if err := scanner.Err(); err != nil {
		return 0, err
	}

	// Re-join the message lines; any trailing newline on the final line is
	// not preserved by the scanner.
	t.Message = strings.Join(message, "\n")

	// The full "size" is reported as consumed, per the documented
	// contract, even though the scanner does not track bytes itself.
	return int(size), nil
}

// Encode encodes the Tag's contents to the given io.Writer, "w". If there was
// any error copying the Tag's contents, that error will be returned.
//
// Otherwise, the number of bytes written will be returned.
83 | func (t *Tag) Encode(w io.Writer) (int, error) { 84 | headers := []string{ 85 | fmt.Sprintf("object %s", hex.EncodeToString(t.Object)), 86 | fmt.Sprintf("type %s", t.ObjectType), 87 | fmt.Sprintf("tag %s", t.Name), 88 | fmt.Sprintf("tagger %s", t.Tagger), 89 | } 90 | 91 | return fmt.Fprintf(w, "%s\n\n%s", strings.Join(headers, "\n"), t.Message) 92 | } 93 | 94 | // Equal returns whether the receiving and given Tags are equal, or in other 95 | // words, whether they are represented by the same SHA-1 when saved to the 96 | // object database. 97 | func (t *Tag) Equal(other *Tag) bool { 98 | if (t == nil) != (other == nil) { 99 | return false 100 | } 101 | 102 | if t != nil { 103 | return bytes.Equal(t.Object, other.Object) && 104 | t.ObjectType == other.ObjectType && 105 | t.Name == other.Name && 106 | t.Tagger == other.Tagger && 107 | t.Message == other.Message 108 | } 109 | 110 | return true 111 | } 112 | 113 | // Type implements Object.ObjectType by returning the correct object type for 114 | // Tags, TagObjectType. 
115 | func (t *Tag) Type() ObjectType { 116 | return TagObjectType 117 | } 118 | -------------------------------------------------------------------------------- /tag_test.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha1" 6 | "fmt" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestTagTypeReturnsCorrectObjectType(t *testing.T) { 13 | assert.Equal(t, TagObjectType, new(Tag).Type()) 14 | } 15 | 16 | func TestTagEncode(t *testing.T) { 17 | tag := &Tag{ 18 | Object: []byte("aaaaaaaaaaaaaaaaaaaa"), 19 | ObjectType: CommitObjectType, 20 | Name: "v2.4.0", 21 | Tagger: "A U Thor ", 22 | 23 | Message: "The quick brown fox jumps over the lazy dog.", 24 | } 25 | 26 | buf := new(bytes.Buffer) 27 | 28 | n, err := tag.Encode(buf) 29 | 30 | assert.Nil(t, err) 31 | assert.EqualValues(t, buf.Len(), n) 32 | 33 | assertLine(t, buf, "object 6161616161616161616161616161616161616161") 34 | assertLine(t, buf, "type commit") 35 | assertLine(t, buf, "tag v2.4.0") 36 | assertLine(t, buf, "tagger A U Thor ") 37 | assertLine(t, buf, "") 38 | assertLine(t, buf, "The quick brown fox jumps over the lazy dog.") 39 | 40 | assert.Equal(t, 0, buf.Len()) 41 | } 42 | 43 | func TestTagDecode(t *testing.T) { 44 | from := new(bytes.Buffer) 45 | 46 | fmt.Fprintf(from, "object 6161616161616161616161616161616161616161\n") 47 | fmt.Fprintf(from, "type commit\n") 48 | fmt.Fprintf(from, "tag v2.4.0\n") 49 | fmt.Fprintf(from, "tagger A U Thor \n") 50 | fmt.Fprintf(from, "\n") 51 | fmt.Fprintf(from, "The quick brown fox jumps over the lazy dog.\n") 52 | 53 | flen := from.Len() 54 | 55 | tag := new(Tag) 56 | n, err := tag.Decode(sha1.New(), from, int64(flen)) 57 | 58 | assert.Nil(t, err) 59 | assert.Equal(t, n, flen) 60 | 61 | assert.Equal(t, []byte("aaaaaaaaaaaaaaaaaaaa"), tag.Object) 62 | assert.Equal(t, CommitObjectType, tag.ObjectType) 63 | assert.Equal(t, "v2.4.0", tag.Name) 
64 | assert.Equal(t, "A U Thor ", tag.Tagger) 65 | assert.Equal(t, "The quick brown fox jumps over the lazy dog.", tag.Message) 66 | } 67 | -------------------------------------------------------------------------------- /tree.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "fmt" 7 | "hash" 8 | "io" 9 | "sort" 10 | "strconv" 11 | "strings" 12 | 13 | "github.com/git-lfs/gitobj/v2/pack" 14 | ) 15 | 16 | // We define these here instead of using the system ones because not all 17 | // operating systems use the traditional values. For example, zOS uses 18 | // different values. 19 | const ( 20 | sIFMT = int32(0170000) 21 | sIFREG = int32(0100000) 22 | sIFDIR = int32(0040000) 23 | sIFLNK = int32(0120000) 24 | sIFGITLINK = int32(0160000) 25 | ) 26 | 27 | // Tree encapsulates a Git tree object. 28 | type Tree struct { 29 | // Entries is the list of entries held by this tree. 30 | Entries []*TreeEntry 31 | } 32 | 33 | // Type implements Object.ObjectType by returning the correct object type for 34 | // Trees, TreeObjectType. 35 | func (t *Tree) Type() ObjectType { return TreeObjectType } 36 | 37 | // Decode implements Object.Decode and decodes the uncompressed tree being 38 | // read. It returns the number of uncompressed bytes being consumed off of the 39 | // stream, which should be strictly equal to the size given. 40 | // 41 | // If any error was encountered along the way, that will be returned, along with 42 | // the number of bytes read up to that point. 
43 | func (t *Tree) Decode(hash hash.Hash, from io.Reader, size int64) (n int, err error) { 44 | hashlen := hash.Size() 45 | buf := bufio.NewReader(from) 46 | 47 | var entries []*TreeEntry 48 | for { 49 | modes, err := buf.ReadString(' ') 50 | if err != nil { 51 | if err == io.EOF { 52 | break 53 | } 54 | return n, err 55 | } 56 | n += len(modes) 57 | modes = strings.TrimSuffix(modes, " ") 58 | 59 | mode, _ := strconv.ParseInt(modes, 8, 32) 60 | 61 | fname, err := buf.ReadString('\x00') 62 | if err != nil { 63 | return n, err 64 | } 65 | n += len(fname) 66 | fname = strings.TrimSuffix(fname, "\x00") 67 | 68 | var sha [pack.MaxHashSize]byte 69 | if _, err = io.ReadFull(buf, sha[:hashlen]); err != nil { 70 | return n, err 71 | } 72 | n += hashlen 73 | 74 | entries = append(entries, &TreeEntry{ 75 | Name: fname, 76 | Oid: sha[:hashlen], 77 | Filemode: int32(mode), 78 | }) 79 | } 80 | 81 | t.Entries = entries 82 | 83 | return n, nil 84 | } 85 | 86 | // Encode encodes the tree's contents to the given io.Writer, "w". If there was 87 | // any error copying the tree's contents, that error will be returned. 88 | // 89 | // Otherwise, the number of bytes written will be returned. 90 | func (t *Tree) Encode(to io.Writer) (n int, err error) { 91 | const entryTmpl = "%s %s\x00%s" 92 | 93 | for _, entry := range t.Entries { 94 | fmode := strconv.FormatInt(int64(entry.Filemode), 8) 95 | 96 | ne, err := fmt.Fprintf(to, entryTmpl, 97 | fmode, 98 | entry.Name, 99 | entry.Oid) 100 | 101 | if err != nil { 102 | return n, err 103 | } 104 | 105 | n = n + ne 106 | } 107 | return 108 | } 109 | 110 | // Merge performs a merge operation against the given set of `*TreeEntry`'s by 111 | // either replacing existing tree entries of the same name, or appending new 112 | // entries in sub-tree order. 113 | // 114 | // It returns a copy of the tree, and performs the merge in O(n*log(n)) time. 
115 | func (t *Tree) Merge(others ...*TreeEntry) *Tree { 116 | unseen := make(map[string]*TreeEntry) 117 | 118 | // Build a cache of name to *TreeEntry. 119 | for _, other := range others { 120 | unseen[other.Name] = other 121 | } 122 | 123 | // Map the existing entries ("t.Entries") into a new set by either 124 | // copying an existing entry, or replacing it with a new one. 125 | entries := make([]*TreeEntry, 0, len(t.Entries)) 126 | for _, entry := range t.Entries { 127 | if other, ok := unseen[entry.Name]; ok { 128 | entries = append(entries, other) 129 | delete(unseen, entry.Name) 130 | } else { 131 | oid := make([]byte, len(entry.Oid)) 132 | copy(oid, entry.Oid) 133 | 134 | entries = append(entries, &TreeEntry{ 135 | Filemode: entry.Filemode, 136 | Name: entry.Name, 137 | Oid: oid, 138 | }) 139 | } 140 | } 141 | 142 | // For all the items we haven't replaced into the new set, append them 143 | // to the entries. 144 | for _, remaining := range unseen { 145 | entries = append(entries, remaining) 146 | } 147 | 148 | // Call sort afterwords, as a tradeoff between speed and spacial 149 | // complexity. As a future point of optimization, adding new elements 150 | // (see: above) could be done as a linear pass of the "entries" set. 151 | // 152 | // In order to do that, we must have a constant-time lookup of both 153 | // entries in the existing and new sets. This requires building a 154 | // map[string]*TreeEntry for the given "others" as well as "t.Entries". 155 | // 156 | // Trees can be potentially large, so trade this spacial complexity for 157 | // an O(n*log(n)) sort. 158 | sort.Sort(SubtreeOrder(entries)) 159 | 160 | return &Tree{Entries: entries} 161 | } 162 | 163 | // Equal returns whether the receiving and given trees are equal, or in other 164 | // words, whether they are represented by the same SHA-1 when saved to the 165 | // object database. 
166 | func (t *Tree) Equal(other *Tree) bool { 167 | if (t == nil) != (other == nil) { 168 | return false 169 | } 170 | 171 | if t != nil { 172 | if len(t.Entries) != len(other.Entries) { 173 | return false 174 | } 175 | 176 | for i := 0; i < len(t.Entries); i++ { 177 | e1 := t.Entries[i] 178 | e2 := other.Entries[i] 179 | 180 | if !e1.Equal(e2) { 181 | return false 182 | } 183 | } 184 | } 185 | return true 186 | } 187 | 188 | // TreeEntry encapsulates information about a single tree entry in a tree 189 | // listing. 190 | type TreeEntry struct { 191 | // Name is the entry name relative to the tree in which this entry is 192 | // contained. 193 | Name string 194 | // Oid is the object ID for this tree entry. 195 | Oid []byte 196 | // Filemode is the filemode of this tree entry on disk. 197 | Filemode int32 198 | } 199 | 200 | // Equal returns whether the receiving and given TreeEntry instances are 201 | // identical in name, filemode, and OID. 202 | func (e *TreeEntry) Equal(other *TreeEntry) bool { 203 | if (e == nil) != (other == nil) { 204 | return false 205 | } 206 | 207 | if e != nil { 208 | return e.Name == other.Name && 209 | bytes.Equal(e.Oid, other.Oid) && 210 | e.Filemode == other.Filemode 211 | } 212 | return true 213 | } 214 | 215 | // Type is the type of entry (either blob: BlobObjectType, or a sub-tree: 216 | // TreeObjectType). 217 | func (e *TreeEntry) Type() ObjectType { 218 | switch e.Filemode & sIFMT { 219 | case sIFREG: 220 | return BlobObjectType 221 | case sIFDIR: 222 | return TreeObjectType 223 | case sIFLNK: 224 | return BlobObjectType 225 | case sIFGITLINK: 226 | return CommitObjectType 227 | default: 228 | panic(fmt.Sprintf("gitobj: unknown object type: %o", 229 | e.Filemode)) 230 | } 231 | } 232 | 233 | // IsLink returns true if the given TreeEntry is a blob which represents a 234 | // symbolic link (i.e., with a filemode of 0120000. 
235 | func (e *TreeEntry) IsLink() bool { 236 | return e.Filemode & sIFMT == sIFLNK 237 | } 238 | 239 | // SubtreeOrder is an implementation of sort.Interface that sorts a set of 240 | // `*TreeEntry`'s according to "subtree" order. This ordering is required to 241 | // write trees in a correct, readable format to the Git object database. 242 | // 243 | // The format is as follows: entries are sorted lexicographically in byte-order, 244 | // with subtrees (entries of Type() == gitobj.TreeObjectType) being sorted as 245 | // if their `Name` fields ended in a "/". 246 | // 247 | // See: https://github.com/git/git/blob/v2.13.0/fsck.c#L492-L525 for more 248 | // details. 249 | type SubtreeOrder []*TreeEntry 250 | 251 | // Len implements sort.Interface.Len() and return the length of the underlying 252 | // slice. 253 | func (s SubtreeOrder) Len() int { return len(s) } 254 | 255 | // Swap implements sort.Interface.Swap() and swaps the two elements at i and j. 256 | func (s SubtreeOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] } 257 | 258 | // Less implements sort.Interface.Less() and returns whether the element at "i" 259 | // is compared as "less" than the element at "j". In other words, it returns if 260 | // the element at "i" should be sorted ahead of that at "j". 261 | // 262 | // It performs this comparison in lexicographic byte-order according to the 263 | // rules above (see SubtreeOrder). 264 | func (s SubtreeOrder) Less(i, j int) bool { 265 | return s.Name(i) < s.Name(j) 266 | } 267 | 268 | // Name returns the name for a given entry indexed at "i", which is a C-style 269 | // string ('\0' terminated unless it's a subtree), optionally terminated with 270 | // '/' if it's a subtree. 271 | // 272 | // This is done because '/' sorts ahead of '\0', and is compatible with the 273 | // tree order in upstream Git. 
274 | func (s SubtreeOrder) Name(i int) string { 275 | if i < 0 || i >= len(s) { 276 | return "" 277 | } 278 | 279 | entry := s[i] 280 | if entry == nil { 281 | return "" 282 | } 283 | 284 | if entry.Type() == TreeObjectType { 285 | return entry.Name + "/" 286 | } 287 | return entry.Name + "\x00" 288 | } 289 | -------------------------------------------------------------------------------- /tree_test.go: -------------------------------------------------------------------------------- 1 | package gitobj 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "crypto/sha1" 7 | "fmt" 8 | "sort" 9 | "strconv" 10 | "testing" 11 | 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | ) 15 | 16 | func TestTreeReturnsCorrectObjectType(t *testing.T) { 17 | assert.Equal(t, TreeObjectType, new(Tree).Type()) 18 | } 19 | 20 | func TestTreeEncoding(t *testing.T) { 21 | tree := &Tree{ 22 | Entries: []*TreeEntry{ 23 | { 24 | Name: "a.dat", 25 | Oid: []byte("aaaaaaaaaaaaaaaaaaaa"), 26 | Filemode: 0100644, 27 | }, 28 | { 29 | Name: "subdir", 30 | Oid: []byte("bbbbbbbbbbbbbbbbbbbb"), 31 | Filemode: 040000, 32 | }, 33 | { 34 | Name: "submodule", 35 | Oid: []byte("cccccccccccccccccccc"), 36 | Filemode: 0160000, 37 | }, 38 | }, 39 | } 40 | 41 | buf := new(bytes.Buffer) 42 | 43 | n, err := tree.Encode(buf) 44 | assert.Nil(t, err) 45 | assert.NotEqual(t, 0, n) 46 | 47 | assertTreeEntry(t, buf, "a.dat", []byte("aaaaaaaaaaaaaaaaaaaa"), 0100644) 48 | assertTreeEntry(t, buf, "subdir", []byte("bbbbbbbbbbbbbbbbbbbb"), 040000) 49 | assertTreeEntry(t, buf, "submodule", []byte("cccccccccccccccccccc"), 0160000) 50 | 51 | assert.Equal(t, 0, buf.Len()) 52 | } 53 | 54 | func TestTreeDecoding(t *testing.T) { 55 | from := new(bytes.Buffer) 56 | fmt.Fprintf(from, "%s %s\x00%s", 57 | strconv.FormatInt(int64(0100644), 8), 58 | "a.dat", []byte("aaaaaaaaaaaaaaaaaaaa")) 59 | fmt.Fprintf(from, "%s %s\x00%s", 60 | strconv.FormatInt(int64(040000), 8), 61 | "subdir", 
[]byte("bbbbbbbbbbbbbbbbbbbb")) 62 | fmt.Fprintf(from, "%s %s\x00%s", 63 | strconv.FormatInt(int64(0120000), 8), 64 | "symlink", []byte("cccccccccccccccccccc")) 65 | fmt.Fprintf(from, "%s %s\x00%s", 66 | strconv.FormatInt(int64(0160000), 8), 67 | "submodule", []byte("dddddddddddddddddddd")) 68 | 69 | flen := from.Len() 70 | 71 | tree := new(Tree) 72 | n, err := tree.Decode(sha1.New(), from, int64(flen)) 73 | 74 | assert.Nil(t, err) 75 | assert.Equal(t, flen, n) 76 | 77 | require.Equal(t, 4, len(tree.Entries)) 78 | assert.Equal(t, &TreeEntry{ 79 | Name: "a.dat", 80 | Oid: []byte("aaaaaaaaaaaaaaaaaaaa"), 81 | Filemode: 0100644, 82 | }, tree.Entries[0]) 83 | assert.Equal(t, &TreeEntry{ 84 | Name: "subdir", 85 | Oid: []byte("bbbbbbbbbbbbbbbbbbbb"), 86 | Filemode: 040000, 87 | }, tree.Entries[1]) 88 | assert.Equal(t, &TreeEntry{ 89 | Name: "symlink", 90 | Oid: []byte("cccccccccccccccccccc"), 91 | Filemode: 0120000, 92 | }, tree.Entries[2]) 93 | assert.Equal(t, &TreeEntry{ 94 | Name: "submodule", 95 | Oid: []byte("dddddddddddddddddddd"), 96 | Filemode: 0160000, 97 | }, tree.Entries[3]) 98 | } 99 | 100 | func TestTreeDecodingShaBoundary(t *testing.T) { 101 | var from bytes.Buffer 102 | 103 | fmt.Fprintf(&from, "%s %s\x00%s", 104 | strconv.FormatInt(int64(0100644), 8), 105 | "a.dat", []byte("aaaaaaaaaaaaaaaaaaaa")) 106 | 107 | flen := from.Len() 108 | 109 | tree := new(Tree) 110 | n, err := tree.Decode(sha1.New(), bufio.NewReaderSize(&from, flen-2), int64(flen)) 111 | 112 | assert.Nil(t, err) 113 | assert.Equal(t, flen, n) 114 | 115 | require.Len(t, tree.Entries, 1) 116 | assert.Equal(t, &TreeEntry{ 117 | Name: "a.dat", 118 | Oid: []byte("aaaaaaaaaaaaaaaaaaaa"), 119 | Filemode: 0100644, 120 | }, tree.Entries[0]) 121 | } 122 | 123 | func TestTreeMergeReplaceElements(t *testing.T) { 124 | e1 := &TreeEntry{Name: "a", Filemode: 0100644, Oid: []byte{0x1}} 125 | e2 := &TreeEntry{Name: "b", Filemode: 0100644, Oid: []byte{0x2}} 126 | e3 := &TreeEntry{Name: "c", Filemode: 0100755, 
Oid: []byte{0x3}} 127 | 128 | e4 := &TreeEntry{Name: "b", Filemode: 0100644, Oid: []byte{0x4}} 129 | e5 := &TreeEntry{Name: "c", Filemode: 0100644, Oid: []byte{0x5}} 130 | 131 | t1 := &Tree{Entries: []*TreeEntry{e1, e2, e3}} 132 | 133 | t2 := t1.Merge(e4, e5) 134 | 135 | require.Len(t, t1.Entries, 3) 136 | assert.True(t, bytes.Equal(t1.Entries[0].Oid, []byte{0x1})) 137 | assert.True(t, bytes.Equal(t1.Entries[1].Oid, []byte{0x2})) 138 | assert.True(t, bytes.Equal(t1.Entries[2].Oid, []byte{0x3})) 139 | 140 | require.Len(t, t2.Entries, 3) 141 | assert.True(t, bytes.Equal(t2.Entries[0].Oid, []byte{0x1})) 142 | assert.True(t, bytes.Equal(t2.Entries[1].Oid, []byte{0x4})) 143 | assert.True(t, bytes.Equal(t2.Entries[2].Oid, []byte{0x5})) 144 | } 145 | 146 | func TestMergeInsertElementsInSubtreeOrder(t *testing.T) { 147 | e1 := &TreeEntry{Name: "a-b", Filemode: 0100644, Oid: []byte{0x1}} 148 | e2 := &TreeEntry{Name: "a", Filemode: 040000, Oid: []byte{0x2}} 149 | e3 := &TreeEntry{Name: "a=", Filemode: 0100644, Oid: []byte{0x3}} 150 | e4 := &TreeEntry{Name: "a-", Filemode: 0100644, Oid: []byte{0x4}} 151 | 152 | t1 := &Tree{Entries: []*TreeEntry{e1, e2, e3}} 153 | t2 := t1.Merge(e4) 154 | 155 | require.Len(t, t1.Entries, 3) 156 | assert.True(t, bytes.Equal(t1.Entries[0].Oid, []byte{0x1})) 157 | assert.True(t, bytes.Equal(t1.Entries[1].Oid, []byte{0x2})) 158 | assert.True(t, bytes.Equal(t1.Entries[2].Oid, []byte{0x3})) 159 | 160 | require.Len(t, t2.Entries, 4) 161 | assert.True(t, bytes.Equal(t2.Entries[0].Oid, []byte{0x4})) 162 | assert.True(t, bytes.Equal(t2.Entries[1].Oid, []byte{0x1})) 163 | assert.True(t, bytes.Equal(t2.Entries[2].Oid, []byte{0x2})) 164 | assert.True(t, bytes.Equal(t2.Entries[3].Oid, []byte{0x3})) 165 | } 166 | 167 | type TreeEntryTypeTestCase struct { 168 | Filemode int32 169 | Expected ObjectType 170 | IsLink bool 171 | } 172 | 173 | func (c *TreeEntryTypeTestCase) AssertType(t *testing.T) { 174 | e := &TreeEntry{Filemode: c.Filemode} 175 | 176 | got := 
e.Type() 177 | 178 | assert.Equal(t, c.Expected, got, 179 | "gitobj: expected type: %s, got: %s", c.Expected, got) 180 | } 181 | 182 | func (c *TreeEntryTypeTestCase) AssertIsLink(t *testing.T) { 183 | e := &TreeEntry{Filemode: c.Filemode} 184 | 185 | isLink := e.IsLink() 186 | 187 | assert.Equal(t, c.IsLink, isLink, 188 | "gitobj: expected link: %v, got: %v, for type %s", c.IsLink, isLink, c.Expected) 189 | } 190 | 191 | func TestTreeEntryTypeResolution(t *testing.T) { 192 | for desc, c := range map[string]*TreeEntryTypeTestCase{ 193 | "blob": {0100644, BlobObjectType, false}, 194 | "subtree": {040000, TreeObjectType, false}, 195 | "symlink": {0120000, BlobObjectType, true}, 196 | "commit": {0160000, CommitObjectType, false}, 197 | } { 198 | t.Run(desc, c.AssertType) 199 | t.Run(desc, c.AssertIsLink) 200 | } 201 | } 202 | 203 | func TestTreeEntryTypeResolutionUnknown(t *testing.T) { 204 | e := &TreeEntry{Filemode: -1} 205 | 206 | defer func() { 207 | if err := recover(); err == nil { 208 | t.Fatal("gitobj: expected panic(), got none") 209 | } else { 210 | assert.Equal(t, "gitobj: unknown object type: -1", err) 211 | } 212 | }() 213 | 214 | e.Type() 215 | } 216 | 217 | func TestSubtreeOrder(t *testing.T) { 218 | // The below list (e1, e2, ..., e5) is entered in subtree order: that 219 | // is, lexicographically byte-ordered as if blobs end in a '\0', and 220 | // sub-trees end in a '/'. 
221 | // 222 | // See: 223 | // http://public-inbox.org/git/7vac6jfzem.fsf@assigned-by-dhcp.cox.net 224 | e1 := &TreeEntry{Filemode: 0100644, Name: "a-"} 225 | e2 := &TreeEntry{Filemode: 0100644, Name: "a-b"} 226 | e3 := &TreeEntry{Filemode: 040000, Name: "a"} 227 | e4 := &TreeEntry{Filemode: 0100644, Name: "a="} 228 | e5 := &TreeEntry{Filemode: 0100644, Name: "a=b"} 229 | 230 | // Create a set of entries in the wrong order: 231 | entries := []*TreeEntry{e3, e4, e1, e5, e2} 232 | 233 | sort.Sort(SubtreeOrder(entries)) 234 | 235 | // Assert that they are in the correct order after sorting in sub-tree 236 | // order: 237 | require.Len(t, entries, 5) 238 | assert.Equal(t, "a-", entries[0].Name) 239 | assert.Equal(t, "a-b", entries[1].Name) 240 | assert.Equal(t, "a", entries[2].Name) 241 | assert.Equal(t, "a=", entries[3].Name) 242 | assert.Equal(t, "a=b", entries[4].Name) 243 | } 244 | 245 | func TestSubtreeOrderReturnsEmptyForOutOfBounds(t *testing.T) { 246 | o := SubtreeOrder([]*TreeEntry{{Name: "a"}}) 247 | 248 | assert.Equal(t, "", o.Name(len(o)+1)) 249 | } 250 | 251 | func TestSubtreeOrderReturnsEmptyForNilElements(t *testing.T) { 252 | o := SubtreeOrder([]*TreeEntry{nil}) 253 | 254 | assert.Equal(t, "", o.Name(0)) 255 | } 256 | 257 | func TestTreeEqualReturnsTrueWithUnchangedContents(t *testing.T) { 258 | t1 := &Tree{Entries: []*TreeEntry{ 259 | {Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)}, 260 | }} 261 | t2 := &Tree{Entries: []*TreeEntry{ 262 | {Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)}, 263 | }} 264 | 265 | assert.True(t, t1.Equal(t2)) 266 | } 267 | 268 | func TestTreeEqualReturnsFalseWithChangedContents(t *testing.T) { 269 | t1 := &Tree{Entries: []*TreeEntry{ 270 | {Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)}, 271 | {Name: "b.dat", Filemode: 0100644, Oid: make([]byte, 20)}, 272 | }} 273 | t2 := &Tree{Entries: []*TreeEntry{ 274 | {Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)}, 275 | {Name: "c.dat", Filemode: 
0100644, Oid: make([]byte, 20)}, 276 | }} 277 | 278 | assert.False(t, t1.Equal(t2)) 279 | } 280 | 281 | func TestTreeEqualReturnsTrueWhenOneTreeIsNil(t *testing.T) { 282 | t1 := &Tree{Entries: []*TreeEntry{ 283 | {Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)}, 284 | }} 285 | t2 := (*Tree)(nil) 286 | 287 | assert.False(t, t1.Equal(t2)) 288 | assert.False(t, t2.Equal(t1)) 289 | } 290 | 291 | func TestTreeEqualReturnsTrueWhenBothTreesAreNil(t *testing.T) { 292 | t1 := (*Tree)(nil) 293 | t2 := (*Tree)(nil) 294 | 295 | assert.True(t, t1.Equal(t2)) 296 | } 297 | 298 | func TestTreeEntryEqualReturnsTrueWhenEntriesAreTheSame(t *testing.T) { 299 | e1 := &TreeEntry{Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)} 300 | e2 := &TreeEntry{Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)} 301 | 302 | assert.True(t, e1.Equal(e2)) 303 | } 304 | 305 | func TestTreeEntryEqualReturnsFalseWhenDifferentNames(t *testing.T) { 306 | e1 := &TreeEntry{Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)} 307 | e2 := &TreeEntry{Name: "b.dat", Filemode: 0100644, Oid: make([]byte, 20)} 308 | 309 | assert.False(t, e1.Equal(e2)) 310 | } 311 | 312 | func TestTreeEntryEqualReturnsFalseWhenDifferentOids(t *testing.T) { 313 | e1 := &TreeEntry{Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)} 314 | e2 := &TreeEntry{Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)} 315 | 316 | e2.Oid[0] = 1 317 | 318 | assert.False(t, e1.Equal(e2)) 319 | } 320 | 321 | func TestTreeEntryEqualReturnsFalseWhenDifferentFilemodes(t *testing.T) { 322 | e1 := &TreeEntry{Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)} 323 | e2 := &TreeEntry{Name: "a.dat", Filemode: 0100755, Oid: make([]byte, 20)} 324 | 325 | assert.False(t, e1.Equal(e2)) 326 | } 327 | 328 | func TestTreeEntryEqualReturnsFalseWhenOneEntryIsNil(t *testing.T) { 329 | e1 := &TreeEntry{Name: "a.dat", Filemode: 0100644, Oid: make([]byte, 20)} 330 | e2 := (*TreeEntry)(nil) 331 | 332 | assert.False(t, e1.Equal(e2)) 
333 | } 334 | 335 | func TestTreeEntryEqualReturnsTrueWhenBothEntriesAreNil(t *testing.T) { 336 | e1 := (*TreeEntry)(nil) 337 | e2 := (*TreeEntry)(nil) 338 | 339 | assert.True(t, e1.Equal(e2)) 340 | } 341 | 342 | func assertTreeEntry(t *testing.T, buf *bytes.Buffer, 343 | name string, oid []byte, mode int32) { 344 | 345 | fmode, err := buf.ReadBytes(' ') 346 | assert.Nil(t, err) 347 | assert.Equal(t, []byte(strconv.FormatInt(int64(mode), 8)+" "), fmode) 348 | 349 | fname, err := buf.ReadBytes('\x00') 350 | assert.Nil(t, err) 351 | assert.Equal(t, []byte(name+"\x00"), fname) 352 | 353 | var sha [20]byte 354 | _, err = buf.Read(sha[:]) 355 | assert.Nil(t, err) 356 | assert.Equal(t, oid, sha[:]) 357 | } 358 | -------------------------------------------------------------------------------- /vendor/github.com/davecgh/go-spew/LICENSE: -------------------------------------------------------------------------------- 1 | ISC License 2 | 3 | Copyright (c) 2012-2016 Dave Collins 4 | 5 | Permission to use, copy, modify, and/or distribute this software for any 6 | purpose with or without fee is hereby granted, provided that the above 7 | copyright notice and this permission notice appear in all copies. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 | WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 | MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 | ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
16 | -------------------------------------------------------------------------------- /vendor/github.com/davecgh/go-spew/spew/bypass.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2016 Dave Collins 2 | // 3 | // Permission to use, copy, modify, and distribute this software for any 4 | // purpose with or without fee is hereby granted, provided that the above 5 | // copyright notice and this permission notice appear in all copies. 6 | // 7 | // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 | // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 | // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 | // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 | // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 12 | // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 13 | // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 | 15 | // NOTE: Due to the following build constraints, this file will only be compiled 16 | // when the code is not running on Google App Engine, compiled by GopherJS, and 17 | // "-tags safe" is not added to the go build command line. The "disableunsafe" 18 | // tag is deprecated and thus should not be used. 19 | // Go versions prior to 1.4 are disabled because they use a different layout 20 | // for interfaces which make the implementation of unsafeReflectValue more complex. 21 | // +build !js,!appengine,!safe,!disableunsafe,go1.4 22 | 23 | package spew 24 | 25 | import ( 26 | "reflect" 27 | "unsafe" 28 | ) 29 | 30 | const ( 31 | // UnsafeDisabled is a build-time constant which specifies whether or 32 | // not access to the unsafe package is available. 33 | UnsafeDisabled = false 34 | 35 | // ptrSize is the size of a pointer on the current arch. 
36 | ptrSize = unsafe.Sizeof((*byte)(nil)) 37 | ) 38 | 39 | type flag uintptr 40 | 41 | var ( 42 | // flagRO indicates whether the value field of a reflect.Value 43 | // is read-only. 44 | flagRO flag 45 | 46 | // flagAddr indicates whether the address of the reflect.Value's 47 | // value may be taken. 48 | flagAddr flag 49 | ) 50 | 51 | // flagKindMask holds the bits that make up the kind 52 | // part of the flags field. In all the supported versions, 53 | // it is in the lower 5 bits. 54 | const flagKindMask = flag(0x1f) 55 | 56 | // Different versions of Go have used different 57 | // bit layouts for the flags type. This table 58 | // records the known combinations. 59 | var okFlags = []struct { 60 | ro, addr flag 61 | }{{ 62 | // From Go 1.4 to 1.5 63 | ro: 1 << 5, 64 | addr: 1 << 7, 65 | }, { 66 | // Up to Go tip. 67 | ro: 1<<5 | 1<<6, 68 | addr: 1 << 8, 69 | }} 70 | 71 | var flagValOffset = func() uintptr { 72 | field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") 73 | if !ok { 74 | panic("reflect.Value has no flag field") 75 | } 76 | return field.Offset 77 | }() 78 | 79 | // flagField returns a pointer to the flag field of a reflect.Value. 80 | func flagField(v *reflect.Value) *flag { 81 | return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) 82 | } 83 | 84 | // unsafeReflectValue converts the passed reflect.Value into a one that bypasses 85 | // the typical safety restrictions preventing access to unaddressable and 86 | // unexported data. It works by digging the raw pointer to the underlying 87 | // value out of the protected value and generating a new unprotected (unsafe) 88 | // reflect.Value to it. 89 | // 90 | // This allows us to check for implementations of the Stringer and error 91 | // interfaces to be used for pretty printing ordinarily unaddressable and 92 | // inaccessible values such as unexported struct fields. 
93 | func unsafeReflectValue(v reflect.Value) reflect.Value { 94 | if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { 95 | return v 96 | } 97 | flagFieldPtr := flagField(&v) 98 | *flagFieldPtr &^= flagRO 99 | *flagFieldPtr |= flagAddr 100 | return v 101 | } 102 | 103 | // Sanity checks against future reflect package changes 104 | // to the type or semantics of the Value.flag field. 105 | func init() { 106 | field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") 107 | if !ok { 108 | panic("reflect.Value has no flag field") 109 | } 110 | if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { 111 | panic("reflect.Value flag field has changed kind") 112 | } 113 | type t0 int 114 | var t struct { 115 | A t0 116 | // t0 will have flagEmbedRO set. 117 | t0 118 | // a will have flagStickyRO set 119 | a t0 120 | } 121 | vA := reflect.ValueOf(t).FieldByName("A") 122 | va := reflect.ValueOf(t).FieldByName("a") 123 | vt0 := reflect.ValueOf(t).FieldByName("t0") 124 | 125 | // Infer flagRO from the difference between the flags 126 | // for the (otherwise identical) fields in t. 127 | flagPublic := *flagField(&vA) 128 | flagWithRO := *flagField(&va) | *flagField(&vt0) 129 | flagRO = flagPublic ^ flagWithRO 130 | 131 | // Infer flagAddr from the difference between a value 132 | // taken from a pointer and not. 133 | vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") 134 | flagNoPtr := *flagField(&vA) 135 | flagPtr := *flagField(&vPtrA) 136 | flagAddr = flagNoPtr ^ flagPtr 137 | 138 | // Check that the inferred flags tally with one of the known versions. 
139 | for _, f := range okFlags { 140 | if flagRO == f.ro && flagAddr == f.addr { 141 | return 142 | } 143 | } 144 | panic("reflect.Value read-only flag has changed semantics") 145 | } 146 | -------------------------------------------------------------------------------- /vendor/github.com/davecgh/go-spew/spew/bypasssafe.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2015-2016 Dave Collins 2 | // 3 | // Permission to use, copy, modify, and distribute this software for any 4 | // purpose with or without fee is hereby granted, provided that the above 5 | // copyright notice and this permission notice appear in all copies. 6 | // 7 | // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 | // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 | // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 | // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 | // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 12 | // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 13 | // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 | 15 | // NOTE: Due to the following build constraints, this file will only be compiled 16 | // when the code is running on Google App Engine, compiled by GopherJS, or 17 | // "-tags safe" is added to the go build command line. The "disableunsafe" 18 | // tag is deprecated and thus should not be used. 19 | // +build js appengine safe disableunsafe !go1.4 20 | 21 | package spew 22 | 23 | import "reflect" 24 | 25 | const ( 26 | // UnsafeDisabled is a build-time constant which specifies whether or 27 | // not access to the unsafe package is available. 
28 | UnsafeDisabled = true 29 | ) 30 | 31 | // unsafeReflectValue typically converts the passed reflect.Value into a one 32 | // that bypasses the typical safety restrictions preventing access to 33 | // unaddressable and unexported data. However, doing this relies on access to 34 | // the unsafe package. This is a stub version which simply returns the passed 35 | // reflect.Value when the unsafe package is not available. 36 | func unsafeReflectValue(v reflect.Value) reflect.Value { 37 | return v 38 | } 39 | -------------------------------------------------------------------------------- /vendor/github.com/davecgh/go-spew/spew/common.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2013-2016 Dave Collins 3 | * 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose with or without fee is hereby granted, provided that the above 6 | * copyright notice and this permission notice appear in all copies. 7 | * 8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 | */ 16 | 17 | package spew 18 | 19 | import ( 20 | "bytes" 21 | "fmt" 22 | "io" 23 | "reflect" 24 | "sort" 25 | "strconv" 26 | ) 27 | 28 | // Some constants in the form of bytes to avoid string overhead. This mirrors 29 | // the technique used in the fmt package. 
30 | var ( 31 | panicBytes = []byte("(PANIC=") 32 | plusBytes = []byte("+") 33 | iBytes = []byte("i") 34 | trueBytes = []byte("true") 35 | falseBytes = []byte("false") 36 | interfaceBytes = []byte("(interface {})") 37 | commaNewlineBytes = []byte(",\n") 38 | newlineBytes = []byte("\n") 39 | openBraceBytes = []byte("{") 40 | openBraceNewlineBytes = []byte("{\n") 41 | closeBraceBytes = []byte("}") 42 | asteriskBytes = []byte("*") 43 | colonBytes = []byte(":") 44 | colonSpaceBytes = []byte(": ") 45 | openParenBytes = []byte("(") 46 | closeParenBytes = []byte(")") 47 | spaceBytes = []byte(" ") 48 | pointerChainBytes = []byte("->") 49 | nilAngleBytes = []byte("") 50 | maxNewlineBytes = []byte("\n") 51 | maxShortBytes = []byte("") 52 | circularBytes = []byte("") 53 | circularShortBytes = []byte("") 54 | invalidAngleBytes = []byte("") 55 | openBracketBytes = []byte("[") 56 | closeBracketBytes = []byte("]") 57 | percentBytes = []byte("%") 58 | precisionBytes = []byte(".") 59 | openAngleBytes = []byte("<") 60 | closeAngleBytes = []byte(">") 61 | openMapBytes = []byte("map[") 62 | closeMapBytes = []byte("]") 63 | lenEqualsBytes = []byte("len=") 64 | capEqualsBytes = []byte("cap=") 65 | ) 66 | 67 | // hexDigits is used to map a decimal value to a hex digit. 68 | var hexDigits = "0123456789abcdef" 69 | 70 | // catchPanic handles any panics that might occur during the handleMethods 71 | // calls. 72 | func catchPanic(w io.Writer, v reflect.Value) { 73 | if err := recover(); err != nil { 74 | w.Write(panicBytes) 75 | fmt.Fprintf(w, "%v", err) 76 | w.Write(closeParenBytes) 77 | } 78 | } 79 | 80 | // handleMethods attempts to call the Error and String methods on the underlying 81 | // type the passed reflect.Value represents and outputes the result to Writer w. 82 | // 83 | // It handles panics in any called methods by catching and displaying the error 84 | // as the formatted value. 
85 | func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { 86 | // We need an interface to check if the type implements the error or 87 | // Stringer interface. However, the reflect package won't give us an 88 | // interface on certain things like unexported struct fields in order 89 | // to enforce visibility rules. We use unsafe, when it's available, 90 | // to bypass these restrictions since this package does not mutate the 91 | // values. 92 | if !v.CanInterface() { 93 | if UnsafeDisabled { 94 | return false 95 | } 96 | 97 | v = unsafeReflectValue(v) 98 | } 99 | 100 | // Choose whether or not to do error and Stringer interface lookups against 101 | // the base type or a pointer to the base type depending on settings. 102 | // Technically calling one of these methods with a pointer receiver can 103 | // mutate the value, however, types which choose to satisify an error or 104 | // Stringer interface with a pointer receiver should not be mutating their 105 | // state inside these interface methods. 106 | if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { 107 | v = unsafeReflectValue(v) 108 | } 109 | if v.CanAddr() { 110 | v = v.Addr() 111 | } 112 | 113 | // Is it an error or Stringer? 114 | switch iface := v.Interface().(type) { 115 | case error: 116 | defer catchPanic(w, v) 117 | if cs.ContinueOnMethod { 118 | w.Write(openParenBytes) 119 | w.Write([]byte(iface.Error())) 120 | w.Write(closeParenBytes) 121 | w.Write(spaceBytes) 122 | return false 123 | } 124 | 125 | w.Write([]byte(iface.Error())) 126 | return true 127 | 128 | case fmt.Stringer: 129 | defer catchPanic(w, v) 130 | if cs.ContinueOnMethod { 131 | w.Write(openParenBytes) 132 | w.Write([]byte(iface.String())) 133 | w.Write(closeParenBytes) 134 | w.Write(spaceBytes) 135 | return false 136 | } 137 | w.Write([]byte(iface.String())) 138 | return true 139 | } 140 | return false 141 | } 142 | 143 | // printBool outputs a boolean value as true or false to Writer w. 
func printBool(w io.Writer, val bool) {
	if val {
		w.Write(trueBytes)
	} else {
		w.Write(falseBytes)
	}
}

// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
	w.Write([]byte(strconv.FormatInt(val, base)))
}

// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
	w.Write([]byte(strconv.FormatUint(val, base)))
}

// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}

// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
	r := real(c)
	w.Write(openParenBytes)
	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
	i := imag(c)
	if i >= 0 {
		// FormatFloat only emits a sign for negatives; add the explicit
		// '+' Go uses when printing complex numbers.
		w.Write(plusBytes)
	}
	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
	w.Write(iBytes)
	w.Write(closeParenBytes)
}

// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
	// Null pointer.
	num := uint64(p)
	if num == 0 {
		w.Write(nilAngleBytes)
		return
	}

	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
	buf := make([]byte, 18)

	// It's simpler to construct the hex string right to left.
	base := uint64(16)
	i := len(buf) - 1
	for num >= base {
		buf[i] = hexDigits[num%base]
		num /= base
		i--
	}
	buf[i] = hexDigits[num]

	// Add '0x' prefix.
	i--
	buf[i] = 'x'
	i--
	buf[i] = '0'

	// Strip unused leading bytes.
	buf = buf[i:]
	w.Write(buf)
}

// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
	values  []reflect.Value
	strings []string // either nil or same len as values
	cs      *ConfigState
}

// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted.  It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
	vs := &valuesSorter{values: values, cs: cs}
	if canSortSimply(vs.values[0].Kind()) {
		return vs
	}
	if !cs.DisableMethods {
		vs.strings = make([]string, len(values))
		for i := range vs.values {
			b := bytes.Buffer{}
			if !handleMethods(cs, &b, vs.values[i]) {
				// A single value without a usable Error/String method
				// invalidates the whole surrogate-key set.
				vs.strings = nil
				break
			}
			vs.strings[i] = b.String()
		}
	}
	if vs.strings == nil && cs.SpewKeys {
		vs.strings = make([]string, len(values))
		for i := range vs.values {
			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
		}
	}
	return vs
}

// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
	// This switch parallels valueSortLess, except for the default case.
	switch kind {
	case reflect.Bool:
		return true
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		return true
	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		return true
	case reflect.Float32, reflect.Float64:
		return true
	case reflect.String:
		return true
	case reflect.Uintptr:
		return true
	case reflect.Array:
		return true
	}
	return false
}

// Len returns the number of values in the slice.  It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
	return len(s.values)
}

// Swap swaps the values at the passed indices.  It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
	s.values[i], s.values[j] = s.values[j], s.values[i]
	if s.strings != nil {
		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
	}
}

// valueSortLess returns whether the first value should sort before the second
// value.  It is used by valueSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
	switch a.Kind() {
	case reflect.Bool:
		return !a.Bool() && b.Bool()
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		return a.Int() < b.Int()
	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		return a.Uint() < b.Uint()
	case reflect.Float32, reflect.Float64:
		return a.Float() < b.Float()
	case reflect.String:
		return a.String() < b.String()
	case reflect.Uintptr:
		return a.Uint() < b.Uint()
	case reflect.Array:
		// Compare the contents of both arrays.
		l := a.Len()
		for i := 0; i < l; i++ {
			av := a.Index(i)
			bv := b.Index(i)
			if av.Interface() == bv.Interface() {
				continue
			}
			return valueSortLess(av, bv)
		}
	}
	// Fallback (and equal-array tie-break): Value.String() output is stable
	// even for kinds not handled above.
	return a.String() < b.String()
}

// Less returns whether the value at index i should sort before the
// value at index j.  It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
	if s.strings == nil {
		return valueSortLess(s.values[i], s.values[j])
	}
	return s.strings[i] < s.strings[j]
}

// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer.  Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
	if len(values) == 0 {
		return
	}
	sort.Sort(newValuesSorter(values, cs))
}
--------------------------------------------------------------------------------
/vendor/github.com/davecgh/go-spew/spew/doc.go:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 | */ 16 | 17 | /* 18 | Package spew implements a deep pretty printer for Go data structures to aid in 19 | debugging. 20 | 21 | A quick overview of the additional features spew provides over the built-in 22 | printing facilities for Go data types are as follows: 23 | 24 | * Pointers are dereferenced and followed 25 | * Circular data structures are detected and handled properly 26 | * Custom Stringer/error interfaces are optionally invoked, including 27 | on unexported types 28 | * Custom types which only implement the Stringer/error interfaces via 29 | a pointer receiver are optionally invoked when passing non-pointer 30 | variables 31 | * Byte arrays and slices are dumped like the hexdump -C command which 32 | includes offsets, byte values in hex, and ASCII output (only when using 33 | Dump style) 34 | 35 | There are two different approaches spew allows for dumping Go data structures: 36 | 37 | * Dump style which prints with newlines, customizable indentation, 38 | and additional debug information such as types and all pointer addresses 39 | used to indirect to the final value 40 | * A custom Formatter interface that integrates cleanly with the standard fmt 41 | package and replaces %v, %+v, %#v, and %#+v to provide inline printing 42 | similar to the default %v while providing the additional functionality 43 | outlined above and passing unsupported format verbs such as %x and %q 44 | along to fmt 45 | 46 | Quick Start 47 | 48 | This section demonstrates how to quickly get started with spew. See the 49 | sections below for further details on formatting and configuration options. 
50 | 51 | To dump a variable with full newlines, indentation, type, and pointer 52 | information use Dump, Fdump, or Sdump: 53 | spew.Dump(myVar1, myVar2, ...) 54 | spew.Fdump(someWriter, myVar1, myVar2, ...) 55 | str := spew.Sdump(myVar1, myVar2, ...) 56 | 57 | Alternatively, if you would prefer to use format strings with a compacted inline 58 | printing style, use the convenience wrappers Printf, Fprintf, etc with 59 | %v (most compact), %+v (adds pointer addresses), %#v (adds types), or 60 | %#+v (adds types and pointer addresses): 61 | spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) 62 | spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) 63 | spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) 64 | spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) 65 | 66 | Configuration Options 67 | 68 | Configuration of spew is handled by fields in the ConfigState type. For 69 | convenience, all of the top-level functions use a global state available 70 | via the spew.Config global. 71 | 72 | It is also possible to create a ConfigState instance that provides methods 73 | equivalent to the top-level functions. This allows concurrent configuration 74 | options. See the ConfigState documentation for more details. 75 | 76 | The following configuration options are available: 77 | * Indent 78 | String to use for each indentation level for Dump functions. 79 | It is a single space by default. A popular alternative is "\t". 80 | 81 | * MaxDepth 82 | Maximum number of levels to descend into nested data structures. 83 | There is no limit by default. 84 | 85 | * DisableMethods 86 | Disables invocation of error and Stringer interface methods. 87 | Method invocation is enabled by default. 88 | 89 | * DisablePointerMethods 90 | Disables invocation of error and Stringer interface methods on types 91 | which only accept pointer receivers from non-pointer variables. 92 | Pointer method invocation is enabled by default. 
93 | 94 | * DisablePointerAddresses 95 | DisablePointerAddresses specifies whether to disable the printing of 96 | pointer addresses. This is useful when diffing data structures in tests. 97 | 98 | * DisableCapacities 99 | DisableCapacities specifies whether to disable the printing of 100 | capacities for arrays, slices, maps and channels. This is useful when 101 | diffing data structures in tests. 102 | 103 | * ContinueOnMethod 104 | Enables recursion into types after invoking error and Stringer interface 105 | methods. Recursion after method invocation is disabled by default. 106 | 107 | * SortKeys 108 | Specifies map keys should be sorted before being printed. Use 109 | this to have a more deterministic, diffable output. Note that 110 | only native types (bool, int, uint, floats, uintptr and string) 111 | and types which implement error or Stringer interfaces are 112 | supported with other types sorted according to the 113 | reflect.Value.String() output which guarantees display 114 | stability. Natural map order is used by default. 115 | 116 | * SpewKeys 117 | Specifies that, as a last resort attempt, map keys should be 118 | spewed to strings and sorted by those strings. This is only 119 | considered if SortKeys is true. 120 | 121 | Dump Usage 122 | 123 | Simply call spew.Dump with a list of variables you want to dump: 124 | 125 | spew.Dump(myVar1, myVar2, ...) 126 | 127 | You may also call spew.Fdump if you would prefer to output to an arbitrary 128 | io.Writer. For example, to dump to standard error: 129 | 130 | spew.Fdump(os.Stderr, myVar1, myVar2, ...) 131 | 132 | A third option is to call spew.Sdump to get the formatted output as a string: 133 | 134 | str := spew.Sdump(myVar1, myVar2, ...) 135 | 136 | Sample Dump Output 137 | 138 | See the Dump example for details on the setup of the types and variables being 139 | shown here. 
	(main.Foo) {
	 unexportedField: (*main.Bar)(0xf84002e210)({
	  flag: (main.Flag) flagTwo,
	  data: (uintptr) <nil>
	 }),
	 ExportedField: (map[interface {}]interface {}) (len=1) {
	  (string) (len=3) "one": (bool) true
	 }
	}

Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
	([]uint8) (len=32 cap=32) {
	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
	 00000020  31 32                                             |12|
	}

Custom Formatter

Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions.  The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations.  Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting.  In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Custom Formatter Usage

The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf.
The
functions have syntax you are most likely already familiar with:

	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
	spew.Println(myVar, myVar2)
	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)

See the Index for the full list of convenience functions.

Sample Formatter Output

Double pointer to a uint8:
	  %v: <**>5
	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
	 %#v: (**uint8)5
	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5

Pointer to circular struct with a uint8 field and a pointer to itself:
	  %v: <*>{1 <*><shown>}
	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}

See the Printf example for details on the setup of variables being shown
here.

Errors

Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output.  Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
*/
package spew
--------------------------------------------------------------------------------
/vendor/github.com/davecgh/go-spew/spew/spew.go:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"fmt"
	"io"
)

// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
	return fmt.Errorf(format, convertArgs(a)...)
}

// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprint(w, convertArgs(a)...)
}

// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	return fmt.Fprintf(w, format, convertArgs(a)...)
}

// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprintln(w, convertArgs(a)...)
}

// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
	return fmt.Print(convertArgs(a)...)
}

// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
	return fmt.Printf(format, convertArgs(a)...)
}

// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
	return fmt.Println(convertArgs(a)...)
}

// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
	return fmt.Sprint(convertArgs(a)...)
}

// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
	return fmt.Sprintf(format, convertArgs(a)...)
}

// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
	return fmt.Sprintln(convertArgs(a)...)
}

// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
	formatters = make([]interface{}, len(args))
	for index, arg := range args {
		formatters[index] = NewFormatter(arg)
	}
	return formatters
}
--------------------------------------------------------------------------------
/vendor/github.com/pmezard/go-difflib/LICENSE:
--------------------------------------------------------------------------------
Copyright (c) 2013, Patrick Mezard
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The names of its contributors may not be used to endorse or promote
products derived from this software without specific prior written
permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT 21 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 23 | TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 24 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 25 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 26 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell 2 | 3 | Please consider promoting this project if you find it useful. 4 | 5 | Permission is hereby granted, free of charge, to any person 6 | obtaining a copy of this software and associated documentation 7 | files (the "Software"), to deal in the Software without restriction, 8 | including without limitation the rights to use, copy, modify, merge, 9 | publish, distribute, sublicense, and/or sell copies of the Software, 10 | and to permit persons to whom the Software is furnished to do so, 11 | subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included 14 | in all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 18 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, 20 | DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT 21 | OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 22 | OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
23 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl: -------------------------------------------------------------------------------- 1 | {{.CommentFormat}} 2 | func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool { 3 | if h, ok := t.(tHelper); ok { h.Helper() } 4 | return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}}) 5 | } 6 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl: -------------------------------------------------------------------------------- 1 | {{.CommentWithoutT "a"}} 2 | func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { 3 | if h, ok := a.t.(tHelper); ok { h.Helper() } 4 | return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) 5 | } 6 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/assert/doc.go: -------------------------------------------------------------------------------- 1 | // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. 
2 | // 3 | // Example Usage 4 | // 5 | // The following is a complete example using assert in a standard test function: 6 | // import ( 7 | // "testing" 8 | // "github.com/stretchr/testify/assert" 9 | // ) 10 | // 11 | // func TestSomething(t *testing.T) { 12 | // 13 | // var a string = "Hello" 14 | // var b string = "Hello" 15 | // 16 | // assert.Equal(t, a, b, "The two words should be the same.") 17 | // 18 | // } 19 | // 20 | // if you assert many times, use the format below: 21 | // 22 | // import ( 23 | // "testing" 24 | // "github.com/stretchr/testify/assert" 25 | // ) 26 | // 27 | // func TestSomething(t *testing.T) { 28 | // assert := assert.New(t) 29 | // 30 | // var a string = "Hello" 31 | // var b string = "Hello" 32 | // 33 | // assert.Equal(a, b, "The two words should be the same.") 34 | // } 35 | // 36 | // Assertions 37 | // 38 | // Assertions allow you to easily write test code, and are global funcs in the `assert` package. 39 | // All assertion functions take, as the first argument, the `*testing.T` object provided by the 40 | // testing framework. This allows the assertion funcs to write the failings and other details to 41 | // the correct place. 42 | // 43 | // Every assertion function also takes an optional string message as the final argument, 44 | // allowing custom error messages to be appended to the message the assertion method outputs. 45 | package assert 46 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/assert/errors.go: -------------------------------------------------------------------------------- 1 | package assert 2 | 3 | import ( 4 | "errors" 5 | ) 6 | 7 | // AnError is an error instance useful for testing. If the code does not care 8 | // about error specifics, and only needs to return the error for example, this 9 | // error should be used to make the test code more readable. 
var AnError = errors.New("assert.AnError general error for testing")
--------------------------------------------------------------------------------
/vendor/github.com/stretchr/testify/assert/forward_assertions.go:
--------------------------------------------------------------------------------
package assert

// Assertions provides assertion methods around the
// TestingT interface.
type Assertions struct {
	t TestingT
}

// New makes a new Assertions object for the specified TestingT.
func New(t TestingT) *Assertions {
	return &Assertions{
		t: t,
	}
}

//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs
--------------------------------------------------------------------------------
/vendor/github.com/stretchr/testify/assert/http_assertions.go:
--------------------------------------------------------------------------------
package assert

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strings"
)

// httpCode is a helper that returns HTTP code of the response. It returns -1 and
// an error if building a new request fails.
func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
	w := httptest.NewRecorder()
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return -1, err
	}
	// Encode the supplied values as the query string; a nil url.Values
	// encodes to the empty string.
	req.URL.RawQuery = values.Encode()
	handler(w, req)
	return w.Code, nil
}

// HTTPSuccess asserts that a specified handler returns a success status code.
//
//	assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}
	code, err := httpCode(handler, method, url, values)
	if err != nil {
		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
		return false
	}

	// Success means 200 OK through 206 Partial Content inclusive.
	isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
	if !isSuccessCode {
		Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
	}

	return isSuccessCode
}

// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
//	assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}
	code, err := httpCode(handler, method, url, values)
	if err != nil {
		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
		return false
	}

	// Redirect means 300 Multiple Choices through 307 Temporary Redirect.
	isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
	if !isRedirectCode {
		Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
	}

	return isRedirectCode
}

// HTTPError asserts that a specified handler returns an error status code.
//
//	assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}
	code, err := httpCode(handler, method, url, values)
	if err != nil {
		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
		return false
	}

	// Any 4xx or 5xx status counts as an error.
	isErrorCode := code >= http.StatusBadRequest
	if !isErrorCode {
		Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
	}

	return isErrorCode
}

// HTTPBody is a helper that returns HTTP body of the response. It returns
// empty string if building a new request fails.
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
	w := httptest.NewRecorder()
	req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
	if err != nil {
		return ""
	}
	handler(w, req)
	return w.Body.String()
}

// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
//	assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}
	body := HTTPBody(handler, method, url, values)

	contains := strings.Contains(body, fmt.Sprint(str))
	if !contains {
		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
	}

	return contains
}

// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
//	assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
	if h, ok := t.(tHelper); ok {
		h.Helper()
	}
	body := HTTPBody(handler, method, url, values)

	contains := strings.Contains(body, fmt.Sprint(str))
	if contains {
		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
	}

	return !contains
}
--------------------------------------------------------------------------------
/vendor/github.com/stretchr/testify/require/doc.go:
--------------------------------------------------------------------------------
// Package require implements the same assertions as the `assert` package but
// stops test execution when a test fails.
3 | // 4 | // Example Usage 5 | // 6 | // The following is a complete example using require in a standard test function: 7 | //    import ( 8 | //      "testing" 9 | //      "github.com/stretchr/testify/require" 10 | //    ) 11 | // 12 | //    func TestSomething(t *testing.T) { 13 | // 14 | //      var a string = "Hello" 15 | //      var b string = "Hello" 16 | // 17 | //      require.Equal(t, a, b, "The two words should be the same.") 18 | // 19 | //    } 20 | // 21 | // Assertions 22 | // 23 | // The `require` package has the same global functions as in the `assert` package, 24 | // but instead of returning a boolean result they call `t.FailNow()`. 25 | // 26 | // Every assertion function also takes an optional string message as the final argument, 27 | // allowing custom error messages to be appended to the message the assertion method outputs. 28 | package require 29 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/require/forward_requirements.go: -------------------------------------------------------------------------------- 1 | package require 2 | 3 | // Assertions provides assertion methods around the 4 | // TestingT interface. 5 | type Assertions struct { 6 | 	t TestingT 7 | } 8 | 9 | // New makes a new Assertions object for the specified TestingT. 
10 | func New(t TestingT) *Assertions { 11 | return &Assertions{ 12 | t: t, 13 | } 14 | } 15 | 16 | //go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl -include-format-funcs 17 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/require/require.go.tmpl: -------------------------------------------------------------------------------- 1 | {{.Comment}} 2 | func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { 3 | if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } 4 | if h, ok := t.(tHelper); ok { h.Helper() } 5 | t.FailNow() 6 | } 7 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/require/require_forward.go.tmpl: -------------------------------------------------------------------------------- 1 | {{.CommentWithoutT "a"}} 2 | func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) { 3 | if h, ok := a.t.(tHelper); ok { h.Helper() } 4 | {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) 5 | } 6 | -------------------------------------------------------------------------------- /vendor/github.com/stretchr/testify/require/requirements.go: -------------------------------------------------------------------------------- 1 | package require 2 | 3 | // TestingT is an interface wrapper around *testing.T 4 | type TestingT interface { 5 | Errorf(format string, args ...interface{}) 6 | FailNow() 7 | } 8 | 9 | type tHelper interface { 10 | Helper() 11 | } 12 | 13 | // ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful 14 | // for table driven tests. 15 | type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) 16 | 17 | // ValueAssertionFunc is a common function prototype when validating a single value. Can be useful 18 | // for table driven tests. 
19 | type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) 20 | 21 | // BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful 22 | // for table driven tests. 23 | type BoolAssertionFunc func(TestingT, bool, ...interface{}) 24 | 25 | // ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful 26 | // for table driven tests. 27 | type ErrorAssertionFunc func(TestingT, error, ...interface{}) 28 | 29 | //go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl -include-format-funcs 30 | -------------------------------------------------------------------------------- /vendor/modules.txt: -------------------------------------------------------------------------------- 1 | # github.com/davecgh/go-spew v1.1.1 2 | github.com/davecgh/go-spew/spew 3 | # github.com/pmezard/go-difflib v1.0.0 4 | github.com/pmezard/go-difflib/difflib 5 | # github.com/stretchr/testify v1.2.2 6 | github.com/stretchr/testify/assert 7 | github.com/stretchr/testify/require 8 | --------------------------------------------------------------------------------