├── LICENSE ├── README.md ├── go.mod ├── go.sum ├── lib ├── compression.go ├── gzip.go ├── xz.go └── zstd.go ├── main.go └── vendor ├── github.com ├── klauspost │ └── compress │ │ ├── LICENSE │ │ ├── fse │ │ ├── README.md │ │ ├── bitreader.go │ │ ├── bitwriter.go │ │ ├── bytereader.go │ │ ├── compress.go │ │ ├── decompress.go │ │ └── fse.go │ │ ├── huff0 │ │ ├── .gitignore │ │ ├── README.md │ │ ├── bitreader.go │ │ ├── bitwriter.go │ │ ├── bytereader.go │ │ ├── compress.go │ │ ├── decompress.go │ │ └── huff0.go │ │ ├── snappy │ │ ├── .gitignore │ │ ├── AUTHORS │ │ ├── CONTRIBUTORS │ │ ├── LICENSE │ │ ├── README │ │ ├── decode.go │ │ ├── decode_amd64.go │ │ ├── decode_amd64.s │ │ ├── decode_other.go │ │ ├── encode.go │ │ ├── encode_amd64.go │ │ ├── encode_amd64.s │ │ ├── encode_other.go │ │ ├── runbench.cmd │ │ └── snappy.go │ │ └── zstd │ │ ├── README.md │ │ ├── bitreader.go │ │ ├── bitwriter.go │ │ ├── blockdec.go │ │ ├── blockenc.go │ │ ├── blocktype_string.go │ │ ├── bytebuf.go │ │ ├── bytereader.go │ │ ├── decoder.go │ │ ├── decoder_options.go │ │ ├── enc_dfast.go │ │ ├── enc_fast.go │ │ ├── enc_params.go │ │ ├── encoder.go │ │ ├── encoder_options.go │ │ ├── framedec.go │ │ ├── frameenc.go │ │ ├── fse_decoder.go │ │ ├── fse_encoder.go │ │ ├── fse_predefined.go │ │ ├── hash.go │ │ ├── history.go │ │ ├── internal │ │ └── xxhash │ │ │ ├── LICENSE.txt │ │ │ ├── README.md │ │ │ ├── xxhash.go │ │ │ ├── xxhash_amd64.go │ │ │ ├── xxhash_amd64.s │ │ │ ├── xxhash_other.go │ │ │ └── xxhash_safe.go │ │ ├── seqdec.go │ │ ├── seqenc.go │ │ ├── snappy.go │ │ └── zstd.go ├── pkg │ └── errors │ │ ├── .gitignore │ │ ├── .travis.yml │ │ ├── LICENSE │ │ ├── README.md │ │ ├── appveyor.yml │ │ ├── errors.go │ │ └── stack.go ├── ulikunitz │ └── xz │ │ ├── .gitignore │ │ ├── LICENSE │ │ ├── README.md │ │ ├── TODO.md │ │ ├── bits.go │ │ ├── crc.go │ │ ├── example.go │ │ ├── format.go │ │ ├── fox.xz │ │ ├── go.mod │ │ ├── internal │ │ ├── hash │ │ │ ├── cyclic_poly.go │ │ │ ├── doc.go │ 
│ │ ├── rabin_karp.go │ │ │ └── roller.go │ │ └── xlog │ │ │ └── xlog.go │ │ ├── lzma │ │ ├── bintree.go │ │ ├── bitops.go │ │ ├── breader.go │ │ ├── buffer.go │ │ ├── bytewriter.go │ │ ├── decoder.go │ │ ├── decoderdict.go │ │ ├── directcodec.go │ │ ├── distcodec.go │ │ ├── encoder.go │ │ ├── encoderdict.go │ │ ├── fox.lzma │ │ ├── hashtable.go │ │ ├── header.go │ │ ├── header2.go │ │ ├── lengthcodec.go │ │ ├── literalcodec.go │ │ ├── matchalgorithm.go │ │ ├── operation.go │ │ ├── prob.go │ │ ├── properties.go │ │ ├── rangecodec.go │ │ ├── reader.go │ │ ├── reader2.go │ │ ├── state.go │ │ ├── treecodecs.go │ │ ├── writer.go │ │ └── writer2.go │ │ ├── lzmafilter.go │ │ ├── make-docs │ │ ├── reader.go │ │ └── writer.go └── xi2 │ └── xz │ ├── AUTHORS │ ├── LICENSE │ ├── README.md │ ├── dec_bcj.go │ ├── dec_delta.go │ ├── dec_lzma2.go │ ├── dec_stream.go │ ├── dec_util.go │ ├── dec_xz.go │ ├── doc.go │ └── reader.go └── modules.txt /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Nir 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # gopacker 2 | A UPX-like packer to shrink executables. 3 | 4 | ## Quick Start 5 | ``` 6 | go get github.com/nirhaas/gopacker 7 | gopacker 8 | ``` 9 | 10 | ## How it works 11 | 12 | ### Packing 13 | * Copy `gopacker` executable itself to output file. 14 | * Compress and stream (append) to output file. 15 | * Append compressed size. 16 | * Append magic string. 17 | 18 | Output file is now a functional executable. 19 | 20 | ### Unpacking 21 | When running the packed executable: 22 | * Checks the last few bytes to see if the magic string is there. 23 | * Reads the compressed size. 24 | * Reads the compressed data. 25 | * Uncompresses to memory. 26 | * Overwrites the packed executable with the uncompressed original. 27 | * Calls syscall exec to run the unpacked executable. 28 | 29 | Possible TODO: 30 | * Better compression. 31 | * Encryption.
32 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/nirhaas/gopacker 2 | 3 | go 1.12 4 | 5 | require ( 6 | github.com/google/go-cmp v0.3.0 // indirect 7 | github.com/klauspost/compress v1.7.1 8 | github.com/klauspost/cpuid v1.2.1 // indirect 9 | github.com/pkg/errors v0.8.1 10 | github.com/ulikunitz/xz v0.5.6 11 | github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 12 | ) 13 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= 2 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 3 | github.com/klauspost/compress v1.7.1 h1:VRD0WLa8rweLB7alA5WMSVkoAtrI8xou5RrNd4JUlR0= 4 | github.com/klauspost/compress v1.7.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= 5 | github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= 6 | github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= 7 | github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= 8 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 9 | github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8= 10 | github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= 11 | github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= 12 | github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= 13 | -------------------------------------------------------------------------------- /lib/compression.go: -------------------------------------------------------------------------------- 1 | package 
lib 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | // Compression is an interface to be implemented to standardize compression 8 | // mechanisms used by this tool. 9 | type Compression interface { 10 | CompressWriter(io.Writer) (io.WriteCloser, error) 11 | DecompressReader(io.Reader) (io.ReadCloser, error) 12 | } 13 | 14 | // CompressStream compresses by streaming from in to out. The compressor is closed even when the copy fails, so buffered data is flushed and resources are released; the first error encountered (copy or close) is returned. 15 | func CompressStream(compression Compression, out io.Writer, in io.Reader) (n int64, err error) { 16 | writer, err := compression.CompressWriter(out) 17 | if err != nil { 18 | return 0, err 19 | } 20 | defer func() { 21 | if cerr := writer.Close(); err == nil { // always close; keep the first error 22 | err = cerr 23 | } 24 | }() 25 | return io.Copy(writer, in) 26 | } 27 | 28 | // DecompressStream decompresses by streaming from in to out. The decompressor is closed even when the copy fails; the first error encountered (copy or close) is returned. 29 | func DecompressStream(compression Compression, out io.Writer, in io.Reader) (n int64, err error) { 30 | reader, err := compression.DecompressReader(in) 31 | if err != nil { 32 | return 0, err 33 | } 34 | defer func() { 35 | if cerr := reader.Close(); err == nil { // always close; keep the first error 36 | err = cerr 37 | } 38 | }() 39 | return io.Copy(out, reader) 40 | } 41 | -------------------------------------------------------------------------------- /lib/gzip.go: -------------------------------------------------------------------------------- 1 | package lib 2 | 3 | import ( 4 | "compress/gzip" 5 | "io" 6 | ) 7 | 8 | // GZIPCompression implements Compression using compress/gzip. 9 | type GZIPCompression struct{} 10 | 11 | // CompressWriter returns a gzip writer (BestSpeed) wrapping out. 12 | func (z GZIPCompression) CompressWriter(out io.Writer) (io.WriteCloser, error) { 13 | return gzip.NewWriterLevel(out, gzip.BestSpeed) 14 | } 15 | 16 | // DecompressReader returns a gzip reader wrapping in. 
17 | func (z GZIPCompression) DecompressReader(in io.Reader) (io.ReadCloser, error) { 18 | return gzip.NewReader(in) 19 | } 20 | -------------------------------------------------------------------------------- /lib/xz.go: -------------------------------------------------------------------------------- 1 | package lib 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/ulikunitz/xz" 7 | fastxz "github.com/xi2/xz" 8 | ) 9 | 10 | // XZCompression implements Compression: ulikunitz/xz for compression, xi2/xz (fastxz) for decompression. 11 | type XZCompression struct{} 12 | 13 | // CompressWriter returns an xz writer wrapping out. 14 | func (x XZCompression) CompressWriter(out io.Writer) (io.WriteCloser, error) { 15 | return xz.NewWriter(out) 16 | } 17 | 18 | // DecompressReader returns an xz reader wrapping in. An error from fastxz.NewReader (e.g. a bad stream header) is propagated to the caller instead of being silently dropped. 19 | func (x XZCompression) DecompressReader(in io.Reader) (io.ReadCloser, error) { 20 | r, err := fastxz.NewReader(in, 0) 21 | return NewXZReaderWithCloser(r, err), err 22 | } 23 | 24 | // XZReaderWithCloser adapts fastxz.Reader to io.ReadCloser, because the 25 | // original fastxz.Reader does not provide a conforming Close method. 26 | type XZReaderWithCloser struct { 27 | r *fastxz.Reader 28 | } 29 | 30 | // Read wraps fastxz.Reader Read. 31 | func (zr XZReaderWithCloser) Read(b []byte) (int, error) { 32 | return zr.r.Read(b) 33 | } 34 | 35 | // Close satisfies io.Closer; it always returns nil (the underlying close is intentionally disabled below). 36 | func (zr XZReaderWithCloser) Close() error { 37 | // zr.r.Close() 38 | return nil 39 | } 40 | 41 | // NewXZReaderWithCloser wraps reader. The err argument is accepted for call-site convenience and intentionally ignored here; callers propagate it separately. 42 | func NewXZReaderWithCloser(reader *fastxz.Reader, err error) XZReaderWithCloser { 43 | return XZReaderWithCloser{ 44 | r: reader, 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /lib/zstd.go: -------------------------------------------------------------------------------- 1 | package lib 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/klauspost/compress/zstd" 7 | ) 8 | 9 | // ZSTDReaderWithCloser adapts zstd.Decoder to io.ReadCloser, because 10 | // Decoder.Close returns no error.
11 | type ZSTDReaderWithCloser struct { 12 | r *zstd.Decoder 13 | } 14 | 15 | // Read wraps zstd.Decoder Read. 16 | func (zr ZSTDReaderWithCloser) Read(b []byte) (int, error) { 17 | return zr.r.Read(b) 18 | } 19 | 20 | // Close wraps zstd.Decoder Close (which returns nothing) and returns nil to satisfy io.Closer. 21 | func (zr ZSTDReaderWithCloser) Close() error { 22 | zr.r.Close() 23 | return nil 24 | } 25 | 26 | // NewZSTDReaderWithCloser wraps a zstd.Decoder in a ZSTDReaderWithCloser. 27 | func NewZSTDReaderWithCloser(reader *zstd.Decoder) ZSTDReaderWithCloser { 28 | return ZSTDReaderWithCloser{ 29 | r: reader, 30 | } 31 | } 32 | 33 | // ZSTDCompression implements Compression using klauspost/compress/zstd. 34 | type ZSTDCompression struct{} 35 | 36 | // CompressWriter returns a zstd writer (SpeedBetterCompression) wrapping out. 37 | func (z ZSTDCompression) CompressWriter(out io.Writer) (io.WriteCloser, error) { 38 | return zstd.NewWriter(out, zstd.WithEncoderLevel(zstd.SpeedBetterCompression)) 39 | } 40 | 41 | // DecompressReader returns a zstd reader wrapping in. On error no wrapper is returned, so a caller can no longer Read through a nil decoder. 42 | func (z ZSTDCompression) DecompressReader(in io.Reader) (io.ReadCloser, error) { 43 | reader, err := zstd.NewReader(in) 44 | if err != nil { 45 | return nil, err 46 | } 47 | return NewZSTDReaderWithCloser(reader), nil 48 | } 49 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012 The Go Authors. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are 5 | met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above 10 | copyright notice, this list of conditions and the following disclaimer 11 | in the documentation and/or other materials provided with the 12 | distribution. 13 | * Neither the name of Google Inc.
nor the names of its 14 | contributors may be used to endorse or promote products derived from 15 | this software without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/fse/README.md: -------------------------------------------------------------------------------- 1 | # Finite State Entropy 2 | 3 | This package provides Finite State Entropy encoding and decoding. 4 | 5 | Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) 6 | encoding provides a fast near-optimal symbol encoding/decoding 7 | for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). 8 | 9 | This can be used for compressing input with a lot of similar input values to the smallest number of bytes. 10 | This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, 11 | but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. 
12 | 13 | * [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) 14 | 15 | ## News 16 | 17 | * Feb 2018: First implementation released. Consider this beta software for now. 18 | 19 | # Usage 20 | 21 | This package provides a low level interface that allows to compress single independent blocks. 22 | 23 | Each block is separate, and there is no built in integrity checks. 24 | This means that the caller should keep track of block sizes and also do checksums if needed. 25 | 26 | Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. 27 | You must provide input and will receive the output and maybe an error. 28 | 29 | These error values can be returned: 30 | 31 | | Error | Description | 32 | |---------------------|-----------------------------------------------------------------------------| 33 | | `` | Everything ok, output is returned | 34 | | `ErrIncompressible` | Returned when input is judged to be too hard to compress | 35 | | `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | 36 | | `(error)` | An internal error occurred. | 37 | 38 | As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. 39 | 40 | To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object 41 | that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same 42 | object can be used for both. 43 | 44 | Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this 45 | you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. 46 | 47 | Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. 
48 | You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back 49 | your input was likely corrupted. 50 | 51 | It is important to note that a successful decoding does *not* mean your output matches your original input. 52 | There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. 53 | 54 | For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). 55 | 56 | # Performance 57 | 58 | A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. 59 | All compression functions are currently only running on the calling goroutine so only one core will be used per block. 60 | 61 | The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input 62 | is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be 63 | beneficial to transpose all your input values down by 64. 64 | 65 | With moderate block sizes around 64k speed are typically 200MB/s per core for compression and 66 | around 300MB/s decompression speed. 67 | 68 | The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. 69 | 70 | # Plans 71 | 72 | At one point, more internals will be exposed to facilitate more "expert" usage of the components. 73 | 74 | A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). 75 | 76 | # Contributing 77 | 78 | Contributions are always welcome. Be aware that adding public functions will require good justification and breaking 79 | changes will likely not be accepted. If in doubt open an issue before writing the PR. 
-------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/fse/bitreader.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Klaus Post. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | // Based on work Copyright (c) 2013, Yann Collet, released under BSD License. 5 | 6 | package fse 7 | 8 | import ( 9 | "errors" 10 | "io" 11 | ) 12 | 13 | // bitReader reads a bitstream in reverse. 14 | // The last set bit indicates the start of the stream and is used 15 | // for aligning the input. 16 | type bitReader struct { 17 | in []byte 18 | off uint // next byte to read is at in[off - 1] 19 | value uint64 20 | bitsRead uint8 21 | } 22 | 23 | // init initializes and resets the bit reader. 24 | func (b *bitReader) init(in []byte) error { 25 | if len(in) < 1 { 26 | return errors.New("corrupt stream: too short") 27 | } 28 | b.in = in 29 | b.off = uint(len(in)) 30 | // The highest bit of the last byte indicates where to start 31 | v := in[len(in)-1] 32 | if v == 0 { 33 | return errors.New("corrupt stream, did not find end of stream") 34 | } 35 | b.bitsRead = 64 36 | b.value = 0 37 | b.fill() 38 | b.fill() 39 | b.bitsRead += 8 - uint8(highBits(uint32(v))) 40 | return nil 41 | } 42 | 43 | // getBits will return n bits. n can be 0. 44 | func (b *bitReader) getBits(n uint8) uint16 { 45 | if n == 0 || b.bitsRead >= 64 { 46 | return 0 47 | } 48 | return b.getBitsFast(n) 49 | } 50 | 51 | // getBitsFast requires that at least one bit is requested every time. 52 | // There are no checks if the buffer is filled. 53 | func (b *bitReader) getBitsFast(n uint8) uint16 { 54 | const regMask = 64 - 1 55 | v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) 56 | b.bitsRead += n 57 | return v 58 | } 59 | 60 | // fillFast() will make sure at least 32 bits are available. 
61 | // There must be at least 4 bytes available. 62 | func (b *bitReader) fillFast() { 63 | if b.bitsRead < 32 { 64 | return 65 | } 66 | // Do single re-slice to avoid bounds checks. 67 | v := b.in[b.off-4 : b.off] 68 | low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) 69 | b.value = (b.value << 32) | uint64(low) 70 | b.bitsRead -= 32 71 | b.off -= 4 72 | } 73 | 74 | // fill() will make sure at least 32 bits are available. 75 | func (b *bitReader) fill() { 76 | if b.bitsRead < 32 { 77 | return 78 | } 79 | if b.off > 4 { 80 | v := b.in[b.off-4 : b.off] 81 | low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) 82 | b.value = (b.value << 32) | uint64(low) 83 | b.bitsRead -= 32 84 | b.off -= 4 85 | return 86 | } 87 | for b.off > 0 { 88 | b.value = (b.value << 8) | uint64(b.in[b.off-1]) 89 | b.bitsRead -= 8 90 | b.off-- 91 | } 92 | } 93 | 94 | // finished returns true if all bits have been read from the bit stream. 95 | func (b *bitReader) finished() bool { 96 | return b.off == 0 && b.bitsRead >= 64 97 | } 98 | 99 | // close the bitstream and returns an error if out-of-buffer reads occurred. 100 | func (b *bitReader) close() error { 101 | // Release reference. 102 | b.in = nil 103 | if b.bitsRead > 64 { 104 | return io.ErrUnexpectedEOF 105 | } 106 | return nil 107 | } 108 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/fse/bitwriter.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Klaus Post. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | // Based on work Copyright (c) 2013, Yann Collet, released under BSD License. 5 | 6 | package fse 7 | 8 | import "fmt" 9 | 10 | // bitWriter will write bits. 11 | // First bit will be LSB of the first byte of output. 
12 | type bitWriter struct { 13 | bitContainer uint64 14 | nBits uint8 15 | out []byte 16 | } 17 | 18 | // bitMask16 is bitmasks. Has extra to avoid bounds check. 19 | var bitMask16 = [32]uint16{ 20 | 0, 1, 3, 7, 0xF, 0x1F, 21 | 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 22 | 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, 23 | 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 24 | 0xFFFF, 0xFFFF} /* up to 16 bits */ 25 | 26 | // addBits16NC will add up to 16 bits. 27 | // It will not check if there is space for them, 28 | // so the caller must ensure that it has flushed recently. 29 | func (b *bitWriter) addBits16NC(value uint16, bits uint8) { 30 | b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) 31 | b.nBits += bits 32 | } 33 | 34 | // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. 35 | // It will not check if there is space for them, so the caller must ensure that it has flushed recently. 36 | func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { 37 | b.bitContainer |= uint64(value) << (b.nBits & 63) 38 | b.nBits += bits 39 | } 40 | 41 | // addBits16ZeroNC will add up to 16 bits. 42 | // It will not check if there is space for them, 43 | // so the caller must ensure that it has flushed recently. 44 | // This is fastest if bits can be zero. 45 | func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { 46 | if bits == 0 { 47 | return 48 | } 49 | value <<= (16 - bits) & 15 50 | value >>= (16 - bits) & 15 51 | b.bitContainer |= uint64(value) << (b.nBits & 63) 52 | b.nBits += bits 53 | } 54 | 55 | // flush will flush all pending full bytes. 56 | // There will be at least 56 bits available for writing when this has been called. 57 | // Using flush32 is faster, but leaves less space for writing. 
58 | func (b *bitWriter) flush() { 59 | v := b.nBits >> 3 60 | switch v { 61 | case 0: 62 | case 1: 63 | b.out = append(b.out, 64 | byte(b.bitContainer), 65 | ) 66 | case 2: 67 | b.out = append(b.out, 68 | byte(b.bitContainer), 69 | byte(b.bitContainer>>8), 70 | ) 71 | case 3: 72 | b.out = append(b.out, 73 | byte(b.bitContainer), 74 | byte(b.bitContainer>>8), 75 | byte(b.bitContainer>>16), 76 | ) 77 | case 4: 78 | b.out = append(b.out, 79 | byte(b.bitContainer), 80 | byte(b.bitContainer>>8), 81 | byte(b.bitContainer>>16), 82 | byte(b.bitContainer>>24), 83 | ) 84 | case 5: 85 | b.out = append(b.out, 86 | byte(b.bitContainer), 87 | byte(b.bitContainer>>8), 88 | byte(b.bitContainer>>16), 89 | byte(b.bitContainer>>24), 90 | byte(b.bitContainer>>32), 91 | ) 92 | case 6: 93 | b.out = append(b.out, 94 | byte(b.bitContainer), 95 | byte(b.bitContainer>>8), 96 | byte(b.bitContainer>>16), 97 | byte(b.bitContainer>>24), 98 | byte(b.bitContainer>>32), 99 | byte(b.bitContainer>>40), 100 | ) 101 | case 7: 102 | b.out = append(b.out, 103 | byte(b.bitContainer), 104 | byte(b.bitContainer>>8), 105 | byte(b.bitContainer>>16), 106 | byte(b.bitContainer>>24), 107 | byte(b.bitContainer>>32), 108 | byte(b.bitContainer>>40), 109 | byte(b.bitContainer>>48), 110 | ) 111 | case 8: 112 | b.out = append(b.out, 113 | byte(b.bitContainer), 114 | byte(b.bitContainer>>8), 115 | byte(b.bitContainer>>16), 116 | byte(b.bitContainer>>24), 117 | byte(b.bitContainer>>32), 118 | byte(b.bitContainer>>40), 119 | byte(b.bitContainer>>48), 120 | byte(b.bitContainer>>56), 121 | ) 122 | default: 123 | panic(fmt.Errorf("bits (%d) > 64", b.nBits)) 124 | } 125 | b.bitContainer >>= v << 3 126 | b.nBits &= 7 127 | } 128 | 129 | // flush32 will flush out, so there are at least 32 bits available for writing. 
130 | func (b *bitWriter) flush32() { 131 | if b.nBits < 32 { 132 | return 133 | } 134 | b.out = append(b.out, 135 | byte(b.bitContainer), 136 | byte(b.bitContainer>>8), 137 | byte(b.bitContainer>>16), 138 | byte(b.bitContainer>>24)) 139 | b.nBits -= 32 140 | b.bitContainer >>= 32 141 | } 142 | 143 | // flushAlign will flush remaining full bytes and align to next byte boundary. 144 | func (b *bitWriter) flushAlign() { 145 | nbBytes := (b.nBits + 7) >> 3 146 | for i := uint8(0); i < nbBytes; i++ { 147 | b.out = append(b.out, byte(b.bitContainer>>(i*8))) 148 | } 149 | b.nBits = 0 150 | b.bitContainer = 0 151 | } 152 | 153 | // close will write the alignment bit and write the final byte(s) 154 | // to the output. 155 | func (b *bitWriter) close() error { 156 | // End mark 157 | b.addBits16Clean(1, 1) 158 | // flush until next byte. 159 | b.flushAlign() 160 | return nil 161 | } 162 | 163 | // reset and continue writing by appending to out. 164 | func (b *bitWriter) reset(out []byte) { 165 | b.bitContainer = 0 166 | b.nBits = 0 167 | b.out = out 168 | } 169 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/fse/bytereader.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Klaus Post. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | // Based on work Copyright (c) 2013, Yann Collet, released under BSD License. 5 | 6 | package fse 7 | 8 | // byteReader provides a byte reader that reads 9 | // little endian values from a byte stream. 10 | // The input stream is manually advanced. 11 | // The reader performs no bounds checks. 12 | type byteReader struct { 13 | b []byte 14 | off int 15 | } 16 | 17 | // init will initialize the reader and set the input. 
18 | func (b *byteReader) init(in []byte) { 19 | b.b = in 20 | b.off = 0 21 | } 22 | 23 | // advance the stream b n bytes. 24 | func (b *byteReader) advance(n uint) { 25 | b.off += int(n) 26 | } 27 | 28 | // Int32 returns a little endian int32 starting at current offset. 29 | func (b byteReader) Int32() int32 { 30 | b2 := b.b[b.off : b.off+4 : b.off+4] 31 | v3 := int32(b2[3]) 32 | v2 := int32(b2[2]) 33 | v1 := int32(b2[1]) 34 | v0 := int32(b2[0]) 35 | return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) 36 | } 37 | 38 | // Uint32 returns a little endian uint32 starting at current offset. 39 | func (b byteReader) Uint32() uint32 { 40 | b2 := b.b[b.off : b.off+4 : b.off+4] 41 | v3 := uint32(b2[3]) 42 | v2 := uint32(b2[2]) 43 | v1 := uint32(b2[1]) 44 | v0 := uint32(b2[0]) 45 | return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) 46 | } 47 | 48 | // unread returns the unread portion of the input. 49 | func (b byteReader) unread() []byte { 50 | return b.b[b.off:] 51 | } 52 | 53 | // remain will return the number of bytes remaining. 54 | func (b byteReader) remain() int { 55 | return len(b.b) - b.off 56 | } 57 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/fse/fse.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Klaus Post. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | // Based on work Copyright (c) 2013, Yann Collet, released under BSD License. 5 | 6 | // Package fse provides Finite State Entropy encoding and decoding. 7 | // 8 | // Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding 9 | // for byte blocks as implemented in zstd. 10 | // 11 | // See https://github.com/klauspost/compress/tree/master/fse for more information. 
12 | package fse 13 | 14 | import ( 15 | "errors" 16 | "fmt" 17 | "math/bits" 18 | ) 19 | 20 | const ( 21 | /*!MEMORY_USAGE : 22 | * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) 23 | * Increasing memory usage improves compression ratio 24 | * Reduced memory usage can improve speed, due to cache effect 25 | * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ 26 | maxMemoryUsage = 14 27 | defaultMemoryUsage = 13 28 | 29 | maxTableLog = maxMemoryUsage - 2 30 | maxTablesize = 1 << maxTableLog 31 | defaultTablelog = defaultMemoryUsage - 2 32 | minTablelog = 5 33 | maxSymbolValue = 255 34 | ) 35 | 36 | var ( 37 | // ErrIncompressible is returned when input is judged to be too hard to compress. 38 | ErrIncompressible = errors.New("input is not compressible") 39 | 40 | // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. 41 | ErrUseRLE = errors.New("input is single value repeated") 42 | ) 43 | 44 | // Scratch provides temporary storage for compression and decompression. 45 | type Scratch struct { 46 | // Private 47 | count [maxSymbolValue + 1]uint32 48 | norm [maxSymbolValue + 1]int16 49 | symbolLen uint16 // Length of active part of the symbol table. 50 | actualTableLog uint8 // Selected tablelog. 51 | br byteReader 52 | bits bitReader 53 | bw bitWriter 54 | ct cTable // Compression tables. 55 | decTable []decSymbol // Decompression table. 56 | zeroBits bool // no bits has prob > 50%. 57 | clearCount bool // clear count 58 | maxCount int // count of the most probable symbol 59 | 60 | // Per block parameters. 61 | // These can be used to override compression parameters of the block. 62 | // Do not touch, unless you know what you are doing. 63 | 64 | // Out is output buffer. 65 | // If the scratch is re-used before the caller is done processing the output, 66 | // set this field to nil. 
67 | // Otherwise the output buffer will be re-used for next Compression/Decompression step 68 | // and allocation will be avoided. 69 | Out []byte 70 | 71 | // MaxSymbolValue will override the maximum symbol value of the next block. 72 | MaxSymbolValue uint8 73 | 74 | // TableLog will attempt to override the tablelog for the next block. 75 | TableLog uint8 76 | 77 | // DecompressLimit limits the maximum decoded size acceptable. 78 | // If > 0 decompression will stop when approximately this many bytes 79 | // has been decoded. 80 | // If 0, maximum size will be 2GB. 81 | DecompressLimit int 82 | } 83 | 84 | // Histogram allows to populate the histogram and skip that step in the compression, 85 | // It otherwise allows to inspect the histogram when compression is done. 86 | // To indicate that you have populated the histogram call HistogramFinished 87 | // with the value of the highest populated symbol, as well as the number of entries 88 | // in the most populated entry. These are accepted at face value. 89 | // The returned slice will always be length 256. 90 | func (s *Scratch) Histogram() []uint32 { 91 | return s.count[:] 92 | } 93 | 94 | // HistogramFinished can be called to indicate that the histogram has been populated. 95 | // maxSymbol is the index of the highest set symbol of the next data segment. 96 | // maxCount is the number of entries in the most populated entry. 97 | // These are accepted at face value. 98 | func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { 99 | s.maxCount = maxCount 100 | s.symbolLen = uint16(maxSymbol) + 1 101 | s.clearCount = maxCount != 0 102 | } 103 | 104 | // prepare will prepare and allocate scratch tables used for both compression and decompression. 
func (s *Scratch) prepare(in []byte) (*Scratch, error) {
	// A nil receiver is tolerated; a fresh Scratch is allocated instead.
	if s == nil {
		s = &Scratch{}
	}
	// Apply defaults for any unset per-block parameters.
	if s.MaxSymbolValue == 0 {
		s.MaxSymbolValue = 255
	}
	if s.TableLog == 0 {
		s.TableLog = defaultTablelog
	}
	if s.TableLog > maxTableLog {
		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
	}
	if cap(s.Out) == 0 {
		s.Out = make([]byte, 0, len(in))
	}
	// Zero the histogram only when it is stale and not user-populated
	// (see HistogramFinished).
	if s.clearCount && s.maxCount == 0 {
		for i := range s.count {
			s.count[i] = 0
		}
		s.clearCount = false
	}
	s.br.init(in)
	if s.DecompressLimit == 0 {
		// Max size 2GB.
		s.DecompressLimit = (2 << 30) - 1
	}

	return s, nil
}

// tableStep returns the next table index.
func tableStep(tableSize uint32) uint32 {
	return (tableSize >> 1) + (tableSize >> 3) + 3
}

// highBits returns the index (log2) of the highest set bit in val.
// val must be > 0: for val == 0, bits.Len32 returns 0 and the -1
// wraps around to a huge uint32.
func highBits(val uint32) (n uint32) {
	return uint32(bits.Len32(val) - 1)
}
--------------------------------------------------------------------------------
/vendor/github.com/klauspost/compress/huff0/.gitignore:
--------------------------------------------------------------------------------
/huff0-fuzz.zip
--------------------------------------------------------------------------------
/vendor/github.com/klauspost/compress/huff0/README.md:
--------------------------------------------------------------------------------
# Huff0 entropy compression

This package provides Huff0 encoding and decoding as used in zstd.

[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders),
a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU
(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds.

This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders,
but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.

* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)

THIS PACKAGE IS NOT CONSIDERED STABLE AND API OR ENCODING MAY CHANGE IN THE FUTURE.

## News

* Mar 2018: First implementation released. Consider this beta software for now.

# Usage

This package provides a low level interface that allows to compress single independent blocks.

Each block is separate, and there are no built-in integrity checks.
This means that the caller should keep track of block sizes and also do checksums if needed.

Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
You must provide input and will receive the output and maybe an error.

These error values can be returned:

| Error               | Description                                                                 |
|---------------------|-----------------------------------------------------------------------------|
| `<nil>`             | Everything ok, output is returned                                           |
| `ErrIncompressible` | Returned when input is judged to be too hard to compress                    |
| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated |
| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)      |
| `(error)`           | An internal error occurred.                                                 |


As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
44 | 45 | To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object 46 | that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same 47 | object can be used for both. 48 | 49 | Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this 50 | you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. 51 | 52 | The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. 53 | 54 | ## Tables and re-use 55 | 56 | Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. 57 | 58 | The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) 59 | that controls this behaviour. See the documentation for details. This can be altered between each block. 60 | 61 | Do however note that this information is *not* stored in the output block and it is up to the users of the package to 62 | record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, 63 | based on the boolean reported back from the CompressXX call. 64 | 65 | If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the 66 | [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. 67 | 68 | ## Decompressing 69 | 70 | The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). 71 | This will initialize the decoding tables. 72 | You can supply the complete block to `ReadTable` and it will return the data part of the block 73 | which can be given to the decompressor. 
Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.

You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back
your input was likely corrupted.

It is important to note that a successful decoding does *not* mean your output matches your original input.
There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.

# Contributing

Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
changes will likely not be accepted. If in doubt open an issue before writing the PR.
--------------------------------------------------------------------------------
/vendor/github.com/klauspost/compress/huff0/bitreader.go:
--------------------------------------------------------------------------------
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.

package huff0

import (
	"errors"
	"io"
)

// bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
type bitReader struct {
	in       []byte
	off      uint // next byte to read is at in[off - 1]
	value    uint64
	bitsRead uint8 // how many of the 64 bits in value have been consumed
}

// init initializes and resets the bit reader.
func (b *bitReader) init(in []byte) error {
	if len(in) < 1 {
		return errors.New("corrupt stream: too short")
	}
	b.in = in
	b.off = uint(len(in))
	// The highest bit of the last byte indicates where to start
	v := in[len(in)-1]
	if v == 0 {
		return errors.New("corrupt stream, did not find end of stream")
	}
	// Start "fully consumed" (64 bits read), then fill twice to load up to
	// 8 bytes, and finally skip the padding bits above the marker bit.
	b.bitsRead = 64
	b.value = 0
	b.fill()
	b.fill()
	b.bitsRead += 8 - uint8(highBit32(uint32(v)))
	return nil
}

// getBits will return n bits. n can be 0.
// Returns 0 when n == 0 or when the reader is already exhausted.
func (b *bitReader) getBits(n uint8) uint16 {
	if n == 0 || b.bitsRead >= 64 {
		return 0
	}
	return b.getBitsFast(n)
}

// getBitsFast requires that at least one bit is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReader) getBitsFast(n uint8) uint16 {
	// Extract the top n unread bits of value: shift the read bits off the
	// top, then shift the rest down to the low n bits.
	const regMask = 64 - 1
	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
	b.bitsRead += n
	return v
}

// peekBitsFast requires that at least one bit is requested every time.
// There are no checks if the buffer is filled.
// Same extraction as getBitsFast, but does not consume the bits.
func (b *bitReader) peekBitsFast(n uint8) uint16 {
	const regMask = 64 - 1
	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
	return v
}

// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReader) fillFast() {
	if b.bitsRead < 32 {
		return
	}
	// Do single re-slice to avoid bounds checks.
	v := b.in[b.off-4 : b.off]
	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
	b.value = (b.value << 32) | uint64(low)
	b.bitsRead -= 32
	b.off -= 4
}

// fill() will make sure at least 32 bits are available.
func (b *bitReader) fill() {
	if b.bitsRead < 32 {
		return
	}
	if b.off > 4 {
		// Fast path: at least 4 bytes remain; read them little-endian
		// with a single re-slice.
		v := b.in[b.off-4 : b.off]
		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
		b.value = (b.value << 32) | uint64(low)
		b.bitsRead -= 32
		b.off -= 4
		return
	}
	// Tail: pull the remaining bytes in one at a time.
	for b.off > 0 {
		b.value = (b.value << 8) | uint64(b.in[b.off-1])
		b.bitsRead -= 8
		b.off--
	}
}

// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
	return b.off == 0 && b.bitsRead >= 64
}

// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReader) close() error {
	// Release reference.
	b.in = nil
	// bitsRead > 64 means more bits were consumed than were ever loaded.
	if b.bitsRead > 64 {
		return io.ErrUnexpectedEOF
	}
	return nil
}
--------------------------------------------------------------------------------
/vendor/github.com/klauspost/compress/huff0/bitwriter.go:
--------------------------------------------------------------------------------
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.

package huff0

import "fmt"

// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
	bitContainer uint64
	nBits        uint8 // number of valid bits currently buffered in bitContainer
	out          []byte
}

// bitMask16 is bitmasks. Has extra to avoid bounds check.
// Entry i is (1<<i)-1 for i <= 16; larger indexes saturate at 0xFFFF.
var bitMask16 = [32]uint16{
	0, 1, 3, 7, 0xF, 0x1F,
	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
	0xFFFF, 0xFFFF} /* up to 16 bits */

// addBits16NC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
	// Mask value to the low `bits` bits, then splice above the buffered bits.
	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
	b.nBits += bits
}

// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
	b.bitContainer |= uint64(value) << (b.nBits & 63)
	b.nBits += bits
}

// encSymbol writes the code for symbol from the compression table ct
// to the stream. It will not check if there is space for it, so the
// caller must ensure that the writer has been flushed recently.
func (b *bitWriter) encSymbol(ct cTable, symbol byte) {
	enc := ct[symbol]
	b.bitContainer |= uint64(enc.val) << (b.nBits & 63)
	b.nBits += enc.nBits
}

// addBits16ZeroNC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
// This is fastest if bits can be zero.
func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
	if bits == 0 {
		return
	}
	// Shift up and back down to clear any bits above the low `bits` bits.
	value <<= (16 - bits) & 15
	value >>= (16 - bits) & 15
	b.bitContainer |= uint64(value) << (b.nBits & 63)
	b.nBits += bits
}

// flush will flush all pending full bytes.
// There will be at least 56 bits available for writing when this has been called.
// Using flush32 is faster, but leaves less space for writing.
func (b *bitWriter) flush() {
	// v is the number of complete bytes buffered. Each case appends v bytes
	// LSB-first and shifts them out of the container (v << 3 == v*8 bits).
	v := b.nBits >> 3
	switch v {
	case 0:
		return
	case 1:
		b.out = append(b.out,
			byte(b.bitContainer),
		)
		b.bitContainer >>= 1 << 3
	case 2:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
		)
		b.bitContainer >>= 2 << 3
	case 3:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
		)
		b.bitContainer >>= 3 << 3
	case 4:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
		)
		b.bitContainer >>= 4 << 3
	case 5:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
		)
		b.bitContainer >>= 5 << 3
	case 6:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
		)
		b.bitContainer >>= 6 << 3
	case 7:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
			byte(b.bitContainer>>48),
		)
		b.bitContainer >>= 7 << 3
	case 8:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
			byte(b.bitContainer>>48),
			byte(b.bitContainer>>56),
		)
		b.bitContainer = 0
		b.nBits = 0
		return
	default:
		// nBits > 64 means the caller overfilled the container.
		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
	}
	// Keep only the 0-7 bits that did not form a complete byte.
	b.nBits &= 7
}

// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
	if b.nBits < 32 {
		return
	}
	b.out = append(b.out,
		byte(b.bitContainer),
		byte(b.bitContainer>>8),
		byte(b.bitContainer>>16),
		byte(b.bitContainer>>24))
	b.nBits -= 32
	b.bitContainer >>= 32
}

// flushAlign will flush remaining full bytes and align to next byte boundary.
// Writes ceil(nBits/8) bytes, padding the final partial byte with zero bits.
func (b *bitWriter) flushAlign() {
	nbBytes := (b.nBits + 7) >> 3
	for i := uint8(0); i < nbBytes; i++ {
		b.out = append(b.out, byte(b.bitContainer >> (i * 8)))
	}
	b.nBits = 0
	b.bitContainer = 0
}

// close will write the alignment bit and write the final byte(s)
// to the output.
func (b *bitWriter) close() error {
	// End mark
	b.addBits16Clean(1, 1)
	// flush until next byte.
	b.flushAlign()
	return nil
}

// reset and continue writing by appending to out.
func (b *bitWriter) reset(out []byte) {
	b.bitContainer = 0
	b.nBits = 0
	b.out = out
}
--------------------------------------------------------------------------------
/vendor/github.com/klauspost/compress/huff0/bytereader.go:
--------------------------------------------------------------------------------
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.

package huff0

// byteReader provides a byte reader that reads
// little endian values from a byte stream.
// The input stream is manually advanced.
// The reader performs no bounds checks.
type byteReader struct {
	b   []byte
	off int
}

// init will initialize the reader and set the input.
func (b *byteReader) init(in []byte) {
	b.b = in
	b.off = 0
}

// advance the stream b n bytes.
func (b *byteReader) advance(n uint) {
	b.off += int(n)
}

// Int32 returns a little endian int32 starting at current offset.
// No bounds check: at least 4 bytes must remain.
func (b byteReader) Int32() int32 {
	v3 := int32(b.b[b.off+3])
	v2 := int32(b.b[b.off+2])
	v1 := int32(b.b[b.off+1])
	v0 := int32(b.b[b.off])
	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
}

// Uint32 returns a little endian uint32 starting at current offset.
// No bounds check: at least 4 bytes must remain.
func (b byteReader) Uint32() uint32 {
	v3 := uint32(b.b[b.off+3])
	v2 := uint32(b.b[b.off+2])
	v1 := uint32(b.b[b.off+1])
	v0 := uint32(b.b[b.off])
	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
}

// unread returns the unread portion of the input.
func (b byteReader) unread() []byte {
	return b.b[b.off:]
}

// remain will return the number of bytes remaining.
func (b byteReader) remain() int {
	return len(b.b) - b.off
}
--------------------------------------------------------------------------------
/vendor/github.com/klauspost/compress/snappy/.gitignore:
--------------------------------------------------------------------------------
cmd/snappytool/snappytool
testdata/bench

# These explicitly listed benchmark data files are for an obsolete version of
# snappy_test.go.
6 | testdata/alice29.txt 7 | testdata/asyoulik.txt 8 | testdata/fireworks.jpeg 9 | testdata/geo.protodata 10 | testdata/html 11 | testdata/html_x_4 12 | testdata/kppkn.gtb 13 | testdata/lcet10.txt 14 | testdata/paper-100k.pdf 15 | testdata/plrabn12.txt 16 | testdata/urls.10K 17 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/snappy/AUTHORS: -------------------------------------------------------------------------------- 1 | # This is the official list of Snappy-Go authors for copyright purposes. 2 | # This file is distinct from the CONTRIBUTORS files. 3 | # See the latter for an explanation. 4 | 5 | # Names should be added to this file as 6 | # Name or Organization 7 | # The email address is not required for organizations. 8 | 9 | # Please keep the list sorted. 10 | 11 | Damian Gryski 12 | Google Inc. 13 | Jan Mercl <0xjnml@gmail.com> 14 | Rodolfo Carvalho 15 | Sebastien Binet 16 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS: -------------------------------------------------------------------------------- 1 | # This is the official list of people who can contribute 2 | # (and typically have contributed) code to the Snappy-Go repository. 3 | # The AUTHORS file lists the copyright holders; this file 4 | # lists people. For example, Google employees are listed here 5 | # but not in AUTHORS, because Google holds the copyright. 6 | # 7 | # The submission process automatically checks to make sure 8 | # that people submitting code are listed in this file (by email address). 
9 | # 10 | # Names should be added to this file only after verifying that 11 | # the individual or the individual's organization has agreed to 12 | # the appropriate Contributor License Agreement, found here: 13 | # 14 | # http://code.google.com/legal/individual-cla-v1.0.html 15 | # http://code.google.com/legal/corporate-cla-v1.0.html 16 | # 17 | # The agreement for individuals can be filled out on the web. 18 | # 19 | # When adding J Random Contributor's name to this file, 20 | # either J's name or J's organization's name should be 21 | # added to the AUTHORS file, depending on whether the 22 | # individual or corporate CLA was used. 23 | 24 | # Names should be added to this file like so: 25 | # Name 26 | 27 | # Please keep the list sorted. 28 | 29 | Damian Gryski 30 | Jan Mercl <0xjnml@gmail.com> 31 | Kai Backman 32 | Marc-Antoine Ruel 33 | Nigel Tao 34 | Rob Pike 35 | Rodolfo Carvalho 36 | Russ Cox 37 | Sebastien Binet 38 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/snappy/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are 5 | met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above 10 | copyright notice, this list of conditions and the following disclaimer 11 | in the documentation and/or other materials provided with the 12 | distribution. 13 | * Neither the name of Google Inc. nor the names of its 14 | contributors may be used to endorse or promote products derived from 15 | this software without specific prior written permission. 
16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/snappy/README: -------------------------------------------------------------------------------- 1 | The Snappy compression format in the Go programming language. 2 | 3 | To download and install from source: 4 | $ go get github.com/golang/snappy 5 | 6 | Unless otherwise noted, the Snappy-Go source files are distributed 7 | under the BSD-style license found in the LICENSE file. 8 | 9 | 10 | 11 | Benchmarks. 12 | 13 | The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten 14 | or so files, the same set used by the C++ Snappy code (github.com/google/snappy 15 | and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ 16 | 3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: 17 | 18 | "go test -test.bench=." 
19 | 20 | _UFlat0-8 2.19GB/s ± 0% html 21 | _UFlat1-8 1.41GB/s ± 0% urls 22 | _UFlat2-8 23.5GB/s ± 2% jpg 23 | _UFlat3-8 1.91GB/s ± 0% jpg_200 24 | _UFlat4-8 14.0GB/s ± 1% pdf 25 | _UFlat5-8 1.97GB/s ± 0% html4 26 | _UFlat6-8 814MB/s ± 0% txt1 27 | _UFlat7-8 785MB/s ± 0% txt2 28 | _UFlat8-8 857MB/s ± 0% txt3 29 | _UFlat9-8 719MB/s ± 1% txt4 30 | _UFlat10-8 2.84GB/s ± 0% pb 31 | _UFlat11-8 1.05GB/s ± 0% gaviota 32 | 33 | _ZFlat0-8 1.04GB/s ± 0% html 34 | _ZFlat1-8 534MB/s ± 0% urls 35 | _ZFlat2-8 15.7GB/s ± 1% jpg 36 | _ZFlat3-8 740MB/s ± 3% jpg_200 37 | _ZFlat4-8 9.20GB/s ± 1% pdf 38 | _ZFlat5-8 991MB/s ± 0% html4 39 | _ZFlat6-8 379MB/s ± 0% txt1 40 | _ZFlat7-8 352MB/s ± 0% txt2 41 | _ZFlat8-8 396MB/s ± 1% txt3 42 | _ZFlat9-8 327MB/s ± 1% txt4 43 | _ZFlat10-8 1.33GB/s ± 1% pb 44 | _ZFlat11-8 605MB/s ± 1% gaviota 45 | 46 | 47 | 48 | "go test -test.bench=. -tags=noasm" 49 | 50 | _UFlat0-8 621MB/s ± 2% html 51 | _UFlat1-8 494MB/s ± 1% urls 52 | _UFlat2-8 23.2GB/s ± 1% jpg 53 | _UFlat3-8 1.12GB/s ± 1% jpg_200 54 | _UFlat4-8 4.35GB/s ± 1% pdf 55 | _UFlat5-8 609MB/s ± 0% html4 56 | _UFlat6-8 296MB/s ± 0% txt1 57 | _UFlat7-8 288MB/s ± 0% txt2 58 | _UFlat8-8 309MB/s ± 1% txt3 59 | _UFlat9-8 280MB/s ± 1% txt4 60 | _UFlat10-8 753MB/s ± 0% pb 61 | _UFlat11-8 400MB/s ± 0% gaviota 62 | 63 | _ZFlat0-8 409MB/s ± 1% html 64 | _ZFlat1-8 250MB/s ± 1% urls 65 | _ZFlat2-8 12.3GB/s ± 1% jpg 66 | _ZFlat3-8 132MB/s ± 0% jpg_200 67 | _ZFlat4-8 2.92GB/s ± 0% pdf 68 | _ZFlat5-8 405MB/s ± 1% html4 69 | _ZFlat6-8 179MB/s ± 1% txt1 70 | _ZFlat7-8 170MB/s ± 1% txt2 71 | _ZFlat8-8 189MB/s ± 1% txt3 72 | _ZFlat9-8 164MB/s ± 1% txt4 73 | _ZFlat10-8 479MB/s ± 1% pb 74 | _ZFlat11-8 270MB/s ± 1% gaviota 75 | 76 | 77 | 78 | For comparison (Go's encoded output is byte-for-byte identical to C++'s), here 79 | are the numbers from C++ Snappy's 80 | 81 | make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log 82 | 83 | BM_UFlat/0 2.4GB/s html 84 | BM_UFlat/1 1.4GB/s urls 85 | 
BM_UFlat/2 21.8GB/s jpg 86 | BM_UFlat/3 1.5GB/s jpg_200 87 | BM_UFlat/4 13.3GB/s pdf 88 | BM_UFlat/5 2.1GB/s html4 89 | BM_UFlat/6 1.0GB/s txt1 90 | BM_UFlat/7 959.4MB/s txt2 91 | BM_UFlat/8 1.0GB/s txt3 92 | BM_UFlat/9 864.5MB/s txt4 93 | BM_UFlat/10 2.9GB/s pb 94 | BM_UFlat/11 1.2GB/s gaviota 95 | 96 | BM_ZFlat/0 944.3MB/s html (22.31 %) 97 | BM_ZFlat/1 501.6MB/s urls (47.78 %) 98 | BM_ZFlat/2 14.3GB/s jpg (99.95 %) 99 | BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) 100 | BM_ZFlat/4 8.3GB/s pdf (83.30 %) 101 | BM_ZFlat/5 903.5MB/s html4 (22.52 %) 102 | BM_ZFlat/6 336.0MB/s txt1 (57.88 %) 103 | BM_ZFlat/7 312.3MB/s txt2 (61.91 %) 104 | BM_ZFlat/8 353.1MB/s txt3 (54.99 %) 105 | BM_ZFlat/9 289.9MB/s txt4 (66.26 %) 106 | BM_ZFlat/10 1.2GB/s pb (19.68 %) 107 | BM_ZFlat/11 527.4MB/s gaviota (37.72 %) 108 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/snappy/decode_amd64.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 The Snappy-Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | // +build !appengine 6 | // +build gc 7 | // +build !noasm 8 | 9 | package snappy 10 | 11 | // decode has the same semantics as in decode_other.go. 12 | // 13 | //go:noescape 14 | func decode(dst, src []byte) int 15 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/snappy/decode_other.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 The Snappy-Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | // +build !amd64 appengine !gc noasm 6 | 7 | package snappy 8 | 9 | // decode writes the decoding of src to dst. 
It assumes that the varint-encoded 10 | // length of the decompressed bytes has already been read, and that len(dst) 11 | // equals that length. 12 | // 13 | // It returns 0 on success or a decodeErrCodeXxx error code on failure. 14 | func decode(dst, src []byte) int { 15 | var d, s, offset, length int 16 | for s < len(src) { 17 | switch src[s] & 0x03 { 18 | case tagLiteral: 19 | x := uint32(src[s] >> 2) 20 | switch { 21 | case x < 60: 22 | s++ 23 | case x == 60: 24 | s += 2 25 | if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 26 | return decodeErrCodeCorrupt 27 | } 28 | x = uint32(src[s-1]) 29 | case x == 61: 30 | s += 3 31 | if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 32 | return decodeErrCodeCorrupt 33 | } 34 | x = uint32(src[s-2]) | uint32(src[s-1])<<8 35 | case x == 62: 36 | s += 4 37 | if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 38 | return decodeErrCodeCorrupt 39 | } 40 | x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 41 | case x == 63: 42 | s += 5 43 | if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 44 | return decodeErrCodeCorrupt 45 | } 46 | x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 47 | } 48 | length = int(x) + 1 49 | if length <= 0 { 50 | return decodeErrCodeUnsupportedLiteralLength 51 | } 52 | if length > len(dst)-d || length > len(src)-s { 53 | return decodeErrCodeCorrupt 54 | } 55 | copy(dst[d:], src[s:s+length]) 56 | d += length 57 | s += length 58 | continue 59 | 60 | case tagCopy1: 61 | s += 2 62 | if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
63 | return decodeErrCodeCorrupt 64 | } 65 | length = 4 + int(src[s-2])>>2&0x7 66 | offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) 67 | 68 | case tagCopy2: 69 | s += 3 70 | if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 71 | return decodeErrCodeCorrupt 72 | } 73 | length = 1 + int(src[s-3])>>2 74 | offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) 75 | 76 | case tagCopy4: 77 | s += 5 78 | if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 79 | return decodeErrCodeCorrupt 80 | } 81 | length = 1 + int(src[s-5])>>2 82 | offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) 83 | } 84 | 85 | if offset <= 0 || d < offset || length > len(dst)-d { 86 | return decodeErrCodeCorrupt 87 | } 88 | // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike 89 | // the built-in copy function, this byte-by-byte copy always runs 90 | // forwards, even if the slices overlap. Conceptually, this is: 91 | // 92 | // d += forwardCopy(dst[d:d+length], dst[d-offset:]) 93 | for end := d + length; d != end; d++ { 94 | dst[d] = dst[d-offset] 95 | } 96 | } 97 | if d != len(dst) { 98 | return decodeErrCodeCorrupt 99 | } 100 | return 0 101 | } 102 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/snappy/encode_amd64.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 The Snappy-Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | // +build !appengine 6 | // +build gc 7 | // +build !noasm 8 | 9 | package snappy 10 | 11 | // emitLiteral has the same semantics as in encode_other.go. 12 | // 13 | //go:noescape 14 | func emitLiteral(dst, lit []byte) int 15 | 16 | // emitCopy has the same semantics as in encode_other.go. 
17 | // 18 | //go:noescape 19 | func emitCopy(dst []byte, offset, length int) int 20 | 21 | // extendMatch has the same semantics as in encode_other.go. 22 | // 23 | //go:noescape 24 | func extendMatch(src []byte, i, j int) int 25 | 26 | // encodeBlock has the same semantics as in encode_other.go. 27 | // 28 | //go:noescape 29 | func encodeBlock(dst, src []byte) (d int) 30 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/snappy/runbench.cmd: -------------------------------------------------------------------------------- 1 | del old.txt 2 | go test -bench=. >>old.txt && go test -bench=. >>old.txt && go test -bench=. >>old.txt && benchstat -delta-test=ttest old.txt new.txt 3 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/snappy/snappy.go: -------------------------------------------------------------------------------- 1 | // Copyright 2011 The Snappy-Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | // Package snappy implements the snappy block-based compression format. 6 | // It aims for very high speeds and reasonable compression. 7 | // 8 | // The C++ snappy implementation is at https://github.com/google/snappy 9 | package snappy 10 | 11 | import ( 12 | "hash/crc32" 13 | ) 14 | 15 | /* 16 | Each encoded block begins with the varint-encoded length of the decoded data, 17 | followed by a sequence of chunks. Chunks begin and end on byte boundaries. The 18 | first byte of each chunk is broken into its 2 least and 6 most significant bits 19 | called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. 20 | Zero means a literal tag. All other values mean a copy tag. 21 | 22 | For literal tags: 23 | - If m < 60, the next 1 + m bytes are literal bytes. 
24 | - Otherwise, let n be the little-endian unsigned integer denoted by the next 25 | m - 59 bytes. The next 1 + n bytes after that are literal bytes. 26 | 27 | For copy tags, length bytes are copied from offset bytes ago, in the style of 28 | Lempel-Ziv compression algorithms. In particular: 29 | - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). 30 | The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 31 | of the offset. The next byte is bits 0-7 of the offset. 32 | - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). 33 | The length is 1 + m. The offset is the little-endian unsigned integer 34 | denoted by the next 2 bytes. 35 | - For l == 3, this tag is a legacy format that is no longer issued by most 36 | encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in 37 | [1, 65). The length is 1 + m. The offset is the little-endian unsigned 38 | integer denoted by the next 4 bytes. 39 | */ 40 | const ( 41 | tagLiteral = 0x00 42 | tagCopy1 = 0x01 43 | tagCopy2 = 0x02 44 | tagCopy4 = 0x03 45 | ) 46 | 47 | const ( 48 | checksumSize = 4 49 | chunkHeaderSize = 4 50 | magicChunk = "\xff\x06\x00\x00" + magicBody 51 | magicBody = "sNaPpY" 52 | 53 | // maxBlockSize is the maximum size of the input to encodeBlock. It is not 54 | // part of the wire format per se, but some parts of the encoder assume 55 | // that an offset fits into a uint16. 56 | // 57 | // Also, for the framing format (Writer type instead of Encode function), 58 | // https://github.com/google/snappy/blob/master/framing_format.txt says 59 | // that "the uncompressed data in a chunk must be no longer than 65536 60 | // bytes". 61 | maxBlockSize = 65536 62 | 63 | // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is 64 | // hard coded to be a const instead of a variable, so that obufLen can also 65 | // be a const. Their equivalence is confirmed by 66 | // TestMaxEncodedLenOfMaxBlockSize. 
67 | maxEncodedLenOfMaxBlockSize = 76490 68 | 69 | obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize 70 | obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize 71 | ) 72 | 73 | const ( 74 | chunkTypeCompressedData = 0x00 75 | chunkTypeUncompressedData = 0x01 76 | chunkTypePadding = 0xfe 77 | chunkTypeStreamIdentifier = 0xff 78 | ) 79 | 80 | var crcTable = crc32.MakeTable(crc32.Castagnoli) 81 | 82 | // crc implements the checksum specified in section 3 of 83 | // https://github.com/google/snappy/blob/master/framing_format.txt 84 | func crc(b []byte) uint32 { 85 | c := crc32.Update(0, crcTable, b) 86 | return uint32(c>>15|c<<17) + 0xa282ead8 87 | } 88 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/bitreader.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019+ Klaus Post. All rights reserved. 2 | // License information can be found in the LICENSE file. 3 | // Based on work by Yann Collet, released under BSD License. 4 | 5 | package zstd 6 | 7 | import ( 8 | "errors" 9 | "io" 10 | "math/bits" 11 | ) 12 | 13 | // bitReader reads a bitstream in reverse. 14 | // The last set bit indicates the start of the stream and is used 15 | // for aligning the input. 16 | type bitReader struct { 17 | in []byte 18 | off uint // next byte to read is at in[off - 1] 19 | value uint64 // Maybe use [16]byte, but shifting is awkward. 20 | bitsRead uint8 21 | } 22 | 23 | // init initializes and resets the bit reader. 
24 | func (b *bitReader) init(in []byte) error { 25 | if len(in) < 1 { 26 | return errors.New("corrupt stream: too short") 27 | } 28 | b.in = in 29 | b.off = uint(len(in)) 30 | // The highest bit of the last byte indicates where to start 31 | v := in[len(in)-1] 32 | if v == 0 { 33 | return errors.New("corrupt stream, did not find end of stream") 34 | } 35 | b.bitsRead = 64 36 | b.value = 0 37 | b.fill() 38 | b.fill() 39 | b.bitsRead += 8 - uint8(highBits(uint32(v))) 40 | return nil 41 | } 42 | 43 | // getBits will return n bits. n can be 0. 44 | func (b *bitReader) getBits(n uint8) int { 45 | if n == 0 /*|| b.bitsRead >= 64 */ { 46 | return 0 47 | } 48 | return b.getBitsFast(n) 49 | } 50 | 51 | // getBitsFast requires that at least one bit is requested every time. 52 | // There are no checks if the buffer is filled. 53 | func (b *bitReader) getBitsFast(n uint8) int { 54 | const regMask = 64 - 1 55 | v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) 56 | b.bitsRead += n 57 | return int(v) 58 | } 59 | 60 | // fillFast() will make sure at least 32 bits are available. 61 | // There must be at least 4 bytes available. 62 | func (b *bitReader) fillFast() { 63 | if b.bitsRead < 32 { 64 | return 65 | } 66 | // Do single re-slice to avoid bounds checks. 67 | v := b.in[b.off-4 : b.off] 68 | low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) 69 | b.value = (b.value << 32) | uint64(low) 70 | b.bitsRead -= 32 71 | b.off -= 4 72 | } 73 | 74 | // fill() will make sure at least 32 bits are available. 
75 | func (b *bitReader) fill() { 76 | if b.bitsRead < 32 { 77 | return 78 | } 79 | if b.off >= 4 { 80 | v := b.in[b.off-4 : b.off] 81 | low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) 82 | b.value = (b.value << 32) | uint64(low) 83 | b.bitsRead -= 32 84 | b.off -= 4 85 | return 86 | } 87 | for b.off > 0 { 88 | b.value = (b.value << 8) | uint64(b.in[b.off-1]) 89 | b.bitsRead -= 8 90 | b.off-- 91 | } 92 | } 93 | 94 | // finished returns true if all bits have been read from the bit stream. 95 | func (b *bitReader) finished() bool { 96 | return b.off == 0 && b.bitsRead >= 64 97 | } 98 | 99 | // overread returns true if more bits have been requested than is on the stream. 100 | func (b *bitReader) overread() bool { 101 | return b.bitsRead > 64 102 | } 103 | 104 | // remain returns the number of bits remaining. 105 | func (b *bitReader) remain() uint { 106 | return b.off*8 + 64 - uint(b.bitsRead) 107 | } 108 | 109 | // close the bitstream and returns an error if out-of-buffer reads occurred. 110 | func (b *bitReader) close() error { 111 | // Release reference. 112 | b.in = nil 113 | if b.bitsRead > 64 { 114 | return io.ErrUnexpectedEOF 115 | } 116 | return nil 117 | } 118 | 119 | func highBits(val uint32) (n uint32) { 120 | return uint32(bits.Len32(val) - 1) 121 | } 122 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/bitwriter.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Klaus Post. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | // Based on work Copyright (c) 2013, Yann Collet, released under BSD License. 5 | 6 | package zstd 7 | 8 | import "fmt" 9 | 10 | // bitWriter will write bits. 11 | // First bit will be LSB of the first byte of output. 
12 | type bitWriter struct { 13 | bitContainer uint64 14 | nBits uint8 15 | out []byte 16 | } 17 | 18 | // bitMask16 is bitmasks. Has extra to avoid bounds check. 19 | var bitMask16 = [32]uint16{ 20 | 0, 1, 3, 7, 0xF, 0x1F, 21 | 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 22 | 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, 23 | 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 24 | 0xFFFF, 0xFFFF} /* up to 16 bits */ 25 | 26 | var bitMask32 = [32]uint32{ 27 | 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 28 | 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 29 | 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, 30 | 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, 31 | } // up to 32 bits 32 | 33 | // addBits16NC will add up to 16 bits. 34 | // It will not check if there is space for them, 35 | // so the caller must ensure that it has flushed recently. 36 | func (b *bitWriter) addBits16NC(value uint16, bits uint8) { 37 | b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) 38 | b.nBits += bits 39 | } 40 | 41 | // addBits32NC will add up to 32 bits. 42 | // It will not check if there is space for them, 43 | // so the caller must ensure that it has flushed recently. 44 | func (b *bitWriter) addBits32NC(value uint32, bits uint8) { 45 | b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) 46 | b.nBits += bits 47 | } 48 | 49 | // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. 50 | // It will not check if there is space for them, so the caller must ensure that it has flushed recently. 51 | func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { 52 | b.bitContainer |= uint64(value) << (b.nBits & 63) 53 | b.nBits += bits 54 | } 55 | 56 | // flush will flush all pending full bytes. 57 | // There will be at least 56 bits available for writing when this has been called. 58 | // Using flush32 is faster, but leaves less space for writing. 
59 | func (b *bitWriter) flush() { 60 | v := b.nBits >> 3 61 | switch v { 62 | case 0: 63 | case 1: 64 | b.out = append(b.out, 65 | byte(b.bitContainer), 66 | ) 67 | case 2: 68 | b.out = append(b.out, 69 | byte(b.bitContainer), 70 | byte(b.bitContainer>>8), 71 | ) 72 | case 3: 73 | b.out = append(b.out, 74 | byte(b.bitContainer), 75 | byte(b.bitContainer>>8), 76 | byte(b.bitContainer>>16), 77 | ) 78 | case 4: 79 | b.out = append(b.out, 80 | byte(b.bitContainer), 81 | byte(b.bitContainer>>8), 82 | byte(b.bitContainer>>16), 83 | byte(b.bitContainer>>24), 84 | ) 85 | case 5: 86 | b.out = append(b.out, 87 | byte(b.bitContainer), 88 | byte(b.bitContainer>>8), 89 | byte(b.bitContainer>>16), 90 | byte(b.bitContainer>>24), 91 | byte(b.bitContainer>>32), 92 | ) 93 | case 6: 94 | b.out = append(b.out, 95 | byte(b.bitContainer), 96 | byte(b.bitContainer>>8), 97 | byte(b.bitContainer>>16), 98 | byte(b.bitContainer>>24), 99 | byte(b.bitContainer>>32), 100 | byte(b.bitContainer>>40), 101 | ) 102 | case 7: 103 | b.out = append(b.out, 104 | byte(b.bitContainer), 105 | byte(b.bitContainer>>8), 106 | byte(b.bitContainer>>16), 107 | byte(b.bitContainer>>24), 108 | byte(b.bitContainer>>32), 109 | byte(b.bitContainer>>40), 110 | byte(b.bitContainer>>48), 111 | ) 112 | case 8: 113 | b.out = append(b.out, 114 | byte(b.bitContainer), 115 | byte(b.bitContainer>>8), 116 | byte(b.bitContainer>>16), 117 | byte(b.bitContainer>>24), 118 | byte(b.bitContainer>>32), 119 | byte(b.bitContainer>>40), 120 | byte(b.bitContainer>>48), 121 | byte(b.bitContainer>>56), 122 | ) 123 | default: 124 | panic(fmt.Errorf("bits (%d) > 64", b.nBits)) 125 | } 126 | b.bitContainer >>= v << 3 127 | b.nBits &= 7 128 | } 129 | 130 | // flush32 will flush out, so there are at least 32 bits available for writing. 
131 | func (b *bitWriter) flush32() { 132 | if b.nBits < 32 { 133 | return 134 | } 135 | b.out = append(b.out, 136 | byte(b.bitContainer), 137 | byte(b.bitContainer>>8), 138 | byte(b.bitContainer>>16), 139 | byte(b.bitContainer>>24)) 140 | b.nBits -= 32 141 | b.bitContainer >>= 32 142 | } 143 | 144 | // flushAlign will flush remaining full bytes and align to next byte boundary. 145 | func (b *bitWriter) flushAlign() { 146 | nbBytes := (b.nBits + 7) >> 3 147 | for i := uint8(0); i < nbBytes; i++ { 148 | b.out = append(b.out, byte(b.bitContainer>>(i*8))) 149 | } 150 | b.nBits = 0 151 | b.bitContainer = 0 152 | } 153 | 154 | // close will write the alignment bit and write the final byte(s) 155 | // to the output. 156 | func (b *bitWriter) close() error { 157 | // End mark 158 | b.addBits16Clean(1, 1) 159 | // flush until next byte. 160 | b.flushAlign() 161 | return nil 162 | } 163 | 164 | // reset and continue writing by appending to out. 165 | func (b *bitWriter) reset(out []byte) { 166 | b.bitContainer = 0 167 | b.nBits = 0 168 | b.out = out 169 | } 170 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/blocktype_string.go: -------------------------------------------------------------------------------- 1 | // Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. 2 | 3 | package zstd 4 | 5 | import "strconv" 6 | 7 | func _() { 8 | // An "invalid array index" compiler error signifies that the constant values have changed. 9 | // Re-run the stringer command to generate them again. 
10 | var x [1]struct{} 11 | _ = x[blockTypeRaw-0] 12 | _ = x[blockTypeRLE-1] 13 | _ = x[blockTypeCompressed-2] 14 | _ = x[blockTypeReserved-3] 15 | } 16 | 17 | const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" 18 | 19 | var _blockType_index = [...]uint8{0, 12, 24, 43, 60} 20 | 21 | func (i blockType) String() string { 22 | if i >= blockType(len(_blockType_index)-1) { 23 | return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" 24 | } 25 | return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] 26 | } 27 | func _() { 28 | // An "invalid array index" compiler error signifies that the constant values have changed. 29 | // Re-run the stringer command to generate them again. 30 | var x [1]struct{} 31 | _ = x[literalsBlockRaw-0] 32 | _ = x[literalsBlockRLE-1] 33 | _ = x[literalsBlockCompressed-2] 34 | _ = x[literalsBlockTreeless-3] 35 | } 36 | 37 | const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" 38 | 39 | var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} 40 | 41 | func (i literalsBlockType) String() string { 42 | if i >= literalsBlockType(len(_literalsBlockType_index)-1) { 43 | return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" 44 | } 45 | return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] 46 | } 47 | func _() { 48 | // An "invalid array index" compiler error signifies that the constant values have changed. 49 | // Re-run the stringer command to generate them again. 
50 | var x [1]struct{} 51 | _ = x[compModePredefined-0] 52 | _ = x[compModeRLE-1] 53 | _ = x[compModeFSE-2] 54 | _ = x[compModeRepeat-3] 55 | } 56 | 57 | const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" 58 | 59 | var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} 60 | 61 | func (i seqCompMode) String() string { 62 | if i >= seqCompMode(len(_seqCompMode_index)-1) { 63 | return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" 64 | } 65 | return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] 66 | } 67 | func _() { 68 | // An "invalid array index" compiler error signifies that the constant values have changed. 69 | // Re-run the stringer command to generate them again. 70 | var x [1]struct{} 71 | _ = x[tableLiteralLengths-0] 72 | _ = x[tableOffsets-1] 73 | _ = x[tableMatchLengths-2] 74 | } 75 | 76 | const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" 77 | 78 | var _tableIndex_index = [...]uint8{0, 19, 31, 48} 79 | 80 | func (i tableIndex) String() string { 81 | if i >= tableIndex(len(_tableIndex_index)-1) { 82 | return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" 83 | } 84 | return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] 85 | } 86 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/bytebuf.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019+ Klaus Post. All rights reserved. 2 | // License information can be found in the LICENSE file. 3 | // Based on work by Yann Collet, released under BSD License. 4 | 5 | package zstd 6 | 7 | import ( 8 | "fmt" 9 | "io" 10 | "io/ioutil" 11 | ) 12 | 13 | type byteBuffer interface { 14 | // Read up to 8 bytes. 15 | // Returns nil if no more input is available. 16 | readSmall(n int) []byte 17 | 18 | // Read >8 bytes. 19 | // MAY use the destination slice. 
20 | readBig(n int, dst []byte) ([]byte, error) 21 | 22 | // Read a single byte. 23 | readByte() (byte, error) 24 | 25 | // Skip n bytes. 26 | skipN(n int) error 27 | } 28 | 29 | // in-memory buffer 30 | type byteBuf []byte 31 | 32 | func (b *byteBuf) readSmall(n int) []byte { 33 | if debug && n > 8 { 34 | panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) 35 | } 36 | bb := *b 37 | if len(bb) < n { 38 | return nil 39 | } 40 | r := bb[:n] 41 | *b = bb[n:] 42 | return r 43 | } 44 | 45 | func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { 46 | bb := *b 47 | if len(bb) < n { 48 | return nil, io.ErrUnexpectedEOF 49 | } 50 | r := bb[:n] 51 | *b = bb[n:] 52 | return r, nil 53 | } 54 | 55 | func (b *byteBuf) remain() []byte { 56 | return *b 57 | } 58 | 59 | func (b *byteBuf) readByte() (byte, error) { 60 | bb := *b 61 | if len(bb) < 1 { 62 | return 0, nil 63 | } 64 | r := bb[0] 65 | *b = bb[1:] 66 | return r, nil 67 | } 68 | 69 | func (b *byteBuf) skipN(n int) error { 70 | bb := *b 71 | if len(bb) < n { 72 | return io.ErrUnexpectedEOF 73 | } 74 | *b = bb[n:] 75 | return nil 76 | } 77 | 78 | // wrapper around a reader. 79 | type readerWrapper struct { 80 | r io.Reader 81 | tmp [8]byte 82 | } 83 | 84 | func (r *readerWrapper) readSmall(n int) []byte { 85 | if debug && n > 8 { 86 | panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) 87 | } 88 | n2, err := io.ReadFull(r.r, r.tmp[:n]) 89 | // We only really care about the actual bytes read. 
90 | if n2 != n { 91 | if debug { 92 | println("readSmall: got", n2, "want", n, "err", err) 93 | } 94 | return nil 95 | } 96 | return r.tmp[:n] 97 | } 98 | 99 | func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { 100 | if cap(dst) < n { 101 | dst = make([]byte, n) 102 | } 103 | n2, err := io.ReadFull(r.r, dst[:n]) 104 | return dst[:n2], err 105 | } 106 | 107 | func (r *readerWrapper) readByte() (byte, error) { 108 | n2, err := r.r.Read(r.tmp[:1]) 109 | if err != nil { 110 | return 0, err 111 | } 112 | if n2 != 1 { 113 | return 0, io.ErrUnexpectedEOF 114 | } 115 | return r.tmp[0], nil 116 | } 117 | 118 | func (r *readerWrapper) skipN(n int) error { 119 | _, err := io.CopyN(ioutil.Discard, r.r, int64(n)) 120 | return err 121 | } 122 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/bytereader.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019+ Klaus Post. All rights reserved. 2 | // License information can be found in the LICENSE file. 3 | // Based on work by Yann Collet, released under BSD License. 4 | 5 | package zstd 6 | 7 | // byteReader provides a byte reader that reads 8 | // little endian values from a byte stream. 9 | // The input stream is manually advanced. 10 | // The reader performs no bounds checks. 11 | type byteReader struct { 12 | b []byte 13 | off int 14 | } 15 | 16 | // init will initialize the reader and set the input. 17 | func (b *byteReader) init(in []byte) { 18 | b.b = in 19 | b.off = 0 20 | } 21 | 22 | // advance the stream b n bytes. 23 | func (b *byteReader) advance(n uint) { 24 | b.off += int(n) 25 | } 26 | 27 | // overread returns whether we have advanced too far. 28 | func (b *byteReader) overread() bool { 29 | return b.off > len(b.b) 30 | } 31 | 32 | // Int32 returns a little endian int32 starting at current offset. 
33 | func (b byteReader) Int32() int32 { 34 | b2 := b.b[b.off : b.off+4 : b.off+4] 35 | v3 := int32(b2[3]) 36 | v2 := int32(b2[2]) 37 | v1 := int32(b2[1]) 38 | v0 := int32(b2[0]) 39 | return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) 40 | } 41 | 42 | // Uint8 returns the next byte 43 | func (b *byteReader) Uint8() uint8 { 44 | v := b.b[b.off] 45 | return v 46 | } 47 | 48 | // Uint32 returns a little endian uint32 starting at current offset. 49 | func (b byteReader) Uint32() uint32 { 50 | if r := b.remain(); r < 4 { 51 | // Very rare 52 | v := uint32(0) 53 | for i := 1; i <= r; i++ { 54 | v = (v << 8) | uint32(b.b[len(b.b)-i]) 55 | } 56 | return v 57 | } 58 | b2 := b.b[b.off : b.off+4 : b.off+4] 59 | v3 := uint32(b2[3]) 60 | v2 := uint32(b2[2]) 61 | v1 := uint32(b2[1]) 62 | v0 := uint32(b2[0]) 63 | return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) 64 | } 65 | 66 | // unread returns the unread portion of the input. 67 | func (b byteReader) unread() []byte { 68 | return b.b[b.off:] 69 | } 70 | 71 | // remain will return the number of bytes remaining. 72 | func (b byteReader) remain() int { 73 | return len(b.b) - b.off 74 | } 75 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/decoder_options.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019+ Klaus Post. All rights reserved. 2 | // License information can be found in the LICENSE file. 3 | // Based on work by Yann Collet, released under BSD License. 4 | 5 | package zstd 6 | 7 | import ( 8 | "errors" 9 | "fmt" 10 | "runtime" 11 | ) 12 | 13 | // DOption is an option for creating a decoder. 14 | type DOption func(*decoderOptions) error 15 | 16 | // options retains accumulated state of multiple options. 
17 | type decoderOptions struct { 18 | lowMem bool 19 | concurrent int 20 | maxDecodedSize uint64 21 | } 22 | 23 | func (o *decoderOptions) setDefault() { 24 | *o = decoderOptions{ 25 | // use less ram: true for now, but may change. 26 | lowMem: true, 27 | concurrent: runtime.GOMAXPROCS(0), 28 | } 29 | o.maxDecodedSize = 1 << 63 30 | } 31 | 32 | // WithDecoderLowmem will set whether to use a lower amount of memory, 33 | // but possibly have to allocate more while running. 34 | func WithDecoderLowmem(b bool) DOption { 35 | return func(o *decoderOptions) error { o.lowMem = b; return nil } 36 | } 37 | 38 | // WithDecoderConcurrency will set the concurrency, 39 | // meaning the maximum number of decoders to run concurrently. 40 | // The value supplied must be at least 1. 41 | // By default this will be set to GOMAXPROCS. 42 | func WithDecoderConcurrency(n int) DOption { 43 | return func(o *decoderOptions) error { 44 | if n <= 0 { 45 | return fmt.Errorf("Concurrency must be at least 1") 46 | } 47 | o.concurrent = n 48 | return nil 49 | } 50 | } 51 | 52 | // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory 53 | // (non-streaming) operations. 54 | // Maxmimum and default is 1 << 63 bytes. 55 | func WithDecoderMaxMemory(n uint64) DOption { 56 | return func(o *decoderOptions) error { 57 | if n == 0 { 58 | return errors.New("WithDecoderMaxmemory must be at least 1") 59 | } 60 | if n > 1<<63 { 61 | return fmt.Errorf("WithDecoderMaxmemorymust be less than 1 << 63") 62 | } 63 | o.maxDecodedSize = n 64 | return nil 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/frameenc.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019+ Klaus Post. All rights reserved. 2 | // License information can be found in the LICENSE file. 3 | // Based on work by Yann Collet, released under BSD License. 

package zstd

import (
	"errors"
	"fmt"
	"io"
	"math"
	"math/bits"
)

// frameHeader holds the fields of a zstd frame header that appendTo encodes.
type frameHeader struct {
	ContentSize   uint64
	WindowSize    uint32
	SingleSegment bool
	Checksum      bool
	DictID        uint32 // Not stored.
}

const maxHeaderSize = 14

// appendTo appends the encoded frame header to dst and returns the result.
// It returns an error if SingleSegment is set without a content size.
func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
	dst = append(dst, frameMagic...)
	var fhd uint8
	if f.Checksum {
		fhd |= 1 << 2
	}
	if f.SingleSegment {
		fhd |= 1 << 5
	}
	// fcs selects how many bytes of ContentSize are stored (0, 2, 4 or 8).
	var fcs uint8
	if f.ContentSize >= 256 {
		fcs++
	}
	if f.ContentSize >= 65536+256 {
		fcs++
	}
	if f.ContentSize >= 0xffffffff {
		fcs++
	}
	fhd |= fcs << 6

	dst = append(dst, fhd)
	if !f.SingleSegment {
		const winLogMin = 10
		windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
		dst = append(dst, uint8(windowLog))
	}
	if f.SingleSegment && f.ContentSize == 0 {
		return nil, errors.New("single segment, but no size set")
	}
	switch fcs {
	case 0:
		if f.SingleSegment {
			dst = append(dst, uint8(f.ContentSize))
		}
		// Unless SingleSegment is set, frame sizes < 256 are not stored.
	case 1:
		f.ContentSize -= 256
		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
	case 2:
		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
	case 3:
		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
			uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
	default:
		panic("invalid fcs")
	}
	return dst, nil
}

const skippableFrameHeader = 4 + 4

// calcSkippableFrame will return a total size to be added for written
79 | // The value will always be > skippableFrameHeader. 80 | // The function will panic if written < 0 or wantMultiple <= 0. 81 | func calcSkippableFrame(written, wantMultiple int64) int { 82 | if wantMultiple <= 0 { 83 | panic("wantMultiple <= 0") 84 | } 85 | if written < 0 { 86 | panic("written < 0") 87 | } 88 | leftOver := written % wantMultiple 89 | if leftOver == 0 { 90 | return 0 91 | } 92 | toAdd := wantMultiple - leftOver 93 | for toAdd < skippableFrameHeader { 94 | toAdd += wantMultiple 95 | } 96 | return int(toAdd) 97 | } 98 | 99 | // skippableFrame will add a skippable frame with a total size of bytes. 100 | // total should be >= skippableFrameHeader and < math.MaxUint32. 101 | func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { 102 | if total == 0 { 103 | return dst, nil 104 | } 105 | if total < skippableFrameHeader { 106 | return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) 107 | } 108 | if int64(total) > math.MaxUint32 { 109 | return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) 110 | } 111 | dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) 112 | f := uint32(total - skippableFrameHeader) 113 | dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) 114 | start := len(dst) 115 | dst = append(dst, make([]byte, f)...) 116 | _, err := io.ReadFull(r, dst[start:]) 117 | return dst, err 118 | } 119 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/fse_predefined.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019+ Klaus Post. All rights reserved. 2 | // License information can be found in the LICENSE file. 3 | // Based on work by Yann Collet, released under BSD License. 
4 | 5 | package zstd 6 | 7 | import ( 8 | "fmt" 9 | "math" 10 | ) 11 | 12 | var ( 13 | // fsePredef are the predefined fse tables as defined here: 14 | // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions 15 | // These values are already transformed. 16 | fsePredef [3]fseDecoder 17 | 18 | // fsePredefEnc are the predefined encoder based on fse tables as defined here: 19 | // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions 20 | // These values are already transformed. 21 | fsePredefEnc [3]fseEncoder 22 | 23 | // symbolTableX contain the transformations needed for each type as defined in 24 | // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets 25 | symbolTableX [3][]baseOffset 26 | 27 | // maxTableSymbol is the biggest supported symbol for each table type 28 | // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets 29 | maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} 30 | 31 | // bitTables is the bits table for each table. 32 | bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} 33 | ) 34 | 35 | type tableIndex uint8 36 | 37 | const ( 38 | // indexes for fsePredef and symbolTableX 39 | tableLiteralLengths tableIndex = 0 40 | tableOffsets tableIndex = 1 41 | tableMatchLengths tableIndex = 2 42 | 43 | maxLiteralLengthSymbol = 35 44 | maxOffsetLengthSymbol = 30 45 | maxMatchLengthSymbol = 52 46 | ) 47 | 48 | // baseOffset is used for calculating transformations. 49 | type baseOffset struct { 50 | baseLine uint32 51 | addBits uint8 52 | } 53 | 54 | // fillBase will precalculate base offsets with the given bit distributions. 
55 | func fillBase(dst []baseOffset, base uint32, bits ...uint8) { 56 | if len(bits) != len(dst) { 57 | panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) 58 | } 59 | for i, bit := range bits { 60 | if base > math.MaxInt32 { 61 | panic(fmt.Sprintf("invalid decoding table, base overflows int32")) 62 | } 63 | 64 | dst[i] = baseOffset{ 65 | baseLine: base, 66 | addBits: bit, 67 | } 68 | base += 1 << bit 69 | } 70 | } 71 | 72 | func init() { 73 | // Literals length codes 74 | tmp := make([]baseOffset, 36) 75 | for i := range tmp[:16] { 76 | tmp[i] = baseOffset{ 77 | baseLine: uint32(i), 78 | addBits: 0, 79 | } 80 | } 81 | fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) 82 | symbolTableX[tableLiteralLengths] = tmp 83 | 84 | // Match length codes 85 | tmp = make([]baseOffset, 53) 86 | for i := range tmp[:32] { 87 | tmp[i] = baseOffset{ 88 | // The transformation adds the 3 length. 89 | baseLine: uint32(i) + 3, 90 | addBits: 0, 91 | } 92 | } 93 | fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) 94 | symbolTableX[tableMatchLengths] = tmp 95 | 96 | // Offset codes 97 | tmp = make([]baseOffset, maxOffsetBits+1) 98 | tmp[1] = baseOffset{ 99 | baseLine: 1, 100 | addBits: 1, 101 | } 102 | fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) 103 | symbolTableX[tableOffsets] = tmp 104 | 105 | // Fill predefined tables and transform them. 
106 | // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions 107 | for i := range fsePredef[:] { 108 | f := &fsePredef[i] 109 | switch tableIndex(i) { 110 | case tableLiteralLengths: 111 | // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 112 | f.actualTableLog = 6 113 | copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 114 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, 115 | -1, -1, -1, -1}) 116 | f.symbolLen = 36 117 | case tableOffsets: 118 | // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 119 | f.actualTableLog = 5 120 | copy(f.norm[:], []int16{ 121 | 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 122 | 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) 123 | f.symbolLen = 29 124 | case tableMatchLengths: 125 | //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 126 | f.actualTableLog = 6 127 | copy(f.norm[:], []int16{ 128 | 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 129 | 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 130 | 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 131 | -1, -1, -1, -1, -1}) 132 | f.symbolLen = 53 133 | } 134 | if err := f.buildDtable(); err != nil { 135 | panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) 136 | } 137 | if err := f.transform(symbolTableX[i]); err != nil { 138 | panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) 139 | } 140 | f.preDefined = true 141 | 142 | // Create encoder as well 143 | enc := &fsePredefEnc[i] 144 | copy(enc.norm[:], f.norm[:]) 145 | enc.symbolLen = f.symbolLen 146 | enc.actualTableLog = f.actualTableLog 147 | if err := enc.buildCTable(); err != nil { 148 | panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) 149 | } 150 | enc.setBits(bitTables[i]) 151 | enc.preDefined 
= true 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/hash.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019+ Klaus Post. All rights reserved. 2 | // License information can be found in the LICENSE file. 3 | // Based on work by Yann Collet, released under BSD License. 4 | 5 | package zstd 6 | 7 | const ( 8 | prime3bytes = 506832829 9 | prime4bytes = 2654435761 10 | prime5bytes = 889523592379 11 | prime6bytes = 227718039650203 12 | prime7bytes = 58295818150454627 13 | prime8bytes = 0xcf1bbcdcb7a56463 14 | ) 15 | 16 | // hashLen returns a hash of the lowest l bytes of u for a size size of h bytes. 17 | // l must be >=4 and <=8. Any other value will return hash for 4 bytes. 18 | // h should always be <32. 19 | // Preferably h and l should be a constant. 20 | // FIXME: This does NOT get resolved, if 'mls' is constant, 21 | // so this cannot be used. 22 | func hashLen(u uint64, hashLog, mls uint8) uint32 { 23 | switch mls { 24 | case 5: 25 | return hash5(u, hashLog) 26 | case 6: 27 | return hash6(u, hashLog) 28 | case 7: 29 | return hash7(u, hashLog) 30 | case 8: 31 | return hash8(u, hashLog) 32 | default: 33 | return hash4x64(u, hashLog) 34 | } 35 | } 36 | 37 | // hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. 38 | // Preferably h should be a constant and should always be <32. 39 | func hash3(u uint32, h uint8) uint32 { 40 | return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) 41 | } 42 | 43 | // hash4 returns the hash of u to fit in a hash table with h bits. 44 | // Preferably h should be a constant and should always be <32. 45 | func hash4(u uint32, h uint8) uint32 { 46 | return (u * prime4bytes) >> ((32 - h) & 31) 47 | } 48 | 49 | // hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. 
50 | // Preferably h should be a constant and should always be <32. 51 | func hash4x64(u uint64, h uint8) uint32 { 52 | return (uint32(u) * prime4bytes) >> ((32 - h) & 31) 53 | } 54 | 55 | // hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. 56 | // Preferably h should be a constant and should always be <64. 57 | func hash5(u uint64, h uint8) uint32 { 58 | return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) 59 | } 60 | 61 | // hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. 62 | // Preferably h should be a constant and should always be <64. 63 | func hash6(u uint64, h uint8) uint32 { 64 | return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) 65 | } 66 | 67 | // hash6 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. 68 | // Preferably h should be a constant and should always be <64. 69 | func hash7(u uint64, h uint8) uint32 { 70 | return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) 71 | } 72 | 73 | // hash8 returns the hash of u to fit in a hash table with h bits. 74 | // Preferably h should be a constant and should always be <64. 75 | func hash8(u uint64, h uint8) uint32 { 76 | return uint32((u * prime8bytes) >> ((64 - h) & 63)) 77 | } 78 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/history.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019+ Klaus Post. All rights reserved. 2 | // License information can be found in the LICENSE file. 3 | // Based on work by Yann Collet, released under BSD License. 4 | 5 | package zstd 6 | 7 | import ( 8 | "github.com/klauspost/compress/huff0" 9 | ) 10 | 11 | // history contains the information transferred between blocks. 
12 | type history struct { 13 | b []byte 14 | huffTree *huff0.Scratch 15 | recentOffsets [3]int 16 | decoders sequenceDecs 17 | windowSize int 18 | maxSize int 19 | error bool 20 | } 21 | 22 | // reset will reset the history to initial state of a frame. 23 | // The history must already have been initialized to the desired size. 24 | func (h *history) reset() { 25 | h.b = h.b[:0] 26 | h.error = false 27 | h.recentOffsets = [3]int{1, 4, 8} 28 | if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { 29 | fseDecoderPool.Put(f) 30 | } 31 | if f := h.decoders.offsets.fse; f != nil && !f.preDefined { 32 | fseDecoderPool.Put(f) 33 | } 34 | if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { 35 | fseDecoderPool.Put(f) 36 | } 37 | h.decoders = sequenceDecs{} 38 | if h.huffTree != nil { 39 | huffDecoderPool.Put(h.huffTree) 40 | } 41 | h.huffTree = nil 42 | //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) 43 | } 44 | 45 | // append bytes to history. 46 | // This function will make sure there is space for it, 47 | // if the buffer has been allocated with enough extra space. 48 | func (h *history) append(b []byte) { 49 | if len(b) >= h.windowSize { 50 | // Discard all history by simply overwriting 51 | h.b = h.b[:h.windowSize] 52 | copy(h.b, b[len(b)-h.windowSize:]) 53 | return 54 | } 55 | 56 | // If there is space, append it. 57 | if len(b) < cap(h.b)-len(h.b) { 58 | h.b = append(h.b, b...) 59 | return 60 | } 61 | 62 | // Move data down so we only have window size left. 63 | // We know we have less than window size in b at this point. 64 | discard := len(b) + len(h.b) - h.windowSize 65 | copy(h.b, h.b[discard:]) 66 | h.b = h.b[:h.windowSize] 67 | copy(h.b[h.windowSize-len(b):], b) 68 | } 69 | 70 | // append bytes to history without ever discarding anything. 71 | func (h *history) appendKeep(b []byte) { 72 | h.b = append(h.b, b...) 
73 | } 74 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016 Caleb Spare 2 | 3 | MIT License 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining 6 | a copy of this software and associated documentation files (the 7 | "Software"), to deal in the Software without restriction, including 8 | without limitation the rights to use, copy, modify, merge, publish, 9 | distribute, sublicense, and/or sell copies of the Software, and to 10 | permit persons to whom the Software is furnished to do so, subject to 11 | the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be 14 | included in all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 20 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 22 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md: -------------------------------------------------------------------------------- 1 | # xxhash 2 | 3 | VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. 
4 | 5 | 6 | [![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) 7 | [![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) 8 | 9 | xxhash is a Go implementation of the 64-bit 10 | [xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a 11 | high-quality hashing algorithm that is much faster than anything in the Go 12 | standard library. 13 | 14 | This package provides a straightforward API: 15 | 16 | ``` 17 | func Sum64(b []byte) uint64 18 | func Sum64String(s string) uint64 19 | type Digest struct{ ... } 20 | func New() *Digest 21 | ``` 22 | 23 | The `Digest` type implements hash.Hash64. Its key methods are: 24 | 25 | ``` 26 | func (*Digest) Write([]byte) (int, error) 27 | func (*Digest) WriteString(string) (int, error) 28 | func (*Digest) Sum64() uint64 29 | ``` 30 | 31 | This implementation provides a fast pure-Go implementation and an even faster 32 | assembly implementation for amd64. 33 | 34 | ## Benchmarks 35 | 36 | Here are some quick benchmarks comparing the pure-Go and assembly 37 | implementations of Sum64. 
38 | 39 | | input size | purego | asm | 40 | | --- | --- | --- | 41 | | 5 B | 979.66 MB/s | 1291.17 MB/s | 42 | | 100 B | 7475.26 MB/s | 7973.40 MB/s | 43 | | 4 KB | 17573.46 MB/s | 17602.65 MB/s | 44 | | 10 MB | 17131.46 MB/s | 17142.16 MB/s | 45 | 46 | These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using 47 | the following commands under Go 1.11.2: 48 | 49 | ``` 50 | $ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' 51 | $ go test -benchtime 10s -bench '/xxhash,direct,bytes' 52 | ``` 53 | 54 | ## Projects using this package 55 | 56 | - [InfluxDB](https://github.com/influxdata/influxdb) 57 | - [Prometheus](https://github.com/prometheus/prometheus) 58 | - [FreeCache](https://github.com/coocood/freecache) 59 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go: -------------------------------------------------------------------------------- 1 | // +build !appengine 2 | // +build gc 3 | // +build !purego 4 | 5 | package xxhash 6 | 7 | // Sum64 computes the 64-bit xxHash digest of b. 8 | // 9 | //go:noescape 10 | func Sum64(b []byte) uint64 11 | 12 | //go:noescape 13 | func writeBlocks(*Digest, []byte) int 14 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s: -------------------------------------------------------------------------------- 1 | // +build !appengine 2 | // +build gc 3 | // +build !purego 4 | 5 | #include "textflag.h" 6 | 7 | // Register allocation: 8 | // AX h 9 | // CX pointer to advance through b 10 | // DX n 11 | // BX loop end 12 | // R8 v1, k1 13 | // R9 v2 14 | // R10 v3 15 | // R11 v4 16 | // R12 tmp 17 | // R13 prime1v 18 | // R14 prime2v 19 | // R15 prime4v 20 | 21 | // round reads from and advances the buffer pointer in CX. 22 | // It assumes that R13 has prime1v and R14 has prime2v. 
23 | #define round(r) \ 24 | MOVQ (CX), R12 \ 25 | ADDQ $8, CX \ 26 | IMULQ R14, R12 \ 27 | ADDQ R12, r \ 28 | ROLQ $31, r \ 29 | IMULQ R13, r 30 | 31 | // mergeRound applies a merge round on the two registers acc and val. 32 | // It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. 33 | #define mergeRound(acc, val) \ 34 | IMULQ R14, val \ 35 | ROLQ $31, val \ 36 | IMULQ R13, val \ 37 | XORQ val, acc \ 38 | IMULQ R13, acc \ 39 | ADDQ R15, acc 40 | 41 | // func Sum64(b []byte) uint64 42 | TEXT ·Sum64(SB), NOSPLIT, $0-32 43 | // Load fixed primes. 44 | MOVQ ·prime1v(SB), R13 45 | MOVQ ·prime2v(SB), R14 46 | MOVQ ·prime4v(SB), R15 47 | 48 | // Load slice. 49 | MOVQ b_base+0(FP), CX 50 | MOVQ b_len+8(FP), DX 51 | LEAQ (CX)(DX*1), BX 52 | 53 | // The first loop limit will be len(b)-32. 54 | SUBQ $32, BX 55 | 56 | // Check whether we have at least one block. 57 | CMPQ DX, $32 58 | JLT noBlocks 59 | 60 | // Set up initial state (v1, v2, v3, v4). 61 | MOVQ R13, R8 62 | ADDQ R14, R8 63 | MOVQ R14, R9 64 | XORQ R10, R10 65 | XORQ R11, R11 66 | SUBQ R13, R11 67 | 68 | // Loop until CX > BX. 69 | blockLoop: 70 | round(R8) 71 | round(R9) 72 | round(R10) 73 | round(R11) 74 | 75 | CMPQ CX, BX 76 | JLE blockLoop 77 | 78 | MOVQ R8, AX 79 | ROLQ $1, AX 80 | MOVQ R9, R12 81 | ROLQ $7, R12 82 | ADDQ R12, AX 83 | MOVQ R10, R12 84 | ROLQ $12, R12 85 | ADDQ R12, AX 86 | MOVQ R11, R12 87 | ROLQ $18, R12 88 | ADDQ R12, AX 89 | 90 | mergeRound(AX, R8) 91 | mergeRound(AX, R9) 92 | mergeRound(AX, R10) 93 | mergeRound(AX, R11) 94 | 95 | JMP afterBlocks 96 | 97 | noBlocks: 98 | MOVQ ·prime5v(SB), AX 99 | 100 | afterBlocks: 101 | ADDQ DX, AX 102 | 103 | // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. 104 | ADDQ $24, BX 105 | 106 | CMPQ CX, BX 107 | JG fourByte 108 | 109 | wordLoop: 110 | // Calculate k1. 
111 | MOVQ (CX), R8 112 | ADDQ $8, CX 113 | IMULQ R14, R8 114 | ROLQ $31, R8 115 | IMULQ R13, R8 116 | 117 | XORQ R8, AX 118 | ROLQ $27, AX 119 | IMULQ R13, AX 120 | ADDQ R15, AX 121 | 122 | CMPQ CX, BX 123 | JLE wordLoop 124 | 125 | fourByte: 126 | ADDQ $4, BX 127 | CMPQ CX, BX 128 | JG singles 129 | 130 | MOVL (CX), R8 131 | ADDQ $4, CX 132 | IMULQ R13, R8 133 | XORQ R8, AX 134 | 135 | ROLQ $23, AX 136 | IMULQ R14, AX 137 | ADDQ ·prime3v(SB), AX 138 | 139 | singles: 140 | ADDQ $4, BX 141 | CMPQ CX, BX 142 | JGE finalize 143 | 144 | singlesLoop: 145 | MOVBQZX (CX), R12 146 | ADDQ $1, CX 147 | IMULQ ·prime5v(SB), R12 148 | XORQ R12, AX 149 | 150 | ROLQ $11, AX 151 | IMULQ R13, AX 152 | 153 | CMPQ CX, BX 154 | JL singlesLoop 155 | 156 | finalize: 157 | MOVQ AX, R12 158 | SHRQ $33, R12 159 | XORQ R12, AX 160 | IMULQ R14, AX 161 | MOVQ AX, R12 162 | SHRQ $29, R12 163 | XORQ R12, AX 164 | IMULQ ·prime3v(SB), AX 165 | MOVQ AX, R12 166 | SHRQ $32, R12 167 | XORQ R12, AX 168 | 169 | MOVQ AX, ret+24(FP) 170 | RET 171 | 172 | // writeBlocks uses the same registers as above except that it uses AX to store 173 | // the d pointer. 174 | 175 | // func writeBlocks(d *Digest, b []byte) int 176 | TEXT ·writeBlocks(SB), NOSPLIT, $0-40 177 | // Load fixed primes needed for round. 178 | MOVQ ·prime1v(SB), R13 179 | MOVQ ·prime2v(SB), R14 180 | 181 | // Load slice. 182 | MOVQ b_base+8(FP), CX 183 | MOVQ b_len+16(FP), DX 184 | LEAQ (CX)(DX*1), BX 185 | SUBQ $32, BX 186 | 187 | // Load vN from d. 188 | MOVQ d+0(FP), AX 189 | MOVQ 0(AX), R8 // v1 190 | MOVQ 8(AX), R9 // v2 191 | MOVQ 16(AX), R10 // v3 192 | MOVQ 24(AX), R11 // v4 193 | 194 | // We don't need to check the loop condition here; this function is 195 | // always called with at least one block of data to process. 196 | blockLoop: 197 | round(R8) 198 | round(R9) 199 | round(R10) 200 | round(R11) 201 | 202 | CMPQ CX, BX 203 | JLE blockLoop 204 | 205 | // Copy vN back to d. 
206 | MOVQ R8, 0(AX) 207 | MOVQ R9, 8(AX) 208 | MOVQ R10, 16(AX) 209 | MOVQ R11, 24(AX) 210 | 211 | // The number of bytes written is CX minus the old base pointer. 212 | SUBQ b_base+8(FP), CX 213 | MOVQ CX, ret+32(FP) 214 | 215 | RET 216 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go: -------------------------------------------------------------------------------- 1 | // +build !amd64 appengine !gc purego 2 | 3 | package xxhash 4 | 5 | // Sum64 computes the 64-bit xxHash digest of b. 6 | func Sum64(b []byte) uint64 { 7 | // A simpler version would be 8 | // d := New() 9 | // d.Write(b) 10 | // return d.Sum64() 11 | // but this is faster, particularly for small inputs. 12 | 13 | n := len(b) 14 | var h uint64 15 | 16 | if n >= 32 { 17 | v1 := prime1v + prime2 18 | v2 := prime2 19 | v3 := uint64(0) 20 | v4 := -prime1v 21 | for len(b) >= 32 { 22 | v1 = round(v1, u64(b[0:8:len(b)])) 23 | v2 = round(v2, u64(b[8:16:len(b)])) 24 | v3 = round(v3, u64(b[16:24:len(b)])) 25 | v4 = round(v4, u64(b[24:32:len(b)])) 26 | b = b[32:len(b):len(b)] 27 | } 28 | h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) 29 | h = mergeRound(h, v1) 30 | h = mergeRound(h, v2) 31 | h = mergeRound(h, v3) 32 | h = mergeRound(h, v4) 33 | } else { 34 | h = prime5 35 | } 36 | 37 | h += uint64(n) 38 | 39 | i, end := 0, len(b) 40 | for ; i+8 <= end; i += 8 { 41 | k1 := round(0, u64(b[i:i+8:len(b)])) 42 | h ^= k1 43 | h = rol27(h)*prime1 + prime4 44 | } 45 | if i+4 <= end { 46 | h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 47 | h = rol23(h)*prime2 + prime3 48 | i += 4 49 | } 50 | for ; i < end; i++ { 51 | h ^= uint64(b[i]) * prime5 52 | h = rol11(h) * prime1 53 | } 54 | 55 | h ^= h >> 33 56 | h *= prime2 57 | h ^= h >> 29 58 | h *= prime3 59 | h ^= h >> 32 60 | 61 | return h 62 | } 63 | 64 | func writeBlocks(d *Digest, b []byte) int { 65 | v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 66 | n := len(b) 67 | 
for len(b) >= 32 { 68 | v1 = round(v1, u64(b[0:8:len(b)])) 69 | v2 = round(v2, u64(b[8:16:len(b)])) 70 | v3 = round(v3, u64(b[16:24:len(b)])) 71 | v4 = round(v4, u64(b[24:32:len(b)])) 72 | b = b[32:len(b):len(b)] 73 | } 74 | d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 75 | return n - len(b) 76 | } 77 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go: -------------------------------------------------------------------------------- 1 | package xxhash 2 | 3 | // Sum64String computes the 64-bit xxHash digest of s. 4 | func Sum64String(s string) uint64 { 5 | return Sum64([]byte(s)) 6 | } 7 | 8 | // WriteString adds more data to d. It always returns len(s), nil. 9 | func (d *Digest) WriteString(s string) (n int, err error) { 10 | return d.Write([]byte(s)) 11 | } 12 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/seqenc.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019+ Klaus Post. All rights reserved. 2 | // License information can be found in the LICENSE file. 3 | // Based on work by Yann Collet, released under BSD License. 4 | 5 | package zstd 6 | 7 | import "math/bits" 8 | 9 | type seqCoders struct { 10 | llEnc, ofEnc, mlEnc *fseEncoder 11 | llPrev, ofPrev, mlPrev *fseEncoder 12 | } 13 | 14 | // swap coders with another (block). 15 | func (s *seqCoders) swap(other *seqCoders) { 16 | *s, *other = *other, *s 17 | } 18 | 19 | // setPrev will update the previous encoders to the actually used ones 20 | // and make sure a fresh one is in the main slot. 
21 | func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { 22 | compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { 23 | // We used the new one, more current to history and reuse the previous history 24 | if *current == used { 25 | *prev, *current = *current, *prev 26 | c := *current 27 | p := *prev 28 | c.reUsed = false 29 | p.reUsed = true 30 | return 31 | } 32 | if used == *prev { 33 | return 34 | } 35 | // Ensure we cannot reuse by accident 36 | prevEnc := *prev 37 | prevEnc.symbolLen = 0 38 | return 39 | } 40 | compareSwap(ll, &s.llEnc, &s.llPrev) 41 | compareSwap(ml, &s.mlEnc, &s.mlPrev) 42 | compareSwap(of, &s.ofEnc, &s.ofPrev) 43 | } 44 | 45 | func highBit(val uint32) (n uint32) { 46 | return uint32(bits.Len32(val) - 1) 47 | } 48 | 49 | var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, 50 | 8, 9, 10, 11, 12, 13, 14, 15, 51 | 16, 16, 17, 17, 18, 18, 19, 19, 52 | 20, 20, 20, 20, 21, 21, 21, 21, 53 | 22, 22, 22, 22, 22, 22, 22, 22, 54 | 23, 23, 23, 23, 23, 23, 23, 23, 55 | 24, 24, 24, 24, 24, 24, 24, 24, 56 | 24, 24, 24, 24, 24, 24, 24, 24} 57 | 58 | // Up to 6 bits 59 | const maxLLCode = 35 60 | 61 | // llBitsTable translates from ll code to number of bits. 62 | var llBitsTable = [maxLLCode + 1]byte{ 63 | 0, 0, 0, 0, 0, 0, 0, 0, 64 | 0, 0, 0, 0, 0, 0, 0, 0, 65 | 1, 1, 1, 1, 2, 2, 3, 3, 66 | 4, 6, 7, 8, 9, 10, 11, 12, 67 | 13, 14, 15, 16} 68 | 69 | // llCode returns the code that represents the literal length requested. 
70 | func llCode(litLength uint32) uint8 { 71 | const llDeltaCode = 19 72 | if litLength <= 63 { 73 | // Compiler insists on bounds check (Go 1.12) 74 | return llCodeTable[litLength&63] 75 | } 76 | return uint8(highBit(litLength)) + llDeltaCode 77 | } 78 | 79 | var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 80 | 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 81 | 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 82 | 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 83 | 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 84 | 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 85 | 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 86 | 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} 87 | 88 | // Up to 6 bits 89 | const maxMLCode = 52 90 | 91 | // mlBitsTable translates from ml code to number of bits. 92 | var mlBitsTable = [maxMLCode + 1]byte{ 93 | 0, 0, 0, 0, 0, 0, 0, 0, 94 | 0, 0, 0, 0, 0, 0, 0, 0, 95 | 0, 0, 0, 0, 0, 0, 0, 0, 96 | 0, 0, 0, 0, 0, 0, 0, 0, 97 | 1, 1, 1, 1, 2, 2, 3, 3, 98 | 4, 4, 5, 7, 8, 9, 10, 11, 99 | 12, 13, 14, 15, 16} 100 | 101 | // note : mlBase = matchLength - MINMATCH; 102 | // because it's the format it's stored in seqStore->sequences 103 | func mlCode(mlBase uint32) uint8 { 104 | const mlDeltaCode = 36 105 | if mlBase <= 127 { 106 | // Compiler insists on bounds check (Go 1.12) 107 | return mlCodeTable[mlBase&127] 108 | } 109 | return uint8(highBit(mlBase)) + mlDeltaCode 110 | } 111 | 112 | func ofCode(offset uint32) uint8 { 113 | // A valid offset will always be > 0. 114 | return uint8(bits.Len32(offset) - 1) 115 | } 116 | -------------------------------------------------------------------------------- /vendor/github.com/klauspost/compress/zstd/zstd.go: -------------------------------------------------------------------------------- 1 | // Package zstd provides decompression of zstandard files. 
2 | // 3 | // For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd 4 | package zstd 5 | 6 | import ( 7 | "errors" 8 | "log" 9 | "math/bits" 10 | ) 11 | 12 | const debug = false 13 | const debugSequences = false 14 | 15 | // force encoder to use predefined tables. 16 | const forcePreDef = false 17 | 18 | // zstdMinMatch is the minimum zstd match length. 19 | const zstdMinMatch = 3 20 | 21 | var ( 22 | // ErrReservedBlockType is returned when a reserved block type is found. 23 | // Typically this indicates wrong or corrupted input. 24 | ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") 25 | 26 | // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. 27 | // Typically this indicates wrong or corrupted input. 28 | ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") 29 | 30 | // ErrBlockTooSmall is returned when a block is too small to be decoded. 31 | // Typically returned on invalid input. 32 | ErrBlockTooSmall = errors.New("block too small") 33 | 34 | // ErrMagicMismatch is returned when a "magic" number isn't what is expected. 35 | // Typically this indicates wrong or corrupted input. 36 | ErrMagicMismatch = errors.New("invalid input: magic number mismatch") 37 | 38 | // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. 39 | // Typically this indicates wrong or corrupted input. 40 | ErrWindowSizeExceeded = errors.New("window size exceeded") 41 | 42 | // ErrWindowSizeTooSmall is returned when no window size is specified. 43 | // Typically this indicates wrong or corrupted input. 44 | ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") 45 | 46 | // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. 
47 | ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") 48 | 49 | // ErrUnknownDictionary is returned if the dictionary ID is unknown. 50 | // For the time being dictionaries are not supported. 51 | ErrUnknownDictionary = errors.New("unknown dictionary") 52 | 53 | // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. 54 | // This is only returned if SingleSegment is specified on the frame. 55 | ErrFrameSizeExceeded = errors.New("frame size exceeded") 56 | 57 | // ErrCRCMismatch is returned if CRC mismatches. 58 | ErrCRCMismatch = errors.New("CRC check failed") 59 | 60 | // ErrDecoderClosed will be returned if the Decoder was used after 61 | // Close has been called. 62 | ErrDecoderClosed = errors.New("decoder used after Close") 63 | ) 64 | 65 | func println(a ...interface{}) { 66 | if debug { 67 | log.Println(a...) 68 | } 69 | } 70 | 71 | func printf(format string, a ...interface{}) { 72 | if debug { 73 | log.Printf(format, a...) 74 | } 75 | } 76 | 77 | // matchLen returns the maximum length. 78 | // a must be the shortest of the two. 79 | // The function also returns whether all bytes matched. 80 | func matchLen(a, b []byte) int { 81 | b = b[:len(a)] 82 | for i := 0; i < len(a)-7; i += 8 { 83 | if diff := load64(a, i) ^ load64(b, i); diff != 0 { 84 | return i + (bits.TrailingZeros64(diff) >> 3) 85 | } 86 | } 87 | checked := (len(a) >> 3) << 3 88 | a = a[checked:] 89 | b = b[checked:] 90 | // TODO: We could do a 4 check. 91 | for i := range a { 92 | if a[i] != b[i] { 93 | return int(i) + checked 94 | } 95 | } 96 | return len(a) + checked 97 | } 98 | 99 | // matchLen returns a match length in src between index s and t 100 | func matchLenIn(src []byte, s, t int32) int32 { 101 | s1 := len(src) 102 | b := src[t:] 103 | a := src[s:s1] 104 | b = b[:len(a)] 105 | // Extend the match to be as long as possible. 
106 | for i := range a { 107 | if a[i] != b[i] { 108 | return int32(i) 109 | } 110 | } 111 | return int32(len(a)) 112 | } 113 | 114 | func load3232(b []byte, i int32) uint32 { 115 | // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 116 | b = b[i:] 117 | b = b[:4] 118 | return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 119 | } 120 | 121 | func load6432(b []byte, i int32) uint64 { 122 | // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 123 | b = b[i:] 124 | b = b[:8] 125 | return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | 126 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 127 | } 128 | 129 | func load64(b []byte, i int) uint64 { 130 | // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 131 | b = b[i:] 132 | b = b[:8] 133 | return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | 134 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 135 | } 136 | -------------------------------------------------------------------------------- /vendor/github.com/pkg/errors/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | 20 | _testmain.go 21 | 22 | *.exe 23 | *.test 24 | *.prof 25 | -------------------------------------------------------------------------------- /vendor/github.com/pkg/errors/.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | go_import_path: github.com/pkg/errors 3 | go: 4 | - 1.4.x 5 | - 1.5.x 6 | - 
1.6.x 7 | - 1.7.x 8 | - 1.8.x 9 | - 1.9.x 10 | - 1.10.x 11 | - 1.11.x 12 | - tip 13 | 14 | script: 15 | - go test -v ./... 16 | -------------------------------------------------------------------------------- /vendor/github.com/pkg/errors/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, Dave Cheney 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24 | -------------------------------------------------------------------------------- /vendor/github.com/pkg/errors/README.md: -------------------------------------------------------------------------------- 1 | # errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) 2 | 3 | Package errors provides simple error handling primitives. 4 | 5 | `go get github.com/pkg/errors` 6 | 7 | The traditional error handling idiom in Go is roughly akin to 8 | ```go 9 | if err != nil { 10 | return err 11 | } 12 | ``` 13 | which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. 14 | 15 | ## Adding context to an error 16 | 17 | The errors.Wrap function returns a new error that adds context to the original error. For example 18 | ```go 19 | _, err := ioutil.ReadAll(r) 20 | if err != nil { 21 | return errors.Wrap(err, "read failed") 22 | } 23 | ``` 24 | ## Retrieving the cause of an error 25 | 26 | Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. 
27 | ```go 28 | type causer interface { 29 | Cause() error 30 | } 31 | ``` 32 | `errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: 33 | ```go 34 | switch err := errors.Cause(err).(type) { 35 | case *MyError: 36 | // handle specifically 37 | default: 38 | // unknown error 39 | } 40 | ``` 41 | 42 | [Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). 43 | 44 | ## Contributing 45 | 46 | We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. 47 | 48 | Before proposing a change, please discuss your change by raising an issue. 49 | 50 | ## License 51 | 52 | BSD-2-Clause 53 | -------------------------------------------------------------------------------- /vendor/github.com/pkg/errors/appveyor.yml: -------------------------------------------------------------------------------- 1 | version: build-{build}.{branch} 2 | 3 | clone_folder: C:\gopath\src\github.com\pkg\errors 4 | shallow_clone: true # for startup speed 5 | 6 | environment: 7 | GOPATH: C:\gopath 8 | 9 | platform: 10 | - x64 11 | 12 | # http://www.appveyor.com/docs/installed-software 13 | install: 14 | # some helpful output for debugging builds 15 | - go version 16 | - go env 17 | # pre-installed MinGW at C:\MinGW is 32bit only 18 | # but MSYS2 at C:\msys64 has mingw64 19 | - set PATH=C:\msys64\mingw64\bin;%PATH% 20 | - gcc --version 21 | - g++ --version 22 | 23 | build_script: 24 | - go install -v ./... 25 | 26 | test_script: 27 | - set PATH=C:\gopath\bin;%PATH% 28 | - go test -v ./... 
29 | 30 | #artifacts: 31 | # - path: '%GOPATH%\bin\*.exe' 32 | deploy: off 33 | -------------------------------------------------------------------------------- /vendor/github.com/pkg/errors/stack.go: -------------------------------------------------------------------------------- 1 | package errors 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "path" 7 | "runtime" 8 | "strings" 9 | ) 10 | 11 | // Frame represents a program counter inside a stack frame. 12 | type Frame uintptr 13 | 14 | // pc returns the program counter for this frame; 15 | // multiple frames may have the same PC value. 16 | func (f Frame) pc() uintptr { return uintptr(f) - 1 } 17 | 18 | // file returns the full path to the file that contains the 19 | // function for this Frame's pc. 20 | func (f Frame) file() string { 21 | fn := runtime.FuncForPC(f.pc()) 22 | if fn == nil { 23 | return "unknown" 24 | } 25 | file, _ := fn.FileLine(f.pc()) 26 | return file 27 | } 28 | 29 | // line returns the line number of source code of the 30 | // function for this Frame's pc. 31 | func (f Frame) line() int { 32 | fn := runtime.FuncForPC(f.pc()) 33 | if fn == nil { 34 | return 0 35 | } 36 | _, line := fn.FileLine(f.pc()) 37 | return line 38 | } 39 | 40 | // Format formats the frame according to the fmt.Formatter interface. 
41 | // 42 | // %s source file 43 | // %d source line 44 | // %n function name 45 | // %v equivalent to %s:%d 46 | // 47 | // Format accepts flags that alter the printing of some verbs, as follows: 48 | // 49 | // %+s function name and path of source file relative to the compile time 50 | // GOPATH separated by \n\t (\n\t) 51 | // %+v equivalent to %+s:%d 52 | func (f Frame) Format(s fmt.State, verb rune) { 53 | switch verb { 54 | case 's': 55 | switch { 56 | case s.Flag('+'): 57 | pc := f.pc() 58 | fn := runtime.FuncForPC(pc) 59 | if fn == nil { 60 | io.WriteString(s, "unknown") 61 | } else { 62 | file, _ := fn.FileLine(pc) 63 | fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) 64 | } 65 | default: 66 | io.WriteString(s, path.Base(f.file())) 67 | } 68 | case 'd': 69 | fmt.Fprintf(s, "%d", f.line()) 70 | case 'n': 71 | name := runtime.FuncForPC(f.pc()).Name() 72 | io.WriteString(s, funcname(name)) 73 | case 'v': 74 | f.Format(s, 's') 75 | io.WriteString(s, ":") 76 | f.Format(s, 'd') 77 | } 78 | } 79 | 80 | // StackTrace is stack of Frames from innermost (newest) to outermost (oldest). 81 | type StackTrace []Frame 82 | 83 | // Format formats the stack of Frames according to the fmt.Formatter interface. 84 | // 85 | // %s lists source files for each Frame in the stack 86 | // %v lists the source file and line number for each Frame in the stack 87 | // 88 | // Format accepts flags that alter the printing of some verbs, as follows: 89 | // 90 | // %+v Prints filename, function, and line number for each Frame in the stack. 91 | func (st StackTrace) Format(s fmt.State, verb rune) { 92 | switch verb { 93 | case 'v': 94 | switch { 95 | case s.Flag('+'): 96 | for _, f := range st { 97 | fmt.Fprintf(s, "\n%+v", f) 98 | } 99 | case s.Flag('#'): 100 | fmt.Fprintf(s, "%#v", []Frame(st)) 101 | default: 102 | fmt.Fprintf(s, "%v", []Frame(st)) 103 | } 104 | case 's': 105 | fmt.Fprintf(s, "%s", []Frame(st)) 106 | } 107 | } 108 | 109 | // stack represents a stack of program counters. 
110 | type stack []uintptr 111 | 112 | func (s *stack) Format(st fmt.State, verb rune) { 113 | switch verb { 114 | case 'v': 115 | switch { 116 | case st.Flag('+'): 117 | for _, pc := range *s { 118 | f := Frame(pc) 119 | fmt.Fprintf(st, "\n%+v", f) 120 | } 121 | } 122 | } 123 | } 124 | 125 | func (s *stack) StackTrace() StackTrace { 126 | f := make([]Frame, len(*s)) 127 | for i := 0; i < len(f); i++ { 128 | f[i] = Frame((*s)[i]) 129 | } 130 | return f 131 | } 132 | 133 | func callers() *stack { 134 | const depth = 32 135 | var pcs [depth]uintptr 136 | n := runtime.Callers(3, pcs[:]) 137 | var st stack = pcs[0:n] 138 | return &st 139 | } 140 | 141 | // funcname removes the path prefix component of a function's name reported by func.Name(). 142 | func funcname(name string) string { 143 | i := strings.LastIndex(name, "/") 144 | name = name[i+1:] 145 | i = strings.Index(name, ".") 146 | return name[i+1:] 147 | } 148 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/.gitignore: -------------------------------------------------------------------------------- 1 | # .gitignore 2 | 3 | TODO.html 4 | README.html 5 | 6 | lzma/writer.txt 7 | lzma/reader.txt 8 | 9 | cmd/gxz/gxz 10 | cmd/xb/xb 11 | 12 | # test executables 13 | *.test 14 | 15 | # profile files 16 | *.out 17 | 18 | # vim swap file 19 | .*.swp 20 | 21 | # executables on windows 22 | *.exe 23 | 24 | # default compression test file 25 | enwik8* 26 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014-2016 Ulrich Kunitz 2 | All rights reserved. 
3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * My name, Ulrich Kunitz, may not be used to endorse or promote products 15 | derived from this software without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 21 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 23 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 24 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 25 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/README.md: -------------------------------------------------------------------------------- 1 | # Package xz 2 | 3 | This Go language package supports the reading and writing of xz 4 | compressed streams. It includes also a gxz command for compressing and 5 | decompressing data. The package is completely written in Go and doesn't 6 | have any dependency on any C code. 
7 | 8 | The package is currently under development. There might be bugs and APIs 9 | are not considered stable. At this time the package cannot compete with 10 | the xz tool regarding compression speed and size. The algorithms there 11 | have been developed over a long time and are highly optimized. However 12 | there are a number of improvements planned and I'm very optimistic about 13 | parallel compression and decompression. Stay tuned! 14 | 15 | ## Using the API 16 | 17 | The following example program shows how to use the API. 18 | 19 | ```go 20 | package main 21 | 22 | import ( 23 | "bytes" 24 | "io" 25 | "log" 26 | "os" 27 | 28 | "github.com/ulikunitz/xz" 29 | ) 30 | 31 | func main() { 32 | const text = "The quick brown fox jumps over the lazy dog.\n" 33 | var buf bytes.Buffer 34 | // compress text 35 | w, err := xz.NewWriter(&buf) 36 | if err != nil { 37 | log.Fatalf("xz.NewWriter error %s", err) 38 | } 39 | if _, err := io.WriteString(w, text); err != nil { 40 | log.Fatalf("WriteString error %s", err) 41 | } 42 | if err := w.Close(); err != nil { 43 | log.Fatalf("w.Close error %s", err) 44 | } 45 | // decompress buffer and write output to stdout 46 | r, err := xz.NewReader(&buf) 47 | if err != nil { 48 | log.Fatalf("NewReader error %s", err) 49 | } 50 | if _, err = io.Copy(os.Stdout, r); err != nil { 51 | log.Fatalf("io.Copy error %s", err) 52 | } 53 | } 54 | ``` 55 | 56 | ## Using the gxz compression tool 57 | 58 | The package includes a gxz command line utility for compression and 59 | decompression. 60 | 61 | Use following command for installation: 62 | 63 | $ go get github.com/ulikunitz/xz/cmd/gxz 64 | 65 | To test it call the following command. 66 | 67 | $ gxz bigfile 68 | 69 | After some time a much smaller file bigfile.xz will replace bigfile. 70 | To decompress it use the following command. 
71 | 72 | $ gxz -d bigfile.xz 73 | 74 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/bits.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package xz 6 | 7 | import ( 8 | "errors" 9 | "io" 10 | ) 11 | 12 | // putUint32LE puts the little-endian representation of x into the first 13 | // four bytes of p. 14 | func putUint32LE(p []byte, x uint32) { 15 | p[0] = byte(x) 16 | p[1] = byte(x >> 8) 17 | p[2] = byte(x >> 16) 18 | p[3] = byte(x >> 24) 19 | } 20 | 21 | // putUint64LE puts the little-endian representation of x into the first 22 | // eight bytes of p. 23 | func putUint64LE(p []byte, x uint64) { 24 | p[0] = byte(x) 25 | p[1] = byte(x >> 8) 26 | p[2] = byte(x >> 16) 27 | p[3] = byte(x >> 24) 28 | p[4] = byte(x >> 32) 29 | p[5] = byte(x >> 40) 30 | p[6] = byte(x >> 48) 31 | p[7] = byte(x >> 56) 32 | } 33 | 34 | // uint32LE converts a little endian representation to an uint32 value. 35 | func uint32LE(p []byte) uint32 { 36 | return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | 37 | uint32(p[3])<<24 38 | } 39 | 40 | // putUvarint puts a uvarint representation of x into the byte slice. 41 | func putUvarint(p []byte, x uint64) int { 42 | i := 0 43 | for x >= 0x80 { 44 | p[i] = byte(x) | 0x80 45 | x >>= 7 46 | i++ 47 | } 48 | p[i] = byte(x) 49 | return i + 1 50 | } 51 | 52 | // errOverflow indicates an overflow of the 64-bit unsigned integer. 53 | var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer") 54 | 55 | // readUvarint reads a uvarint from the given byte reader. 
56 | func readUvarint(r io.ByteReader) (x uint64, n int, err error) { 57 | var s uint 58 | i := 0 59 | for { 60 | b, err := r.ReadByte() 61 | if err != nil { 62 | return x, i, err 63 | } 64 | i++ 65 | if b < 0x80 { 66 | if i > 10 || i == 10 && b > 1 { 67 | return x, i, errOverflowU64 68 | } 69 | return x | uint64(b)<>27]) 28 | } 29 | 30 | // nlz32 computes the number of leading zeros for an unsigned 32-bit integer. 31 | func nlz32(x uint32) int { 32 | // Smear left most bit to the right 33 | x |= x >> 1 34 | x |= x >> 2 35 | x |= x >> 4 36 | x |= x >> 8 37 | x |= x >> 16 38 | // Use ntz mechanism to calculate nlz. 39 | x++ 40 | if x == 0 { 41 | return 0 42 | } 43 | x *= ntz32Const 44 | return 32 - int(ntz32Table[x>>27]) 45 | } 46 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/breader.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | import ( 8 | "errors" 9 | "io" 10 | ) 11 | 12 | // breader provides the ReadByte function for a Reader. It doesn't read 13 | // more data from the reader than absolutely necessary. 14 | type breader struct { 15 | io.Reader 16 | // helper slice to save allocations 17 | p []byte 18 | } 19 | 20 | // ByteReader converts an io.Reader into an io.ByteReader. 21 | func ByteReader(r io.Reader) io.ByteReader { 22 | br, ok := r.(io.ByteReader) 23 | if !ok { 24 | return &breader{r, make([]byte, 1)} 25 | } 26 | return br 27 | } 28 | 29 | // ReadByte read byte function. 
30 | func (r *breader) ReadByte() (c byte, err error) { 31 | n, err := r.Reader.Read(r.p) 32 | if n < 1 { 33 | if err == nil { 34 | err = errors.New("breader.ReadByte: no data") 35 | } 36 | return 0, err 37 | } 38 | return r.p[0], nil 39 | } 40 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/buffer.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | import ( 8 | "errors" 9 | ) 10 | 11 | // buffer provides a circular buffer of bytes. If the front index equals 12 | // the rear index the buffer is empty. As a consequence front cannot be 13 | // equal rear for a full buffer. So a full buffer has a length that is 14 | // one byte less the the length of the data slice. 15 | type buffer struct { 16 | data []byte 17 | front int 18 | rear int 19 | } 20 | 21 | // newBuffer creates a buffer with the given size. 22 | func newBuffer(size int) *buffer { 23 | return &buffer{data: make([]byte, size+1)} 24 | } 25 | 26 | // Cap returns the capacity of the buffer. 27 | func (b *buffer) Cap() int { 28 | return len(b.data) - 1 29 | } 30 | 31 | // Resets the buffer. The front and rear index are set to zero. 32 | func (b *buffer) Reset() { 33 | b.front = 0 34 | b.rear = 0 35 | } 36 | 37 | // Buffered returns the number of bytes buffered. 38 | func (b *buffer) Buffered() int { 39 | delta := b.front - b.rear 40 | if delta < 0 { 41 | delta += len(b.data) 42 | } 43 | return delta 44 | } 45 | 46 | // Available returns the number of bytes available for writing. 
47 | func (b *buffer) Available() int { 48 | delta := b.rear - 1 - b.front 49 | if delta < 0 { 50 | delta += len(b.data) 51 | } 52 | return delta 53 | } 54 | 55 | // addIndex adds a non-negative integer to the index i and returns the 56 | // resulting index. The function takes care of wrapping the index as 57 | // well as potential overflow situations. 58 | func (b *buffer) addIndex(i int, n int) int { 59 | // subtraction of len(b.data) prevents overflow 60 | i += n - len(b.data) 61 | if i < 0 { 62 | i += len(b.data) 63 | } 64 | return i 65 | } 66 | 67 | // Read reads bytes from the buffer into p and returns the number of 68 | // bytes read. The function never returns an error but might return less 69 | // data than requested. 70 | func (b *buffer) Read(p []byte) (n int, err error) { 71 | n, err = b.Peek(p) 72 | b.rear = b.addIndex(b.rear, n) 73 | return n, err 74 | } 75 | 76 | // Peek reads bytes from the buffer into p without changing the buffer. 77 | // Peek will never return an error but might return less data than 78 | // requested. 79 | func (b *buffer) Peek(p []byte) (n int, err error) { 80 | m := b.Buffered() 81 | n = len(p) 82 | if m < n { 83 | n = m 84 | p = p[:n] 85 | } 86 | k := copy(p, b.data[b.rear:]) 87 | if k < n { 88 | copy(p[k:], b.data) 89 | } 90 | return n, nil 91 | } 92 | 93 | // Discard skips the n next bytes to read from the buffer, returning the 94 | // bytes discarded. 95 | // 96 | // If Discards skips fewer than n bytes, it returns an error. 97 | func (b *buffer) Discard(n int) (discarded int, err error) { 98 | if n < 0 { 99 | return 0, errors.New("buffer.Discard: negative argument") 100 | } 101 | m := b.Buffered() 102 | if m < n { 103 | n = m 104 | err = errors.New( 105 | "buffer.Discard: discarded less bytes then requested") 106 | } 107 | b.rear = b.addIndex(b.rear, n) 108 | return n, err 109 | } 110 | 111 | // ErrNoSpace indicates that there is insufficient space for the Write 112 | // operation. 
113 | var ErrNoSpace = errors.New("insufficient space") 114 | 115 | // Write puts data into the buffer. If less bytes are written than 116 | // requested ErrNoSpace is returned. 117 | func (b *buffer) Write(p []byte) (n int, err error) { 118 | m := b.Available() 119 | n = len(p) 120 | if m < n { 121 | n = m 122 | p = p[:m] 123 | err = ErrNoSpace 124 | } 125 | k := copy(b.data[b.front:], p) 126 | if k < n { 127 | copy(b.data, p[k:]) 128 | } 129 | b.front = b.addIndex(b.front, n) 130 | return n, err 131 | } 132 | 133 | // WriteByte writes a single byte into the buffer. The error ErrNoSpace 134 | // is returned if no single byte is available in the buffer for writing. 135 | func (b *buffer) WriteByte(c byte) error { 136 | if b.Available() < 1 { 137 | return ErrNoSpace 138 | } 139 | b.data[b.front] = c 140 | b.front = b.addIndex(b.front, 1) 141 | return nil 142 | } 143 | 144 | // prefixLen returns the length of the common prefix of a and b. 145 | func prefixLen(a, b []byte) int { 146 | if len(a) > len(b) { 147 | a, b = b, a 148 | } 149 | for i, c := range a { 150 | if b[i] != c { 151 | return i 152 | } 153 | } 154 | return len(a) 155 | } 156 | 157 | // matchLen returns the length of the common prefix for the given 158 | // distance from the rear and the byte slice p. 159 | func (b *buffer) matchLen(distance int, p []byte) int { 160 | var n int 161 | i := b.rear - distance 162 | if i < 0 { 163 | if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i { 164 | return n 165 | } 166 | p = p[n:] 167 | i = 0 168 | } 169 | n += prefixLen(p, b.data[i:]) 170 | return n 171 | } 172 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/bytewriter.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | package lzma 6 | 7 | import ( 8 | "errors" 9 | "io" 10 | ) 11 | 12 | // ErrLimit indicates that the limit of the LimitedByteWriter has been 13 | // reached. 14 | var ErrLimit = errors.New("limit reached") 15 | 16 | // LimitedByteWriter provides a byte writer that can be written until a 17 | // limit is reached. The field N provides the number of remaining 18 | // bytes. 19 | type LimitedByteWriter struct { 20 | BW io.ByteWriter 21 | N int64 22 | } 23 | 24 | // WriteByte writes a single byte to the limited byte writer. It returns 25 | // ErrLimit if the limit has been reached. If the byte is successfully 26 | // written the field N of the LimitedByteWriter will be decremented by 27 | // one. 28 | func (l *LimitedByteWriter) WriteByte(c byte) error { 29 | if l.N <= 0 { 30 | return ErrLimit 31 | } 32 | if err := l.BW.WriteByte(c); err != nil { 33 | return err 34 | } 35 | l.N-- 36 | return nil 37 | } 38 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/decoderdict.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | import ( 8 | "errors" 9 | "fmt" 10 | ) 11 | 12 | // decoderDict provides the dictionary for the decoder. The whole 13 | // dictionary is used as reader buffer. 14 | type decoderDict struct { 15 | buf buffer 16 | head int64 17 | } 18 | 19 | // newDecoderDict creates a new decoder dictionary. The whole dictionary 20 | // will be used as reader buffer. 
21 | func newDecoderDict(dictCap int) (d *decoderDict, err error) { 22 | // lower limit supports easy test cases 23 | if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { 24 | return nil, errors.New("lzma: dictCap out of range") 25 | } 26 | d = &decoderDict{buf: *newBuffer(dictCap)} 27 | return d, nil 28 | } 29 | 30 | // Reset clears the dictionary. The read buffer is not changed, so the 31 | // buffered data can still be read. 32 | func (d *decoderDict) Reset() { 33 | d.head = 0 34 | } 35 | 36 | // WriteByte writes a single byte into the dictionary. It is used to 37 | // write literals into the dictionary. 38 | func (d *decoderDict) WriteByte(c byte) error { 39 | if err := d.buf.WriteByte(c); err != nil { 40 | return err 41 | } 42 | d.head++ 43 | return nil 44 | } 45 | 46 | // pos returns the position of the dictionary head. 47 | func (d *decoderDict) pos() int64 { return d.head } 48 | 49 | // dictLen returns the actual length of the dictionary. 50 | func (d *decoderDict) dictLen() int { 51 | capacity := d.buf.Cap() 52 | if d.head >= int64(capacity) { 53 | return capacity 54 | } 55 | return int(d.head) 56 | } 57 | 58 | // byteAt returns a byte stored in the dictionary. If the distance is 59 | // non-positive or exceeds the current length of the dictionary the zero 60 | // byte is returned. 61 | func (d *decoderDict) byteAt(dist int) byte { 62 | if !(0 < dist && dist <= d.dictLen()) { 63 | return 0 64 | } 65 | i := d.buf.front - dist 66 | if i < 0 { 67 | i += len(d.buf.data) 68 | } 69 | return d.buf.data[i] 70 | } 71 | 72 | // writeMatch writes the match at the top of the dictionary. The given 73 | // distance must point in the current dictionary and the length must not 74 | // exceed the maximum length 273 supported in LZMA. 75 | // 76 | // The error value ErrNoSpace indicates that no space is available in 77 | // the dictionary for writing. You need to read from the dictionary 78 | // first. 
79 | func (d *decoderDict) writeMatch(dist int64, length int) error { 80 | if !(0 < dist && dist <= int64(d.dictLen())) { 81 | return errors.New("writeMatch: distance out of range") 82 | } 83 | if !(0 < length && length <= maxMatchLen) { 84 | return errors.New("writeMatch: length out of range") 85 | } 86 | if length > d.buf.Available() { 87 | return ErrNoSpace 88 | } 89 | d.head += int64(length) 90 | 91 | i := d.buf.front - int(dist) 92 | if i < 0 { 93 | i += len(d.buf.data) 94 | } 95 | for length > 0 { 96 | var p []byte 97 | if i >= d.buf.front { 98 | p = d.buf.data[i:] 99 | i = 0 100 | } else { 101 | p = d.buf.data[i:d.buf.front] 102 | i = d.buf.front 103 | } 104 | if len(p) > length { 105 | p = p[:length] 106 | } 107 | if _, err := d.buf.Write(p); err != nil { 108 | panic(fmt.Errorf("d.buf.Write returned error %s", err)) 109 | } 110 | length -= len(p) 111 | } 112 | return nil 113 | } 114 | 115 | // Write writes the given bytes into the dictionary and advances the 116 | // head. 117 | func (d *decoderDict) Write(p []byte) (n int, err error) { 118 | n, err = d.buf.Write(p) 119 | d.head += int64(n) 120 | return n, err 121 | } 122 | 123 | // Available returns the number of available bytes for writing into the 124 | // decoder dictionary. 125 | func (d *decoderDict) Available() int { return d.buf.Available() } 126 | 127 | // Read reads data from the buffer contained in the decoder dictionary. 128 | func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } 129 | 130 | // Buffered returns the number of bytes currently buffered in the 131 | // decoder dictionary. 132 | func (d *decoderDict) buffered() int { return d.buf.Buffered() } 133 | 134 | // Peek gets data from the buffer without advancing the rear index. 
135 | func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) } 136 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/directcodec.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | import "fmt" 8 | 9 | // directCodec allows the encoding and decoding of values with a fixed number 10 | // of bits. The number of bits must be in the range [1,32]. 11 | type directCodec byte 12 | 13 | // makeDirectCodec creates a directCodec. The function panics if the number of 14 | // bits is not in the range [1,32]. 15 | func makeDirectCodec(bits int) directCodec { 16 | if !(1 <= bits && bits <= 32) { 17 | panic(fmt.Errorf("bits=%d out of range", bits)) 18 | } 19 | return directCodec(bits) 20 | } 21 | 22 | // Bits returns the number of bits supported by this codec. 23 | func (dc directCodec) Bits() int { 24 | return int(dc) 25 | } 26 | 27 | // Encode uses the range encoder to encode a value with the fixed number of 28 | // bits. The most-significant bit is encoded first. 29 | func (dc directCodec) Encode(e *rangeEncoder, v uint32) error { 30 | for i := int(dc) - 1; i >= 0; i-- { 31 | if err := e.DirectEncodeBit(v >> uint(i)); err != nil { 32 | return err 33 | } 34 | } 35 | return nil 36 | } 37 | 38 | // Decode uses the range decoder to decode a value with the given number of 39 | // given bits. The most-significant bit is decoded first. 
40 | func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { 41 | for i := int(dc) - 1; i >= 0; i-- { 42 | x, err := d.DirectDecodeBit() 43 | if err != nil { 44 | return 0, err 45 | } 46 | v = (v << 1) | x 47 | } 48 | return v, nil 49 | } 50 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/distcodec.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | // Constants used by the distance codec. 8 | const ( 9 | // minimum supported distance 10 | minDistance = 1 11 | // maximum supported distance, value is used for the eos marker. 12 | maxDistance = 1 << 32 13 | // number of the supported len states 14 | lenStates = 4 15 | // start for the position models 16 | startPosModel = 4 17 | // first index with align bits support 18 | endPosModel = 14 19 | // bits for the position slots 20 | posSlotBits = 6 21 | // number of align bits 22 | alignBits = 4 23 | // maximum position slot 24 | maxPosSlot = 63 25 | ) 26 | 27 | // distCodec provides encoding and decoding of distance values. 28 | type distCodec struct { 29 | posSlotCodecs [lenStates]treeCodec 30 | posModel [endPosModel - startPosModel]treeReverseCodec 31 | alignCodec treeReverseCodec 32 | } 33 | 34 | // deepcopy initializes dc as deep copy of the source. 35 | func (dc *distCodec) deepcopy(src *distCodec) { 36 | if dc == src { 37 | return 38 | } 39 | for i := range dc.posSlotCodecs { 40 | dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) 41 | } 42 | for i := range dc.posModel { 43 | dc.posModel[i].deepcopy(&src.posModel[i]) 44 | } 45 | dc.alignCodec.deepcopy(&src.alignCodec) 46 | } 47 | 48 | // distBits returns the number of bits required to encode dist. 
49 | func distBits(dist uint32) int { 50 | if dist < startPosModel { 51 | return 6 52 | } 53 | // slot s > 3, dist d 54 | // s = 2(bits(d)-1) + bit(d, bits(d)-2) 55 | // s>>1 = bits(d)-1 56 | // bits(d) = 32-nlz32(d) 57 | // s>>1=31-nlz32(d) 58 | // n = 5 + (s>>1) = 36 - nlz32(d) 59 | return 36 - nlz32(dist) 60 | } 61 | 62 | // newDistCodec creates a new distance codec. 63 | func (dc *distCodec) init() { 64 | for i := range dc.posSlotCodecs { 65 | dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) 66 | } 67 | for i := range dc.posModel { 68 | posSlot := startPosModel + i 69 | bits := (posSlot >> 1) - 1 70 | dc.posModel[i] = makeTreeReverseCodec(bits) 71 | } 72 | dc.alignCodec = makeTreeReverseCodec(alignBits) 73 | } 74 | 75 | // lenState converts the value l to a supported lenState value. 76 | func lenState(l uint32) uint32 { 77 | if l >= lenStates { 78 | l = lenStates - 1 79 | } 80 | return l 81 | } 82 | 83 | // Encode encodes the distance using the parameter l. Dist can have values from 84 | // the full range of uint32 values. To get the distance offset the actual match 85 | // distance has to be decreased by 1. A distance offset of 0xffffffff (eos) 86 | // indicates the end of the stream. 
87 | func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { 88 | // Compute the posSlot using nlz32 89 | var posSlot uint32 90 | var bits uint32 91 | if dist < startPosModel { 92 | posSlot = dist 93 | } else { 94 | bits = uint32(30 - nlz32(dist)) 95 | posSlot = startPosModel - 2 + (bits << 1) 96 | posSlot += (dist >> uint(bits)) & 1 97 | } 98 | 99 | if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { 100 | return 101 | } 102 | 103 | switch { 104 | case posSlot < startPosModel: 105 | return nil 106 | case posSlot < endPosModel: 107 | tc := &dc.posModel[posSlot-startPosModel] 108 | return tc.Encode(dist, e) 109 | } 110 | dic := directCodec(bits - alignBits) 111 | if err = dic.Encode(e, dist>>alignBits); err != nil { 112 | return 113 | } 114 | return dc.alignCodec.Encode(dist, e) 115 | } 116 | 117 | // Decode decodes the distance offset using the parameter l. The dist value 118 | // 0xffffffff (eos) indicates the end of the stream. Add one to the distance 119 | // offset to get the actual match distance. 120 | func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { 121 | posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) 122 | if err != nil { 123 | return 124 | } 125 | 126 | // posSlot equals distance 127 | if posSlot < startPosModel { 128 | return posSlot, nil 129 | } 130 | 131 | // posSlot uses the individual models 132 | bits := (posSlot >> 1) - 1 133 | dist = (2 | (posSlot & 1)) << bits 134 | var u uint32 135 | if posSlot < endPosModel { 136 | tc := &dc.posModel[posSlot-startPosModel] 137 | if u, err = tc.Decode(d); err != nil { 138 | return 0, err 139 | } 140 | dist += u 141 | return dist, nil 142 | } 143 | 144 | // posSlots use direct encoding and a single model for the four align 145 | // bits. 
146 | dic := directCodec(bits - alignBits) 147 | if u, err = dic.Decode(d); err != nil { 148 | return 0, err 149 | } 150 | dist += u << alignBits 151 | if u, err = dc.alignCodec.Decode(d); err != nil { 152 | return 0, err 153 | } 154 | dist += u 155 | return dist, nil 156 | } 157 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/encoderdict.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | import ( 8 | "errors" 9 | "fmt" 10 | "io" 11 | ) 12 | 13 | // matcher is an interface that supports the identification of the next 14 | // operation. 15 | type matcher interface { 16 | io.Writer 17 | SetDict(d *encoderDict) 18 | NextOp(rep [4]uint32) operation 19 | } 20 | 21 | // encoderDict provides the dictionary of the encoder. It includes an 22 | // addtional buffer atop of the actual dictionary. 23 | type encoderDict struct { 24 | buf buffer 25 | m matcher 26 | head int64 27 | capacity int 28 | // preallocated array 29 | data [maxMatchLen]byte 30 | } 31 | 32 | // newEncoderDict creates the encoder dictionary. The argument bufSize 33 | // defines the size of the additional buffer. 34 | func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) { 35 | if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { 36 | return nil, errors.New( 37 | "lzma: dictionary capacity out of range") 38 | } 39 | if bufSize < 1 { 40 | return nil, errors.New( 41 | "lzma: buffer size must be larger than zero") 42 | } 43 | d = &encoderDict{ 44 | buf: *newBuffer(dictCap + bufSize), 45 | capacity: dictCap, 46 | m: m, 47 | } 48 | m.SetDict(d) 49 | return d, nil 50 | } 51 | 52 | // Discard discards n bytes. Note that n must not be larger than 53 | // MaxMatchLen. 
func (d *encoderDict) Discard(n int) {
	p := d.data[:n]
	k, _ := d.buf.Read(p)
	if k < n {
		// The caller must not discard more data than is buffered.
		panic(fmt.Errorf("lzma: can't discard %d bytes", n))
	}
	d.head += int64(n)
	// Feed the discarded bytes to the matcher so it can index them
	// for future match searches.
	d.m.Write(p)
}

// Len returns the data available in the encoder dictionary.
func (d *encoderDict) Len() int {
	n := d.buf.Available()
	// While the dictionary is still filling up, the head position
	// caps the usable length.
	if int64(n) > d.head {
		return int(d.head)
	}
	return n
}

// DictLen returns the actual length of data in the dictionary, capped
// by the dictionary capacity.
func (d *encoderDict) DictLen() int {
	if d.head < int64(d.capacity) {
		return int(d.head)
	}
	return d.capacity
}

// Available returns the number of bytes that can be written by a
// following Write call.
func (d *encoderDict) Available() int {
	return d.buf.Available() - d.DictLen()
}

// Write writes data into the dictionary buffer. Note that the position
// of the dictionary head will not be moved. If there is not enough
// space in the buffer ErrNoSpace will be returned.
func (d *encoderDict) Write(p []byte) (n int, err error) {
	m := d.Available()
	if len(p) > m {
		// Write only as much as fits and report the shortage.
		p = p[:m]
		err = ErrNoSpace
	}
	var e error
	if n, e = d.buf.Write(p); e != nil {
		err = e
	}
	return n, err
}

// Pos returns the position of the head.
func (d *encoderDict) Pos() int64 { return d.head }

// ByteAt returns the byte at the given distance. It returns 0 if the
// distance is outside the valid range (1..Len()).
func (d *encoderDict) ByteAt(distance int) byte {
	if !(0 < distance && distance <= d.Len()) {
		return 0
	}
	// Index backwards from the write position, wrapping around the
	// circular buffer if necessary.
	i := d.buf.rear - distance
	if i < 0 {
		i += len(d.buf.data)
	}
	return d.buf.data[i]
}

// CopyN copies the last n bytes from the dictionary into the provided
// writer. This is used for copying uncompressed data into an
// uncompressed segment.
121 | func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { 122 | if n <= 0 { 123 | return 0, nil 124 | } 125 | m := d.Len() 126 | if n > m { 127 | n = m 128 | err = ErrNoSpace 129 | } 130 | i := d.buf.rear - n 131 | var e error 132 | if i < 0 { 133 | i += len(d.buf.data) 134 | if written, e = w.Write(d.buf.data[i:]); e != nil { 135 | return written, e 136 | } 137 | i = 0 138 | } 139 | var k int 140 | k, e = w.Write(d.buf.data[i:d.buf.rear]) 141 | written += k 142 | if e != nil { 143 | err = e 144 | } 145 | return written, err 146 | } 147 | 148 | // Buffered returns the number of bytes in the buffer. 149 | func (d *encoderDict) Buffered() int { return d.buf.Buffered() } 150 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/fox.lzma: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nirhaas/gopacker/f33ba265006a5c47493c4da93a4895bf4a065ed3/vendor/github.com/ulikunitz/xz/lzma/fox.lzma -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/header.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | import ( 8 | "errors" 9 | "fmt" 10 | ) 11 | 12 | // uint32LE reads an uint32 integer from a byte slice 13 | func uint32LE(b []byte) uint32 { 14 | x := uint32(b[3]) << 24 15 | x |= uint32(b[2]) << 16 16 | x |= uint32(b[1]) << 8 17 | x |= uint32(b[0]) 18 | return x 19 | } 20 | 21 | // uint64LE converts the uint64 value stored as little endian to an uint64 22 | // value. 
23 | func uint64LE(b []byte) uint64 { 24 | x := uint64(b[7]) << 56 25 | x |= uint64(b[6]) << 48 26 | x |= uint64(b[5]) << 40 27 | x |= uint64(b[4]) << 32 28 | x |= uint64(b[3]) << 24 29 | x |= uint64(b[2]) << 16 30 | x |= uint64(b[1]) << 8 31 | x |= uint64(b[0]) 32 | return x 33 | } 34 | 35 | // putUint32LE puts an uint32 integer into a byte slice that must have at least 36 | // a length of 4 bytes. 37 | func putUint32LE(b []byte, x uint32) { 38 | b[0] = byte(x) 39 | b[1] = byte(x >> 8) 40 | b[2] = byte(x >> 16) 41 | b[3] = byte(x >> 24) 42 | } 43 | 44 | // putUint64LE puts the uint64 value into the byte slice as little endian 45 | // value. The byte slice b must have at least place for 8 bytes. 46 | func putUint64LE(b []byte, x uint64) { 47 | b[0] = byte(x) 48 | b[1] = byte(x >> 8) 49 | b[2] = byte(x >> 16) 50 | b[3] = byte(x >> 24) 51 | b[4] = byte(x >> 32) 52 | b[5] = byte(x >> 40) 53 | b[6] = byte(x >> 48) 54 | b[7] = byte(x >> 56) 55 | } 56 | 57 | // noHeaderSize defines the value of the length field in the LZMA header. 58 | const noHeaderSize uint64 = 1<<64 - 1 59 | 60 | // HeaderLen provides the length of the LZMA file header. 61 | const HeaderLen = 13 62 | 63 | // header represents the header of an LZMA file. 64 | type header struct { 65 | properties Properties 66 | dictCap int 67 | // uncompressed size; negative value if no size is given 68 | size int64 69 | } 70 | 71 | // marshalBinary marshals the header. 
72 | func (h *header) marshalBinary() (data []byte, err error) { 73 | if err = h.properties.verify(); err != nil { 74 | return nil, err 75 | } 76 | if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) { 77 | return nil, fmt.Errorf("lzma: DictCap %d out of range", 78 | h.dictCap) 79 | } 80 | 81 | data = make([]byte, 13) 82 | 83 | // property byte 84 | data[0] = h.properties.Code() 85 | 86 | // dictionary capacity 87 | putUint32LE(data[1:5], uint32(h.dictCap)) 88 | 89 | // uncompressed size 90 | var s uint64 91 | if h.size > 0 { 92 | s = uint64(h.size) 93 | } else { 94 | s = noHeaderSize 95 | } 96 | putUint64LE(data[5:], s) 97 | 98 | return data, nil 99 | } 100 | 101 | // unmarshalBinary unmarshals the header. 102 | func (h *header) unmarshalBinary(data []byte) error { 103 | if len(data) != HeaderLen { 104 | return errors.New("lzma.unmarshalBinary: data has wrong length") 105 | } 106 | 107 | // properties 108 | var err error 109 | if h.properties, err = PropertiesForCode(data[0]); err != nil { 110 | return err 111 | } 112 | 113 | // dictionary capacity 114 | h.dictCap = int(uint32LE(data[1:])) 115 | if h.dictCap < 0 { 116 | return errors.New( 117 | "LZMA header: dictionary capacity exceeds maximum " + 118 | "integer") 119 | } 120 | 121 | // uncompressed size 122 | s := uint64LE(data[5:]) 123 | if s == noHeaderSize { 124 | h.size = -1 125 | } else { 126 | h.size = int64(s) 127 | if h.size < 0 { 128 | return errors.New( 129 | "LZMA header: uncompressed size " + 130 | "out of int64 range") 131 | } 132 | } 133 | 134 | return nil 135 | } 136 | 137 | // validDictCap checks whether the dictionary capacity is correct. This 138 | // is used to weed out wrong file headers. 139 | func validDictCap(dictcap int) bool { 140 | if int64(dictcap) == MaxDictCap { 141 | return true 142 | } 143 | for n := uint(10); n < 32; n++ { 144 | if dictcap == 1<= 10 or 2^32-1. If 156 | // there is an explicit size it must not exceed 256 GiB. 
The length of 157 | // the data argument must be HeaderLen. 158 | func ValidHeader(data []byte) bool { 159 | var h header 160 | if err := h.unmarshalBinary(data); err != nil { 161 | return false 162 | } 163 | if !validDictCap(h.dictCap) { 164 | return false 165 | } 166 | return h.size < 0 || h.size <= 1<<38 167 | } 168 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | import "errors" 8 | 9 | // maxPosBits defines the number of bits of the position value that are used to 10 | // to compute the posState value. The value is used to select the tree codec 11 | // for length encoding and decoding. 12 | const maxPosBits = 4 13 | 14 | // minMatchLen and maxMatchLen give the minimum and maximum values for 15 | // encoding and decoding length values. minMatchLen is also used as base 16 | // for the encoded length values. 17 | const ( 18 | minMatchLen = 2 19 | maxMatchLen = minMatchLen + 16 + 256 - 1 20 | ) 21 | 22 | // lengthCodec support the encoding of the length value. 23 | type lengthCodec struct { 24 | choice [2]prob 25 | low [1 << maxPosBits]treeCodec 26 | mid [1 << maxPosBits]treeCodec 27 | high treeCodec 28 | } 29 | 30 | // deepcopy initializes the lc value as deep copy of the source value. 31 | func (lc *lengthCodec) deepcopy(src *lengthCodec) { 32 | if lc == src { 33 | return 34 | } 35 | lc.choice = src.choice 36 | for i := range lc.low { 37 | lc.low[i].deepcopy(&src.low[i]) 38 | } 39 | for i := range lc.mid { 40 | lc.mid[i].deepcopy(&src.mid[i]) 41 | } 42 | lc.high.deepcopy(&src.high) 43 | } 44 | 45 | // init initializes a new length codec. 
46 | func (lc *lengthCodec) init() { 47 | for i := range lc.choice { 48 | lc.choice[i] = probInit 49 | } 50 | for i := range lc.low { 51 | lc.low[i] = makeTreeCodec(3) 52 | } 53 | for i := range lc.mid { 54 | lc.mid[i] = makeTreeCodec(3) 55 | } 56 | lc.high = makeTreeCodec(8) 57 | } 58 | 59 | // lBits gives the number of bits used for the encoding of the l value 60 | // provided to the range encoder. 61 | func lBits(l uint32) int { 62 | switch { 63 | case l < 8: 64 | return 4 65 | case l < 16: 66 | return 5 67 | default: 68 | return 10 69 | } 70 | } 71 | 72 | // Encode encodes the length offset. The length offset l can be compute by 73 | // subtracting minMatchLen (2) from the actual length. 74 | // 75 | // l = length - minMatchLen 76 | // 77 | func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32, 78 | ) (err error) { 79 | if l > maxMatchLen-minMatchLen { 80 | return errors.New("lengthCodec.Encode: l out of range") 81 | } 82 | if l < 8 { 83 | if err = lc.choice[0].Encode(e, 0); err != nil { 84 | return 85 | } 86 | return lc.low[posState].Encode(e, l) 87 | } 88 | if err = lc.choice[0].Encode(e, 1); err != nil { 89 | return 90 | } 91 | if l < 16 { 92 | if err = lc.choice[1].Encode(e, 0); err != nil { 93 | return 94 | } 95 | return lc.mid[posState].Encode(e, l-8) 96 | } 97 | if err = lc.choice[1].Encode(e, 1); err != nil { 98 | return 99 | } 100 | if err = lc.high.Encode(e, l-16); err != nil { 101 | return 102 | } 103 | return nil 104 | } 105 | 106 | // Decode reads the length offset. Add minMatchLen to compute the actual length 107 | // to the length offset l. 
108 | func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32, 109 | ) (l uint32, err error) { 110 | var b uint32 111 | if b, err = lc.choice[0].Decode(d); err != nil { 112 | return 113 | } 114 | if b == 0 { 115 | l, err = lc.low[posState].Decode(d) 116 | return 117 | } 118 | if b, err = lc.choice[1].Decode(d); err != nil { 119 | return 120 | } 121 | if b == 0 { 122 | l, err = lc.mid[posState].Decode(d) 123 | l += 8 124 | return 125 | } 126 | l, err = lc.high.Decode(d) 127 | l += 16 128 | return 129 | } 130 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/literalcodec.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | // literalCodec supports the encoding of literal. It provides 768 probability 8 | // values per literal state. The upper 512 probabilities are used with the 9 | // context of a match bit. 10 | type literalCodec struct { 11 | probs []prob 12 | } 13 | 14 | // deepcopy initializes literal codec c as a deep copy of the source. 15 | func (c *literalCodec) deepcopy(src *literalCodec) { 16 | if c == src { 17 | return 18 | } 19 | c.probs = make([]prob, len(src.probs)) 20 | copy(c.probs, src.probs) 21 | } 22 | 23 | // init initializes the literal codec. 
24 | func (c *literalCodec) init(lc, lp int) { 25 | switch { 26 | case !(minLC <= lc && lc <= maxLC): 27 | panic("lc out of range") 28 | case !(minLP <= lp && lp <= maxLP): 29 | panic("lp out of range") 30 | } 31 | c.probs = make([]prob, 0x300<= 7 { 47 | m := uint32(match) 48 | for { 49 | matchBit := (m >> 7) & 1 50 | m <<= 1 51 | bit := (r >> 7) & 1 52 | r <<= 1 53 | i := ((1 + matchBit) << 8) | symbol 54 | if err = probs[i].Encode(e, bit); err != nil { 55 | return 56 | } 57 | symbol = (symbol << 1) | bit 58 | if matchBit != bit { 59 | break 60 | } 61 | if symbol >= 0x100 { 62 | break 63 | } 64 | } 65 | } 66 | for symbol < 0x100 { 67 | bit := (r >> 7) & 1 68 | r <<= 1 69 | if err = probs[symbol].Encode(e, bit); err != nil { 70 | return 71 | } 72 | symbol = (symbol << 1) | bit 73 | } 74 | return nil 75 | } 76 | 77 | // Decode decodes a literal byte using the range decoder as well as the LZMA 78 | // state, a match byte, and the literal state. 79 | func (c *literalCodec) Decode(d *rangeDecoder, 80 | state uint32, match byte, litState uint32, 81 | ) (s byte, err error) { 82 | k := litState * 0x300 83 | probs := c.probs[k : k+0x300] 84 | symbol := uint32(1) 85 | if state >= 7 { 86 | m := uint32(match) 87 | for { 88 | matchBit := (m >> 7) & 1 89 | m <<= 1 90 | i := ((1 + matchBit) << 8) | symbol 91 | bit, err := d.DecodeBit(&probs[i]) 92 | if err != nil { 93 | return 0, err 94 | } 95 | symbol = (symbol << 1) | bit 96 | if matchBit != bit { 97 | break 98 | } 99 | if symbol >= 0x100 { 100 | break 101 | } 102 | } 103 | } 104 | for symbol < 0x100 { 105 | bit, err := d.DecodeBit(&probs[symbol]) 106 | if err != nil { 107 | return 0, err 108 | } 109 | symbol = (symbol << 1) | bit 110 | } 111 | s = byte(symbol - 0x100) 112 | return s, nil 113 | } 114 | 115 | // minLC and maxLC define the range for LC values. 116 | const ( 117 | minLC = 0 118 | maxLC = 8 119 | ) 120 | 121 | // minLC and maxLC define the range for LP values. 
122 | const ( 123 | minLP = 0 124 | maxLP = 4 125 | ) 126 | 127 | // minState and maxState define a range for the state values stored in 128 | // the State values. 129 | const ( 130 | minState = 0 131 | maxState = 11 132 | ) 133 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | import "errors" 8 | 9 | // MatchAlgorithm identifies an algorithm to find matches in the 10 | // dictionary. 11 | type MatchAlgorithm byte 12 | 13 | // Supported matcher algorithms. 14 | const ( 15 | HashTable4 MatchAlgorithm = iota 16 | BinaryTree 17 | ) 18 | 19 | // maStrings are used by the String method. 20 | var maStrings = map[MatchAlgorithm]string{ 21 | HashTable4: "HashTable4", 22 | BinaryTree: "BinaryTree", 23 | } 24 | 25 | // String returns a string representation of the Matcher. 26 | func (a MatchAlgorithm) String() string { 27 | if s, ok := maStrings[a]; ok { 28 | return s 29 | } 30 | return "unknown" 31 | } 32 | 33 | var errUnsupportedMatchAlgorithm = errors.New( 34 | "lzma: unsupported match algorithm value") 35 | 36 | // verify checks whether the matcher value is supported. 
37 | func (a MatchAlgorithm) verify() error { 38 | if _, ok := maStrings[a]; !ok { 39 | return errUnsupportedMatchAlgorithm 40 | } 41 | return nil 42 | } 43 | 44 | func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) { 45 | switch a { 46 | case HashTable4: 47 | return newHashTable(dictCap, 4) 48 | case BinaryTree: 49 | return newBinTree(dictCap) 50 | } 51 | return nil, errUnsupportedMatchAlgorithm 52 | } 53 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/operation.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | import ( 8 | "errors" 9 | "fmt" 10 | "unicode" 11 | ) 12 | 13 | // operation represents an operation on the dictionary during encoding or 14 | // decoding. 15 | type operation interface { 16 | Len() int 17 | } 18 | 19 | // rep represents a repetition at the given distance and the given length 20 | type match struct { 21 | // supports all possible distance values, including the eos marker 22 | distance int64 23 | // length 24 | n int 25 | } 26 | 27 | // verify checks whether the match is valid. If that is not the case an 28 | // error is returned. 29 | func (m match) verify() error { 30 | if !(minDistance <= m.distance && m.distance <= maxDistance) { 31 | return errors.New("distance out of range") 32 | } 33 | if !(1 <= m.n && m.n <= maxMatchLen) { 34 | return errors.New("length out of range") 35 | } 36 | return nil 37 | } 38 | 39 | // l return the l-value for the match, which is the difference of length 40 | // n and 2. 41 | func (m match) l() uint32 { 42 | return uint32(m.n - minMatchLen) 43 | } 44 | 45 | // dist returns the dist value for the match, which is one less of the 46 | // distance stored in the match. 
47 | func (m match) dist() uint32 { 48 | return uint32(m.distance - minDistance) 49 | } 50 | 51 | // Len returns the number of bytes matched. 52 | func (m match) Len() int { 53 | return m.n 54 | } 55 | 56 | // String returns a string representation for the repetition. 57 | func (m match) String() string { 58 | return fmt.Sprintf("M{%d,%d}", m.distance, m.n) 59 | } 60 | 61 | // lit represents a single byte literal. 62 | type lit struct { 63 | b byte 64 | } 65 | 66 | // Len returns 1 for the single byte literal. 67 | func (l lit) Len() int { 68 | return 1 69 | } 70 | 71 | // String returns a string representation for the literal. 72 | func (l lit) String() string { 73 | var c byte 74 | if unicode.IsPrint(rune(l.b)) { 75 | c = l.b 76 | } else { 77 | c = '.' 78 | } 79 | return fmt.Sprintf("L{%c/%02x}", c, l.b) 80 | } 81 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/prob.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | // movebits defines the number of bits used for the updates of probability 8 | // values. 9 | const movebits = 5 10 | 11 | // probbits defines the number of bits of a probability value. 12 | const probbits = 11 13 | 14 | // probInit defines 0.5 as initial value for prob values. 15 | const probInit prob = 1 << (probbits - 1) 16 | 17 | // Type prob represents probabilities. The type can also be used to encode and 18 | // decode single bits. 19 | type prob uint16 20 | 21 | // Dec decreases the probability. The decrease is proportional to the 22 | // probability value. 23 | func (p *prob) dec() { 24 | *p -= *p >> movebits 25 | } 26 | 27 | // Inc increases the probability. 
The Increase is proportional to the 28 | // difference of 1 and the probability value. 29 | func (p *prob) inc() { 30 | *p += ((1 << probbits) - *p) >> movebits 31 | } 32 | 33 | // Computes the new bound for a given range using the probability value. 34 | func (p prob) bound(r uint32) uint32 { 35 | return (r >> probbits) * uint32(p) 36 | } 37 | 38 | // Bits returns 1. One is the number of bits that can be encoded or decoded 39 | // with a single prob value. 40 | func (p prob) Bits() int { 41 | return 1 42 | } 43 | 44 | // Encode encodes the least-significant bit of v. Note that the p value will be 45 | // changed. 46 | func (p *prob) Encode(e *rangeEncoder, v uint32) error { 47 | return e.EncodeBit(v, p) 48 | } 49 | 50 | // Decode decodes a single bit. Note that the p value will change. 51 | func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) { 52 | return d.DecodeBit(p) 53 | } 54 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/properties.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | import ( 8 | "errors" 9 | "fmt" 10 | ) 11 | 12 | // maximum and minimum values for the LZMA properties. 13 | const ( 14 | minPB = 0 15 | maxPB = 4 16 | ) 17 | 18 | // maxPropertyCode is the possible maximum of a properties code byte. 19 | const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1 20 | 21 | // Properties contains the parameters LC, LP and PB. The parameter LC 22 | // defines the number of literal context bits; parameter LP the number 23 | // of literal position bits and PB the number of position bits. 24 | type Properties struct { 25 | LC int 26 | LP int 27 | PB int 28 | } 29 | 30 | // String returns the properties in a string representation. 
31 | func (p *Properties) String() string { 32 | return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB) 33 | } 34 | 35 | // PropertiesForCode converts a properties code byte into a Properties value. 36 | func PropertiesForCode(code byte) (p Properties, err error) { 37 | if code > maxPropertyCode { 38 | return p, errors.New("lzma: invalid properties code") 39 | } 40 | p.LC = int(code % 9) 41 | code /= 9 42 | p.LP = int(code % 5) 43 | code /= 5 44 | p.PB = int(code % 5) 45 | return p, err 46 | } 47 | 48 | // verify checks the properties for correctness. 49 | func (p *Properties) verify() error { 50 | if p == nil { 51 | return errors.New("lzma: properties are nil") 52 | } 53 | if !(minLC <= p.LC && p.LC <= maxLC) { 54 | return errors.New("lzma: lc out of range") 55 | } 56 | if !(minLP <= p.LP && p.LP <= maxLP) { 57 | return errors.New("lzma: lp out of range") 58 | } 59 | if !(minPB <= p.PB && p.PB <= maxPB) { 60 | return errors.New("lzma: pb out of range") 61 | } 62 | return nil 63 | } 64 | 65 | // Code converts the properties to a byte. The function assumes that 66 | // the properties components are all in range. 67 | func (p Properties) Code() byte { 68 | return byte((p.PB*5+p.LP)*9 + p.LC) 69 | } 70 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/reader.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | // Package lzma supports the decoding and encoding of LZMA streams. 6 | // Reader and Writer support the classic LZMA format. Reader2 and 7 | // Writer2 support the decoding and encoding of LZMA2 streams. 8 | // 9 | // The package is written completely in Go and doesn't rely on any external 10 | // library. 
11 | package lzma 12 | 13 | import ( 14 | "errors" 15 | "io" 16 | ) 17 | 18 | // ReaderConfig stores the parameters for the reader of the classic LZMA 19 | // format. 20 | type ReaderConfig struct { 21 | DictCap int 22 | } 23 | 24 | // fill converts the zero values of the configuration to the default values. 25 | func (c *ReaderConfig) fill() { 26 | if c.DictCap == 0 { 27 | c.DictCap = 8 * 1024 * 1024 28 | } 29 | } 30 | 31 | // Verify checks the reader configuration for errors. Zero values will 32 | // be replaced by default values. 33 | func (c *ReaderConfig) Verify() error { 34 | c.fill() 35 | if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { 36 | return errors.New("lzma: dictionary capacity is out of range") 37 | } 38 | return nil 39 | } 40 | 41 | // Reader provides a reader for LZMA files or streams. 42 | type Reader struct { 43 | lzma io.Reader 44 | h header 45 | d *decoder 46 | } 47 | 48 | // NewReader creates a new reader for an LZMA stream using the classic 49 | // format. NewReader reads and checks the header of the LZMA stream. 50 | func NewReader(lzma io.Reader) (r *Reader, err error) { 51 | return ReaderConfig{}.NewReader(lzma) 52 | } 53 | 54 | // NewReader creates a new reader for an LZMA stream in the classic 55 | // format. The function reads and verifies the the header of the LZMA 56 | // stream. 
57 | func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { 58 | if err = c.Verify(); err != nil { 59 | return nil, err 60 | } 61 | data := make([]byte, HeaderLen) 62 | if _, err := io.ReadFull(lzma, data); err != nil { 63 | if err == io.EOF { 64 | return nil, errors.New("lzma: unexpected EOF") 65 | } 66 | return nil, err 67 | } 68 | r = &Reader{lzma: lzma} 69 | if err = r.h.unmarshalBinary(data); err != nil { 70 | return nil, err 71 | } 72 | if r.h.dictCap < MinDictCap { 73 | return nil, errors.New("lzma: dictionary capacity too small") 74 | } 75 | dictCap := r.h.dictCap 76 | if c.DictCap > dictCap { 77 | dictCap = c.DictCap 78 | } 79 | 80 | state := newState(r.h.properties) 81 | dict, err := newDecoderDict(dictCap) 82 | if err != nil { 83 | return nil, err 84 | } 85 | r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size) 86 | if err != nil { 87 | return nil, err 88 | } 89 | return r, nil 90 | } 91 | 92 | // EOSMarker indicates that an EOS marker has been encountered. 93 | func (r *Reader) EOSMarker() bool { 94 | return r.d.eosMarker 95 | } 96 | 97 | // Read returns uncompressed data. 98 | func (r *Reader) Read(p []byte) (n int, err error) { 99 | return r.d.Read(p) 100 | } 101 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/reader2.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | import ( 8 | "errors" 9 | "io" 10 | 11 | "github.com/ulikunitz/xz/internal/xlog" 12 | ) 13 | 14 | // Reader2Config stores the parameters for the LZMA2 reader. 15 | // format. 16 | type Reader2Config struct { 17 | DictCap int 18 | } 19 | 20 | // fill converts the zero values of the configuration to the default values. 
21 | func (c *Reader2Config) fill() { 22 | if c.DictCap == 0 { 23 | c.DictCap = 8 * 1024 * 1024 24 | } 25 | } 26 | 27 | // Verify checks the reader configuration for errors. Zero configuration values 28 | // will be replaced by default values. 29 | func (c *Reader2Config) Verify() error { 30 | c.fill() 31 | if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { 32 | return errors.New("lzma: dictionary capacity is out of range") 33 | } 34 | return nil 35 | } 36 | 37 | // Reader2 supports the reading of LZMA2 chunk sequences. Note that the 38 | // first chunk should have a dictionary reset and the first compressed 39 | // chunk a properties reset. The chunk sequence may not be terminated by 40 | // an end-of-stream chunk. 41 | type Reader2 struct { 42 | r io.Reader 43 | err error 44 | 45 | dict *decoderDict 46 | ur *uncompressedReader 47 | decoder *decoder 48 | chunkReader io.Reader 49 | 50 | cstate chunkState 51 | ctype chunkType 52 | } 53 | 54 | // NewReader2 creates a reader for an LZMA2 chunk sequence. 55 | func NewReader2(lzma2 io.Reader) (r *Reader2, err error) { 56 | return Reader2Config{}.NewReader2(lzma2) 57 | } 58 | 59 | // NewReader2 creates an LZMA2 reader using the given configuration. 60 | func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) { 61 | if err = c.Verify(); err != nil { 62 | return nil, err 63 | } 64 | r = &Reader2{r: lzma2, cstate: start} 65 | r.dict, err = newDecoderDict(c.DictCap) 66 | if err != nil { 67 | return nil, err 68 | } 69 | if err = r.startChunk(); err != nil { 70 | r.err = err 71 | } 72 | return r, nil 73 | } 74 | 75 | // uncompressed tests whether the chunk type specifies an uncompressed 76 | // chunk. 77 | func uncompressed(ctype chunkType) bool { 78 | return ctype == cU || ctype == cUD 79 | } 80 | 81 | // startChunk parses a new chunk. 
func (r *Reader2) startChunk() error {
	// Drop the reader of the previous chunk; it is replaced below.
	r.chunkReader = nil
	header, err := readChunkHeader(r.r)
	if err != nil {
		if err == io.EOF {
			// A chunk header must follow here; a bare EOF means the
			// stream was truncated.
			err = io.ErrUnexpectedEOF
		}
		return err
	}
	xlog.Debugf("chunk header %v", header)
	// Advance the chunk state machine; it rejects illegal chunk-type
	// sequences.
	if err = r.cstate.next(header.ctype); err != nil {
		return err
	}
	if r.cstate == stop {
		// End-of-stream chunk: report EOF to the caller.
		return io.EOF
	}
	if header.ctype == cUD || header.ctype == cLRND {
		// Chunk types with a dictionary reset clear the dictionary.
		r.dict.Reset()
	}
	// The header stores size-1; restore the actual uncompressed size.
	size := int64(header.uncompressed) + 1
	if uncompressed(header.ctype) {
		// Uncompressed chunk: reuse the existing uncompressed reader
		// if there is one, otherwise create it.
		if r.ur != nil {
			r.ur.Reopen(r.r, size)
		} else {
			r.ur = newUncompressedReader(r.r, r.dict, size)
		}
		r.chunkReader = r.ur
		return nil
	}
	// Compressed chunk: limit reads to the compressed payload, whose
	// length is also stored as size-1.
	br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1))
	if r.decoder == nil {
		// First compressed chunk: create the decoder with fresh state
		// from the chunk's properties.
		state := newState(header.props)
		r.decoder, err = newDecoder(br, state, r.dict, size)
		if err != nil {
			return err
		}
		r.chunkReader = r.decoder
		return nil
	}
	// Apply the state reset requested by the chunk type: cLR keeps the
	// properties, cLRN/cLRND install new ones.
	switch header.ctype {
	case cLR:
		r.decoder.State.Reset()
	case cLRN, cLRND:
		r.decoder.State = newState(header.props)
	}
	err = r.decoder.Reopen(br, size)
	if err != nil {
		return err
	}
	r.chunkReader = r.decoder
	return nil
}

// Read reads data from the LZMA2 chunk sequence.
136 | func (r *Reader2) Read(p []byte) (n int, err error) { 137 | if r.err != nil { 138 | return 0, r.err 139 | } 140 | for n < len(p) { 141 | var k int 142 | k, err = r.chunkReader.Read(p[n:]) 143 | n += k 144 | if err != nil { 145 | if err == io.EOF { 146 | err = r.startChunk() 147 | if err == nil { 148 | continue 149 | } 150 | } 151 | r.err = err 152 | return n, err 153 | } 154 | if k == 0 { 155 | r.err = errors.New("lzma: Reader2 doesn't get data") 156 | return n, r.err 157 | } 158 | } 159 | return n, nil 160 | } 161 | 162 | // EOS returns whether the LZMA2 stream has been terminated by an 163 | // end-of-stream chunk. 164 | func (r *Reader2) EOS() bool { 165 | return r.cstate == stop 166 | } 167 | 168 | // uncompressedReader is used to read uncompressed chunks. 169 | type uncompressedReader struct { 170 | lr io.LimitedReader 171 | Dict *decoderDict 172 | eof bool 173 | err error 174 | } 175 | 176 | // newUncompressedReader initializes a new uncompressedReader. 177 | func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { 178 | ur := &uncompressedReader{ 179 | lr: io.LimitedReader{R: r, N: size}, 180 | Dict: dict, 181 | } 182 | return ur 183 | } 184 | 185 | // Reopen reinitializes an uncompressed reader. 186 | func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { 187 | ur.err = nil 188 | ur.eof = false 189 | ur.lr = io.LimitedReader{R: r, N: size} 190 | } 191 | 192 | // fill reads uncompressed data into the dictionary. 193 | func (ur *uncompressedReader) fill() error { 194 | if !ur.eof { 195 | n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) 196 | if err != io.EOF { 197 | return err 198 | } 199 | ur.eof = true 200 | if n > 0 { 201 | return nil 202 | } 203 | } 204 | if ur.lr.N != 0 { 205 | return io.ErrUnexpectedEOF 206 | } 207 | return io.EOF 208 | } 209 | 210 | // Read reads uncompressed data from the limited reader. 
211 | func (ur *uncompressedReader) Read(p []byte) (n int, err error) { 212 | if ur.err != nil { 213 | return 0, ur.err 214 | } 215 | for { 216 | var k int 217 | k, err = ur.Dict.Read(p[n:]) 218 | n += k 219 | if n >= len(p) { 220 | return n, nil 221 | } 222 | if err != nil { 223 | break 224 | } 225 | err = ur.fill() 226 | if err != nil { 227 | break 228 | } 229 | } 230 | ur.err = err 231 | return n, err 232 | } 233 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/state.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | // states defines the overall state count 8 | const states = 12 9 | 10 | // State maintains the full state of the operation encoding or decoding 11 | // process. 12 | type state struct { 13 | rep [4]uint32 14 | isMatch [states << maxPosBits]prob 15 | isRepG0Long [states << maxPosBits]prob 16 | isRep [states]prob 17 | isRepG0 [states]prob 18 | isRepG1 [states]prob 19 | isRepG2 [states]prob 20 | litCodec literalCodec 21 | lenCodec lengthCodec 22 | repLenCodec lengthCodec 23 | distCodec distCodec 24 | state uint32 25 | posBitMask uint32 26 | Properties Properties 27 | } 28 | 29 | // initProbSlice initializes a slice of probabilities. 30 | func initProbSlice(p []prob) { 31 | for i := range p { 32 | p[i] = probInit 33 | } 34 | } 35 | 36 | // Reset sets all state information to the original values. 
37 | func (s *state) Reset() { 38 | p := s.Properties 39 | *s = state{ 40 | Properties: p, 41 | // dict: s.dict, 42 | posBitMask: (uint32(1) << uint(p.PB)) - 1, 43 | } 44 | initProbSlice(s.isMatch[:]) 45 | initProbSlice(s.isRep[:]) 46 | initProbSlice(s.isRepG0[:]) 47 | initProbSlice(s.isRepG1[:]) 48 | initProbSlice(s.isRepG2[:]) 49 | initProbSlice(s.isRepG0Long[:]) 50 | s.litCodec.init(p.LC, p.LP) 51 | s.lenCodec.init() 52 | s.repLenCodec.init() 53 | s.distCodec.init() 54 | } 55 | 56 | // initState initializes the state. 57 | func initState(s *state, p Properties) { 58 | *s = state{Properties: p} 59 | s.Reset() 60 | } 61 | 62 | // newState creates a new state from the give Properties. 63 | func newState(p Properties) *state { 64 | s := &state{Properties: p} 65 | s.Reset() 66 | return s 67 | } 68 | 69 | // deepcopy initializes s as a deep copy of the source. 70 | func (s *state) deepcopy(src *state) { 71 | if s == src { 72 | return 73 | } 74 | s.rep = src.rep 75 | s.isMatch = src.isMatch 76 | s.isRepG0Long = src.isRepG0Long 77 | s.isRep = src.isRep 78 | s.isRepG0 = src.isRepG0 79 | s.isRepG1 = src.isRepG1 80 | s.isRepG2 = src.isRepG2 81 | s.litCodec.deepcopy(&src.litCodec) 82 | s.lenCodec.deepcopy(&src.lenCodec) 83 | s.repLenCodec.deepcopy(&src.repLenCodec) 84 | s.distCodec.deepcopy(&src.distCodec) 85 | s.state = src.state 86 | s.posBitMask = src.posBitMask 87 | s.Properties = src.Properties 88 | } 89 | 90 | // cloneState creates a new clone of the give state. 91 | func cloneState(src *state) *state { 92 | s := new(state) 93 | s.deepcopy(src) 94 | return s 95 | } 96 | 97 | // updateStateLiteral updates the state for a literal. 98 | func (s *state) updateStateLiteral() { 99 | switch { 100 | case s.state < 4: 101 | s.state = 0 102 | return 103 | case s.state < 10: 104 | s.state -= 3 105 | return 106 | } 107 | s.state -= 6 108 | } 109 | 110 | // updateStateMatch updates the state for a match. 
111 | func (s *state) updateStateMatch() { 112 | if s.state < 7 { 113 | s.state = 7 114 | } else { 115 | s.state = 10 116 | } 117 | } 118 | 119 | // updateStateRep updates the state for a repetition. 120 | func (s *state) updateStateRep() { 121 | if s.state < 7 { 122 | s.state = 8 123 | } else { 124 | s.state = 11 125 | } 126 | } 127 | 128 | // updateStateShortRep updates the state for a short repetition. 129 | func (s *state) updateStateShortRep() { 130 | if s.state < 7 { 131 | s.state = 9 132 | } else { 133 | s.state = 11 134 | } 135 | } 136 | 137 | // states computes the states of the operation codec. 138 | func (s *state) states(dictHead int64) (state1, state2, posState uint32) { 139 | state1 = s.state 140 | posState = uint32(dictHead) & s.posBitMask 141 | state2 = (s.state << maxPosBits) | posState 142 | return 143 | } 144 | 145 | // litState computes the literal state. 146 | func (s *state) litState(prev byte, dictHead int64) uint32 { 147 | lp, lc := uint(s.Properties.LP), uint(s.Properties.LC) 148 | litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) | 149 | (uint32(prev) >> (8 - lc)) 150 | return litState 151 | } 152 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzma/treecodecs.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package lzma 6 | 7 | // treeCodec encodes or decodes values with a fixed bit size. It is using a 8 | // tree of probability value. The root of the tree is the most-significant bit. 9 | type treeCodec struct { 10 | probTree 11 | } 12 | 13 | // makeTreeCodec makes a tree codec. The bits value must be inside the range 14 | // [1,32]. 
15 | func makeTreeCodec(bits int) treeCodec { 16 | return treeCodec{makeProbTree(bits)} 17 | } 18 | 19 | // deepcopy initializes tc as a deep copy of the source. 20 | func (tc *treeCodec) deepcopy(src *treeCodec) { 21 | tc.probTree.deepcopy(&src.probTree) 22 | } 23 | 24 | // Encode uses the range encoder to encode a fixed-bit-size value. 25 | func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) { 26 | m := uint32(1) 27 | for i := int(tc.bits) - 1; i >= 0; i-- { 28 | b := (v >> uint(i)) & 1 29 | if err := e.EncodeBit(b, &tc.probs[m]); err != nil { 30 | return err 31 | } 32 | m = (m << 1) | b 33 | } 34 | return nil 35 | } 36 | 37 | // Decodes uses the range decoder to decode a fixed-bit-size value. Errors may 38 | // be caused by the range decoder. 39 | func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) { 40 | m := uint32(1) 41 | for j := 0; j < int(tc.bits); j++ { 42 | b, err := d.DecodeBit(&tc.probs[m]) 43 | if err != nil { 44 | return 0, err 45 | } 46 | m = (m << 1) | b 47 | } 48 | return m - (1 << uint(tc.bits)), nil 49 | } 50 | 51 | // treeReverseCodec is another tree codec, where the least-significant bit is 52 | // the start of the probability tree. 53 | type treeReverseCodec struct { 54 | probTree 55 | } 56 | 57 | // deepcopy initializes the treeReverseCodec as a deep copy of the 58 | // source. 59 | func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) { 60 | tc.probTree.deepcopy(&src.probTree) 61 | } 62 | 63 | // makeTreeReverseCodec creates treeReverseCodec value. The bits argument must 64 | // be in the range [1,32]. 65 | func makeTreeReverseCodec(bits int) treeReverseCodec { 66 | return treeReverseCodec{makeProbTree(bits)} 67 | } 68 | 69 | // Encode uses range encoder to encode a fixed-bit-size value. The range 70 | // encoder may cause errors. 
71 | func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) { 72 | m := uint32(1) 73 | for i := uint(0); i < uint(tc.bits); i++ { 74 | b := (v >> i) & 1 75 | if err := e.EncodeBit(b, &tc.probs[m]); err != nil { 76 | return err 77 | } 78 | m = (m << 1) | b 79 | } 80 | return nil 81 | } 82 | 83 | // Decodes uses the range decoder to decode a fixed-bit-size value. Errors 84 | // returned by the range decoder will be returned. 85 | func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) { 86 | m := uint32(1) 87 | for j := uint(0); j < uint(tc.bits); j++ { 88 | b, err := d.DecodeBit(&tc.probs[m]) 89 | if err != nil { 90 | return 0, err 91 | } 92 | m = (m << 1) | b 93 | v |= b << j 94 | } 95 | return v, nil 96 | } 97 | 98 | // probTree stores enough probability values to be used by the treeEncode and 99 | // treeDecode methods of the range coder types. 100 | type probTree struct { 101 | probs []prob 102 | bits byte 103 | } 104 | 105 | // deepcopy initializes the probTree value as a deep copy of the source. 106 | func (t *probTree) deepcopy(src *probTree) { 107 | if t == src { 108 | return 109 | } 110 | t.probs = make([]prob, len(src.probs)) 111 | copy(t.probs, src.probs) 112 | t.bits = src.bits 113 | } 114 | 115 | // makeProbTree initializes a probTree structure. 116 | func makeProbTree(bits int) probTree { 117 | if !(1 <= bits && bits <= 32) { 118 | panic("bits outside of range [1,32]") 119 | } 120 | t := probTree{ 121 | bits: byte(bits), 122 | probs: make([]prob, 1< 0 { 57 | c.SizeInHeader = true 58 | } 59 | if !c.SizeInHeader { 60 | c.EOSMarker = true 61 | } 62 | } 63 | 64 | // Verify checks WriterConfig for errors. Verify will replace zero 65 | // values with default values. 
66 | func (c *WriterConfig) Verify() error { 67 | c.fill() 68 | var err error 69 | if c == nil { 70 | return errors.New("lzma: WriterConfig is nil") 71 | } 72 | if c.Properties == nil { 73 | return errors.New("lzma: WriterConfig has no Properties set") 74 | } 75 | if err = c.Properties.verify(); err != nil { 76 | return err 77 | } 78 | if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { 79 | return errors.New("lzma: dictionary capacity is out of range") 80 | } 81 | if !(maxMatchLen <= c.BufSize) { 82 | return errors.New("lzma: lookahead buffer size too small") 83 | } 84 | if c.SizeInHeader { 85 | if c.Size < 0 { 86 | return errors.New("lzma: negative size not supported") 87 | } 88 | } else if !c.EOSMarker { 89 | return errors.New("lzma: EOS marker is required") 90 | } 91 | if err = c.Matcher.verify(); err != nil { 92 | return err 93 | } 94 | 95 | return nil 96 | } 97 | 98 | // header returns the header structure for this configuration. 99 | func (c *WriterConfig) header() header { 100 | h := header{ 101 | properties: *c.Properties, 102 | dictCap: c.DictCap, 103 | size: -1, 104 | } 105 | if c.SizeInHeader { 106 | h.size = c.Size 107 | } 108 | return h 109 | } 110 | 111 | // Writer writes an LZMA stream in the classic format. 112 | type Writer struct { 113 | h header 114 | bw io.ByteWriter 115 | buf *bufio.Writer 116 | e *encoder 117 | } 118 | 119 | // NewWriter creates a new LZMA writer for the classic format. The 120 | // method will write the header to the underlying stream. 
121 | func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) { 122 | if err = c.Verify(); err != nil { 123 | return nil, err 124 | } 125 | w = &Writer{h: c.header()} 126 | 127 | var ok bool 128 | w.bw, ok = lzma.(io.ByteWriter) 129 | if !ok { 130 | w.buf = bufio.NewWriter(lzma) 131 | w.bw = w.buf 132 | } 133 | state := newState(w.h.properties) 134 | m, err := c.Matcher.new(w.h.dictCap) 135 | if err != nil { 136 | return nil, err 137 | } 138 | dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m) 139 | if err != nil { 140 | return nil, err 141 | } 142 | var flags encoderFlags 143 | if c.EOSMarker { 144 | flags = eosMarker 145 | } 146 | if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil { 147 | return nil, err 148 | } 149 | 150 | if err = w.writeHeader(); err != nil { 151 | return nil, err 152 | } 153 | return w, nil 154 | } 155 | 156 | // NewWriter creates a new LZMA writer using the classic format. The 157 | // function writes the header to the underlying stream. 158 | func NewWriter(lzma io.Writer) (w *Writer, err error) { 159 | return WriterConfig{}.NewWriter(lzma) 160 | } 161 | 162 | // writeHeader writes the LZMA header into the stream. 163 | func (w *Writer) writeHeader() error { 164 | data, err := w.h.marshalBinary() 165 | if err != nil { 166 | return err 167 | } 168 | _, err = w.bw.(io.Writer).Write(data) 169 | return err 170 | } 171 | 172 | // Write puts data into the Writer. 173 | func (w *Writer) Write(p []byte) (n int, err error) { 174 | if w.h.size >= 0 { 175 | m := w.h.size 176 | m -= w.e.Compressed() + int64(w.e.dict.Buffered()) 177 | if m < 0 { 178 | m = 0 179 | } 180 | if m < int64(len(p)) { 181 | p = p[:m] 182 | err = ErrNoSpace 183 | } 184 | } 185 | var werr error 186 | if n, werr = w.e.Write(p); werr != nil { 187 | err = werr 188 | } 189 | return n, err 190 | } 191 | 192 | // Close closes the writer stream. It ensures that all data from the 193 | // buffer will be compressed and the LZMA stream will be finished. 
194 | func (w *Writer) Close() error { 195 | if w.h.size >= 0 { 196 | n := w.e.Compressed() + int64(w.e.dict.Buffered()) 197 | if n != w.h.size { 198 | return errSize 199 | } 200 | } 201 | err := w.e.Close() 202 | if w.buf != nil { 203 | ferr := w.buf.Flush() 204 | if err == nil { 205 | err = ferr 206 | } 207 | } 208 | return err 209 | } 210 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/lzmafilter.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014-2017 Ulrich Kunitz. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package xz 6 | 7 | import ( 8 | "errors" 9 | "fmt" 10 | "io" 11 | 12 | "github.com/ulikunitz/xz/lzma" 13 | ) 14 | 15 | // LZMA filter constants. 16 | const ( 17 | lzmaFilterID = 0x21 18 | lzmaFilterLen = 3 19 | ) 20 | 21 | // lzmaFilter declares the LZMA2 filter information stored in an xz 22 | // block header. 23 | type lzmaFilter struct { 24 | dictCap int64 25 | } 26 | 27 | // String returns a representation of the LZMA filter. 28 | func (f lzmaFilter) String() string { 29 | return fmt.Sprintf("LZMA dict cap %#x", f.dictCap) 30 | } 31 | 32 | // id returns the ID for the LZMA2 filter. 33 | func (f lzmaFilter) id() uint64 { return lzmaFilterID } 34 | 35 | // MarshalBinary converts the lzmaFilter in its encoded representation. 36 | func (f lzmaFilter) MarshalBinary() (data []byte, err error) { 37 | c := lzma.EncodeDictCap(f.dictCap) 38 | return []byte{lzmaFilterID, 1, c}, nil 39 | } 40 | 41 | // UnmarshalBinary unmarshals the given data representation of the LZMA2 42 | // filter. 
43 | func (f *lzmaFilter) UnmarshalBinary(data []byte) error { 44 | if len(data) != lzmaFilterLen { 45 | return errors.New("xz: data for LZMA2 filter has wrong length") 46 | } 47 | if data[0] != lzmaFilterID { 48 | return errors.New("xz: wrong LZMA2 filter id") 49 | } 50 | if data[1] != 1 { 51 | return errors.New("xz: wrong LZMA2 filter size") 52 | } 53 | dc, err := lzma.DecodeDictCap(data[2]) 54 | if err != nil { 55 | return errors.New("xz: wrong LZMA2 dictionary size property") 56 | } 57 | 58 | f.dictCap = dc 59 | return nil 60 | } 61 | 62 | // reader creates a new reader for the LZMA2 filter. 63 | func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader, 64 | err error) { 65 | 66 | config := new(lzma.Reader2Config) 67 | if c != nil { 68 | config.DictCap = c.DictCap 69 | } 70 | dc := int(f.dictCap) 71 | if dc < 1 { 72 | return nil, errors.New("xz: LZMA2 filter parameter " + 73 | "dictionary capacity overflow") 74 | } 75 | if dc > config.DictCap { 76 | config.DictCap = dc 77 | } 78 | 79 | fr, err = config.NewReader2(r) 80 | if err != nil { 81 | return nil, err 82 | } 83 | return fr, nil 84 | } 85 | 86 | // writeCloser creates a io.WriteCloser for the LZMA2 filter. 87 | func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig, 88 | ) (fw io.WriteCloser, err error) { 89 | config := new(lzma.Writer2Config) 90 | if c != nil { 91 | *config = lzma.Writer2Config{ 92 | Properties: c.Properties, 93 | DictCap: c.DictCap, 94 | BufSize: c.BufSize, 95 | Matcher: c.Matcher, 96 | } 97 | } 98 | 99 | dc := int(f.dictCap) 100 | if dc < 1 { 101 | return nil, errors.New("xz: LZMA2 filter parameter " + 102 | "dictionary capacity overflow") 103 | } 104 | if dc > config.DictCap { 105 | config.DictCap = dc 106 | } 107 | 108 | fw, err = config.NewWriter2(w) 109 | if err != nil { 110 | return nil, err 111 | } 112 | return fw, nil 113 | } 114 | 115 | // last returns true, because an LZMA2 filter must be the last filter in 116 | // the filter list. 
117 | func (f lzmaFilter) last() bool { return true } 118 | -------------------------------------------------------------------------------- /vendor/github.com/ulikunitz/xz/make-docs: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -x 4 | pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md 5 | pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md 6 | -------------------------------------------------------------------------------- /vendor/github.com/xi2/xz/AUTHORS: -------------------------------------------------------------------------------- 1 | # Package xz authors 2 | 3 | Michael Cross 4 | 5 | # XZ Embedded authors 6 | 7 | Lasse Collin 8 | Igor Pavlov 9 | -------------------------------------------------------------------------------- /vendor/github.com/xi2/xz/LICENSE: -------------------------------------------------------------------------------- 1 | Licensing of github.com/xi2/xz 2 | ============================== 3 | 4 | This Go package is a modified version of 5 | 6 | XZ Embedded 7 | 8 | The contents of the testdata directory are modified versions of 9 | the test files from 10 | 11 | XZ Utils 12 | 13 | All the files in this package have been written by Michael Cross, 14 | Lasse Collin and/or Igor PavLov. All these files have been put 15 | into the public domain. You can do whatever you want with these 16 | files. 17 | 18 | This software is provided "as is", without any warranty. 19 | -------------------------------------------------------------------------------- /vendor/github.com/xi2/xz/README.md: -------------------------------------------------------------------------------- 1 | # Xz 2 | 3 | Package xz implements XZ decompression natively in Go. 4 | 5 | Documentation at . 6 | 7 | Download and install with `go get github.com/xi2/xz`. 8 | 9 | If you need compression as well as decompression, you might want to 10 | look at . 
--------------------------------------------------------------------------------
/vendor/github.com/xi2/xz/dec_delta.go:
--------------------------------------------------------------------------------
/*
 * Delta decoder
 *
 * Author: Lasse Collin
 *
 * Translation to Go: Michael Cross
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
 */

package xz

// xzDecDelta holds the rolling history of the delta filter.
type xzDecDelta struct {
	delta    [256]byte // circular history buffer, indexed by pos
	pos      byte      // current write position in delta (wraps mod 256)
	distance int       // in range [1, 256]
}

/*
 * Decode raw stream which has a delta filter as the first filter.
 * Runs the chained decoder first, then applies the inverse delta
 * transform in place to the bytes it produced. The mutation order of
 * delta/pos is significant; byte addition wraps modulo 256 by design.
 */
func xzDecDeltaRun(s *xzDecDelta, b *xzBuf, chain func(*xzBuf) xzRet) xzRet {
	outStart := b.outPos
	ret := chain(b)
	for i := outStart; i < b.outPos; i++ {
		tmp := b.out[i] + s.delta[byte(s.distance+int(s.pos))]
		s.delta[s.pos] = tmp
		b.out[i] = tmp
		s.pos--
	}
	return ret
}

/*
 * Allocate memory for a delta decoder. xzDecDeltaReset must be used
 * before calling xzDecDeltaRun.
 */
func xzDecDeltaCreate() *xzDecDelta {
	return new(xzDecDelta)
}

/*
 * Returns xzOK if the given distance is valid. Otherwise
 * xzOptionsError is returned. On success the history buffer and
 * position are cleared.
 */
func xzDecDeltaReset(s *xzDecDelta, distance int) xzRet {
	if distance < 1 || distance > 256 {
		return xzOptionsError
	}
	s.delta = [256]byte{}
	s.pos = 0
	s.distance = distance
	return xzOK
}
--------------------------------------------------------------------------------
/vendor/github.com/xi2/xz/dec_util.go:
--------------------------------------------------------------------------------
/*
 * XZ decompressor utility functions
 *
 * Author: Michael Cross
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
8 | */ 9 | 10 | package xz 11 | 12 | func getLE32(buf []byte) uint32 { 13 | return uint32(buf[0]) | 14 | uint32(buf[1])<<8 | 15 | uint32(buf[2])<<16 | 16 | uint32(buf[3])<<24 17 | } 18 | 19 | func getBE32(buf []byte) uint32 { 20 | return uint32(buf[0])<<24 | 21 | uint32(buf[1])<<16 | 22 | uint32(buf[2])<<8 | 23 | uint32(buf[3]) 24 | } 25 | 26 | func putLE32(val uint32, buf []byte) { 27 | buf[0] = byte(val) 28 | buf[1] = byte(val >> 8) 29 | buf[2] = byte(val >> 16) 30 | buf[3] = byte(val >> 24) 31 | return 32 | } 33 | 34 | func putBE32(val uint32, buf []byte) { 35 | buf[0] = byte(val >> 24) 36 | buf[1] = byte(val >> 16) 37 | buf[2] = byte(val >> 8) 38 | buf[3] = byte(val) 39 | return 40 | } 41 | 42 | func putLE64(val uint64, buf []byte) { 43 | buf[0] = byte(val) 44 | buf[1] = byte(val >> 8) 45 | buf[2] = byte(val >> 16) 46 | buf[3] = byte(val >> 24) 47 | buf[4] = byte(val >> 32) 48 | buf[5] = byte(val >> 40) 49 | buf[6] = byte(val >> 48) 50 | buf[7] = byte(val >> 56) 51 | return 52 | } 53 | -------------------------------------------------------------------------------- /vendor/github.com/xi2/xz/dec_xz.go: -------------------------------------------------------------------------------- 1 | /* 2 | * XZ decompressor 3 | * 4 | * Authors: Lasse Collin 5 | * Igor Pavlov 6 | * 7 | * Translation to Go: Michael Cross 8 | * 9 | * This file has been put into the public domain. 10 | * You can do whatever you want with this file. 11 | */ 12 | 13 | package xz 14 | 15 | /* from linux/include/linux/xz.h **************************************/ 16 | 17 | /** 18 | * xzRet - Return codes 19 | * @xzOK: Everything is OK so far. More input or more 20 | * output space is required to continue. 21 | * @xzStreamEnd: Operation finished successfully. 22 | * @xzUnSupportedCheck: Integrity check type is not supported. Decoding 23 | * is still possible by simply calling xzDecRun 24 | * again. 
25 | * @xzMemlimitError: A bigger LZMA2 dictionary would be needed than 26 | * allowed by the dictMax argument given to 27 | * xzDecInit. 28 | * @xzFormatError: File format was not recognized (wrong magic 29 | * bytes). 30 | * @xzOptionsError: This implementation doesn't support the requested 31 | * compression options. In the decoder this means 32 | * that the header CRC32 matches, but the header 33 | * itself specifies something that we don't support. 34 | * @xzDataError: Compressed data is corrupt. 35 | * @xzBufError: Cannot make any progress. 36 | * 37 | * xzBufError is returned when two consecutive calls to XZ code cannot 38 | * consume any input and cannot produce any new output. This happens 39 | * when there is no new input available, or the output buffer is full 40 | * while at least one output byte is still pending. Assuming your code 41 | * is not buggy, you can get this error only when decoding a 42 | * compressed stream that is truncated or otherwise corrupt. 43 | */ 44 | type xzRet int 45 | 46 | const ( 47 | xzOK xzRet = iota 48 | xzStreamEnd 49 | xzUnsupportedCheck 50 | xzMemlimitError 51 | xzFormatError 52 | xzOptionsError 53 | xzDataError 54 | xzBufError 55 | ) 56 | 57 | /** 58 | * xzBuf - Passing input and output buffers to XZ code 59 | * @in: Input buffer. 60 | * @inPos: Current position in the input buffer. This must not exceed 61 | * input buffer size. 62 | * @out: Output buffer. 63 | * @outPos: Current position in the output buffer. This must not exceed 64 | * output buffer size. 65 | * 66 | * Only the contents of the output buffer from out[outPos] onward, and 67 | * the variables inPos and outPos are modified by the XZ code. 
68 | */ 69 | type xzBuf struct { 70 | in []byte 71 | inPos int 72 | out []byte 73 | outPos int 74 | } 75 | 76 | /* All XZ filter IDs */ 77 | type xzFilterID int64 78 | 79 | const ( 80 | idDelta xzFilterID = 0x03 81 | idBCJX86 xzFilterID = 0x04 82 | idBCJPowerPC xzFilterID = 0x05 83 | idBCJIA64 xzFilterID = 0x06 84 | idBCJARM xzFilterID = 0x07 85 | idBCJARMThumb xzFilterID = 0x08 86 | idBCJSPARC xzFilterID = 0x09 87 | idLZMA2 xzFilterID = 0x21 88 | ) 89 | 90 | // CheckID is the type of the data integrity check in an XZ stream 91 | // calculated from the uncompressed data. 92 | type CheckID int 93 | 94 | func (id CheckID) String() string { 95 | switch id { 96 | case CheckNone: 97 | return "None" 98 | case CheckCRC32: 99 | return "CRC32" 100 | case CheckCRC64: 101 | return "CRC64" 102 | case CheckSHA256: 103 | return "SHA256" 104 | default: 105 | return "Unknown" 106 | } 107 | } 108 | 109 | const ( 110 | CheckNone CheckID = 0x00 111 | CheckCRC32 CheckID = 0x01 112 | CheckCRC64 CheckID = 0x04 113 | CheckSHA256 CheckID = 0x0A 114 | checkMax CheckID = 0x0F 115 | checkUnset CheckID = -1 116 | ) 117 | 118 | // An XZ stream contains a stream header which holds information about 119 | // the stream. That information is exposed as fields of the 120 | // Reader. Currently it contains only the stream's data integrity 121 | // check type. 122 | type Header struct { 123 | CheckType CheckID // type of the stream's data integrity check 124 | } 125 | -------------------------------------------------------------------------------- /vendor/github.com/xi2/xz/doc.go: -------------------------------------------------------------------------------- 1 | // Package xz implements XZ decompression natively in Go. 2 | // 3 | // Usage 4 | // 5 | // For ease of use, this package is designed to have a similar API to 6 | // compress/gzip. See the examples for further details. 
7 | // 8 | // Implementation 9 | // 10 | // This package is a translation from C to Go of XZ Embedded 11 | // (http://tukaani.org/xz/embedded.html) with enhancements made so as 12 | // to implement all mandatory and optional parts of the XZ file format 13 | // specification v1.0.4. It supports all filters and block check 14 | // types, supports multiple streams, and performs index verification 15 | // using SHA-256 as recommended by the specification. 16 | // 17 | // Speed 18 | // 19 | // On the author's Intel Ivybridge i5, decompression speed is about 20 | // half that of the standard XZ Utils (tested with a recent linux 21 | // kernel tarball). 22 | // 23 | // Thanks 24 | // 25 | // Thanks are due to Lasse Collin and Igor Pavlov, the authors of XZ 26 | // Embedded, on whose code package xz is based. It would not exist 27 | // without their decision to allow others to modify and reuse their 28 | // code. 29 | // 30 | // Bug reports 31 | // 32 | // For bug reports relating to this package please contact the author 33 | // through https://github.com/xi2/xz/issues, and not the authors of XZ 34 | // Embedded. 35 | package xz 36 | -------------------------------------------------------------------------------- /vendor/modules.txt: -------------------------------------------------------------------------------- 1 | # github.com/klauspost/compress v1.7.1 2 | github.com/klauspost/compress/zstd 3 | github.com/klauspost/compress/huff0 4 | github.com/klauspost/compress/snappy 5 | github.com/klauspost/compress/zstd/internal/xxhash 6 | github.com/klauspost/compress/fse 7 | # github.com/pkg/errors v0.8.1 8 | github.com/pkg/errors 9 | # github.com/ulikunitz/xz v0.5.6 10 | github.com/ulikunitz/xz 11 | github.com/ulikunitz/xz/internal/xlog 12 | github.com/ulikunitz/xz/lzma 13 | github.com/ulikunitz/xz/internal/hash 14 | # github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 15 | github.com/xi2/xz 16 | --------------------------------------------------------------------------------