├── vendor
├── src
│ └── github.com
│ │ ├── Redundancy
│ │ └── go-sync
│ │ │ ├── acceptancetests
│ │ │ ├── nulltest.sh
│ │ │ ├── run.sh
│ │ │ ├── truncation_test.sh
│ │ │ └── truncated_test.sh
│ │ │ ├── util
│ │ │ └── readers
│ │ │ │ ├── sequence_test.go
│ │ │ │ ├── readers.go
│ │ │ │ ├── sequencelimit.go
│ │ │ │ ├── injectedreader.go
│ │ │ │ ├── nonrepeating.go
│ │ │ │ ├── uniformreader.go
│ │ │ │ ├── nonrepeating_test.go
│ │ │ │ └── uniform_test.go
│ │ │ ├── blocksources
│ │ │ ├── sortableBlockResponse.go
│ │ │ ├── readseeker_blocksource_test.go
│ │ │ ├── readseeker_blocksource.go
│ │ │ ├── fixed_size_block_resolver.go
│ │ │ ├── helpers.go
│ │ │ ├── httpblocksource.go
│ │ │ ├── fixed_size_block_resolver_test.go
│ │ │ ├── httpblocksource_test.go
│ │ │ └── blocksourcebase_test.go
│ │ │ ├── gosync.go
│ │ │ ├── filechecksum
│ │ │ ├── verifier.go
│ │ │ ├── verifier_test.go
│ │ │ └── filechecksum.go
│ │ │ ├── gosync
│ │ │ ├── fileformat.md
│ │ │ ├── main.go
│ │ │ ├── build.go
│ │ │ ├── patch.go
│ │ │ ├── diff.go
│ │ │ └── common.go
│ │ │ ├── LICENSE
│ │ │ ├── comparer
│ │ │ ├── comparer_bench_test.go
│ │ │ └── comparer.go
│ │ │ ├── indexbuilder
│ │ │ └── indexbuilder.go
│ │ │ ├── patcher
│ │ │ ├── blocksource.go
│ │ │ └── sequential
│ │ │ │ ├── sequential_test.go
│ │ │ │ └── sequential.go
│ │ │ ├── chunks
│ │ │ └── chunks.go
│ │ │ ├── rollsum
│ │ │ ├── rollsum_32_base.go
│ │ │ ├── rollsum_32.go
│ │ │ └── rollsum_32_test.go
│ │ │ ├── gosync_test.go
│ │ │ ├── index
│ │ │ ├── index_bench_test.go
│ │ │ ├── index.go
│ │ │ └── index_test.go
│ │ │ ├── README.md
│ │ │ ├── circularbuffer
│ │ │ ├── noalloc.go
│ │ │ └── noalloc_test.go
│ │ │ └── http_test.go
│ │ ├── codegangsta
│ │ └── cli
│ │ │ ├── autocomplete
│ │ │ ├── zsh_autocomplete
│ │ │ └── bash_autocomplete
│ │ │ ├── appveyor.yml
│ │ │ ├── altsrc
│ │ │ ├── helpers_test.go
│ │ │ ├── input_source_context.go
│ │ │ ├── yaml_file_loader.go
│ │ │ ├── map_input_source.go
│ │ │ └── yaml_command_test.go
│ │ │ ├── helpers_test.go
│ │ │ ├── cli.go
│ │ │ ├── LICENSE
│ │ │ ├── help_test.go
│ │ │ ├── command_test.go
│ │ │ ├── context_test.go
│ │ │ ├── help.go
│ │ │ └── command.go
│ │ └── petar
│ │ └── GoLLRB
│ │ └── llrb
│ │ ├── util.go
│ │ ├── avgvar.go
│ │ ├── llrb-stats.go
│ │ ├── iterator_test.go
│ │ ├── iterator.go
│ │ └── llrb_test.go
└── manifest
├── README.md
├── .gitignore
├── src
└── gosync
│ ├── fileformat.md
│ ├── main.go
│ ├── build.go
│ ├── patch.go
│ ├── diff.go
│ └── common.go
└── LICENSE
/vendor/src/github.com/Redundancy/go-sync/acceptancetests/nulltest.sh: -------------------------------------------------------------------------------- 1 | echo "ok!" 
-------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/util/readers/sequence_test.go: -------------------------------------------------------------------------------- 1 | package readers 2 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete: -------------------------------------------------------------------------------- 1 | autoload -U compinit && compinit 2 | autoload -U bashcompinit && bashcompinit 3 | 4 | script_dir=$(dirname $0) 5 | source ${script_dir}/bash_autocomplete 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # gosync-cmd 2 | Command-line tool based on gosync 3 | 4 | This forks the CLI code that was originally in https://github.com/Redundancy/go-sync 5 | In order to separate the library from the tool, and to bring proper vendoring to the tool. 6 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/util/readers/readers.go: -------------------------------------------------------------------------------- 1 | /* 2 | util/readers exists to provide convenient and composable io.Reader compatible streams to allow testing 3 | without having to check in large binary files. 4 | 5 | */ 6 | package readers 7 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/appveyor.yml: -------------------------------------------------------------------------------- 1 | version: "{build}" 2 | 3 | os: Windows Server 2012 R2 4 | 5 | install: 6 | - go version 7 | - go env 8 | 9 | build_script: 10 | - cd %APPVEYOR_BUILD_FOLDER% 11 | - go vet ./... 12 | - go test -v ./... 
13 | 14 | test: off 15 | 16 | deploy: off 17 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/util/readers/sequencelimit.go: -------------------------------------------------------------------------------- 1 | package readers 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | // read from 'readers' in sequence up to a limit of 'size' 8 | func SequenceLimit(size int64, readers ...io.Reader) io.Reader { 9 | return io.LimitReader( 10 | io.MultiReader(readers...), 11 | size) 12 | } 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | 20 | _testmain.go 21 | 22 | *.exe 23 | *.test 24 | *.prof 25 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/util/readers/injectedreader.go: -------------------------------------------------------------------------------- 1 | package readers 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | // Injects the second reader into the first at an offset 8 | func InjectedReader( 9 | offsetFromStart int64, 10 | base io.Reader, 11 | inject io.Reader, 12 | ) io.Reader { 13 | return io.MultiReader( 14 | io.LimitReader(base, offsetFromStart), 15 | inject, 16 | base, 17 | ) 18 | } 19 | -------------------------------------------------------------------------------- /vendor/src/github.com/petar/GoLLRB/llrb/util.go: -------------------------------------------------------------------------------- 1 | // Copyright 2010 Petar Maymounkov. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package llrb 6 | 7 | type Int int 8 | 9 | func (x Int) Less(than Item) bool { 10 | return x < than.(Int) 11 | } 12 | 13 | type String string 14 | 15 | func (x String) Less(than Item) bool { 16 | return x < than.(String) 17 | } 18 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | 3 | : ${PROG:=$(basename ${BASH_SOURCE})} 4 | 5 | _cli_bash_autocomplete() { 6 | local cur opts base 7 | COMPREPLY=() 8 | cur="${COMP_WORDS[COMP_CWORD]}" 9 | opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion ) 10 | COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) 11 | return 0 12 | } 13 | 14 | complete -F _cli_bash_autocomplete $PROG 15 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/blocksources/sortableBlockResponse.go: -------------------------------------------------------------------------------- 1 | package blocksources 2 | 3 | import ( 4 | "github.com/Redundancy/go-sync/patcher" 5 | ) 6 | 7 | type PendingResponses []patcher.BlockReponse 8 | 9 | func (r PendingResponses) Len() int { 10 | return len(r) 11 | } 12 | 13 | func (r PendingResponses) Swap(i, j int) { 14 | r[i], r[j] = r[j], r[i] 15 | } 16 | 17 | func (r PendingResponses) Less(i, j int) bool { 18 | return r[i].StartBlock < r[j].StartBlock 19 | } 20 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/acceptancetests/run.sh: -------------------------------------------------------------------------------- 1 | counter=1 2 | for f in $TRAVIS_BUILD_DIR/acceptancetests/*test.sh 3 | do 4 | [ ! -f "$f" ] && continue 5 | 6 | echo "Running acceptance test $f" 7 | echo 'travis_fold:start:test_output$counter' 8 | sh $f 9 | rc=$? 10 | echo 'travis_fold:end:test_output$counter' 11 | 12 | if [ $rc != 0 ]; then 13 | echo "Test Failed" 14 | exit $rc 15 | fi 16 | 17 | echo "Test Passed" 18 | counter=`expr $counter + 1` 19 | done 20 | 21 | 22 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/acceptancetests/truncation_test.sh: -------------------------------------------------------------------------------- 1 | echo "Testing truncating a file too long" 2 | wget -q https://s3-eu-west-1.amazonaws.com/gosync-test/0.1.2/gosync.exe -O test.exe 3 | cp test.exe compare.exe 4 | gosync b test.exe 5 | truncate -s 10000000 test.exe 6 | gosync p test.exe test.gosync https://s3-eu-west-1.amazonaws.com/gosync-test/0.1.2/gosync.exe 7 | diff -q test.exe compare.exe 8 | rc=$? 9 | if [ $rc != 0 ]; then 10 | gosync -version 11 | ls -l compare.exe 12 | ls -l test.exe 13 | exit $rc 14 | fi -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/acceptancetests/truncated_test.sh: -------------------------------------------------------------------------------- 1 | echo "Testing filling in a truncated file" 2 | wget -q https://s3-eu-west-1.amazonaws.com/gosync-test/0.1.2/gosync.exe -O test.exe 3 | cp test.exe compare.exe 4 | gosync b test.exe 5 | truncate -s 5000000 test.exe 6 | gosync p test.exe test.gosync https://s3-eu-west-1.amazonaws.com/gosync-test/0.1.2/gosync.exe 7 | diff -q test.exe compare.exe 8 | rc=$? 
9 | if [ $rc != 0 ]; then 10 | gosync -version 11 | ls -l compare.exe 12 | ls -l test.exe 13 | exit $rc 14 | fi 15 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/altsrc/helpers_test.go: -------------------------------------------------------------------------------- 1 | package altsrc 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | func expect(t *testing.T, a interface{}, b interface{}) { 9 | if !reflect.DeepEqual(b, a) { 10 | t.Errorf("Expected %#v (type %v) - Got %#v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) 11 | } 12 | } 13 | 14 | func refute(t *testing.T, a interface{}, b interface{}) { 15 | if a == b { 16 | t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/helpers_test.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | /* Test Helpers */ 9 | func expect(t *testing.T, a interface{}, b interface{}) { 10 | if !reflect.DeepEqual(a, b) { 11 | t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) 12 | } 13 | } 14 | 15 | func refute(t *testing.T, a interface{}, b interface{}) { 16 | if reflect.DeepEqual(a, b) { 17 | t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/altsrc/input_source_context.go: -------------------------------------------------------------------------------- 1 | package altsrc 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/codegangsta/cli" 7 | ) 8 | 9 | // InputSourceContext is an interface used to allow 10 | // other input sources to be implemented as needed. 
11 | type InputSourceContext interface { 12 | Int(name string) (int, error) 13 | Duration(name string) (time.Duration, error) 14 | Float64(name string) (float64, error) 15 | String(name string) (string, error) 16 | StringSlice(name string) ([]string, error) 17 | IntSlice(name string) ([]int, error) 18 | Generic(name string) (cli.Generic, error) 19 | Bool(name string) (bool, error) 20 | BoolT(name string) (bool, error) 21 | } 22 | -------------------------------------------------------------------------------- /vendor/manifest: -------------------------------------------------------------------------------- 1 | { 2 | "version": 0, 3 | "dependencies": [ 4 | { 5 | "importpath": "github.com/Redundancy/go-sync", 6 | "repository": "https://github.com/Redundancy/go-sync", 7 | "revision": "aff5b3b10f5934554a1fcfdc089ca13830ec1f35", 8 | "branch": "master" 9 | }, 10 | { 11 | "importpath": "github.com/codegangsta/cli", 12 | "repository": "https://github.com/codegangsta/cli", 13 | "revision": "aca5b047ed14d17224157c3434ea93bf6cdaadee", 14 | "branch": "master" 15 | }, 16 | { 17 | "importpath": "github.com/petar/GoLLRB/llrb", 18 | "repository": "https://github.com/petar/GoLLRB", 19 | "revision": "53be0d36a84c2a886ca057d34b6aa4468df9ccb4", 20 | "branch": "master", 21 | "path": "/llrb" 22 | } 23 | ] 24 | } -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/blocksources/readseeker_blocksource_test.go: -------------------------------------------------------------------------------- 1 | package blocksources 2 | 3 | import ( 4 | "bytes" 5 | "github.com/Redundancy/go-sync/patcher" 6 | "testing" 7 | ) 8 | 9 | const STRING_DATA = "abcdefghijklmnopqrst" 10 | 11 | var BYTE_BLOCK_SOURCE = NewReadSeekerBlockSource( 12 | bytes.NewReader( 13 | []byte(STRING_DATA), 14 | ), 15 | MakeNullFixedSizeResolver(4), 16 | ) 17 | 18 | func TestReadFirstBlock(t *testing.T) { 19 | BYTE_BLOCK_SOURCE.RequestBlocks( 20 | patcher.MissingBlockSpan{ 21 | BlockSize: 4, 22 | StartBlock: 0, 23 | EndBlock: 0, 24 | }, 25 | ) 26 | 27 | result := <-BYTE_BLOCK_SOURCE.GetResultChannel() 28 | 29 | if result.StartBlock != 0 { 30 | t.Errorf("Wrong start block: %v", result.StartBlock) 31 | } 32 | 33 | EXPECTED := STRING_DATA[:4] 34 | 35 | if bytes.Compare(result.Data, []byte(EXPECTED)) != 0 { 36 | t.Errorf( 37 | "Unexpected result data: \"%v\" expected: \"%v\"", 38 | string(result.Data), 39 | EXPECTED, 40 | ) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/util/readers/nonrepeating.go: -------------------------------------------------------------------------------- 1 | package readers 2 | 3 | import ( 4 | "encoding/binary" 5 | "io" 6 | ) 7 | 8 | const nonRepeatingModulo = 87178291199 9 | const nonRepeatingIncrement = 17180131327 10 | 11 | // *should* produce a non-repeating sequence of bytes in a deterministic fashion 12 | // use io.LimitReader to limit it to a specific length 13 | type nonRepeatingSequenceReader struct { 14 | value int 15 | } 16 | 17 | func NewNonRepeatingSequence(i int) io.Reader { 18 | return &nonRepeatingSequenceReader{i} 19 | } 20 | 21 | func NewSizedNonRepeatingSequence(i int, s int64) io.Reader { 22 | return io.LimitReader(NewNonRepeatingSequence(i), s) 23 | } 24 | 25 | func (r *nonRepeatingSequenceReader) Read(p []byte) (n int, err error) { 26 | lenp := len(p) 27 | b := []byte{1, 2, 3, 4} 28 | 29 | for i := 0; i < lenp; i++ { 30 | binary.LittleEndian.PutUint32(b, 
uint32(r.value)) 31 | p[i] = b[0] 32 | r.value = (r.value + nonRepeatingIncrement) % nonRepeatingModulo 33 | } 34 | return lenp, nil 35 | } 36 | -------------------------------------------------------------------------------- /src/gosync/fileformat.md: -------------------------------------------------------------------------------- 1 | *NB: I'm documenting the current format of the gosync files merely as a point in time 2 | reference of format that is in use in the tool that is meant to serve as a practical reference and acceptance testing tool. The gosync tool is not intended as a production-worthy, well supported, tested tool.* 3 | 4 | *The format used exists entirely in service of being able to test the implementation of the gosync library as a cohesive whole in the real world, and therefore backwards and forwards compatibility (or even efficiency) are not primary concerns.* 5 | 6 | # Version 0.2.0 7 | ### The header 8 | (LE = little endian) 9 | * The string "G0S9NC" in UTF-8 10 | * versions*3 (eg. 0.1.2), uint16 LE 11 | * filesize, int64 LE 12 | * blocksize uint32 LE 13 | 14 | ### The body 15 | Repeating: 16 | * WeakChecksum 17 | * StrongChecksum 18 | 19 | each referring to blocks, starting at 0 (file start) and going upwards. 20 | 21 | In the current implementation of the FileChecksumGenerator used the WeakChecksum is the rolling checksum (4 bytes), and StrongChecksum is MD5 (16 bytes). 22 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/gosync.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package gosync is inspired by zsync, and rsync. It aims to take the fundamentals and create a very flexible library that can be adapted 3 | to work in many ways. 4 | 5 | We rely heavily on built in Go abstractions like io.Reader, hash.Hash and our own interfaces - this makes the code easier to change, and to test. 6 | In particular, no part of the core library should know anything about the transport or layout of the reference data. If you want 7 | to do rsync and do http/https range requests, that's just as good as zsync client-server over an SSH tunnel. The goal is also to allow 8 | support for multiple concurrent connections, so that you can make the best use of your line in the face of the bandwidth latency product 9 | (or other concerns that require concurrency to solve). 10 | 11 | The following optimizations are possible: 12 | * Generate hashes with multiple threads (both during reference generation and local file interrogation) 13 | * Multiple ranged requests (can even be used to get the hashes) 14 | 15 | */ 16 | package gosync 17 | -------------------------------------------------------------------------------- /vendor/src/github.com/petar/GoLLRB/llrb/avgvar.go: -------------------------------------------------------------------------------- 1 | // Copyright 2010 Petar Maymounkov. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package llrb 6 | 7 | import "math" 8 | 9 | // avgVar maintains the average and variance of a stream of numbers 10 | // in a space-efficient manner. 
11 | type avgVar struct { 12 | count int64 13 | sum, sumsq float64 14 | } 15 | 16 | func (av *avgVar) Init() { 17 | av.count = 0 18 | av.sum = 0.0 19 | av.sumsq = 0.0 20 | } 21 | 22 | func (av *avgVar) Add(sample float64) { 23 | av.count++ 24 | av.sum += sample 25 | av.sumsq += sample * sample 26 | } 27 | 28 | func (av *avgVar) GetCount() int64 { return av.count } 29 | 30 | func (av *avgVar) GetAvg() float64 { return av.sum / float64(av.count) } 31 | 32 | func (av *avgVar) GetTotal() float64 { return av.sum } 33 | 34 | func (av *avgVar) GetVar() float64 { 35 | a := av.GetAvg() 36 | return av.sumsq/float64(av.count) - a*a 37 | } 38 | 39 | func (av *avgVar) GetStdDev() float64 { return math.Sqrt(av.GetVar()) } 40 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/util/readers/uniformreader.go: -------------------------------------------------------------------------------- 1 | package readers 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | // Reads a continuous stream of bytes with the same value, up to length 8 | type uniformReader struct { 9 | value byte 10 | length int 11 | read int 12 | } 13 | 14 | func (r *uniformReader) Read(p []byte) (n int, err error) { 15 | destinationLength := len(p) 16 | readable := r.length - r.read 17 | read := destinationLength 18 | 19 | if readable < destinationLength { 20 | read = readable 21 | } 22 | 23 | if read == 0 { 24 | return 0, io.EOF 25 | } 26 | 27 | for i := 0; i < read; i++ { 28 | p[i] = r.value 29 | } 30 | 31 | var result error = nil 32 | if read == readable { 33 | result = io.EOF 34 | } 35 | 36 | r.read += read 37 | 38 | return read, result 39 | } 40 | 41 | func ZeroReader(length int) io.Reader { 42 | return &uniformReader{ 43 | value: 0, 44 | length: length, 45 | read: 0, 46 | } 47 | } 48 | 49 | func OneReader(length int) io.Reader { 50 | return &uniformReader{ 51 | value: 1, 52 | length: length, 53 | read: 0, 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/filechecksum/verifier.go: -------------------------------------------------------------------------------- 1 | package filechecksum 2 | 3 | import ( 4 | "bytes" 5 | "hash" 6 | ) 7 | 8 | type ChecksumLookup interface { 9 | GetStrongChecksumForBlock(blockID int) []byte 10 | } 11 | 12 | type HashVerifier struct { 13 | BlockSize uint 14 | Hash hash.Hash 15 | BlockChecksumGetter ChecksumLookup 16 | } 17 | 18 | func (v *HashVerifier) VerifyBlockRange(startBlockID uint, data []byte) bool { 19 | for i := 0; i*int(v.BlockSize) < len(data); i++ { 20 | start := i * int(v.BlockSize) 21 | end := start + int(v.BlockSize) 22 | 23 | if end > len(data) { 24 | end = len(data) 25 | } 26 | 27 | blockData := data[start:end] 28 | 29 | expectedChecksum := v.BlockChecksumGetter.GetStrongChecksumForBlock( 30 | int(startBlockID) + i, 31 | ) 32 | 33 | if expectedChecksum == nil { 34 | return true 35 | } 36 | 37 | v.Hash.Write(blockData) 38 | hashedData := v.Hash.Sum(nil) 39 | 40 | if bytes.Compare(expectedChecksum, hashedData) != 0 { 41 | return false 42 | } 43 | 44 | v.Hash.Reset() 45 | } 46 | 47 | return true 48 | } 49 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/gosync/fileformat.md: -------------------------------------------------------------------------------- 1 | *NB: I'm documenting the current format of the gosync files merely as a point in time 2 | reference of format that 
is in use in the tool that is meant to serve as a practical reference and acceptance testing tool. The gosync tool is not intended as a production-worthy, well supported, tested tool.* 3 | 4 | *The format used exists entirely in service of being able to test the implementation of the gosync library as a cohesive whole in the real world, and therefore backwards and forwards compatibility (or even efficiency) are not primary concerns.* 5 | 6 | # Version 0.2.0 7 | ### The header 8 | (LE = little endian) 9 | * The string "G0S9NC" in UTF-8 10 | * versions*3 (eg. 0.1.2), uint16 LE 11 | * filesize, int64 LE 12 | * blocksize uint32 LE 13 | 14 | ### The body 15 | Repeating: 16 | * WeakChecksum 17 | * StrongChecksum 18 | 19 | each referring to blocks, starting at 0 (file start) and going upwards. 20 | 21 | In the current implementation of the FileChecksumGenerator used the WeakChecksum is the rolling checksum (4 bytes), and StrongChecksum is MD5 (16 bytes). 22 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/cli.go: -------------------------------------------------------------------------------- 1 | // Package cli provides a minimal framework for creating and organizing command line 2 | // Go applications. cli is designed to be easy to understand and write, the most simple 3 | // cli application can be written as follows: 4 | // func main() { 5 | // cli.NewApp().Run(os.Args) 6 | // } 7 | // 8 | // Of course this application does not do much, so let's make this an actual application: 9 | // func main() { 10 | // app := cli.NewApp() 11 | // app.Name = "greet" 12 | // app.Usage = "say a greeting" 13 | // app.Action = func(c *cli.Context) { 14 | // println("Greetings") 15 | // } 16 | // 17 | // app.Run(os.Args) 18 | // } 19 | package cli 20 | 21 | import ( 22 | "strings" 23 | ) 24 | 25 | type MultiError struct { 26 | Errors []error 27 | } 28 | 29 | func NewMultiError(err ...error) MultiError { 30 | return MultiError{Errors: err} 31 | } 32 | 33 | func (m MultiError) Error() string { 34 | errs := make([]string, len(m.Errors)) 35 | for i, err := range m.Errors { 36 | errs[i] = err.Error() 37 | } 38 | 39 | return strings.Join(errs, "\n") 40 | } 41 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Daniel Speed 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Daniel Speed 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2013 Jeremy Saenz 2 | All Rights Reserved. 3 | 4 | MIT LICENSE 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy of 7 | this software and associated documentation files (the "Software"), to deal in 8 | the Software without restriction, including without limitation the rights to 9 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 10 | the Software, and to permit persons to whom the Software is furnished to do so, 11 | subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 18 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 19 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 20 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 21 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
22 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/util/readers/nonrepeating_test.go: -------------------------------------------------------------------------------- 1 | package readers 2 | 3 | import ( 4 | "io" 5 | "io/ioutil" 6 | "testing" 7 | ) 8 | 9 | // This is only a very basic test 10 | func TestNonRepeatingSequenceReader(t *testing.T) { 11 | i := NewNonRepeatingSequence(0) 12 | a := []byte{0} 13 | b := []byte{0} 14 | 15 | i.Read(a) 16 | i.Read(b) 17 | 18 | if a[0] == b[0] { 19 | t.Fatalf("Bytes should not be the same! %s vs %s", a, b) 20 | } 21 | } 22 | 23 | func TestNonRepeatingSequenceIsDifferent(t *testing.T) { 24 | i := NewNonRepeatingSequence(0) 25 | i2 := NewNonRepeatingSequence(5) 26 | 27 | a := []byte{0} 28 | b := []byte{0} 29 | 30 | commonalities := 0 31 | 32 | for x := 0; x < 100; x++ { 33 | i.Read(a) 34 | i2.Read(b) 35 | 36 | if a[0] == b[0] { 37 | commonalities += 1 38 | } 39 | } 40 | 41 | if commonalities > 5 { 42 | t.Fatal("Sequences are too similar") 43 | } 44 | } 45 | 46 | func BenchmarkNonRepeatingSequence(b *testing.B) { 47 | b.SetBytes(1) 48 | 49 | s := NewSizedNonRepeatingSequence(0, int64(b.N)) 50 | 51 | b.StartTimer() 52 | _, err := io.Copy(ioutil.Discard, s) 53 | b.StopTimer() 54 | 55 | if err != nil { 56 | b.Fatal(err) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/blocksources/readseeker_blocksource.go: -------------------------------------------------------------------------------- 1 | package blocksources 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | const ( 8 | from_start = 0 9 | ) 10 | 11 | type ReadSeeker interface { 12 | Read(b []byte) (n int, err error) 13 | Seek(offset int64, whence int) (int64, error) 14 | } 15 | 16 | func NewReadSeekerBlockSource( 17 | r ReadSeeker, 18 | resolver BlockSourceOffsetResolver, 19 | ) *BlockSourceBase { 20 | return NewBlockSourceBase( 21 | &ReadSeekerRequester{ 22 | rs: r, 23 | }, 24 | resolver, 25 | nil, // TODO: No verifier! 26 | 1, 27 | 8*MB, 28 | ) 29 | } 30 | 31 | type ReadSeekerRequester struct { 32 | rs ReadSeeker 33 | } 34 | 35 | func (r *ReadSeekerRequester) DoRequest(startOffset int64, endOffset int64) (data []byte, err error) { 36 | read_length := endOffset - startOffset 37 | buffer := make([]byte, read_length) 38 | 39 | if _, err = r.rs.Seek(startOffset, from_start); err != nil { 40 | return 41 | } 42 | 43 | n, err := io.ReadFull(r.rs, buffer) 44 | 45 | if err != nil && err != io.ErrUnexpectedEOF { 46 | return 47 | } 48 | 49 | return buffer[:n], nil 50 | } 51 | 52 | func (r *ReadSeekerRequester) IsFatal(err error) bool { 53 | return true 54 | } 55 | -------------------------------------------------------------------------------- /vendor/src/github.com/petar/GoLLRB/llrb/llrb-stats.go: -------------------------------------------------------------------------------- 1 | // Copyright 2010 Petar Maymounkov. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | package llrb 6 | 7 | // GetHeight() returns an item in the tree with key @key, and it's height in the tree 8 | func (t *LLRB) GetHeight(key Item) (result Item, depth int) { 9 | return t.getHeight(t.root, key) 10 | } 11 | 12 | func (t *LLRB) getHeight(h *Node, item Item) (Item, int) { 13 | if h == nil { 14 | return nil, 0 15 | } 16 | if less(item, h.Item) { 17 | result, depth := t.getHeight(h.Left, item) 18 | return result, depth + 1 19 | } 20 | if less(h.Item, item) { 21 | result, depth := t.getHeight(h.Right, item) 22 | return result, depth + 1 23 | } 24 | return h.Item, 0 25 | } 26 | 27 | // HeightStats() returns the average and standard deviation of the height 28 | // of elements in the tree 29 | func (t *LLRB) HeightStats() (avg, stddev float64) { 30 | av := &avgVar{} 31 | heightStats(t.root, 0, av) 32 | return av.GetAvg(), av.GetStdDev() 33 | } 34 | 35 | func heightStats(h *Node, d int, av *avgVar) { 36 | if h == nil { 37 | return 38 | } 39 | av.Add(float64(d)) 40 | if h.Left != nil { 41 | heightStats(h.Left, d+1, av) 42 | } 43 | if h.Right != nil { 44 | heightStats(h.Right, d+1, av) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/gosync/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | gosync is a command-line implementation of the gosync package functionality, primarily as a demonstration of usage 3 | but supposed to be functional in itself. 4 | */ 5 | package main 6 | 7 | import ( 8 | "fmt" 9 | "log" 10 | "net/http" 11 | _ "net/http/pprof" 12 | "os" 13 | "runtime" 14 | 15 | "github.com/codegangsta/cli" 16 | ) 17 | 18 | const ( 19 | DefaultBlockSize = 8192 20 | magicString = "G0S9NC" // just to confirm the file type is used correctly 21 | majorVersion = uint16(0) 22 | minorVersion = uint16(2) 23 | patchVersion = uint16(1) 24 | ) 25 | 26 | var app = cli.NewApp() 27 | 28 | func main() { 29 | app.Name = "gosync" 30 | app.Usage = "Build indexes, patches, patch files" 31 | app.Flags = []cli.Flag{ 32 | cli.BoolFlag{ 33 | Name: "profile", 34 | Usage: "enable HTTP profiling", 35 | }, 36 | cli.IntFlag{ 37 | Name: "profilePort", 38 | Value: 6060, 39 | Usage: "The number of streams to use concurrently", 40 | }, 41 | } 42 | 43 | app.Version = fmt.Sprintf( 44 | "%v.%v.%v", 45 | majorVersion, 46 | minorVersion, 47 | patchVersion, 48 | ) 49 | 50 | runtime.GOMAXPROCS(runtime.NumCPU()) 51 | 52 | app.Before = func(c *cli.Context) error { 53 | if c.Bool("profile") { 54 | port := fmt.Sprint(c.Int("profilePort")) 55 | 56 | go func() { 57 | log.Println(http.ListenAndServe("localhost:"+port, nil)) 58 | }() 59 | } 60 | 61 | return nil 62 | } 63 | 64 | app.Run(os.Args) 65 | } 66 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/gosync/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | gosync is a command-line implementation of the gosync package functionality, primarily as a demonstration of usage 3 | but supposed to be functional in itself. 
4 | */ 5 | package main 6 | 7 | import ( 8 | "fmt" 9 | "log" 10 | "net/http" 11 | _ "net/http/pprof" 12 | "os" 13 | "runtime" 14 | 15 | "github.com/codegangsta/cli" 16 | ) 17 | 18 | const ( 19 | DefaultBlockSize = 8192 20 | magicString = "G0S9NC" // just to confirm the file type is used correctly 21 | majorVersion = uint16(0) 22 | minorVersion = uint16(2) 23 | patchVersion = uint16(1) 24 | ) 25 | 26 | var app = cli.NewApp() 27 | 28 | func main() { 29 | app.Name = "gosync" 30 | app.Usage = "Build indexes, patches, patch files" 31 | app.Flags = []cli.Flag{ 32 | cli.BoolFlag{ 33 | Name: "profile", 34 | Usage: "enable HTTP profiling", 35 | }, 36 | cli.IntFlag{ 37 | Name: "profilePort", 38 | Value: 6060, 39 | Usage: "The number of streams to use concurrently", 40 | }, 41 | } 42 | 43 | app.Version = fmt.Sprintf( 44 | "%v.%v.%v", 45 | majorVersion, 46 | minorVersion, 47 | patchVersion, 48 | ) 49 | 50 | runtime.GOMAXPROCS(runtime.NumCPU()) 51 | 52 | app.Before = func(c *cli.Context) error { 53 | if c.Bool("profile") { 54 | port := fmt.Sprint(c.Int("profilePort")) 55 | 56 | go func() { 57 | log.Println(http.ListenAndServe("localhost:"+port, nil)) 58 | }() 59 | } 60 | 61 | return nil 62 | } 63 | 64 | app.Run(os.Args) 65 | } 66 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/filechecksum/verifier_test.go: -------------------------------------------------------------------------------- 1 | package filechecksum 2 | 3 | import ( 4 | "crypto/md5" 5 | "testing" 6 | ) 7 | 8 | type SingleBlockSource []byte 9 | 10 | func (d SingleBlockSource) GetStrongChecksumForBlock(blockID int) []byte { 11 | m := md5.New() 12 | m.Write(d) 13 | return m.Sum(nil) 14 | } 15 | 16 | func TestBlockEqualsItself(t *testing.T) { 17 | data := []byte("fooooo") 18 | 19 | h := HashVerifier{ 20 | Hash: md5.New(), 21 | BlockSize: uint(len(data)), 22 | BlockChecksumGetter: SingleBlockSource(data), 23 | } 24 | 25 | if !h.VerifyBlockRange(0, data) { 26 | t.Error("data did not verify") 27 | } 28 | } 29 | 30 | type FourByteBlockSource []byte 31 | 32 | func (d FourByteBlockSource) GetStrongChecksumForBlock(blockID int) []byte { 33 | m := md5.New() 34 | 35 | start := blockID * 4 36 | end := start + 4 37 | 38 | if end >= len(d) { 39 | end = len(d) 40 | } 41 | 42 | m.Write(d[start:end]) 43 | return m.Sum(nil) 44 | } 45 | 46 | func TestSplitBlocksEqualThemselves(t *testing.T) { 47 | data := []byte("foooBaar") 48 | 49 | h := HashVerifier{ 50 | Hash: md5.New(), 51 | BlockSize: uint(4), 52 | BlockChecksumGetter: FourByteBlockSource(data), 53 | } 54 | 55 | if !h.VerifyBlockRange(0, data) { 56 | t.Error("data did not verify") 57 | } 58 | } 59 | 60 | func TestPartialBlock(t *testing.T) { 61 | data := []byte("fo") 62 | 63 | h := HashVerifier{ 64 | Hash: md5.New(), 65 | BlockSize: uint(4), 66 | BlockChecksumGetter: SingleBlockSource(data), 67 | } 68 | 69 | if !h.VerifyBlockRange(0, data) { 70 | t.Error("data did not verify") 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/comparer/comparer_bench_test.go: -------------------------------------------------------------------------------- 1 | package comparer 2 | 3 | import ( 4 | "github.com/Redundancy/go-sync/chunks" 5 | "github.com/Redundancy/go-sync/filechecksum" 6 | "github.com/Redundancy/go-sync/util/readers" 7 | "testing" 8 | ) 9 | 10 | var test = []byte{0, 1, 2, 3} 11 | 12 | type NegativeWeakIndex struct { 13 | } 14 | 15 | func (i 
*NegativeWeakIndex) FindWeakChecksum2(chk []byte) interface{} { 16 | return nil 17 | } 18 | 19 | func (i *NegativeWeakIndex) FindStrongChecksum2(chk []byte, weak interface{}) []chunks.ChunkChecksum { 20 | return nil 21 | } 22 | 23 | type NegativeStrongIndex struct { 24 | } 25 | 26 | func (i *NegativeStrongIndex) FindWeakChecksum2(chk []byte) interface{} { 27 | return i 28 | } 29 | 30 | func (i *NegativeStrongIndex) FindStrongChecksum2(chk []byte, weak interface{}) []chunks.ChunkChecksum { 31 | return nil 32 | } 33 | 34 | func BenchmarkWeakComparison(b *testing.B) { 35 | b.ReportAllocs() 36 | b.SetBytes(1) 37 | 38 | const BLOCK_SIZE = 8 39 | generator := filechecksum.NewFileChecksumGenerator(BLOCK_SIZE) 40 | 41 | b.StartTimer() 42 | 43 | results := (&Comparer{}).StartFindMatchingBlocks( 44 | readers.OneReader(b.N+BLOCK_SIZE), 45 | 0, 46 | generator, 47 | &NegativeWeakIndex{}, 48 | ) 49 | 50 | for _, ok := <-results; ok; { 51 | } 52 | 53 | b.StopTimer() 54 | } 55 | 56 | func BenchmarkStrongComparison(b *testing.B) { 57 | b.ReportAllocs() 58 | b.SetBytes(1) 59 | 60 | const BLOCK_SIZE = 8 61 | generator := filechecksum.NewFileChecksumGenerator(BLOCK_SIZE) 62 | 63 | b.StartTimer() 64 | 65 | results := (&Comparer{}).StartFindMatchingBlocks( 66 | readers.OneReader(b.N+BLOCK_SIZE), 67 | 0, 68 | generator, 69 | &NegativeStrongIndex{}, 70 | ) 71 | 72 | for _, ok := <-results; ok; { 73 | } 74 | 75 | b.StopTimer() 76 | } 77 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/util/readers/uniform_test.go: -------------------------------------------------------------------------------- 1 | package readers 2 | 3 | import ( 4 | "io" 5 | "io/ioutil" 6 | "testing" 7 | ) 8 | 9 | func TestUniformReaderLength(t *testing.T) { 10 | r, err := ioutil.ReadAll(OneReader(100)) 11 | 12 | if err != nil { 13 | t.Fatal(err) 14 | } 15 | 16 | if len(r) != 100 { 17 | t.Errorf("Unexpected length: %v", len(r)) 18 | } 19 | 20 | for i, b := range r { 21 | if b != 1 { 22 | t.Errorf("Byte at position %v is not 1: %v", i, b) 23 | } 24 | } 25 | } 26 | 27 | func TestReadIntoLargerBuffer(t *testing.T) { 28 | b := make([]byte, 100) 29 | r := OneReader(10) 30 | 31 | n, err := r.Read(b) 32 | 33 | if n != 10 { 34 | t.Errorf("Wrong read length: %v", n) 35 | } 36 | 37 | if err != io.EOF { 38 | t.Errorf("Did not raise EOF after reading: %v", err) 39 | } 40 | } 41 | 42 | func TestMultiUniformReader(t *testing.T) { 43 | r := io.MultiReader( 44 | OneReader(12), 45 | NewSizedNonRepeatingSequence(0, 88), 46 | ) 47 | 48 | b := make([]byte, 100) 49 | 50 | n, err := r.Read(b) 51 | 52 | if n != 12 { 53 | t.Errorf("Wrong read length: %v", n) 54 | } 55 | 56 | if err == io.EOF { 57 | t.Errorf("Raised EOF after reading! 
%v", err) 58 | } 59 | 60 | n, err = r.Read(b) 61 | 62 | if n != 88 { 63 | t.Errorf("Wrong read length: %v", n) 64 | } 65 | 66 | n, err = r.Read(b) 67 | 68 | if err != io.EOF { 69 | t.Errorf("Really expected EOF by now: %v %v", err, n) 70 | } 71 | } 72 | 73 | func TestFillBuffer(t *testing.T) { 74 | r := io.MultiReader( 75 | OneReader(12), 76 | NewSizedNonRepeatingSequence(0, 88), 77 | ) 78 | 79 | b := make([]byte, 100) 80 | _, err := io.ReadFull(r, b) 81 | 82 | if err != nil && err != io.EOF { 83 | t.Error(err) 84 | } 85 | 86 | if len(b) != cap(b) { 87 | t.Errorf("Expected to fill b: %v", len(b)) 88 | } 89 | 90 | } 91 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/indexbuilder/indexbuilder.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package indexbuilder provides a few shortbuts to building a checksum index by generating and then loading 3 | the checksums, and building an index from that. It's potentially a sign that the responsibilities here need refactoring. 4 | */ 5 | package indexbuilder 6 | 7 | import ( 8 | "bytes" 9 | "github.com/Redundancy/go-sync/chunks" 10 | "github.com/Redundancy/go-sync/filechecksum" 11 | "github.com/Redundancy/go-sync/index" 12 | "io" 13 | ) 14 | 15 | // Generates an index from a reader 16 | // This is mostly a utility function to avoid being overly verbose in tests that need 17 | // an index to work, but don't want to construct one by hand in order to avoid the dependencies 18 | // obviously this means that those tests are likely to fail if there are issues with any of the other 19 | // modules, which is not ideal. 20 | // TODO: move to util? 21 | func BuildChecksumIndex(check *filechecksum.FileChecksumGenerator, r io.Reader) ( 22 | fcheck []byte, 23 | i *index.ChecksumIndex, 24 | lookup filechecksum.ChecksumLookup, 25 | err error, 26 | ) { 27 | b := bytes.NewBuffer(nil) 28 | fcheck, err = check.GenerateChecksums(r, b) 29 | 30 | if err != nil { 31 | return 32 | } 33 | 34 | weakSize := check.WeakRollingHash.Size() 35 | strongSize := check.GetStrongHash().Size() 36 | readChunks, err := chunks.LoadChecksumsFromReader(b, weakSize, strongSize) 37 | 38 | if err != nil { 39 | return 40 | } 41 | 42 | i = index.MakeChecksumIndex(readChunks) 43 | lookup = chunks.StrongChecksumGetter(readChunks) 44 | 45 | return 46 | } 47 | 48 | func BuildIndexFromString(generator *filechecksum.FileChecksumGenerator, reference string) ( 49 | fileCheckSum []byte, 50 | referenceIndex *index.ChecksumIndex, 51 | lookup filechecksum.ChecksumLookup, 52 | err error, 53 | ) { 54 | return BuildChecksumIndex(generator, bytes.NewBufferString(reference)) 55 | } 56 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/patcher/blocksource.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package patcher follows a pattern established by hash, which defines the interface in the top level package, and then provides implementations 3 | below it. 4 | */ 5 | package patcher 6 | 7 | import ( 8 | "hash" 9 | ) 10 | 11 | /* 12 | BlockSource is an interface used by the patchers to obtain blocks from the reference 13 | It does not stipulate where the reference data might be (it could be local, in a pre-built patch file, on S3 or somewhere else) 14 | 15 | It is assumed that the BlockSource may be slow, and may benefit from request pipelining & concurrency. 
16 | Therefore patchers should feel free to request as many block spans as they can handle. 17 | 18 | A BlockSource may be a view onto a larger transport concept, so that multiple files can be handled with wider 19 | knowledge of the number of simultaneous requests allowed, etc. The BlockSource may decide to split BlockSpans 20 | into smaller sizes if it wants. 21 | 22 | It is up to the patcher to receive blocks in a timely manner, and decide what to do with them, rather than 23 | bother the BlockSource with more memory management and buffering logic. 24 | 25 | Since these interfaces require specific structs to satisfy, it's expected that implementers will import this module. 26 | 27 | */ 28 | type BlockSource interface { 29 | RequestBlocks(MissingBlockSpan) error 30 | 31 | GetResultChannel() <-chan BlockReponse 32 | 33 | // If the block source encounters an unsurmountable problem 34 | EncounteredError() <-chan error 35 | } 36 | 37 | type FoundBlockSpan struct { 38 | StartBlock uint 39 | EndBlock uint 40 | BlockSize int64 41 | MatchOffset int64 42 | } 43 | 44 | type MissingBlockSpan struct { 45 | StartBlock uint 46 | EndBlock uint 47 | 48 | BlockSize int64 49 | // a hasher to use to ensure that the block response matches 50 | Hasher hash.Hash 51 | // the hash values that the blocks should have 52 | ExpectedSums [][]byte 53 | } 54 | 55 | type BlockReponse struct { 56 | StartBlock uint 57 | Data []byte 58 | } 59 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/blocksources/fixed_size_block_resolver.go: -------------------------------------------------------------------------------- 1 | package blocksources 2 | 3 | type FixedSizeBlockResolver struct { 4 | BlockSize uint64 5 | FileSize int64 6 | MaxDesiredRequestSize uint64 7 | } 8 | 9 | func (r *FixedSizeBlockResolver) GetBlockStartOffset(blockID uint) int64 { 10 | if off := int64(uint64(blockID) * r.BlockSize); r.FileSize != 0 && off > r.FileSize { 11 | return r.FileSize 12 | } else { 13 | return off 14 | } 15 | } 16 | 17 | func (r *FixedSizeBlockResolver) GetBlockEndOffset(blockID uint) int64 { 18 | if off := int64(uint64(blockID+1) * r.BlockSize); r.FileSize != 0 && off > r.FileSize { 19 | return r.FileSize 20 | } else { 21 | return off 22 | } 23 | } 24 | 25 | // Split blocks into chunks of the desired size, or less. This implementation assumes a fixed block size at the source. 26 | func (r *FixedSizeBlockResolver) SplitBlockRangeToDesiredSize(startBlockID, endBlockID uint) []QueuedRequest { 27 | 28 | if r.MaxDesiredRequestSize == 0 { 29 | return []QueuedRequest{ 30 | QueuedRequest{ 31 | StartBlockID: startBlockID, 32 | EndBlockID: endBlockID, 33 | }, 34 | } 35 | } 36 | 37 | maxSize := r.MaxDesiredRequestSize 38 | if r.MaxDesiredRequestSize < r.BlockSize { 39 | maxSize = r.BlockSize 40 | } 41 | 42 | // how many blocks is the desired size? 
43 | blockCountPerRequest := uint(maxSize / r.BlockSize) 44 | 45 | requests := make([]QueuedRequest, 0, (endBlockID-startBlockID)/blockCountPerRequest+1) 46 | currentBlockID := startBlockID 47 | 48 | for { 49 | maxEndBlock := currentBlockID + blockCountPerRequest 50 | 51 | if maxEndBlock > endBlockID { 52 | requests = append( 53 | requests, 54 | QueuedRequest{ 55 | StartBlockID: currentBlockID, 56 | EndBlockID: endBlockID, 57 | }, 58 | ) 59 | 60 | return requests 61 | } else { 62 | requests = append( 63 | requests, 64 | QueuedRequest{ 65 | StartBlockID: currentBlockID, 66 | EndBlockID: maxEndBlock - 1, 67 | }, 68 | ) 69 | 70 | currentBlockID = maxEndBlock 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/help_test.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | ) 7 | 8 | func Test_ShowAppHelp_NoAuthor(t *testing.T) { 9 | output := new(bytes.Buffer) 10 | app := NewApp() 11 | app.Writer = output 12 | 13 | c := NewContext(app, nil, nil) 14 | 15 | ShowAppHelp(c) 16 | 17 | if bytes.Index(output.Bytes(), []byte("AUTHOR(S):")) != -1 { 18 | t.Errorf("expected\n%snot to include %s", output.String(), "AUTHOR(S):") 19 | } 20 | } 21 | 22 | func Test_ShowAppHelp_NoVersion(t *testing.T) { 23 | output := new(bytes.Buffer) 24 | app := NewApp() 25 | app.Writer = output 26 | 27 | app.Version = "" 28 | 29 | c := NewContext(app, nil, nil) 30 | 31 | ShowAppHelp(c) 32 | 33 | if bytes.Index(output.Bytes(), []byte("VERSION:")) != -1 { 34 | t.Errorf("expected\n%snot to include %s", output.String(), "VERSION:") 35 | } 36 | } 37 | 38 | func Test_Help_Custom_Flags(t *testing.T) { 39 | oldFlag := HelpFlag 40 | defer func() { 41 | HelpFlag = oldFlag 42 | }() 43 | 44 | HelpFlag = BoolFlag{ 45 | Name: "help, x", 46 | Usage: "show help", 47 | } 48 | 49 | app := App{ 50 | Flags: []Flag{ 51 | BoolFlag{Name: "foo, h"}, 52 | }, 53 | Action: func(ctx *Context) { 54 | if ctx.Bool("h") != true { 55 | t.Errorf("custom help flag not set") 56 | } 57 | }, 58 | } 59 | output := new(bytes.Buffer) 60 | app.Writer = output 61 | app.Run([]string{"test", "-h"}) 62 | if output.Len() > 0 { 63 | t.Errorf("unexpected output: %s", output.String()) 64 | } 65 | } 66 | 67 | func Test_Version_Custom_Flags(t *testing.T) { 68 | oldFlag := VersionFlag 69 | defer func() { 70 | VersionFlag = oldFlag 71 | }() 72 | 73 | VersionFlag = BoolFlag{ 74 | Name: "version, V", 75 | Usage: "show version", 76 | } 77 | 78 | app := App{ 79 | Flags: []Flag{ 80 | BoolFlag{Name: "foo, v"}, 81 | }, 82 | Action: func(ctx *Context) { 83 | if ctx.Bool("v") != true { 84 | t.Errorf("custom version flag not set") 85 | } 86 | }, 87 | } 88 | output := new(bytes.Buffer) 89 | app.Writer = output 90 | app.Run([]string{"test", "-v"}) 91 | if output.Len() > 0 { 92 | t.Errorf("unexpected output: %s", output.String()) 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /vendor/src/github.com/petar/GoLLRB/llrb/iterator_test.go: -------------------------------------------------------------------------------- 1 | package llrb 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | func TestAscendGreaterOrEqual(t *testing.T) { 9 | tree := New() 10 | tree.InsertNoReplace(Int(4)) 11 | tree.InsertNoReplace(Int(6)) 12 | tree.InsertNoReplace(Int(1)) 13 | tree.InsertNoReplace(Int(3)) 14 | var ary []Item 15 | tree.AscendGreaterOrEqual(Int(-1), func(i 
Item) bool { 16 | ary = append(ary, i) 17 | return true 18 | }) 19 | expected := []Item{Int(1), Int(3), Int(4), Int(6)} 20 | if !reflect.DeepEqual(ary, expected) { 21 | t.Errorf("expected %v but got %v", expected, ary) 22 | } 23 | ary = nil 24 | tree.AscendGreaterOrEqual(Int(3), func(i Item) bool { 25 | ary = append(ary, i) 26 | return true 27 | }) 28 | expected = []Item{Int(3), Int(4), Int(6)} 29 | if !reflect.DeepEqual(ary, expected) { 30 | t.Errorf("expected %v but got %v", expected, ary) 31 | } 32 | ary = nil 33 | tree.AscendGreaterOrEqual(Int(2), func(i Item) bool { 34 | ary = append(ary, i) 35 | return true 36 | }) 37 | expected = []Item{Int(3), Int(4), Int(6)} 38 | if !reflect.DeepEqual(ary, expected) { 39 | t.Errorf("expected %v but got %v", expected, ary) 40 | } 41 | } 42 | 43 | func TestDescendLessOrEqual(t *testing.T) { 44 | tree := New() 45 | tree.InsertNoReplace(Int(4)) 46 | tree.InsertNoReplace(Int(6)) 47 | tree.InsertNoReplace(Int(1)) 48 | tree.InsertNoReplace(Int(3)) 49 | var ary []Item 50 | tree.DescendLessOrEqual(Int(10), func(i Item) bool { 51 | ary = append(ary, i) 52 | return true 53 | }) 54 | expected := []Item{Int(6), Int(4), Int(3), Int(1)} 55 | if !reflect.DeepEqual(ary, expected) { 56 | t.Errorf("expected %v but got %v", expected, ary) 57 | } 58 | ary = nil 59 | tree.DescendLessOrEqual(Int(4), func(i Item) bool { 60 | ary = append(ary, i) 61 | return true 62 | }) 63 | expected = []Item{Int(4), Int(3), Int(1)} 64 | if !reflect.DeepEqual(ary, expected) { 65 | t.Errorf("expected %v but got %v", expected, ary) 66 | } 67 | ary = nil 68 | tree.DescendLessOrEqual(Int(5), func(i Item) bool { 69 | ary = append(ary, i) 70 | return true 71 | }) 72 | expected = []Item{Int(4), Int(3), Int(1)} 73 | if !reflect.DeepEqual(ary, expected) { 74 | t.Errorf("expected %v but got %v", expected, ary) 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/altsrc/yaml_file_loader.go: -------------------------------------------------------------------------------- 1 | // Disabling building of yaml support in cases where golang is 1.0 or 1.1 2 | // as the encoding library is not implemented or supported. 3 | 4 | // +build !go1,!go1.1 5 | 6 | package altsrc 7 | 8 | import ( 9 | "fmt" 10 | "io/ioutil" 11 | "net/http" 12 | "net/url" 13 | "os" 14 | 15 | "github.com/codegangsta/cli" 16 | 17 | "gopkg.in/yaml.v2" 18 | ) 19 | 20 | type yamlSourceContext struct { 21 | FilePath string 22 | } 23 | 24 | // NewYamlSourceFromFile creates a new Yaml InputSourceContext from a filepath. 25 | func NewYamlSourceFromFile(file string) (InputSourceContext, error) { 26 | ymlLoader := &yamlSourceLoader{FilePath: file} 27 | var results map[string]interface{} 28 | err := readCommandYaml(ysl.FilePath, &results) 29 | if err != nil { 30 | return fmt.Errorf("Unable to load Yaml file '%s': inner error: \n'%v'", filePath, err.Error()) 31 | } 32 | 33 | return &MapInputSource{valueMap: results}, nil 34 | } 35 | 36 | // NewYamlSourceFromFlagFunc creates a new Yaml InputSourceContext from a provided flag name and source context. 
37 | func NewYamlSourceFromFlagFunc(flagFileName string) func(InputSourceContext, error) { 38 | return func(context cli.Context) { 39 | filePath := context.String(flagFileName) 40 | return NewYamlSourceFromFile(filePath) 41 | } 42 | } 43 | 44 | func readCommandYaml(filePath string, container interface{}) (err error) { 45 | b, err := loadDataFrom(filePath) 46 | if err != nil { 47 | return err 48 | } 49 | 50 | err = yaml.Unmarshal(b, container) 51 | if err != nil { 52 | return err 53 | } 54 | 55 | err = nil 56 | return 57 | } 58 | 59 | func loadDataFrom(filePath string) ([]byte, error) { 60 | u, err := url.Parse(filePath) 61 | if err != nil { 62 | return nil, err 63 | } 64 | 65 | if u.Host != "" { // i have a host, now do i support the scheme? 66 | switch u.Scheme { 67 | case "http", "https": 68 | res, err := http.Get(filePath) 69 | if err != nil { 70 | return nil, err 71 | } 72 | return ioutil.ReadAll(res.Body) 73 | default: 74 | return nil, fmt.Errorf("scheme of %s is unsupported", filePath) 75 | } 76 | } else if u.Path != "" { // i dont have a host, but I have a path. I am a local file. 77 | if _, notFoundFileErr := os.Stat(filePath); notFoundFileErr != nil { 78 | return nil, fmt.Errorf("Cannot read from file: '%s' because it does not exist.", filePath) 79 | } 80 | return ioutil.ReadFile(filePath) 81 | } else { 82 | return nil, fmt.Errorf("unable to determine how to load from path %s", filePath) 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/chunks/chunks.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package chunks provides the basic structure for a pair of the weak and strong checksums. 3 | Since this is fairly widely used, splitting this out breaks a number of possible circular dependencies 4 | */ 5 | package chunks 6 | 7 | import ( 8 | "bytes" 9 | "errors" 10 | "io" 11 | ) 12 | 13 | // For a given Block, the Weak & Strong hashes, and the offset 14 | // This structure is only used to generate the index of reference files, since 15 | // computing the strong checksum is not done when comparing unless the weak checksum matches 16 | type ChunkChecksum struct { 17 | // an offset in terms of chunk count 18 | ChunkOffset uint 19 | // the size of the block 20 | Size int64 21 | WeakChecksum []byte 22 | StrongChecksum []byte 23 | } 24 | 25 | // compares a checksum to another based on the checksums, not the offset 26 | func (chunk ChunkChecksum) Match(other ChunkChecksum) bool { 27 | weakEqual := bytes.Compare(chunk.WeakChecksum, other.WeakChecksum) == 0 28 | strongEqual := false 29 | if weakEqual { 30 | strongEqual = bytes.Compare(chunk.StrongChecksum, other.StrongChecksum) == 0 31 | } 32 | return weakEqual && strongEqual 33 | } 34 | 35 | var ErrPartialChecksum = errors.New("Reader length was not a multiple of the checksums") 36 | 37 | // Loads chunks from a reader, assuming alternating weak then strong hashes 38 | func LoadChecksumsFromReader( 39 | r io.Reader, 40 | weakHashSize int, 41 | strongHashSize int, 42 | ) ([]ChunkChecksum, error) { 43 | 44 | result := make([]ChunkChecksum, 0, 20) 45 | offset := uint(0) 46 | 47 | temp := ChunkChecksum{} 48 | 49 | for { 50 | weakBuffer := make([]byte, weakHashSize) 51 | n, err := io.ReadFull(r, weakBuffer) 52 | 53 | if n == weakHashSize { 54 | temp.ChunkOffset = offset 55 | temp.WeakChecksum = weakBuffer 56 | } else if n == 0 && err == io.EOF { 57 | break 58 | } else { 59 | return nil, ErrPartialChecksum 60 | } 
61 | 62 | strongBuffer := make([]byte, strongHashSize) 63 | n, err = io.ReadFull(r, strongBuffer) 64 | 65 | if n == strongHashSize { 66 | temp.StrongChecksum = strongBuffer 67 | result = append(result, temp) 68 | 69 | if err == io.EOF { 70 | break 71 | } 72 | } else { 73 | return nil, ErrPartialChecksum 74 | } 75 | 76 | offset += 1 77 | } 78 | 79 | return result, nil 80 | } 81 | 82 | // satisfies filechecksum.ChecksumLookup 83 | type StrongChecksumGetter []ChunkChecksum 84 | 85 | func (s StrongChecksumGetter) GetStrongChecksumForBlock(blockID int) []byte { 86 | return s[blockID].StrongChecksum 87 | } 88 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/rollsum/rollsum_32_base.go: -------------------------------------------------------------------------------- 1 | package rollsum 2 | 3 | import ( 4 | "encoding/binary" 5 | ) 6 | 7 | const FULL_BYTES_16 = (1 << 16) - 1 8 | 9 | // Rollsum32Base decouples the rollsum algorithm from the implementation of 10 | // hash.Hash and the storage the rolling checksum window 11 | // this allows us to write different versions of the storage for the distinctly different 12 | // use-cases and optimize the storage with the usage pattern. 13 | func NewRollsum32Base(blockSize uint) *Rollsum32Base { 14 | return &Rollsum32Base{blockSize: blockSize} 15 | } 16 | 17 | // The specification of hash.Hash is such that it cannot be implemented without implementing storage 18 | // but the most optimal storage scheme depends on usage of the circular buffer & hash 19 | type Rollsum32Base struct { 20 | blockSize uint 21 | a, b uint32 22 | } 23 | 24 | // Add a single byte into the rollsum 25 | func (r *Rollsum32Base) AddByte(b byte) { 26 | r.a += uint32(b) 27 | r.b += r.a 28 | } 29 | 30 | func (r *Rollsum32Base) AddBytes(bs []byte) { 31 | for _, b := range bs { 32 | r.a += uint32(b) 33 | r.b += r.a 34 | } 35 | } 36 | 37 | // Remove a byte from the end of the rollsum 38 | // Use the previous length (before removal) 39 | func (r *Rollsum32Base) RemoveByte(b byte, length int) { 40 | r.a -= uint32(b) 41 | r.b -= uint32(uint(length) * uint(b)) 42 | } 43 | 44 | func (r *Rollsum32Base) RemoveBytes(bs []byte, length int) { 45 | for _, b := range bs { 46 | r.a -= uint32(b) 47 | r.b -= uint32(uint(length) * uint(b)) 48 | length -= 1 49 | } 50 | } 51 | 52 | func (r *Rollsum32Base) AddAndRemoveBytes(add []byte, remove []byte, length int) { 53 | len_added := len(add) 54 | len_removed := len(remove) 55 | 56 | startEvicted := len_added - len_removed 57 | r.AddBytes(add[:startEvicted]) 58 | length += startEvicted 59 | 60 | for i := startEvicted; i < len_added; i++ { 61 | r.RemoveByte(remove[i-startEvicted], length) 62 | r.AddByte(add[i]) 63 | } 64 | } 65 | 66 | // Set a whole block of blockSize 67 | func (r *Rollsum32Base) SetBlock(block []byte) { 68 | r.Reset() 69 | r.AddBytes(block) 70 | } 71 | 72 | // Reset the hash to the initial state 73 | func (r *Rollsum32Base) Reset() { 74 | r.a, r.b = 0, 0 75 | } 76 | 77 | // size of the hash in bytes 78 | func (r *Rollsum32Base) Size() int { 79 | return 4 80 | } 81 | 82 | // Puts the sum into b. Avoids allocation. 
b must have length >= 4 83 | func (r *Rollsum32Base) GetSum(b []byte) { 84 | value := uint32((r.a & FULL_BYTES_16) + ((r.b & FULL_BYTES_16) << 16)) 85 | binary.LittleEndian.PutUint32(b, value) 86 | } 87 | -------------------------------------------------------------------------------- /src/gosync/build.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "time" 8 | 9 | "github.com/Redundancy/go-sync/filechecksum" 10 | "github.com/codegangsta/cli" 11 | ) 12 | 13 | func init() { 14 | app.Commands = append( 15 | app.Commands, 16 | cli.Command{ 17 | Name: "build", 18 | ShortName: "b", 19 | Usage: "build a .gosync file for a file", 20 | Action: Build, 21 | Flags: []cli.Flag{ 22 | cli.IntFlag{ 23 | Name: "blocksize", 24 | Value: DefaultBlockSize, 25 | Usage: "The block size to use for the gosync file", 26 | }, 27 | }, 28 | }, 29 | ) 30 | } 31 | 32 | func Build(c *cli.Context) { 33 | filename := c.Args()[0] 34 | blocksize := uint32(c.Int("blocksize")) 35 | generator := filechecksum.NewFileChecksumGenerator(uint(blocksize)) 36 | inputFile, err := os.Open(filename) 37 | 38 | if err != nil { 39 | absInputPath, err2 := filepath.Abs(filename) 40 | if err2 == nil { 41 | handleFileError(absInputPath, err) 42 | } else { 43 | handleFileError(filename, err) 44 | } 45 | 46 | os.Exit(1) 47 | } 48 | 49 | s, _ := inputFile.Stat() 50 | // TODO: Error? 51 | file_size := s.Size() 52 | 53 | defer inputFile.Close() 54 | 55 | ext := filepath.Ext(filename) 56 | outfilePath := filename[:len(filename)-len(ext)] + ".gosync" 57 | outputFile, err := os.Create(outfilePath) 58 | 59 | if err != nil { 60 | handleFileError(outfilePath, err) 61 | os.Exit(1) 62 | } 63 | 64 | defer outputFile.Close() 65 | 66 | if err = writeHeaders( 67 | outputFile, 68 | magicString, 69 | blocksize, 70 | file_size, 71 | []uint16{majorVersion, minorVersion, patchVersion}, 72 | ); err != nil { 73 | fmt.Fprintf( 74 | os.Stderr, 75 | "Error getting file info: %v\n", 76 | filename, 77 | err, 78 | ) 79 | os.Exit(2) 80 | } 81 | 82 | start := time.Now() 83 | _, err = generator.GenerateChecksums(inputFile, outputFile) 84 | end := time.Now() 85 | 86 | if err != nil { 87 | fmt.Fprintf( 88 | os.Stderr, 89 | "Error generating checksum: %v\n", 90 | filename, 91 | err, 92 | ) 93 | os.Exit(2) 94 | } 95 | 96 | inputFileInfo, err := os.Stat(filename) 97 | if err != nil { 98 | fmt.Fprintf( 99 | os.Stderr, 100 | "Error getting file info: %v\n", 101 | filename, 102 | err, 103 | ) 104 | os.Exit(2) 105 | } 106 | 107 | fmt.Fprintf( 108 | os.Stderr, 109 | "Index for %v file generated in %v (%v bytes/S)\n", 110 | inputFileInfo.Size(), 111 | end.Sub(start), 112 | float64(inputFileInfo.Size())/end.Sub(start).Seconds(), 113 | ) 114 | } 115 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/gosync/build.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "time" 8 | 9 | "github.com/Redundancy/go-sync/filechecksum" 10 | "github.com/codegangsta/cli" 11 | ) 12 | 13 | func init() { 14 | app.Commands = append( 15 | app.Commands, 16 | cli.Command{ 17 | Name: "build", 18 | ShortName: "b", 19 | Usage: "build a .gosync file for a file", 20 | Action: Build, 21 | Flags: []cli.Flag{ 22 | cli.IntFlag{ 23 | Name: "blocksize", 24 | Value: DefaultBlockSize, 25 | Usage: "The block size to use for the 
gosync file", 26 | }, 27 | }, 28 | }, 29 | ) 30 | } 31 | 32 | func Build(c *cli.Context) { 33 | filename := c.Args()[0] 34 | blocksize := uint32(c.Int("blocksize")) 35 | generator := filechecksum.NewFileChecksumGenerator(uint(blocksize)) 36 | inputFile, err := os.Open(filename) 37 | 38 | if err != nil { 39 | absInputPath, err2 := filepath.Abs(filename) 40 | if err2 == nil { 41 | handleFileError(absInputPath, err) 42 | } else { 43 | handleFileError(filename, err) 44 | } 45 | 46 | os.Exit(1) 47 | } 48 | 49 | s, _ := inputFile.Stat() 50 | // TODO: Error? 51 | file_size := s.Size() 52 | 53 | defer inputFile.Close() 54 | 55 | ext := filepath.Ext(filename) 56 | outfilePath := filename[:len(filename)-len(ext)] + ".gosync" 57 | outputFile, err := os.Create(outfilePath) 58 | 59 | if err != nil { 60 | handleFileError(outfilePath, err) 61 | os.Exit(1) 62 | } 63 | 64 | defer outputFile.Close() 65 | 66 | if err = writeHeaders( 67 | outputFile, 68 | magicString, 69 | blocksize, 70 | file_size, 71 | []uint16{majorVersion, minorVersion, patchVersion}, 72 | ); err != nil { 73 | fmt.Fprintf( 74 | os.Stderr, 75 | "Error getting file info: %v\n", 76 | filename, 77 | err, 78 | ) 79 | os.Exit(2) 80 | } 81 | 82 | start := time.Now() 83 | _, err = generator.GenerateChecksums(inputFile, outputFile) 84 | end := time.Now() 85 | 86 | if err != nil { 87 | fmt.Fprintf( 88 | os.Stderr, 89 | "Error generating checksum: %v\n", 90 | filename, 91 | err, 92 | ) 93 | os.Exit(2) 94 | } 95 | 96 | inputFileInfo, err := os.Stat(filename) 97 | if err != nil { 98 | fmt.Fprintf( 99 | os.Stderr, 100 | "Error getting file info: %v\n", 101 | filename, 102 | err, 103 | ) 104 | os.Exit(2) 105 | } 106 | 107 | fmt.Fprintf( 108 | os.Stderr, 109 | "Index for %v file generated in %v (%v bytes/S)\n", 110 | inputFileInfo.Size(), 111 | end.Sub(start), 112 | float64(inputFileInfo.Size())/end.Sub(start).Seconds(), 113 | ) 114 | } 115 | -------------------------------------------------------------------------------- /src/gosync/patch.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "runtime" 7 | 8 | gosync_main "github.com/Redundancy/go-sync" 9 | "github.com/codegangsta/cli" 10 | ) 11 | 12 | const usage = "gosync patch []" 13 | 14 | func init() { 15 | app.Commands = append( 16 | app.Commands, 17 | cli.Command{ 18 | Name: "patch", 19 | ShortName: "p", 20 | Usage: usage, 21 | Description: `Recreate the reference source file, using an index and a local file that is believed to be similar. 22 | The index should be produced by "gosync build". 23 | 24 | is a .gosync file and may be a local, unc network path or http/https url 25 | is corresponding target and may be a local, unc network path or http/https url 26 | is optional. 
If not specified, the local file will be overwritten when done.`, 27 | Action: Patch, 28 | Flags: []cli.Flag{ 29 | cli.IntFlag{ 30 | Name: "p", 31 | Value: runtime.NumCPU(), 32 | Usage: "The number of streams to use concurrently", 33 | }, 34 | }, 35 | }, 36 | ) 37 | } 38 | 39 | // Patch a file 40 | func Patch(c *cli.Context) { 41 | errorWrapper(c, func(c *cli.Context) error { 42 | 43 | fmt.Fprintln(os.Stderr, "Starting patching process") 44 | 45 | if l := len(c.Args()); l < 3 || l > 4 { 46 | return fmt.Errorf( 47 | "Usage is \"%v\" (invalid number of arguments)", 48 | usage, 49 | ) 50 | } 51 | 52 | localFilename := c.Args()[0] 53 | summaryFile := c.Args()[1] 54 | referencePath := c.Args()[2] 55 | 56 | outFilename := localFilename 57 | if len(c.Args()) == 4 { 58 | outFilename = c.Args()[3] 59 | } 60 | 61 | indexReader, e := os.Open(summaryFile) 62 | if e != nil { 63 | return e 64 | } 65 | defer indexReader.Close() 66 | 67 | _, _, _, filesize, blocksize, e := readHeadersAndCheck( 68 | indexReader, 69 | magicString, 70 | majorVersion, 71 | ) 72 | 73 | index, checksumLookup, blockCount, err := readIndex( 74 | indexReader, 75 | uint(blocksize), 76 | ) 77 | 78 | fs := &gosync_main.BasicSummary{ 79 | ChecksumIndex: index, 80 | ChecksumLookup: checksumLookup, 81 | BlockCount: blockCount, 82 | BlockSize: uint(blocksize), 83 | FileSize: filesize, 84 | } 85 | 86 | rsync, err := gosync_main.MakeRSync( 87 | localFilename, 88 | referencePath, 89 | outFilename, 90 | fs, 91 | ) 92 | 93 | if err != nil { 94 | return err 95 | } 96 | 97 | err = rsync.Patch() 98 | 99 | if err != nil { 100 | return err 101 | } 102 | 103 | return rsync.Close() 104 | }) 105 | } 106 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/gosync/patch.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "runtime" 7 | 8 | gosync_main "github.com/Redundancy/go-sync" 9 | "github.com/codegangsta/cli" 10 | ) 11 | 12 | const usage = "gosync patch []" 13 | 14 | func init() { 15 | app.Commands = append( 16 | app.Commands, 17 | cli.Command{ 18 | Name: "patch", 19 | ShortName: "p", 20 | Usage: usage, 21 | Description: `Recreate the reference source file, using an index and a local file that is believed to be similar. 22 | The index should be produced by "gosync build". 23 | 24 | is a .gosync file and may be a local, unc network path or http/https url 25 | is corresponding target and may be a local, unc network path or http/https url 26 | is optional. 
If not specified, the local file will be overwritten when done.`, 27 | Action: Patch, 28 | Flags: []cli.Flag{ 29 | cli.IntFlag{ 30 | Name: "p", 31 | Value: runtime.NumCPU(), 32 | Usage: "The number of streams to use concurrently", 33 | }, 34 | }, 35 | }, 36 | ) 37 | } 38 | 39 | // Patch a file 40 | func Patch(c *cli.Context) { 41 | errorWrapper(c, func(c *cli.Context) error { 42 | 43 | fmt.Fprintln(os.Stderr, "Starting patching process") 44 | 45 | if l := len(c.Args()); l < 3 || l > 4 { 46 | return fmt.Errorf( 47 | "Usage is \"%v\" (invalid number of arguments)", 48 | usage, 49 | ) 50 | } 51 | 52 | localFilename := c.Args()[0] 53 | summaryFile := c.Args()[1] 54 | referencePath := c.Args()[2] 55 | 56 | outFilename := localFilename 57 | if len(c.Args()) == 4 { 58 | outFilename = c.Args()[3] 59 | } 60 | 61 | indexReader, e := os.Open(summaryFile) 62 | if e != nil { 63 | return e 64 | } 65 | defer indexReader.Close() 66 | 67 | _, _, _, filesize, blocksize, e := readHeadersAndCheck( 68 | indexReader, 69 | magicString, 70 | majorVersion, 71 | ) 72 | 73 | index, checksumLookup, blockCount, err := readIndex( 74 | indexReader, 75 | uint(blocksize), 76 | ) 77 | 78 | fs := &gosync_main.BasicSummary{ 79 | ChecksumIndex: index, 80 | ChecksumLookup: checksumLookup, 81 | BlockCount: blockCount, 82 | BlockSize: uint(blocksize), 83 | FileSize: filesize, 84 | } 85 | 86 | rsync, err := gosync_main.MakeRSync( 87 | localFilename, 88 | referencePath, 89 | outFilename, 90 | fs, 91 | ) 92 | 93 | if err != nil { 94 | return err 95 | } 96 | 97 | err = rsync.Patch() 98 | 99 | if err != nil { 100 | return err 101 | } 102 | 103 | return rsync.Close() 104 | }) 105 | } 106 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/rollsum/rollsum_32.go: -------------------------------------------------------------------------------- 1 | /* 2 | rollsum provides an implementation of a rolling checksum - a checksum that's efficient to advance a byte 3 | or more at a time. It is inspired by the rollsum in rsync, but differs in that the internal values used 4 | are 32bit integers - to make a conformant implementation, a find a replace on "32" should be almost sufficient 5 | (although it would be highly recommended to test against known values from the original implementation). 6 | 7 | Rollsum32 supports the hash.Hash implementation, but is not used much in go-sync, mostly in order to 8 | share and access the underlying circular buffer storage, and use the implementation as efficiently as possible. 
9 | */ 10 | package rollsum 11 | 12 | import ( 13 | "github.com/Redundancy/go-sync/circularbuffer" 14 | ) 15 | 16 | func NewRollsum32(blocksize uint) *Rollsum32 { 17 | return &Rollsum32{ 18 | Rollsum32Base: Rollsum32Base{ 19 | blockSize: blocksize, 20 | }, 21 | buffer: circularbuffer.MakeC2Buffer(int(blocksize)), 22 | } 23 | } 24 | 25 | // Rollsum32 is a rolling checksum implemenation 26 | // inspired by rsync, but with 32bit internal values 27 | // Create one using NewRollsum32 28 | type Rollsum32 struct { 29 | Rollsum32Base 30 | buffer *circularbuffer.C2 31 | } 32 | 33 | // cannot be called concurrently 34 | func (r *Rollsum32) Write(p []byte) (n int, err error) { 35 | ulen_p := uint(len(p)) 36 | 37 | if ulen_p >= r.blockSize { 38 | // if it's really long, we can just ignore a load of it 39 | remaining := p[ulen_p-r.blockSize:] 40 | r.buffer.Write(remaining) 41 | r.Rollsum32Base.SetBlock(remaining) 42 | } else { 43 | b_len := r.buffer.Len() 44 | r.buffer.Write(p) 45 | evicted := r.buffer.Evicted() 46 | r.Rollsum32Base.AddAndRemoveBytes(p, evicted, b_len) 47 | } 48 | 49 | return len(p), nil 50 | } 51 | 52 | // The most efficient byte length to call Write with 53 | func (r *Rollsum32) BlockSize() int { 54 | return int(r.blockSize) 55 | } 56 | 57 | // the number of bytes 58 | func (r *Rollsum32) Size() int { 59 | return 4 60 | } 61 | 62 | func (r *Rollsum32) Reset() { 63 | r.Rollsum32Base.Reset() 64 | r.buffer.Reset() 65 | } 66 | 67 | // Sum appends the current hash to b and returns the resulting slice. 68 | // It does not change the underlying hash state. 69 | // Note that this is to allow Sum() to reuse a preallocated buffer 70 | func (r *Rollsum32) Sum(b []byte) []byte { 71 | if b != nil && cap(b)-len(b) >= 4 { 72 | p := len(b) 73 | b = b[:len(b)+4] 74 | r.Rollsum32Base.GetSum(b[p:]) 75 | return b 76 | } else { 77 | result := []byte{0, 0, 0, 0} 78 | r.Rollsum32Base.GetSum(result) 79 | return append(b, result...) 80 | } 81 | } 82 | 83 | func (r *Rollsum32) GetLastBlock() []byte { 84 | return r.buffer.GetBlock() 85 | } 86 | -------------------------------------------------------------------------------- /vendor/src/github.com/petar/GoLLRB/llrb/iterator.go: -------------------------------------------------------------------------------- 1 | package llrb 2 | 3 | type ItemIterator func(i Item) bool 4 | 5 | //func (t *Tree) Ascend(iterator ItemIterator) { 6 | // t.AscendGreaterOrEqual(Inf(-1), iterator) 7 | //} 8 | 9 | func (t *LLRB) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { 10 | t.ascendRange(t.root, greaterOrEqual, lessThan, iterator) 11 | } 12 | 13 | func (t *LLRB) ascendRange(h *Node, inf, sup Item, iterator ItemIterator) bool { 14 | if h == nil { 15 | return true 16 | } 17 | if !less(h.Item, sup) { 18 | return t.ascendRange(h.Left, inf, sup, iterator) 19 | } 20 | if less(h.Item, inf) { 21 | return t.ascendRange(h.Right, inf, sup, iterator) 22 | } 23 | 24 | if !t.ascendRange(h.Left, inf, sup, iterator) { 25 | return false 26 | } 27 | if !iterator(h.Item) { 28 | return false 29 | } 30 | return t.ascendRange(h.Right, inf, sup, iterator) 31 | } 32 | 33 | // AscendGreaterOrEqual will call iterator once for each element greater or equal to 34 | // pivot in ascending order. It will stop whenever the iterator returns false. 
35 | func (t *LLRB) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { 36 | t.ascendGreaterOrEqual(t.root, pivot, iterator) 37 | } 38 | 39 | func (t *LLRB) ascendGreaterOrEqual(h *Node, pivot Item, iterator ItemIterator) bool { 40 | if h == nil { 41 | return true 42 | } 43 | if !less(h.Item, pivot) { 44 | if !t.ascendGreaterOrEqual(h.Left, pivot, iterator) { 45 | return false 46 | } 47 | if !iterator(h.Item) { 48 | return false 49 | } 50 | } 51 | return t.ascendGreaterOrEqual(h.Right, pivot, iterator) 52 | } 53 | 54 | func (t *LLRB) AscendLessThan(pivot Item, iterator ItemIterator) { 55 | t.ascendLessThan(t.root, pivot, iterator) 56 | } 57 | 58 | func (t *LLRB) ascendLessThan(h *Node, pivot Item, iterator ItemIterator) bool { 59 | if h == nil { 60 | return true 61 | } 62 | if !t.ascendLessThan(h.Left, pivot, iterator) { 63 | return false 64 | } 65 | if !iterator(h.Item) { 66 | return false 67 | } 68 | if less(h.Item, pivot) { 69 | return t.ascendLessThan(h.Left, pivot, iterator) 70 | } 71 | return true 72 | } 73 | 74 | // DescendLessOrEqual will call iterator once for each element less than the 75 | // pivot in descending order. It will stop whenever the iterator returns false. 76 | func (t *LLRB) DescendLessOrEqual(pivot Item, iterator ItemIterator) { 77 | t.descendLessOrEqual(t.root, pivot, iterator) 78 | } 79 | 80 | func (t *LLRB) descendLessOrEqual(h *Node, pivot Item, iterator ItemIterator) bool { 81 | if h == nil { 82 | return true 83 | } 84 | if less(h.Item, pivot) || !less(pivot, h.Item) { 85 | if !t.descendLessOrEqual(h.Right, pivot, iterator) { 86 | return false 87 | } 88 | if !iterator(h.Item) { 89 | return false 90 | } 91 | } 92 | return t.descendLessOrEqual(h.Left, pivot, iterator) 93 | } 94 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/blocksources/helpers.go: -------------------------------------------------------------------------------- 1 | package blocksources 2 | 3 | import ( 4 | "fmt" 5 | "github.com/Redundancy/go-sync/patcher" 6 | ) 7 | 8 | // errorWatcher is a small helper object 9 | // sendIfSet will only return a channel if there is an error set 10 | // so w.sendIfSet() <- w.Err() is always safe in a select statement 11 | // even if there is no error set 12 | type errorWatcher struct { 13 | errorChannel chan error 14 | lastError error 15 | } 16 | 17 | func (w *errorWatcher) setError(e error) { 18 | if w.lastError != nil { 19 | panic("cannot set a new error when one is already set!") 20 | } 21 | w.lastError = e 22 | } 23 | 24 | func (w *errorWatcher) clear() { 25 | w.lastError = nil 26 | } 27 | 28 | func (w *errorWatcher) Err() error { 29 | return w.lastError 30 | } 31 | 32 | func (w *errorWatcher) sendIfSet() chan<- error { 33 | if w.lastError != nil { 34 | return w.errorChannel 35 | } else { 36 | return nil 37 | } 38 | } 39 | 40 | type pendingResponseHelper struct { 41 | responseChannel chan patcher.BlockReponse 42 | pendingResponse *patcher.BlockReponse 43 | } 44 | 45 | func (w *pendingResponseHelper) setResponse(r *patcher.BlockReponse) { 46 | if w.pendingResponse != nil { 47 | p := fmt.Sprintf("Setting a response when one is already set! 
Had startblock %v, got %v", r.StartBlock, w.pendingResponse.StartBlock) 48 | panic(p) 49 | } 50 | w.pendingResponse = r 51 | } 52 | 53 | func (w *pendingResponseHelper) clear() { 54 | w.pendingResponse = nil 55 | } 56 | 57 | func (w *pendingResponseHelper) Response() patcher.BlockReponse { 58 | if w.pendingResponse == nil { 59 | return patcher.BlockReponse{} 60 | } 61 | return *w.pendingResponse 62 | } 63 | 64 | func (w *pendingResponseHelper) sendIfPending() chan<- patcher.BlockReponse { 65 | if w.pendingResponse != nil { 66 | return w.responseChannel 67 | } else { 68 | return nil 69 | } 70 | 71 | } 72 | 73 | type UintSlice []uint 74 | 75 | func (r UintSlice) Len() int { 76 | return len(r) 77 | } 78 | 79 | func (r UintSlice) Swap(i, j int) { 80 | r[i], r[j] = r[j], r[i] 81 | } 82 | 83 | func (r UintSlice) Less(i, j int) bool { 84 | return r[i] < r[j] 85 | } 86 | 87 | type asyncResult struct { 88 | startBlockID uint 89 | endBlockID uint 90 | data []byte 91 | err error 92 | } 93 | 94 | type QueuedRequest struct { 95 | StartBlockID uint 96 | EndBlockID uint 97 | } 98 | 99 | type QueuedRequestList []QueuedRequest 100 | 101 | func (r QueuedRequestList) Len() int { 102 | return len(r) 103 | } 104 | 105 | func (r QueuedRequestList) Swap(i, j int) { 106 | r[i], r[j] = r[j], r[i] 107 | } 108 | 109 | func (r QueuedRequestList) Less(i, j int) bool { 110 | return r[i].StartBlockID < r[j].StartBlockID 111 | } 112 | 113 | func MakeNullFixedSizeResolver(blockSize uint64) BlockSourceOffsetResolver { 114 | return &FixedSizeBlockResolver{ 115 | BlockSize: blockSize, 116 | } 117 | } 118 | 119 | func MakeFileSizedBlockResolver(blockSize uint64, filesize int64) BlockSourceOffsetResolver { 120 | return &FixedSizeBlockResolver{ 121 | BlockSize: blockSize, 122 | FileSize: filesize, 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/command_test.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "errors" 5 | "flag" 6 | "fmt" 7 | "io/ioutil" 8 | "strings" 9 | "testing" 10 | ) 11 | 12 | func TestCommandFlagParsing(t *testing.T) { 13 | cases := []struct { 14 | testArgs []string 15 | skipFlagParsing bool 16 | expectedErr error 17 | }{ 18 | {[]string{"blah", "blah", "-break"}, false, errors.New("flag provided but not defined: -break")}, // Test normal "not ignoring flags" flow 19 | {[]string{"blah", "blah"}, true, nil}, // Test SkipFlagParsing without any args that look like flags 20 | {[]string{"blah", "-break"}, true, nil}, // Test SkipFlagParsing with random flag arg 21 | {[]string{"blah", "-help"}, true, nil}, // Test SkipFlagParsing with "special" help flag arg 22 | } 23 | 24 | for _, c := range cases { 25 | app := NewApp() 26 | app.Writer = ioutil.Discard 27 | set := flag.NewFlagSet("test", 0) 28 | set.Parse(c.testArgs) 29 | 30 | context := NewContext(app, set, nil) 31 | 32 | command := Command{ 33 | Name: "test-cmd", 34 | Aliases: []string{"tc"}, 35 | Usage: "this is for testing", 36 | Description: "testing", 37 | Action: func(_ *Context) {}, 38 | } 39 | 40 | command.SkipFlagParsing = c.skipFlagParsing 41 | 42 | err := command.Run(context) 43 | 44 | expect(t, err, c.expectedErr) 45 | expect(t, []string(context.Args()), c.testArgs) 46 | } 47 | } 48 | 49 | func TestCommand_Run_DoesNotOverwriteErrorFromBefore(t *testing.T) { 50 | app := NewApp() 51 | app.Commands = []Command{ 52 | Command{ 53 | Name: "bar", 54 | Before: func(c *Context) error { return 
fmt.Errorf("before error") }, 55 | After: func(c *Context) error { return fmt.Errorf("after error") }, 56 | }, 57 | } 58 | 59 | err := app.Run([]string{"foo", "bar"}) 60 | if err == nil { 61 | t.Fatalf("expected to receive error from Run, got none") 62 | } 63 | 64 | if !strings.Contains(err.Error(), "before error") { 65 | t.Errorf("expected text of error from Before method, but got none in \"%v\"", err) 66 | } 67 | if !strings.Contains(err.Error(), "after error") { 68 | t.Errorf("expected text of error from After method, but got none in \"%v\"", err) 69 | } 70 | } 71 | 72 | func TestCommand_OnUsageError_WithWrongFlagValue(t *testing.T) { 73 | app := NewApp() 74 | app.Commands = []Command{ 75 | Command{ 76 | Name: "bar", 77 | Flags: []Flag{ 78 | IntFlag{Name: "flag"}, 79 | }, 80 | OnUsageError: func(c *Context, err error) error { 81 | if !strings.HasPrefix(err.Error(), "invalid value \"wrong\"") { 82 | t.Errorf("Expect an invalid value error, but got \"%v\"", err) 83 | } 84 | return errors.New("intercepted: " + err.Error()) 85 | }, 86 | }, 87 | } 88 | 89 | err := app.Run([]string{"foo", "bar", "--flag=wrong"}) 90 | if err == nil { 91 | t.Fatalf("expected to receive error from Run, got none") 92 | } 93 | 94 | if !strings.HasPrefix(err.Error(), "intercepted: invalid value") { 95 | t.Errorf("Expect an intercepted error, but got \"%v\"", err) 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/blocksources/httpblocksource.go: -------------------------------------------------------------------------------- 1 | package blocksources 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "fmt" 7 | "net/http" 8 | "strings" 9 | ) 10 | 11 | const MB = 1024 * 1024 12 | 13 | var RangedRequestNotSupportedError = errors.New("Ranged request not supported (Server did not respond with 206 Status)") 14 | var ResponseFromServerWasGZiped = errors.New("HTTP response was gzip encoded. 
Ranges may not match those requested.") 15 | 16 | var ClientNoCompression = &http.Client{ 17 | Transport: &http.Transport{}, 18 | } 19 | 20 | func NewHttpBlockSource( 21 | url string, 22 | concurrentRequests int, 23 | resolver BlockSourceOffsetResolver, 24 | verifier BlockVerifier, 25 | ) *BlockSourceBase { 26 | return NewBlockSourceBase( 27 | &HttpRequester{ 28 | url: url, 29 | client: http.DefaultClient, 30 | }, 31 | resolver, 32 | verifier, 33 | concurrentRequests, 34 | 4*MB, 35 | ) 36 | } 37 | 38 | type URLNotFoundError string 39 | 40 | func (url URLNotFoundError) Error() string { 41 | return "404 Error on URL: " + string(url) 42 | } 43 | 44 | // This class provides the implementation of BlockSourceRequester for BlockSourceBase 45 | // this simplifies creating new BlockSources that satisfy the requirements down to 46 | // writing a request function 47 | type HttpRequester struct { 48 | client *http.Client 49 | url string 50 | } 51 | 52 | func (r *HttpRequester) DoRequest(startOffset int64, endOffset int64) (data []byte, err error) { 53 | rangedRequest, err := http.NewRequest("GET", r.url, nil) 54 | 55 | if err != nil { 56 | return nil, fmt.Errorf("Error creating request for \"%v\": %v", r.url, err) 57 | } 58 | 59 | rangeSpecifier := fmt.Sprintf("bytes=%v-%v", startOffset, endOffset-1) 60 | rangedRequest.ProtoAtLeast(1, 1) 61 | rangedRequest.Header.Add("Range", rangeSpecifier) 62 | rangedRequest.Header.Add("Accept-Encoding", "identity") 63 | rangedResponse, err := r.client.Do(rangedRequest) 64 | 65 | if err != nil { 66 | return nil, fmt.Errorf("Error executing request for \"%v\": %v", r.url, err) 67 | } 68 | 69 | defer rangedResponse.Body.Close() 70 | 71 | if rangedResponse.StatusCode == 404 { 72 | return nil, URLNotFoundError(r.url) 73 | } else if rangedResponse.StatusCode != 206 { 74 | return nil, RangedRequestNotSupportedError 75 | } else if strings.Contains( 76 | rangedResponse.Header.Get("Content-Encoding"), 77 | "gzip", 78 | ) { 79 | return nil, ResponseFromServerWasGZiped 80 | } else { 81 | buf := bytes.NewBuffer(make([]byte, 0, endOffset-startOffset)) 82 | _, err = buf.ReadFrom(rangedResponse.Body) 83 | 84 | if err != nil { 85 | err = fmt.Errorf( 86 | "Failed to read response body for %v (%v-%v): %v", 87 | r.url, 88 | startOffset, endOffset-1, 89 | err, 90 | ) 91 | } 92 | 93 | data = buf.Bytes() 94 | 95 | if int64(len(data)) != endOffset-startOffset { 96 | err = fmt.Errorf( 97 | "Unexpected response length %v (%v): %v", 98 | r.url, 99 | endOffset-startOffset+1, 100 | len(data), 101 | ) 102 | } 103 | 104 | return 105 | } 106 | } 107 | 108 | func (r *HttpRequester) IsFatal(err error) bool { 109 | return true 110 | } 111 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/gosync_test.go: -------------------------------------------------------------------------------- 1 | package gosync 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "bytes" 8 | 9 | "github.com/Redundancy/go-sync/blocksources" 10 | "github.com/Redundancy/go-sync/comparer" 11 | "github.com/Redundancy/go-sync/filechecksum" 12 | "github.com/Redundancy/go-sync/indexbuilder" 13 | "github.com/Redundancy/go-sync/util/readers" 14 | ) 15 | 16 | func Example() { 17 | // due to short example strings, use a very small block size 18 | // using one this small in practice would increase your file transfer! 
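The trade-off behind that comment: the index stores one weak and one strong checksum per block, so shrinking the block size grows the .gosync file and the number of block requests, while growing it means a single changed byte forces a whole block to be transferred. A rough back-of-the-envelope sketch, assuming illustrative 4-byte weak and 16-byte strong checksums (not constants taken from go-sync):

```golang
package main

import "fmt"

const (
	fileSize   = 100 * 1024 * 1024 // 100 MiB reference file
	weakSize   = 4                 // assumed bytes of weak checksum per block
	strongSize = 16                // assumed bytes of strong checksum per block
)

// approxIndexSize estimates the checksum payload of an index for a block size.
func approxIndexSize(blockSize int64) int64 {
	blocks := (fileSize + blockSize - 1) / blockSize // number of blocks, rounded up
	return blocks * (weakSize + strongSize)
}

func main() {
	fmt.Println(approxIndexSize(4))       // ~500 MiB of checksums: dwarfs the file itself
	fmt.Println(approxIndexSize(64*1024)) // ~32 KB: a far more sensible ratio
}
```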
19 | const blockSize = 4 20 | 21 | // This is the "file" as described by the authoritive version 22 | const reference = "The quick brown fox jumped over the lazy dog" 23 | 24 | // This is what we have locally. Not too far off, but not correct. 25 | const localVersion = "The qwik brown fox jumped 0v3r the lazy" 26 | 27 | generator := filechecksum.NewFileChecksumGenerator(blockSize) 28 | _, referenceFileIndex, _, err := indexbuilder.BuildIndexFromString( 29 | generator, 30 | reference, 31 | ) 32 | 33 | if err != nil { 34 | return 35 | } 36 | 37 | referenceAsBytes := []byte(reference) 38 | localVersionAsBytes := []byte(localVersion) 39 | 40 | blockCount := len(referenceAsBytes) / blockSize 41 | if len(referenceAsBytes)%blockSize != 0 { 42 | blockCount++ 43 | } 44 | 45 | inputFile := bytes.NewReader(localVersionAsBytes) 46 | patchedFile := bytes.NewBuffer(nil) 47 | 48 | // This is more complicated than usual, because we're using in-memory 49 | // "files" and sources. Normally you would use MakeRSync 50 | summary := &BasicSummary{ 51 | ChecksumIndex: referenceFileIndex, 52 | ChecksumLookup: nil, 53 | BlockCount: uint(blockCount), 54 | BlockSize: blockSize, 55 | FileSize: int64(len(referenceAsBytes)), 56 | } 57 | 58 | rsync := &RSync{ 59 | Input: inputFile, 60 | Output: patchedFile, 61 | Source: blocksources.NewReadSeekerBlockSource( 62 | bytes.NewReader(referenceAsBytes), 63 | blocksources.MakeNullFixedSizeResolver(uint64(blockSize)), 64 | ), 65 | Summary: summary, 66 | OnClose: nil, 67 | } 68 | 69 | if err := rsync.Patch(); err != nil { 70 | fmt.Printf("Error: %v", err) 71 | return 72 | } 73 | 74 | fmt.Printf("Patched result: \"%s\"\n", patchedFile.Bytes()) 75 | // Output: 76 | // Patched result: "The quick brown fox jumped over the lazy dog" 77 | } 78 | 79 | const ( 80 | BYTE = 1 81 | KB = 1024 * BYTE 82 | MB = 1024 * KB 83 | ) 84 | 85 | func BenchmarkIndexComparisons(b *testing.B) { 86 | b.ReportAllocs() 87 | 88 | const SIZE = 200 * KB 89 | b.SetBytes(SIZE) 90 | 91 | file := readers.NewSizedNonRepeatingSequence(6, SIZE) 92 | generator := filechecksum.NewFileChecksumGenerator(8 * KB) 93 | _, index, _, err := indexbuilder.BuildChecksumIndex(generator, file) 94 | 95 | if err != nil { 96 | b.Fatal(err) 97 | } 98 | 99 | b.StartTimer() 100 | for i := 0; i < b.N; i++ { 101 | // must reinitialize the file for each comparison 102 | otherFile := readers.NewSizedNonRepeatingSequence(745656, SIZE) 103 | compare := &comparer.Comparer{} 104 | m := compare.StartFindMatchingBlocks(otherFile, 0, generator, index) 105 | 106 | for _, ok := <-m; ok; { 107 | } 108 | } 109 | 110 | b.StopTimer() 111 | } 112 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/index/index_bench_test.go: -------------------------------------------------------------------------------- 1 | package index 2 | 3 | import ( 4 | "github.com/Redundancy/go-sync/chunks" 5 | "math/rand" 6 | "sort" 7 | "testing" 8 | ) 9 | 10 | var T = []byte{1, 2, 3, 4} 11 | 12 | func BenchmarkIndex1024(b *testing.B) { 13 | i := ChecksumIndex{} 14 | i.weakChecksumLookup = make([]map[uint32]StrongChecksumList, 256) 15 | 16 | for x := 0; x < 1024; x++ { 17 | w := rand.Uint32() 18 | 19 | if i.weakChecksumLookup[w&255] == nil { 20 | i.weakChecksumLookup[w&255] = make(map[uint32]StrongChecksumList) 21 | } 22 | 23 | i.weakChecksumLookup[w&255][w] = append( 24 | i.weakChecksumLookup[w&255][w], 25 | chunks.ChunkChecksum{}, 26 | ) 27 | } 28 | 29 | b.SetBytes(1) 30 | b.StartTimer() 31 | for x := 0; x 
< b.N; x++ { 32 | i.FindWeakChecksum2(T) 33 | } 34 | b.StopTimer() 35 | 36 | } 37 | 38 | func BenchmarkIndex8192(b *testing.B) { 39 | i := ChecksumIndex{} 40 | i.weakChecksumLookup = make([]map[uint32]StrongChecksumList, 256) 41 | 42 | for x := 0; x < 8192; x++ { 43 | w := rand.Uint32() 44 | 45 | if i.weakChecksumLookup[w&255] == nil { 46 | i.weakChecksumLookup[w&255] = make(map[uint32]StrongChecksumList) 47 | } 48 | 49 | i.weakChecksumLookup[w&255][w] = append( 50 | i.weakChecksumLookup[w&255][w], 51 | chunks.ChunkChecksum{}, 52 | ) 53 | } 54 | 55 | b.SetBytes(1) 56 | b.StartTimer() 57 | for x := 0; x < b.N; x++ { 58 | i.FindWeakChecksum2(T) 59 | } 60 | b.StopTimer() 61 | } 62 | 63 | // Check how fast a sorted list of 8192 items would be 64 | func BenchmarkIndexAsListBinarySearch8192(b *testing.B) { 65 | b.SkipNow() 66 | 67 | s := make([]int, 8192) 68 | for x := 0; x < 8192; x++ { 69 | s[x] = rand.Int() 70 | } 71 | 72 | sort.Ints(s) 73 | 74 | b.StartTimer() 75 | for x := 0; x < b.N; x++ { 76 | sort.SearchInts(s, rand.Int()) 77 | } 78 | b.StopTimer() 79 | } 80 | 81 | // Check how fast a sorted list of 8192 items would be 82 | // Checking for cache coherency gains 83 | func BenchmarkIndexAsListLinearSearch8192(b *testing.B) { 84 | s := make([]int, 8192) 85 | for x := 0; x < 8192; x++ { 86 | s[x] = rand.Int() 87 | } 88 | 89 | sort.Ints(s) 90 | 91 | l := len(s) 92 | b.StartTimer() 93 | for x := 0; x < b.N; x++ { 94 | v := rand.Int() 95 | for i := 0; i < l; i++ { 96 | if v < s[i] { 97 | break 98 | } 99 | } 100 | } 101 | b.StopTimer() 102 | } 103 | 104 | func Benchmark_256SplitBinarySearch(b *testing.B) { 105 | a := make([][]int, 256) 106 | for x := 0; x < 8192; x++ { 107 | i := rand.Int() 108 | a[i&255] = append( 109 | a[i&255], 110 | i, 111 | ) 112 | } 113 | 114 | for x := 0; x < 256; x++ { 115 | sort.Ints(a[x]) 116 | } 117 | 118 | b.StartTimer() 119 | for x := 0; x < b.N; x++ { 120 | v := rand.Int() 121 | sort.SearchInts(a[v&255], v) 122 | } 123 | b.StopTimer() 124 | } 125 | 126 | /* 127 | This is currently the best performing contender for the index data structure for 128 | weak checksum lookups. 
129 | */ 130 | func Benchmark_256Split_Map(b *testing.B) { 131 | a := make([]map[int]interface{}, 256) 132 | for x := 0; x < 8192; x++ { 133 | i := rand.Int() 134 | if a[i&255] == nil { 135 | a[i&255] = make(map[int]interface{}) 136 | } 137 | a[i&255][i] = nil 138 | } 139 | 140 | b.StartTimer() 141 | for x := 0; x < b.N; x++ { 142 | v := rand.Int() 143 | if _, ok := a[v&255][v]; ok { 144 | 145 | } 146 | } 147 | b.StopTimer() 148 | } 149 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/blocksources/fixed_size_block_resolver_test.go: -------------------------------------------------------------------------------- 1 | package blocksources 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestNullResolverGivesBackTheSameBlocks(t *testing.T) { 8 | n := MakeNullFixedSizeResolver(5) 9 | result := n.SplitBlockRangeToDesiredSize(0, 10000) 10 | 11 | if len(result) != 1 { 12 | t.Fatalf("Unexpected result length (expected 1): %v", result) 13 | } 14 | 15 | r := result[0] 16 | 17 | if r.StartBlockID != 0 { 18 | t.Errorf("Unexpected start block ID: %v", r) 19 | } 20 | 21 | if r.EndBlockID != 10000 { 22 | t.Errorf("Unexpected end block ID: %v", r) 23 | } 24 | } 25 | 26 | func TestFixedSizeResolverSplitsBlocksOfDesiredSize(t *testing.T) { 27 | res := &FixedSizeBlockResolver{ 28 | BlockSize: 5, 29 | MaxDesiredRequestSize: 5, 30 | FileSize: 20000, 31 | } 32 | 33 | // Should split two blocks, each of the desired request size 34 | // into two requests 35 | result := res.SplitBlockRangeToDesiredSize(0, 1) 36 | 37 | if len(result) != 2 { 38 | t.Fatalf("Unexpected result length (expected 2): %v", result) 39 | } 40 | 41 | if result[0].StartBlockID != 0 { 42 | t.Errorf("Unexpected start blockID: %v", result[0]) 43 | } 44 | if result[0].EndBlockID != 0 { 45 | t.Errorf("Unexpected end blockID: %v", result[0]) 46 | } 47 | 48 | if result[1].StartBlockID != 1 { 49 | t.Errorf("Unexpected start blockID: %v", result[1]) 50 | } 51 | if result[1].EndBlockID != 1 { 52 | t.Errorf("Unexpected end blockID: %v", result[1]) 53 | } 54 | } 55 | 56 | func TestThatMultipleBlocksAreSplitByRoundingDown(t *testing.T) { 57 | res := &FixedSizeBlockResolver{ 58 | BlockSize: 5, 59 | MaxDesiredRequestSize: 12, 60 | FileSize: 20000, 61 | } 62 | 63 | // 0,1 (10) - 2-3 (10) 64 | result := res.SplitBlockRangeToDesiredSize(0, 3) 65 | 66 | if len(result) != 2 { 67 | t.Fatalf("Unexpected result length (expected 2): %v", result) 68 | } 69 | 70 | if result[0].StartBlockID != 0 { 71 | t.Errorf("Unexpected start blockID: %v", result[0]) 72 | } 73 | if result[0].EndBlockID != 1 { 74 | t.Errorf("Unexpected end blockID: %v", result[0]) 75 | } 76 | 77 | if result[1].StartBlockID != 2 { 78 | t.Errorf("Unexpected start blockID: %v", result[1]) 79 | } 80 | if result[1].EndBlockID != 3 { 81 | t.Errorf("Unexpected end blockID: %v", result[1]) 82 | } 83 | } 84 | 85 | func TestThatADesiredSizeSmallerThanABlockResultsInSingleBlocks(t *testing.T) { 86 | res := &FixedSizeBlockResolver{ 87 | BlockSize: 5, 88 | MaxDesiredRequestSize: 4, 89 | FileSize: 20000, 90 | } 91 | 92 | // Should split two blocks 93 | result := res.SplitBlockRangeToDesiredSize(0, 1) 94 | 95 | if len(result) != 2 { 96 | t.Fatalf("Unexpected result length (expected 2): %v", result) 97 | } 98 | 99 | if result[0].StartBlockID != 0 { 100 | t.Errorf("Unexpected start blockID: %v", result[0]) 101 | } 102 | if result[0].EndBlockID != 0 { 103 | t.Errorf("Unexpected end blockID: %v", result[0]) 104 | } 105 | 106 | if 
result[1].StartBlockID != 1 { 107 | t.Errorf("Unexpected start blockID: %v", result[1]) 108 | } 109 | if result[1].EndBlockID != 1 { 110 | t.Errorf("Unexpected end blockID: %v", result[1]) 111 | } 112 | } 113 | 114 | func TestThatFileSizeTruncatesBlockEnds(t *testing.T) { 115 | res := &FixedSizeBlockResolver{ 116 | BlockSize: 5, 117 | MaxDesiredRequestSize: 100, 118 | FileSize: 13, 119 | } 120 | 121 | // Should split two blocks 122 | result := res.GetBlockEndOffset(3) 123 | 124 | if result != 13 { 125 | t.Errorf("Unexpected BlockEnd Offset:", result) 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /src/gosync/diff.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "runtime" 7 | "time" 8 | 9 | "github.com/codegangsta/cli" 10 | ) 11 | 12 | func init() { 13 | app.Commands = append( 14 | app.Commands, 15 | cli.Command{ 16 | Name: "diff", 17 | ShortName: "d", 18 | Usage: "gosync diff ", 19 | Description: `Compare a file with a reference index, and print statistics on the comparison and performance.`, 20 | Action: Diff, 21 | Flags: []cli.Flag{ 22 | cli.IntFlag{ 23 | Name: "p", 24 | Value: runtime.NumCPU(), 25 | Usage: "The number of streams to use concurrently", 26 | }, 27 | }, 28 | }, 29 | ) 30 | } 31 | 32 | func Diff(c *cli.Context) { 33 | localFilename := c.Args()[0] 34 | referenceFilename := c.Args()[1] 35 | startTime := time.Now() 36 | 37 | localFile := openFileAndHandleError(localFilename) 38 | 39 | if localFile == nil { 40 | os.Exit(1) 41 | } 42 | 43 | defer localFile.Close() 44 | 45 | var blocksize uint32 46 | referenceFile := openFileAndHandleError(referenceFilename) 47 | 48 | if referenceFile == nil { 49 | os.Exit(1) 50 | } 51 | 52 | defer referenceFile.Close() 53 | 54 | _, _, _, _, blocksize, e := readHeadersAndCheck( 55 | referenceFile, 56 | magicString, 57 | majorVersion, 58 | ) 59 | 60 | if e != nil { 61 | fmt.Printf("Error loading index: %v", e) 62 | os.Exit(1) 63 | } 64 | 65 | fmt.Println("Blocksize: ", blocksize) 66 | 67 | index, _, _, err := readIndex(referenceFile, uint(blocksize)) 68 | referenceFile.Close() 69 | 70 | if err != nil { 71 | return 72 | } 73 | 74 | fmt.Println("Weak hash count:", index.WeakCount()) 75 | 76 | fi, err := localFile.Stat() 77 | 78 | if err != nil { 79 | fmt.Println("Could not get info on file:", err) 80 | os.Exit(1) 81 | } 82 | 83 | num_matchers := int64(c.Int("p")) 84 | 85 | localFile_size := fi.Size() 86 | 87 | // Don't split up small files 88 | if localFile_size < 1024*1024 { 89 | num_matchers = 1 90 | } 91 | 92 | merger, compare := multithreadedMatching( 93 | localFile, 94 | index, 95 | localFile_size, 96 | num_matchers, 97 | uint(blocksize), 98 | ) 99 | 100 | mergedBlocks := merger.GetMergedBlocks() 101 | 102 | fmt.Println("\nMatched:") 103 | totalMatchingSize := uint64(0) 104 | matchedBlockCountAfterMerging := uint(0) 105 | 106 | for _, b := range mergedBlocks { 107 | totalMatchingSize += uint64(b.EndBlock-b.StartBlock+1) * uint64(blocksize) 108 | matchedBlockCountAfterMerging += b.EndBlock - b.StartBlock + 1 109 | } 110 | 111 | fmt.Println("Comparisons:", compare.Comparisons) 112 | fmt.Println("Weak hash hits:", compare.WeakHashHits) 113 | 114 | if compare.Comparisons > 0 { 115 | fmt.Printf( 116 | "Weak hit rate: %.2f%%\n", 117 | 100.0*float64(compare.WeakHashHits)/float64(compare.Comparisons), 118 | ) 119 | } 120 | 121 | fmt.Println("Strong hash hits:", compare.StrongHashHits) 122 | if 
compare.WeakHashHits > 0 { 123 | fmt.Printf( 124 | "Weak hash error rate: %.2f%%\n", 125 | 100.0*float64(compare.WeakHashHits-compare.StrongHashHits)/float64(compare.WeakHashHits), 126 | ) 127 | } 128 | 129 | fmt.Println("Total matched bytes:", totalMatchingSize) 130 | fmt.Println("Total matched blocks:", matchedBlockCountAfterMerging) 131 | 132 | // TODO: GetMissingBlocks uses the highest index, not the count, this can be pretty confusing 133 | // Should clean up this interface to avoid that 134 | missing := mergedBlocks.GetMissingBlocks(uint(index.BlockCount) - 1) 135 | fmt.Println("Index blocks:", index.BlockCount) 136 | 137 | totalMissingSize := uint64(0) 138 | for _, b := range missing { 139 | //fmt.Printf("%#v\n", b) 140 | totalMissingSize += uint64(b.EndBlock-b.StartBlock+1) * uint64(blocksize) 141 | } 142 | 143 | fmt.Println("Approximate missing bytes:", totalMissingSize) 144 | fmt.Println("Time taken:", time.Now().Sub(startTime)) 145 | } 146 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/gosync/diff.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "runtime" 7 | "time" 8 | 9 | "github.com/codegangsta/cli" 10 | ) 11 | 12 | func init() { 13 | app.Commands = append( 14 | app.Commands, 15 | cli.Command{ 16 | Name: "diff", 17 | ShortName: "d", 18 | Usage: "gosync diff ", 19 | Description: `Compare a file with a reference index, and print statistics on the comparison and performance.`, 20 | Action: Diff, 21 | Flags: []cli.Flag{ 22 | cli.IntFlag{ 23 | Name: "p", 24 | Value: runtime.NumCPU(), 25 | Usage: "The number of streams to use concurrently", 26 | }, 27 | }, 28 | }, 29 | ) 30 | } 31 | 32 | func Diff(c *cli.Context) { 33 | localFilename := c.Args()[0] 34 | referenceFilename := c.Args()[1] 35 | startTime := time.Now() 36 | 37 | localFile := openFileAndHandleError(localFilename) 38 | 39 | if localFile == nil { 40 | os.Exit(1) 41 | } 42 | 43 | defer localFile.Close() 44 | 45 | var blocksize uint32 46 | referenceFile := openFileAndHandleError(referenceFilename) 47 | 48 | if referenceFile == nil { 49 | os.Exit(1) 50 | } 51 | 52 | defer referenceFile.Close() 53 | 54 | _, _, _, _, blocksize, e := readHeadersAndCheck( 55 | referenceFile, 56 | magicString, 57 | majorVersion, 58 | ) 59 | 60 | if e != nil { 61 | fmt.Printf("Error loading index: %v", e) 62 | os.Exit(1) 63 | } 64 | 65 | fmt.Println("Blocksize: ", blocksize) 66 | 67 | index, _, _, err := readIndex(referenceFile, uint(blocksize)) 68 | referenceFile.Close() 69 | 70 | if err != nil { 71 | return 72 | } 73 | 74 | fmt.Println("Weak hash count:", index.WeakCount()) 75 | 76 | fi, err := localFile.Stat() 77 | 78 | if err != nil { 79 | fmt.Println("Could not get info on file:", err) 80 | os.Exit(1) 81 | } 82 | 83 | num_matchers := int64(c.Int("p")) 84 | 85 | localFile_size := fi.Size() 86 | 87 | // Don't split up small files 88 | if localFile_size < 1024*1024 { 89 | num_matchers = 1 90 | } 91 | 92 | merger, compare := multithreadedMatching( 93 | localFile, 94 | index, 95 | localFile_size, 96 | num_matchers, 97 | uint(blocksize), 98 | ) 99 | 100 | mergedBlocks := merger.GetMergedBlocks() 101 | 102 | fmt.Println("\nMatched:") 103 | totalMatchingSize := uint64(0) 104 | matchedBlockCountAfterMerging := uint(0) 105 | 106 | for _, b := range mergedBlocks { 107 | totalMatchingSize += uint64(b.EndBlock-b.StartBlock+1) * uint64(blocksize) 108 | matchedBlockCountAfterMerging += 
b.EndBlock - b.StartBlock + 1 109 | } 110 | 111 | fmt.Println("Comparisons:", compare.Comparisons) 112 | fmt.Println("Weak hash hits:", compare.WeakHashHits) 113 | 114 | if compare.Comparisons > 0 { 115 | fmt.Printf( 116 | "Weak hit rate: %.2f%%\n", 117 | 100.0*float64(compare.WeakHashHits)/float64(compare.Comparisons), 118 | ) 119 | } 120 | 121 | fmt.Println("Strong hash hits:", compare.StrongHashHits) 122 | if compare.WeakHashHits > 0 { 123 | fmt.Printf( 124 | "Weak hash error rate: %.2f%%\n", 125 | 100.0*float64(compare.WeakHashHits-compare.StrongHashHits)/float64(compare.WeakHashHits), 126 | ) 127 | } 128 | 129 | fmt.Println("Total matched bytes:", totalMatchingSize) 130 | fmt.Println("Total matched blocks:", matchedBlockCountAfterMerging) 131 | 132 | // TODO: GetMissingBlocks uses the highest index, not the count, this can be pretty confusing 133 | // Should clean up this interface to avoid that 134 | missing := mergedBlocks.GetMissingBlocks(uint(index.BlockCount) - 1) 135 | fmt.Println("Index blocks:", index.BlockCount) 136 | 137 | totalMissingSize := uint64(0) 138 | for _, b := range missing { 139 | //fmt.Printf("%#v\n", b) 140 | totalMissingSize += uint64(b.EndBlock-b.StartBlock+1) * uint64(blocksize) 141 | } 142 | 143 | fmt.Println("Approximate missing bytes:", totalMissingSize) 144 | fmt.Println("Time taken:", time.Now().Sub(startTime)) 145 | } 146 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/context_test.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "flag" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestNewContext(t *testing.T) { 10 | set := flag.NewFlagSet("test", 0) 11 | set.Int("myflag", 12, "doc") 12 | globalSet := flag.NewFlagSet("test", 0) 13 | globalSet.Int("myflag", 42, "doc") 14 | globalCtx := NewContext(nil, globalSet, nil) 15 | command := Command{Name: "mycommand"} 16 | c := NewContext(nil, set, globalCtx) 17 | c.Command = command 18 | expect(t, c.Int("myflag"), 12) 19 | expect(t, c.GlobalInt("myflag"), 42) 20 | expect(t, c.Command.Name, "mycommand") 21 | } 22 | 23 | func TestContext_Int(t *testing.T) { 24 | set := flag.NewFlagSet("test", 0) 25 | set.Int("myflag", 12, "doc") 26 | c := NewContext(nil, set, nil) 27 | expect(t, c.Int("myflag"), 12) 28 | } 29 | 30 | func TestContext_Duration(t *testing.T) { 31 | set := flag.NewFlagSet("test", 0) 32 | set.Duration("myflag", time.Duration(12*time.Second), "doc") 33 | c := NewContext(nil, set, nil) 34 | expect(t, c.Duration("myflag"), time.Duration(12*time.Second)) 35 | } 36 | 37 | func TestContext_String(t *testing.T) { 38 | set := flag.NewFlagSet("test", 0) 39 | set.String("myflag", "hello world", "doc") 40 | c := NewContext(nil, set, nil) 41 | expect(t, c.String("myflag"), "hello world") 42 | } 43 | 44 | func TestContext_Bool(t *testing.T) { 45 | set := flag.NewFlagSet("test", 0) 46 | set.Bool("myflag", false, "doc") 47 | c := NewContext(nil, set, nil) 48 | expect(t, c.Bool("myflag"), false) 49 | } 50 | 51 | func TestContext_BoolT(t *testing.T) { 52 | set := flag.NewFlagSet("test", 0) 53 | set.Bool("myflag", true, "doc") 54 | c := NewContext(nil, set, nil) 55 | expect(t, c.BoolT("myflag"), true) 56 | } 57 | 58 | func TestContext_Args(t *testing.T) { 59 | set := flag.NewFlagSet("test", 0) 60 | set.Bool("myflag", false, "doc") 61 | c := NewContext(nil, set, nil) 62 | set.Parse([]string{"--myflag", "bat", "baz"}) 63 | expect(t, len(c.Args()), 2) 64 | expect(t, 
c.Bool("myflag"), true) 65 | } 66 | 67 | func TestContext_NArg(t *testing.T) { 68 | set := flag.NewFlagSet("test", 0) 69 | set.Bool("myflag", false, "doc") 70 | c := NewContext(nil, set, nil) 71 | set.Parse([]string{"--myflag", "bat", "baz"}) 72 | expect(t, c.NArg(), 2) 73 | } 74 | 75 | func TestContext_IsSet(t *testing.T) { 76 | set := flag.NewFlagSet("test", 0) 77 | set.Bool("myflag", false, "doc") 78 | set.String("otherflag", "hello world", "doc") 79 | globalSet := flag.NewFlagSet("test", 0) 80 | globalSet.Bool("myflagGlobal", true, "doc") 81 | globalCtx := NewContext(nil, globalSet, nil) 82 | c := NewContext(nil, set, globalCtx) 83 | set.Parse([]string{"--myflag", "bat", "baz"}) 84 | globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) 85 | expect(t, c.IsSet("myflag"), true) 86 | expect(t, c.IsSet("otherflag"), false) 87 | expect(t, c.IsSet("bogusflag"), false) 88 | expect(t, c.IsSet("myflagGlobal"), false) 89 | } 90 | 91 | func TestContext_GlobalIsSet(t *testing.T) { 92 | set := flag.NewFlagSet("test", 0) 93 | set.Bool("myflag", false, "doc") 94 | set.String("otherflag", "hello world", "doc") 95 | globalSet := flag.NewFlagSet("test", 0) 96 | globalSet.Bool("myflagGlobal", true, "doc") 97 | globalSet.Bool("myflagGlobalUnset", true, "doc") 98 | globalCtx := NewContext(nil, globalSet, nil) 99 | c := NewContext(nil, set, globalCtx) 100 | set.Parse([]string{"--myflag", "bat", "baz"}) 101 | globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) 102 | expect(t, c.GlobalIsSet("myflag"), false) 103 | expect(t, c.GlobalIsSet("otherflag"), false) 104 | expect(t, c.GlobalIsSet("bogusflag"), false) 105 | expect(t, c.GlobalIsSet("myflagGlobal"), true) 106 | expect(t, c.GlobalIsSet("myflagGlobalUnset"), false) 107 | expect(t, c.GlobalIsSet("bogusGlobal"), false) 108 | } 109 | 110 | func TestContext_NumFlags(t *testing.T) { 111 | set := flag.NewFlagSet("test", 0) 112 | set.Bool("myflag", false, "doc") 113 | set.String("otherflag", "hello world", "doc") 114 | globalSet := flag.NewFlagSet("test", 0) 115 | globalSet.Bool("myflagGlobal", true, "doc") 116 | globalCtx := NewContext(nil, globalSet, nil) 117 | c := NewContext(nil, set, globalCtx) 118 | set.Parse([]string{"--myflag", "--otherflag=foo"}) 119 | globalSet.Parse([]string{"--myflagGlobal"}) 120 | expect(t, c.NumFlags(), 2) 121 | } 122 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/README.md: -------------------------------------------------------------------------------- 1 | Go-Sync 2 | ------ 3 | [![Build Status](https://travis-ci.org/Redundancy/go-sync.svg?branch=master)](https://travis-ci.org/Redundancy/go-sync) 4 | [![GoDoc](https://godoc.org/github.com/Redundancy/go-sync?status.svg)](https://godoc.org/github.com/Redundancy/go-sync) 5 | 6 | gosync is a library inspired by zsync and rsync. 7 | Here are the goals: 8 | 9 | ### Fast 10 | Using the concurrency and performance features of Golang, Go-sync is designed to take advantage of multiple processors and multiple HTTP connections to make the most of modern hardware and minimize the impact of the bandwidth latency product. 11 | 12 | ### Cross Platform 13 | Works on Windows and Linux, without cygwin or fuss. 
14 | 15 | ### Easy 16 | 17 | A new high-level interface designed to reduce the work of implementing block transfer in your application: 18 | ```golang 19 | fs := &BasicSummary{...} 20 | 21 | rsync, err := MakeRSync( 22 | localFilename, 23 | referencePath, 24 | outFilename, 25 | fs, 26 | ) 27 | 28 | if err != nil { 29 | return err 30 | } 31 | 32 | err = rsync.Patch() 33 | 34 | if err != nil { 35 | return err 36 | } 37 | 38 | return rsync.Close() 39 | ``` 40 | 41 | ### Extensible 42 | All functionality is based on interfaces, allowing customization of behavior: 43 | 44 | ```golang 45 | // Here, the input version is a local string 46 | inputFile := bytes.NewReader(localVersionAsBytes) 47 | 48 | // And the output is a buffer 49 | patchedFile := bytes.NewBuffer(nil) 50 | 51 | // This information is meta-data on the file that should be loaded / provided 52 | // You can also provide your own implementation of the FileSummary interface 53 | summary := &BasicSummary{ 54 | ChecksumIndex: referenceFileIndex, 55 | // Disable verification of hashes for downloaded data (not really a good idea!) 56 | ChecksumLookup: nil, 57 | BlockCount: uint(blockCount), 58 | BlockSize: blockSize, 59 | FileSize: int64(len(referenceAsBytes)), 60 | } 61 | 62 | rsync := &RSync{ 63 | Input: inputFile, 64 | Output: patchedFile, 65 | // An in-memory block source 66 | Source: blocksources.NewReadSeekerBlockSource( 67 | bytes.NewReader(referenceAsBytes), 68 | blocksources.MakeNullFixedSizeResolver(uint64(blockSize)), 69 | ), 70 | Index: summary, 71 | Summary: summary, 72 | OnClose: nil, 73 | } 74 | ``` 75 | 76 | Reuse low level objects to build a new high level library, or implement a new lower-level object to add a new transfer protocol (for example). 77 | 78 | ### Tested 79 | GoSync has been built from the ground up with unit tests. 80 | The GoSync command-line tool has acceptance tests, although not everything is covered. 81 | 82 | ## Current State 83 | Go-Sync is still probably not ready for production. 84 | 85 | The most obvious areas that still need improvement are the acceptance tests, the error messages, 86 | compression on the blocks that are retrieved from the source and handling of file flags. 87 | 88 | ### TODO 89 | - [ ] gzip source blocks (this involves writing out a version of the file that's compressed in block-increments) 90 | - [ ] Clean up naming consistency and clarity: Block / Chunk etc 91 | - [ ] Flesh out full directory build / sync 92 | - [ ] Implement 'patch' payloads from a known start point to a desired end state 93 | - [ ] Validate full file checksum after patching 94 | - [ ] Provide bandwidth limiting / monitoring as part of http blocksource 95 | - [ ] Think about turning the filechecksum into an interface 96 | - [ ] Avoid marshalling / un-marshalling blocks during checksum generation 97 | - [ ] Sequential patcher to resume after error? 98 | 99 | ### Testing 100 | 101 | All tests are run by Travis-CI 102 | 103 | #### Unit tests 104 | 105 | go test github.com/Redundancy/go-sync/... 106 | 107 | #### Acceptance Tests 108 | See the "acceptancetests" folder. This is currently difficult to run locally and relies on several linux utilities. 109 | 110 | #### Commandline & files 111 | 112 | go build github.com/Redundancy/go-sync/gosync 113 | gosync build filenameToPatchTo 114 | gosync patch filenameToPatchFrom filenameToPatchTo.gosync filenameToPatchTo 115 | 116 | Note that normally, patching would rely on a remote http/https file source. 
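To exercise that remote path, it is enough to serve the directory containing the reference file and its .gosync index over plain HTTP. A minimal sketch (directory, host and port are placeholders) using the standard library file server, which already answers the ranged requests that the HTTP block source issues:

```golang
package main

import (
	"log"
	"net/http"
)

// Serve the reference file and its .gosync index; a client can then run, for example:
//
//   gosync patch filenameToPatchFrom http://host:8080/filenameToPatchTo.gosync http://host:8080/filenameToPatchTo
//
// The "./release" directory and the port are placeholders for this sketch.
func main() {
	log.Fatal(http.ListenAndServe(":8080", http.FileServer(http.Dir("./release"))))
}
```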
117 | 118 | #### Command line tool reference 119 | gosync --help 120 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/circularbuffer/noalloc.go: -------------------------------------------------------------------------------- 1 | package circularbuffer 2 | 3 | /* 4 | C2 is a circular buffer intended to allow you to write a block of data of up to 'blocksize', and retrieve the 5 | data evicted by that operation, without allocating any extra slice storage 6 | 7 | This requires that it keep at least blocksize*2 data around. In fact, it doubles that again in order to 8 | guarantee that both of these bits of information can always be obtained in a single contiguous block of memory. 9 | 10 | Other than the cost of the extra memory (4xblocksize), this means that it requires 2 writes for every byte stored. 11 | */ 12 | type C2 struct { 13 | // used to know how much was evicted 14 | lastWritten int 15 | 16 | // total number of written bytes 17 | // used to track if the buffer has been filled, but goes above blocksize 18 | totalWritten int 19 | 20 | // quick access to the circular buffer size 21 | blocksize int 22 | 23 | // double internal buffer storage 24 | a, b doubleSizeBuffer 25 | } 26 | 27 | type doubleSizeBuffer struct { 28 | // used to reset the head pointer 29 | baseOffset int 30 | 31 | // index of the next byte to be written 32 | head int 33 | 34 | // buffer 35 | buffer []byte 36 | } 37 | 38 | func MakeC2Buffer(blockSize int) *C2 { 39 | return &C2{ 40 | blocksize: blockSize, 41 | a: doubleSizeBuffer{ 42 | baseOffset: 0, 43 | buffer: make([]byte, blockSize*2), 44 | }, 45 | b: doubleSizeBuffer{ 46 | baseOffset: blockSize, 47 | head: blockSize, 48 | buffer: make([]byte, blockSize*2), 49 | }, 50 | } 51 | } 52 | 53 | func (c *C2) Reset() { 54 | c.a.Reset() 55 | c.b.Reset() 56 | c.lastWritten = 0 57 | c.totalWritten = 0 58 | } 59 | 60 | // Write new data 61 | func (c *C2) Write(b []byte) { 62 | c.a.Write(b) 63 | c.b.Write(b) 64 | c.lastWritten = len(b) 65 | c.totalWritten += c.lastWritten 66 | } 67 | 68 | func (c *C2) getBlockBuffer() *doubleSizeBuffer { 69 | bufferToRead := &c.a 70 | if c.b.head > c.a.head { 71 | bufferToRead = &c.b 72 | } 73 | 74 | return bufferToRead 75 | } 76 | 77 | // the total written, up to the blocksize 78 | func (c *C2) maxWritten() int { 79 | if c.totalWritten < c.blocksize { 80 | return c.totalWritten 81 | } 82 | 83 | return c.blocksize 84 | } 85 | 86 | func (c *C2) Len() int { 87 | return c.maxWritten() 88 | } 89 | 90 | func (c *C2) Empty() bool { 91 | return c.totalWritten == 0 92 | } 93 | 94 | // Shortens the content of the circular buffer 95 | // and returns the content removed 96 | func (c *C2) Truncate(byteCount int) (evicted []byte) { 97 | max := c.maxWritten() 98 | 99 | if byteCount > max { 100 | byteCount = max 101 | } 102 | 103 | bufferToRead := c.getBlockBuffer() 104 | start := bufferToRead.head - max 105 | 106 | c.totalWritten = c.maxWritten() - byteCount 107 | return bufferToRead.buffer[start : start+byteCount] 108 | } 109 | 110 | // get the current buffer contents of block 111 | func (c *C2) GetBlock() []byte { 112 | // figure out which buffer has it stored contiguously 113 | bufferToRead := c.getBlockBuffer() 114 | start := bufferToRead.head - c.maxWritten() 115 | 116 | return bufferToRead.buffer[start:bufferToRead.head] 117 | } 118 | 119 | // get the data that was evicted by the last write 120 | func (c *C2) Evicted() []byte { 121 | if c.totalWritten <= c.blocksize { 122 | 
return nil 123 | } 124 | 125 | bufferToRead := c.a 126 | if c.b.head < c.a.head { 127 | bufferToRead = c.b 128 | } 129 | 130 | bufferStart := bufferToRead.head + c.blocksize 131 | readLength := c.lastWritten 132 | 133 | // if the buffer wasn't full, we don't read the full length 134 | if c.totalWritten-c.lastWritten < c.blocksize { 135 | readLength -= c.lastWritten - c.totalWritten + c.blocksize 136 | } 137 | 138 | return bufferToRead.buffer[bufferStart-readLength : bufferStart] 139 | } 140 | 141 | func (buff *doubleSizeBuffer) Reset() { 142 | buff.head = buff.baseOffset 143 | } 144 | 145 | func (buff *doubleSizeBuffer) Write(by []byte) { 146 | remaining := by 147 | 148 | for len(remaining) > 0 { 149 | remaining_len := len(remaining) 150 | availableSpace := len(buff.buffer) - buff.head 151 | writeThisTime := remaining_len 152 | 153 | if writeThisTime > availableSpace { 154 | writeThisTime = availableSpace 155 | } 156 | 157 | copy( 158 | buff.buffer[buff.head:buff.head+writeThisTime], // to 159 | by, 160 | ) 161 | 162 | buff.head += writeThisTime 163 | 164 | if buff.head == len(buff.buffer) { 165 | buff.head = 0 166 | } 167 | 168 | remaining = remaining[writeThisTime:] 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/circularbuffer/noalloc_test.go: -------------------------------------------------------------------------------- 1 | package circularbuffer 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | ) 7 | 8 | const BLOCK_SIZE = 10 9 | 10 | var incrementBlock = make([]byte, BLOCK_SIZE) 11 | var incrementBlock2 = make([]byte, BLOCK_SIZE) 12 | 13 | func init() { 14 | for i, _ := range incrementBlock { 15 | incrementBlock[i] = byte(i) 16 | incrementBlock2[i] = byte(i + BLOCK_SIZE) 17 | } 18 | } 19 | 20 | func TestCreateC2Buffer(t *testing.T) { 21 | MakeC2Buffer(BLOCK_SIZE) 22 | } 23 | 24 | func TestWriteBlock(t *testing.T) { 25 | b := MakeC2Buffer(BLOCK_SIZE) 26 | b.Write(incrementBlock) 27 | } 28 | 29 | func TestWritingUnderCapacityGivesEmptyEvicted(t *testing.T) { 30 | b := MakeC2Buffer(2) 31 | b.Write([]byte{1, 2}) 32 | 33 | if len(b.Evicted()) != 0 { 34 | t.Fatal("Evicted should have been empty:", b.Evicted()) 35 | } 36 | } 37 | 38 | func TestWritingMultipleBytesWhenBufferIsNotFull(t *testing.T) { 39 | b := MakeC2Buffer(3) 40 | b.Write([]byte{1, 2}) 41 | b.Write([]byte{3, 4}) 42 | 43 | ev := b.Evicted() 44 | 45 | if len(ev) != 1 || ev[0] != 1 { 46 | t.Fatal("Evicted should have been [1,]:", ev) 47 | } 48 | } 49 | 50 | func TestEvictedRegession1(t *testing.T) { 51 | b := MakeC2Buffer(4) 52 | 53 | b.Write([]byte{7, 6}) 54 | b.Write([]byte{5, 1, 2}) 55 | b.Write([]byte{3, 4}) 56 | 57 | ev := b.Evicted() 58 | if len(ev) != 2 || ev[0] != 6 || ev[1] != 5 { 59 | t.Fatalf("Unexpected evicted [6,5]: %v", ev) 60 | } 61 | } 62 | 63 | func TestGetBlock(t *testing.T) { 64 | b := MakeC2Buffer(BLOCK_SIZE) 65 | b.Write(incrementBlock) 66 | 67 | block := b.GetBlock() 68 | 69 | if len(block) != BLOCK_SIZE { 70 | t.Fatal("Wrong block size returned") 71 | } 72 | 73 | for i, by := range block { 74 | if byte(i) != by { 75 | t.Errorf("byte %v does not match", i) 76 | } 77 | } 78 | } 79 | 80 | func TestWriteTwoBlocksGet(t *testing.T) { 81 | b := MakeC2Buffer(BLOCK_SIZE) 82 | b.Write(incrementBlock) 83 | b.Write(incrementBlock2) 84 | 85 | if bytes.Compare(b.GetBlock(), incrementBlock2) != 0 { 86 | t.Errorf("Get block did not return the right value: %s", b.GetBlock()) 87 | } 88 | } 89 | 90 | func 
TestWriteSingleByteGetSingleByte(t *testing.T) { 91 | b := MakeC2Buffer(BLOCK_SIZE) 92 | singleByte := []byte{0} 93 | b.Write(singleByte) 94 | 95 | if bytes.Compare(b.GetBlock(), singleByte) != 0 { 96 | t.Errorf("Get block did not return the right value: %s", b.GetBlock()) 97 | } 98 | } 99 | 100 | func TestWriteTwoBlocksGetEvicted(t *testing.T) { 101 | b := MakeC2Buffer(BLOCK_SIZE) 102 | b.Write(incrementBlock) 103 | b.Write(incrementBlock2) 104 | 105 | if bytes.Compare(b.Evicted(), incrementBlock) != 0 { 106 | t.Errorf("Evicted did not return the right value: %s", b.Evicted()) 107 | } 108 | } 109 | 110 | func TestWriteSingleByteReturnsSingleEvictedByte(t *testing.T) { 111 | b := MakeC2Buffer(BLOCK_SIZE) 112 | b.Write(incrementBlock2) 113 | singleByte := []byte{0} 114 | 115 | b.Write(singleByte) 116 | e := b.Evicted() 117 | 118 | if len(e) != 1 { 119 | t.Fatalf("Evicted length is not correct: %s", e) 120 | } 121 | 122 | if e[0] != byte(10) { 123 | t.Errorf("Evicted content is not correct: %s", e) 124 | } 125 | } 126 | 127 | func TestTruncatingAfterWriting(t *testing.T) { 128 | b := MakeC2Buffer(BLOCK_SIZE) 129 | b.Write(incrementBlock) 130 | 131 | evicted := b.Truncate(2) 132 | 133 | if len(evicted) != 2 { 134 | t.Fatalf("Truncate did not return expected evicted length: %v", evicted) 135 | } 136 | 137 | if evicted[0] != 0 || evicted[1] != 1 { 138 | t.Errorf("Unexpected content in evicted: %v", evicted) 139 | } 140 | } 141 | 142 | func TestWritingAfterTruncating(t *testing.T) { 143 | // test that after we truncate some content, the next operations 144 | // on the buffer give us the expected results 145 | b := MakeC2Buffer(BLOCK_SIZE) 146 | b.Write(incrementBlock) 147 | b.Truncate(4) 148 | 149 | b.Write([]byte{34, 46}) 150 | 151 | block := b.GetBlock() 152 | 153 | if len(block) != BLOCK_SIZE-2 { 154 | t.Fatalf( 155 | "Unexpected block length after truncation: %v (%v)", 156 | block, 157 | len(block), 158 | ) 159 | } 160 | 161 | if bytes.Compare(block, []byte{4, 5, 6, 7, 8, 9, 34, 46}) != 0 { 162 | t.Errorf( 163 | "Unexpected block content after truncation: %v (%v)", 164 | block, 165 | len(block)) 166 | } 167 | } 168 | 169 | // This should have no allocations! 
170 | func BenchmarkSingleWrites(b *testing.B) { 171 | buffer := MakeC2Buffer(BLOCK_SIZE) 172 | buffer.Write(incrementBlock) 173 | b.ReportAllocs() 174 | 175 | singleByte := []byte{0} 176 | b.StartTimer() 177 | for i := 0; i < b.N; i++ { 178 | buffer.Write(singleByte) 179 | buffer.Evicted() 180 | } 181 | b.StopTimer() 182 | } 183 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/patcher/sequential/sequential_test.go: -------------------------------------------------------------------------------- 1 | package sequential 2 | 3 | import ( 4 | "bytes" 5 | "crypto/md5" 6 | "github.com/Redundancy/go-sync/blocksources" 7 | "github.com/Redundancy/go-sync/patcher" 8 | "io" 9 | "io/ioutil" 10 | "strings" 11 | "testing" 12 | ) 13 | 14 | const ( 15 | BLOCKSIZE = 4 16 | REFERENCE_STRING = "The quick brown fox jumped over the lazy dog" 17 | ) 18 | 19 | var ( 20 | REFERENCE_BUFFER = bytes.NewBufferString(REFERENCE_STRING) 21 | REFERENCE_BLOCKS []string 22 | BLOCK_COUNT int 23 | REFERENCE_HASHES [][]byte 24 | ) 25 | 26 | func init() { 27 | maxLen := len(REFERENCE_STRING) 28 | m := md5.New() 29 | for i := 0; i < maxLen; i += BLOCKSIZE { 30 | last := i + 4 31 | 32 | if last >= maxLen { 33 | last = maxLen - 1 34 | } 35 | 36 | block := REFERENCE_STRING[i:last] 37 | 38 | REFERENCE_BLOCKS = append(REFERENCE_BLOCKS, block) 39 | m.Write([]byte(block)) 40 | REFERENCE_HASHES = append(REFERENCE_HASHES, m.Sum(nil)) 41 | m.Reset() 42 | } 43 | 44 | BLOCK_COUNT = len(REFERENCE_BLOCKS) 45 | } 46 | 47 | func stringToReadSeeker(input string) io.ReadSeeker { 48 | return bytes.NewReader([]byte(input)) 49 | } 50 | 51 | func TestPatchingStart(t *testing.T) { 52 | LOCAL := bytes.NewReader([]byte("48 brown fox jumped over the lazy dog")) 53 | out := bytes.NewBuffer(nil) 54 | 55 | missing := []patcher.MissingBlockSpan{ 56 | { 57 | BlockSize: BLOCKSIZE, 58 | StartBlock: 0, 59 | EndBlock: 2, 60 | Hasher: md5.New(), 61 | ExpectedSums: REFERENCE_HASHES[0:3], 62 | }, 63 | } 64 | 65 | matched := []patcher.FoundBlockSpan{ 66 | { 67 | BlockSize: BLOCKSIZE, 68 | StartBlock: 3, 69 | EndBlock: 11, 70 | MatchOffset: 5, 71 | }, 72 | } 73 | 74 | err := SequentialPatcher( 75 | LOCAL, 76 | blocksources.NewReadSeekerBlockSource( 77 | stringToReadSeeker(REFERENCE_STRING), 78 | blocksources.MakeNullFixedSizeResolver(BLOCKSIZE), 79 | ), 80 | missing, 81 | matched, 82 | 1024, 83 | out, 84 | ) 85 | 86 | if err != nil { 87 | t.Fatal(err) 88 | } 89 | 90 | if result, err := ioutil.ReadAll(out); err == nil { 91 | t.Logf("String split is: \"%v\"", strings.Join(REFERENCE_BLOCKS, "\", \"")) 92 | if bytes.Compare(result, []byte(REFERENCE_STRING)) != 0 { 93 | t.Errorf("Result does not equal reference: \"%s\" vs \"%v\"", result, REFERENCE_STRING) 94 | } 95 | } else { 96 | t.Fatal(err) 97 | } 98 | // 99 | } 100 | 101 | func TestPatchingEnd(t *testing.T) { 102 | LOCAL := bytes.NewReader([]byte("The quick brown fox jumped over the l4zy d0g")) 103 | out := bytes.NewBuffer(nil) 104 | 105 | missing := []patcher.MissingBlockSpan{ 106 | { 107 | BlockSize: BLOCKSIZE, 108 | StartBlock: 9, 109 | EndBlock: 10, 110 | Hasher: md5.New(), 111 | ExpectedSums: REFERENCE_HASHES[0:3], 112 | }, 113 | } 114 | 115 | matched := []patcher.FoundBlockSpan{ 116 | { 117 | BlockSize: BLOCKSIZE, 118 | StartBlock: 0, 119 | EndBlock: 8, 120 | MatchOffset: 0, 121 | }, 122 | } 123 | 124 | err := SequentialPatcher( 125 | LOCAL, 126 | blocksources.NewReadSeekerBlockSource( 127 | stringToReadSeeker(REFERENCE_STRING), 128 | 
blocksources.MakeNullFixedSizeResolver(BLOCKSIZE), 129 | ), 130 | missing, 131 | matched, 132 | 1024, 133 | out, 134 | ) 135 | 136 | if err != nil { 137 | t.Fatal(err) 138 | } 139 | 140 | if result, err := ioutil.ReadAll(out); err == nil { 141 | if bytes.Compare(result, []byte(REFERENCE_STRING)) != 0 { 142 | t.Errorf("Result does not equal reference: \"%s\" vs \"%v\"", result, REFERENCE_STRING) 143 | } 144 | } else { 145 | t.Fatal(err) 146 | } 147 | } 148 | 149 | func TestPatchingEntirelyMissing(t *testing.T) { 150 | LOCAL := bytes.NewReader([]byte("")) 151 | out := bytes.NewBuffer(nil) 152 | 153 | missing := []patcher.MissingBlockSpan{ 154 | { 155 | BlockSize: BLOCKSIZE, 156 | StartBlock: 0, 157 | EndBlock: 10, 158 | Hasher: md5.New(), 159 | ExpectedSums: REFERENCE_HASHES[0:10], 160 | }, 161 | } 162 | 163 | matched := []patcher.FoundBlockSpan{} 164 | 165 | err := SequentialPatcher( 166 | LOCAL, 167 | blocksources.NewReadSeekerBlockSource( 168 | stringToReadSeeker(REFERENCE_STRING), 169 | blocksources.MakeNullFixedSizeResolver(BLOCKSIZE), 170 | ), 171 | missing, 172 | matched, 173 | 1024, 174 | out, 175 | ) 176 | 177 | if err != nil { 178 | t.Fatal(err) 179 | } 180 | 181 | if result, err := ioutil.ReadAll(out); err == nil { 182 | if bytes.Compare(result, []byte(REFERENCE_STRING)) != 0 { 183 | t.Errorf("Result does not equal reference: \"%s\" vs \"%v\"", result, REFERENCE_STRING) 184 | } 185 | } else { 186 | t.Fatal(err) 187 | } 188 | } 189 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/http_test.go: -------------------------------------------------------------------------------- 1 | package gosync 2 | 3 | import ( 4 | "bytes" 5 | "crypto/md5" 6 | "fmt" 7 | "net" 8 | "net/http" 9 | "time" 10 | 11 | "github.com/Redundancy/go-sync/blocksources" 12 | "github.com/Redundancy/go-sync/comparer" 13 | "github.com/Redundancy/go-sync/filechecksum" 14 | "github.com/Redundancy/go-sync/indexbuilder" 15 | "github.com/Redundancy/go-sync/patcher" 16 | ) 17 | 18 | // due to short example strings, use a very small block size 19 | // using one this small in practice would increase your file transfer! 20 | const BLOCK_SIZE = 4 21 | 22 | // This is the "file" as described by the authoritive version 23 | const REFERENCE = "The quick brown fox jumped over the lazy dog" 24 | 25 | // This is what we have locally. Not too far off, but not correct. 
26 | const LOCAL_VERSION = "The qwik brown fox jumped 0v3r the lazy" 27 | 28 | var content = bytes.NewReader([]byte(REFERENCE)) 29 | 30 | func handler(w http.ResponseWriter, req *http.Request) { 31 | http.ServeContent(w, req, "", time.Now(), content) 32 | } 33 | 34 | // set up a http server locally that will respond predictably to ranged requests 35 | func setupServer() <-chan int { 36 | var PORT = 8000 37 | s := http.NewServeMux() 38 | s.HandleFunc("/content", handler) 39 | 40 | portChan := make(chan int) 41 | 42 | go func() { 43 | var listener net.Listener 44 | var err error 45 | 46 | for { 47 | PORT++ 48 | p := fmt.Sprintf(":%v", PORT) 49 | listener, err = net.Listen("tcp", p) 50 | 51 | if err == nil { 52 | break 53 | } 54 | } 55 | portChan <- PORT 56 | http.Serve(listener, s) 57 | }() 58 | 59 | return portChan 60 | } 61 | 62 | // This is exceedingly similar to the module Example, but uses the http blocksource and a local http server 63 | func Example_httpBlockSource() { 64 | PORT := <-setupServer() 65 | LOCAL_URL := fmt.Sprintf("http://localhost:%v/content", PORT) 66 | 67 | generator := filechecksum.NewFileChecksumGenerator(BLOCK_SIZE) 68 | _, referenceFileIndex, checksumLookup, err := indexbuilder.BuildIndexFromString(generator, REFERENCE) 69 | 70 | if err != nil { 71 | return 72 | } 73 | 74 | fileSize := int64(len([]byte(REFERENCE))) 75 | 76 | // This would normally be saved in a file 77 | 78 | blockCount := fileSize / BLOCK_SIZE 79 | if fileSize%BLOCK_SIZE != 0 { 80 | blockCount++ 81 | } 82 | 83 | fs := &BasicSummary{ 84 | ChecksumIndex: referenceFileIndex, 85 | ChecksumLookup: checksumLookup, 86 | BlockCount: uint(blockCount), 87 | BlockSize: uint(BLOCK_SIZE), 88 | FileSize: fileSize, 89 | } 90 | 91 | /* 92 | // Normally, this would be: 93 | rsync, err := MakeRSync( 94 | "toPatch.file", 95 | "http://localhost/content", 96 | "out.file", 97 | fs, 98 | ) 99 | */ 100 | // Need to replace the output and the input 101 | inputFile := bytes.NewReader([]byte(LOCAL_VERSION)) 102 | patchedFile := bytes.NewBuffer(nil) 103 | 104 | resolver := blocksources.MakeFileSizedBlockResolver( 105 | uint64(fs.GetBlockSize()), 106 | fs.GetFileSize(), 107 | ) 108 | 109 | rsync := &RSync{ 110 | Input: inputFile, 111 | Output: patchedFile, 112 | Source: blocksources.NewHttpBlockSource( 113 | LOCAL_URL, 114 | 1, 115 | resolver, 116 | &filechecksum.HashVerifier{ 117 | Hash: md5.New(), 118 | BlockSize: fs.GetBlockSize(), 119 | BlockChecksumGetter: fs, 120 | }, 121 | ), 122 | Summary: fs, 123 | OnClose: nil, 124 | } 125 | 126 | err = rsync.Patch() 127 | 128 | if err != nil { 129 | fmt.Printf("Error: %v\n", err) 130 | return 131 | } 132 | 133 | err = rsync.Close() 134 | 135 | if err != nil { 136 | fmt.Printf("Error: %v\n", err) 137 | return 138 | } 139 | 140 | fmt.Printf("Patched content: \"%v\"\n", patchedFile.String()) 141 | 142 | // Just for inspection 143 | remoteReferenceSource := rsync.Source.(*blocksources.BlockSourceBase) 144 | fmt.Printf("Downloaded Bytes: %v\n", remoteReferenceSource.ReadBytes()) 145 | 146 | // Output: 147 | // Patched content: "The quick brown fox jumped over the lazy dog" 148 | // Downloaded Bytes: 16 149 | } 150 | 151 | func ToPatcherFoundSpan(sl comparer.BlockSpanList, blockSize int64) []patcher.FoundBlockSpan { 152 | result := make([]patcher.FoundBlockSpan, len(sl)) 153 | 154 | for i, v := range sl { 155 | result[i].StartBlock = v.StartBlock 156 | result[i].EndBlock = v.EndBlock 157 | result[i].MatchOffset = v.ComparisonStartOffset 158 | result[i].BlockSize = blockSize 159 | } 160 | 
161 | return result 162 | } 163 | 164 | func ToPatcherMissingSpan(sl comparer.BlockSpanList, blockSize int64) []patcher.MissingBlockSpan { 165 | result := make([]patcher.MissingBlockSpan, len(sl)) 166 | 167 | for i, v := range sl { 168 | result[i].StartBlock = v.StartBlock 169 | result[i].EndBlock = v.EndBlock 170 | result[i].BlockSize = blockSize 171 | } 172 | 173 | return result 174 | } 175 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/altsrc/map_input_source.go: -------------------------------------------------------------------------------- 1 | package altsrc 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | "time" 7 | 8 | "github.com/codegangsta/cli" 9 | ) 10 | 11 | // MapInputSource implements InputSourceContext to return 12 | // data from the map that is loaded. 13 | type MapInputSource struct { 14 | valueMap map[string]interface{} 15 | } 16 | 17 | // Int returns an int from the map if it exists otherwise returns 0 18 | func (fsm *MapInputSource) Int(name string) (int, error) { 19 | otherGenericValue, exists := fsm.valueMap[name] 20 | if exists { 21 | otherValue, isType := otherGenericValue.(int) 22 | if !isType { 23 | return 0, incorrectTypeForFlagError(name, "int", otherGenericValue) 24 | } 25 | 26 | return otherValue, nil 27 | } 28 | 29 | return 0, nil 30 | } 31 | 32 | // Duration returns a duration from the map if it exists otherwise returns 0 33 | func (fsm *MapInputSource) Duration(name string) (time.Duration, error) { 34 | otherGenericValue, exists := fsm.valueMap[name] 35 | if exists { 36 | otherValue, isType := otherGenericValue.(time.Duration) 37 | if !isType { 38 | return 0, incorrectTypeForFlagError(name, "duration", otherGenericValue) 39 | } 40 | return otherValue, nil 41 | } 42 | 43 | return 0, nil 44 | } 45 | 46 | // Float64 returns an float64 from the map if it exists otherwise returns 0 47 | func (fsm *MapInputSource) Float64(name string) (float64, error) { 48 | otherGenericValue, exists := fsm.valueMap[name] 49 | if exists { 50 | otherValue, isType := otherGenericValue.(float64) 51 | if !isType { 52 | return 0, incorrectTypeForFlagError(name, "float64", otherGenericValue) 53 | } 54 | return otherValue, nil 55 | } 56 | 57 | return 0, nil 58 | } 59 | 60 | // String returns a string from the map if it exists otherwise returns an empty string 61 | func (fsm *MapInputSource) String(name string) (string, error) { 62 | otherGenericValue, exists := fsm.valueMap[name] 63 | if exists { 64 | otherValue, isType := otherGenericValue.(string) 65 | if !isType { 66 | return "", incorrectTypeForFlagError(name, "string", otherGenericValue) 67 | } 68 | return otherValue, nil 69 | } 70 | 71 | return "", nil 72 | } 73 | 74 | // StringSlice returns an []string from the map if it exists otherwise returns nil 75 | func (fsm *MapInputSource) StringSlice(name string) ([]string, error) { 76 | otherGenericValue, exists := fsm.valueMap[name] 77 | if exists { 78 | otherValue, isType := otherGenericValue.([]string) 79 | if !isType { 80 | return nil, incorrectTypeForFlagError(name, "[]string", otherGenericValue) 81 | } 82 | return otherValue, nil 83 | } 84 | 85 | return nil, nil 86 | } 87 | 88 | // IntSlice returns an []int from the map if it exists otherwise returns nil 89 | func (fsm *MapInputSource) IntSlice(name string) ([]int, error) { 90 | otherGenericValue, exists := fsm.valueMap[name] 91 | if exists { 92 | otherValue, isType := otherGenericValue.([]int) 93 | if !isType { 94 | return nil, incorrectTypeForFlagError(name, 
"[]int", otherGenericValue) 95 | } 96 | return otherValue, nil 97 | } 98 | 99 | return nil, nil 100 | } 101 | 102 | // Generic returns an cli.Generic from the map if it exists otherwise returns nil 103 | func (fsm *MapInputSource) Generic(name string) (cli.Generic, error) { 104 | otherGenericValue, exists := fsm.valueMap[name] 105 | if exists { 106 | otherValue, isType := otherGenericValue.(cli.Generic) 107 | if !isType { 108 | return nil, incorrectTypeForFlagError(name, "cli.Generic", otherGenericValue) 109 | } 110 | return otherValue, nil 111 | } 112 | 113 | return nil, nil 114 | } 115 | 116 | // Bool returns an bool from the map otherwise returns false 117 | func (fsm *MapInputSource) Bool(name string) (bool, error) { 118 | otherGenericValue, exists := fsm.valueMap[name] 119 | if exists { 120 | otherValue, isType := otherGenericValue.(bool) 121 | if !isType { 122 | return false, incorrectTypeForFlagError(name, "bool", otherGenericValue) 123 | } 124 | return otherValue, nil 125 | } 126 | 127 | return false, nil 128 | } 129 | 130 | // BoolT returns an bool from the map otherwise returns true 131 | func (fsm *MapInputSource) BoolT(name string) (bool, error) { 132 | otherGenericValue, exists := fsm.valueMap[name] 133 | if exists { 134 | otherValue, isType := otherGenericValue.(bool) 135 | if !isType { 136 | return true, incorrectTypeForFlagError(name, "bool", otherGenericValue) 137 | } 138 | return otherValue, nil 139 | } 140 | 141 | return true, nil 142 | } 143 | 144 | func incorrectTypeForFlagError(name, expectedTypeName string, value interface{}) error { 145 | valueType := reflect.TypeOf(value) 146 | valueTypeName := "" 147 | if valueType != nil { 148 | valueTypeName = valueType.Name() 149 | } 150 | 151 | return fmt.Errorf("Mismatched type for flag '%s'. Expected '%s' but actual is '%s'", name, expectedTypeName, valueTypeName) 152 | } 153 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/patcher/sequential/sequential.go: -------------------------------------------------------------------------------- 1 | /* 2 | Sequential Patcher will stream the patched version of the file to output, 3 | since it works strictly in order, it cannot patch the local file directly 4 | (since it might overwrite a block needed later), 5 | so there would have to be a final copy once the patching was done. 6 | */ 7 | package sequential 8 | 9 | import ( 10 | "fmt" 11 | "github.com/Redundancy/go-sync/patcher" 12 | "io" 13 | ) 14 | 15 | const ( 16 | ABSOLUTE_POSITION = 0 17 | ) 18 | 19 | /* 20 | This simple example currently doesn't do any pipelining of needed blocks, nor does it deal with 21 | blocks being delivered out of order. 
22 | */ 23 | func SequentialPatcher( 24 | localFile io.ReadSeeker, 25 | reference patcher.BlockSource, 26 | requiredRemoteBlocks []patcher.MissingBlockSpan, 27 | locallyAvailableBlocks []patcher.FoundBlockSpan, 28 | maxBlockStorage uint64, // the amount of memory we're allowed to use for temporary data storage 29 | output io.Writer, 30 | ) error { 31 | 32 | maxBlockMissing := uint(0) 33 | if len(requiredRemoteBlocks) > 0 { 34 | maxBlockMissing = requiredRemoteBlocks[len(requiredRemoteBlocks)-1].EndBlock 35 | } 36 | 37 | maxBlockFound := uint(0) 38 | if len(locallyAvailableBlocks) > 0 { 39 | maxBlockFound = locallyAvailableBlocks[len(locallyAvailableBlocks)-1].EndBlock 40 | } 41 | 42 | if reference == nil { 43 | return fmt.Errorf("No BlockSource set for obtaining reference blocks") 44 | } 45 | 46 | maxBlock := maxBlockMissing 47 | if maxBlockFound > maxBlock { 48 | maxBlock = maxBlockFound 49 | } 50 | 51 | currentBlock := uint(0) 52 | 53 | // TODO: find a way to test this, since it seemed to be the cause of an issue 54 | for currentBlock <= maxBlock { 55 | // where is the next block supposed to come from? 56 | if withinFirstBlockOfLocalBlocks(currentBlock, locallyAvailableBlocks) { 57 | firstMatched := locallyAvailableBlocks[0] 58 | 59 | // we have the current block range in the local file 60 | localFile.Seek(firstMatched.MatchOffset, ABSOLUTE_POSITION) 61 | blockSizeToRead := int64(firstMatched.EndBlock-firstMatched.StartBlock+1) * firstMatched.BlockSize 62 | 63 | if _, err := io.Copy(output, io.LimitReader(localFile, blockSizeToRead)); err != nil { 64 | return fmt.Errorf("Could not copy %v bytes to output: %v", blockSizeToRead, err) 65 | } 66 | 67 | currentBlock = firstMatched.EndBlock + 1 68 | locallyAvailableBlocks = locallyAvailableBlocks[1:] 69 | 70 | } else if withinFirstBlockOfRemoteBlocks(currentBlock, requiredRemoteBlocks) { 71 | firstMissing := requiredRemoteBlocks[0] 72 | reference.RequestBlocks(firstMissing) 73 | 74 | select { 75 | case result := <-reference.GetResultChannel(): 76 | if result.StartBlock == currentBlock { 77 | if _, err := output.Write(result.Data); err != nil { 78 | return fmt.Errorf( 79 | "Could not write data to output: %v", 80 | err, 81 | ) 82 | } else { 83 | 84 | completed := calculateNumberOfCompletedBlocks( 85 | len(result.Data), 86 | firstMissing.BlockSize, 87 | ) 88 | 89 | if completed != (firstMissing.EndBlock-firstMissing.StartBlock) + 1 { 90 | return fmt.Errorf( 91 | "Unexpected reponse length from remote source: blocks %v-%v (got %v blocks)", 92 | firstMissing.StartBlock, 93 | firstMissing.EndBlock, 94 | completed, 95 | ) 96 | } 97 | 98 | currentBlock += completed 99 | requiredRemoteBlocks = requiredRemoteBlocks[1:] 100 | } 101 | } else { 102 | return fmt.Errorf( 103 | "Received unexpected block: %v", 104 | result.StartBlock, 105 | ) 106 | } 107 | case err := <-reference.EncounteredError(): 108 | return fmt.Errorf( 109 | "Failed to read from reference file: %v", 110 | err, 111 | ) 112 | } 113 | 114 | } else { 115 | return fmt.Errorf( 116 | "Could not find block in missing or matched list: %v\nRemote: %v\nLocal: %v\n", 117 | currentBlock, 118 | requiredRemoteBlocks, 119 | locallyAvailableBlocks, 120 | ) 121 | } 122 | } 123 | 124 | return nil 125 | } 126 | 127 | func withinFirstBlockOfRemoteBlocks(currentBlock uint, remoteBlocks []patcher.MissingBlockSpan) bool { 128 | return len(remoteBlocks) > 0 && remoteBlocks[0].StartBlock <= currentBlock && remoteBlocks[0].EndBlock >= currentBlock 129 | } 130 | 131 | func 
withinFirstBlockOfLocalBlocks(currentBlock uint, localBlocks []patcher.FoundBlockSpan) bool { 132 | return len(localBlocks) > 0 && localBlocks[0].StartBlock <= currentBlock && localBlocks[0].EndBlock >= currentBlock 133 | } 134 | 135 | func calculateNumberOfCompletedBlocks(resultLength int, blockSize int64) (completedBlockCount uint) { 136 | // TODO: lots of casting to uint here, is it safe? 137 | completedBlockCount = uint(resultLength) / uint(blockSize) 138 | 139 | // round up in the case of a partial block (last block may not be full sized) 140 | if uint(resultLength)%uint(blockSize) != 0 { 141 | completedBlockCount += 1 142 | } 143 | 144 | return 145 | } 146 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/altsrc/yaml_command_test.go: -------------------------------------------------------------------------------- 1 | // Disabling building of yaml support in cases where golang is 1.0 or 1.1 2 | // as the encoding library is not implemented or supported. 3 | 4 | // +build !go1,!go1.1 5 | 6 | package altsrc 7 | 8 | import ( 9 | "flag" 10 | "io/ioutil" 11 | "os" 12 | "testing" 13 | 14 | "github.com/codegangsta/cli" 15 | ) 16 | 17 | func TestCommandYamlFileTest(t *testing.T) { 18 | app := cli.NewApp() 19 | set := flag.NewFlagSet("test", 0) 20 | ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666) 21 | defer os.Remove("current.yaml") 22 | test := []string{"test-cmd", "--load", "current.yaml"} 23 | set.Parse(test) 24 | 25 | c := cli.NewContext(app, set, nil) 26 | 27 | command := &cli.Command{ 28 | Name: "test-cmd", 29 | Aliases: []string{"tc"}, 30 | Usage: "this is for testing", 31 | Description: "testing", 32 | Action: func(c *cli.Context) { 33 | val := c.Int("test") 34 | expect(t, val, 15) 35 | }, 36 | Flags: []cli.Flag{ 37 | NewIntFlag(cli.IntFlag{Name: "test"}), 38 | cli.StringFlag{Name: "load"}}, 39 | } 40 | command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) 41 | err := command.Run(c) 42 | 43 | expect(t, err, nil) 44 | } 45 | 46 | func TestCommandYamlFileTestGlobalEnvVarWins(t *testing.T) { 47 | app := cli.NewApp() 48 | set := flag.NewFlagSet("test", 0) 49 | ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666) 50 | defer os.Remove("current.yaml") 51 | 52 | os.Setenv("THE_TEST", "10") 53 | defer os.Setenv("THE_TEST", "") 54 | test := []string{"test-cmd", "--load", "current.yaml"} 55 | set.Parse(test) 56 | 57 | c := cli.NewContext(app, set, nil) 58 | 59 | command := &cli.Command{ 60 | Name: "test-cmd", 61 | Aliases: []string{"tc"}, 62 | Usage: "this is for testing", 63 | Description: "testing", 64 | Action: func(c *cli.Context) { 65 | val := c.Int("test") 66 | expect(t, val, 10) 67 | }, 68 | Flags: []cli.Flag{ 69 | NewIntFlag(cli.IntFlag{Name: "test", EnvVar: "THE_TEST"}), 70 | cli.StringFlag{Name: "load"}}, 71 | } 72 | command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) 73 | 74 | err := command.Run(c) 75 | 76 | expect(t, err, nil) 77 | } 78 | 79 | func TestCommandYamlFileTestSpecifiedFlagWins(t *testing.T) { 80 | app := cli.NewApp() 81 | set := flag.NewFlagSet("test", 0) 82 | ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666) 83 | defer os.Remove("current.yaml") 84 | 85 | test := []string{"test-cmd", "--load", "current.yaml", "--test", "7"} 86 | set.Parse(test) 87 | 88 | c := cli.NewContext(app, set, nil) 89 | 90 | command := &cli.Command{ 91 | Name: "test-cmd", 92 | Aliases: []string{"tc"}, 93 | Usage: "this is 
for testing", 94 | Description: "testing", 95 | Action: func(c *cli.Context) { 96 | val := c.Int("test") 97 | expect(t, val, 7) 98 | }, 99 | Flags: []cli.Flag{ 100 | NewIntFlag(cli.IntFlag{Name: "test"}), 101 | cli.StringFlag{Name: "load"}}, 102 | } 103 | command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) 104 | 105 | err := command.Run(c) 106 | 107 | expect(t, err, nil) 108 | } 109 | 110 | func TestCommandYamlFileTestDefaultValueFileWins(t *testing.T) { 111 | app := cli.NewApp() 112 | set := flag.NewFlagSet("test", 0) 113 | ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666) 114 | defer os.Remove("current.yaml") 115 | 116 | test := []string{"test-cmd", "--load", "current.yaml"} 117 | set.Parse(test) 118 | 119 | c := cli.NewContext(app, set, nil) 120 | 121 | command := &cli.Command{ 122 | Name: "test-cmd", 123 | Aliases: []string{"tc"}, 124 | Usage: "this is for testing", 125 | Description: "testing", 126 | Action: func(c *cli.Context) { 127 | val := c.Int("test") 128 | expect(t, val, 15) 129 | }, 130 | Flags: []cli.Flag{ 131 | NewIntFlag(cli.IntFlag{Name: "test", Value: 7}), 132 | cli.StringFlag{Name: "load"}}, 133 | } 134 | command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) 135 | 136 | err := command.Run(c) 137 | 138 | expect(t, err, nil) 139 | } 140 | 141 | func TestCommandYamlFileFlagHasDefaultGlobalEnvYamlSetGlobalEnvWins(t *testing.T) { 142 | app := cli.NewApp() 143 | set := flag.NewFlagSet("test", 0) 144 | ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666) 145 | defer os.Remove("current.yaml") 146 | 147 | os.Setenv("THE_TEST", "11") 148 | defer os.Setenv("THE_TEST", "") 149 | 150 | test := []string{"test-cmd", "--load", "current.yaml"} 151 | set.Parse(test) 152 | 153 | c := cli.NewContext(app, set, nil) 154 | 155 | command := &cli.Command{ 156 | Name: "test-cmd", 157 | Aliases: []string{"tc"}, 158 | Usage: "this is for testing", 159 | Description: "testing", 160 | Action: func(c *cli.Context) { 161 | val := c.Int("test") 162 | expect(t, val, 11) 163 | }, 164 | Flags: []cli.Flag{ 165 | NewIntFlag(cli.IntFlag{Name: "test", Value: 7, EnvVar: "THE_TEST"}), 166 | cli.StringFlag{Name: "load"}}, 167 | } 168 | command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) 169 | err := command.Run(c) 170 | 171 | expect(t, err, nil) 172 | } 173 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/index/index.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package index provides the functionality to describe a reference 'file' and its contents in terms of 3 | the weak and strong checksums, in such a way that you can check if a weak checksum is present, 4 | then check if there is a strong checksum that matches. 5 | 6 | It also allows lookups in terms of block offsets, so that upon finding a match, you can more efficiently 7 | check if the next block follows it. 8 | 9 | The index structure does not lend itself to being an interface - the pattern of taking the result of looking for 10 | the weak checksum and looking up the strong checksum in that requires us to return an object matching an interface which 11 | both packages must know about. 
12 | 13 | Here's the interface: 14 | 15 | type Index interface { 16 | FindWeakChecksum(chk []byte) interface{} 17 | FindStrongChecksum(chk []byte, weak interface{}) []chunks.ChunkChecksum 18 | } 19 | 20 | This allows the implementation to rely on a previously generated value, without the users knowing what it is. 21 | This breaks the dependency that requires so many packages to import index. 22 | */ 23 | package index 24 | 25 | import ( 26 | "bytes" 27 | "encoding/binary" 28 | "github.com/Redundancy/go-sync/chunks" 29 | "sort" 30 | ) 31 | 32 | type ChecksumIndex struct { 33 | BlockCount int 34 | 35 | /* 36 | This datastructure is based on some benchmarking that indicates that it outperforms 37 | a basic map 70ns vs 130ns for ~8192 checksums (which is reasonably large - 38 | say 64 MB with no weak collisions @8192 bytes per block). 39 | 40 | We use a 256 element slice, and the value of the least significant byte to determine 41 | which map to look up into. 42 | */ 43 | weakChecksumLookup []map[uint32]StrongChecksumList 44 | 45 | MaxStrongLength int 46 | AverageStrongLength float32 47 | Count int 48 | } 49 | 50 | // Builds an index in which chunks can be found, with their corresponding offsets 51 | // We use this for the 52 | func MakeChecksumIndex(checksums []chunks.ChunkChecksum) *ChecksumIndex { 53 | n := &ChecksumIndex{ 54 | BlockCount: len(checksums), 55 | weakChecksumLookup: make([]map[uint32]StrongChecksumList, 256), 56 | } 57 | 58 | for _, chunk := range checksums { 59 | weakChecksumAsInt := binary.LittleEndian.Uint32(chunk.WeakChecksum) 60 | arrayOffset := weakChecksumAsInt & 255 61 | 62 | if n.weakChecksumLookup[arrayOffset] == nil { 63 | n.weakChecksumLookup[arrayOffset] = make(map[uint32]StrongChecksumList) 64 | } 65 | 66 | n.weakChecksumLookup[arrayOffset][weakChecksumAsInt] = append( 67 | n.weakChecksumLookup[arrayOffset][weakChecksumAsInt], 68 | chunk, 69 | ) 70 | 71 | } 72 | 73 | sum := 0 74 | count := 0 75 | 76 | for _, a := range n.weakChecksumLookup { 77 | for _, c := range a { 78 | sort.Sort(c) 79 | if len(c) > n.MaxStrongLength { 80 | n.MaxStrongLength = len(c) 81 | } 82 | sum += len(c) 83 | count += 1 84 | n.Count += len(c) 85 | } 86 | } 87 | 88 | n.AverageStrongLength = float32(sum) / float32(count) 89 | 90 | return n 91 | } 92 | 93 | func (index *ChecksumIndex) WeakCount() int { 94 | return index.Count 95 | } 96 | 97 | func (index *ChecksumIndex) FindWeakChecksumInIndex(weak []byte) StrongChecksumList { 98 | x := binary.LittleEndian.Uint32(weak) 99 | if index.weakChecksumLookup[x&255] != nil { 100 | if v, ok := index.weakChecksumLookup[x&255][x]; ok { 101 | return v 102 | } 103 | } 104 | return nil 105 | } 106 | 107 | func (index *ChecksumIndex) FindWeakChecksum2(chk []byte) interface{} { 108 | w := index.FindWeakChecksumInIndex(chk) 109 | 110 | if len(w) == 0 { 111 | return nil 112 | } else { 113 | return w 114 | } 115 | } 116 | 117 | func (index *ChecksumIndex) FindStrongChecksum2(chk []byte, weak interface{}) []chunks.ChunkChecksum { 118 | if strongList, ok := weak.(StrongChecksumList); ok { 119 | return strongList.FindStrongChecksum(chk) 120 | } else { 121 | return nil 122 | } 123 | } 124 | 125 | type StrongChecksumList []chunks.ChunkChecksum 126 | 127 | // Sortable interface 128 | func (s StrongChecksumList) Len() int { 129 | return len(s) 130 | } 131 | 132 | // Sortable interface 133 | func (s StrongChecksumList) Swap(i, j int) { 134 | s[i], s[j] = s[j], s[i] 135 | } 136 | 137 | // Sortable interface 138 | func (s StrongChecksumList) Less(i, j int) bool { 139 | 
return bytes.Compare(s[i].StrongChecksum, s[j].StrongChecksum) == -1 140 | } 141 | 142 | func (s StrongChecksumList) FindStrongChecksum(strong []byte) (result []chunks.ChunkChecksum) { 143 | n := len(s) 144 | 145 | // average length is 1, so fast path comparison 146 | if n == 1 { 147 | if bytes.Compare(s[0].StrongChecksum, strong) == 0 { 148 | return s 149 | } else { 150 | return nil 151 | } 152 | } 153 | 154 | // find the first possible occurance 155 | first_gte_checksum := sort.Search( 156 | n, 157 | func(i int) bool { 158 | return bytes.Compare(s[i].StrongChecksum, strong) >= 0 159 | }, 160 | ) 161 | 162 | // out of bounds 163 | if first_gte_checksum == -1 || first_gte_checksum == n { 164 | return nil 165 | } 166 | 167 | // Somewhere in the middle, but the next one didn't match 168 | if bytes.Compare(s[first_gte_checksum].StrongChecksum, strong) != 0 { 169 | return nil 170 | } 171 | 172 | end := first_gte_checksum + 1 173 | for end < n { 174 | if bytes.Compare(s[end].StrongChecksum, strong) == 0 { 175 | end += 1 176 | } else { 177 | break 178 | } 179 | 180 | } 181 | 182 | return s[first_gte_checksum:end] 183 | } 184 | -------------------------------------------------------------------------------- /vendor/src/github.com/petar/GoLLRB/llrb/llrb_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2010 Petar Maymounkov. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package llrb 6 | 7 | import ( 8 | "math" 9 | "math/rand" 10 | "testing" 11 | ) 12 | 13 | func TestCases(t *testing.T) { 14 | tree := New() 15 | tree.ReplaceOrInsert(Int(1)) 16 | tree.ReplaceOrInsert(Int(1)) 17 | if tree.Len() != 1 { 18 | t.Errorf("expecting len 1") 19 | } 20 | if !tree.Has(Int(1)) { 21 | t.Errorf("expecting to find key=1") 22 | } 23 | 24 | tree.Delete(Int(1)) 25 | if tree.Len() != 0 { 26 | t.Errorf("expecting len 0") 27 | } 28 | if tree.Has(Int(1)) { 29 | t.Errorf("not expecting to find key=1") 30 | } 31 | 32 | tree.Delete(Int(1)) 33 | if tree.Len() != 0 { 34 | t.Errorf("expecting len 0") 35 | } 36 | if tree.Has(Int(1)) { 37 | t.Errorf("not expecting to find key=1") 38 | } 39 | } 40 | 41 | func TestReverseInsertOrder(t *testing.T) { 42 | tree := New() 43 | n := 100 44 | for i := 0; i < n; i++ { 45 | tree.ReplaceOrInsert(Int(n - i)) 46 | } 47 | i := 0 48 | tree.AscendGreaterOrEqual(Int(0), func(item Item) bool { 49 | i++ 50 | if item.(Int) != Int(i) { 51 | t.Errorf("bad order: got %d, expect %d", item.(Int), i) 52 | } 53 | return true 54 | }) 55 | } 56 | 57 | func TestRange(t *testing.T) { 58 | tree := New() 59 | order := []String{ 60 | "ab", "aba", "abc", "a", "aa", "aaa", "b", "a-", "a!", 61 | } 62 | for _, i := range order { 63 | tree.ReplaceOrInsert(i) 64 | } 65 | k := 0 66 | tree.AscendRange(String("ab"), String("ac"), func(item Item) bool { 67 | if k > 3 { 68 | t.Fatalf("returned more items than expected") 69 | } 70 | i1 := order[k] 71 | i2 := item.(String) 72 | if i1 != i2 { 73 | t.Errorf("expecting %s, got %s", i1, i2) 74 | } 75 | k++ 76 | return true 77 | }) 78 | } 79 | 80 | func TestRandomInsertOrder(t *testing.T) { 81 | tree := New() 82 | n := 1000 83 | perm := rand.Perm(n) 84 | for i := 0; i < n; i++ { 85 | tree.ReplaceOrInsert(Int(perm[i])) 86 | } 87 | j := 0 88 | tree.AscendGreaterOrEqual(Int(0), func(item Item) bool { 89 | if item.(Int) != Int(j) { 90 | t.Fatalf("bad order") 91 | } 92 | j++ 93 | return true 94 | }) 95 | } 96 | 97 | func 
TestRandomReplace(t *testing.T) { 98 | tree := New() 99 | n := 100 100 | perm := rand.Perm(n) 101 | for i := 0; i < n; i++ { 102 | tree.ReplaceOrInsert(Int(perm[i])) 103 | } 104 | perm = rand.Perm(n) 105 | for i := 0; i < n; i++ { 106 | if replaced := tree.ReplaceOrInsert(Int(perm[i])); replaced == nil || replaced.(Int) != Int(perm[i]) { 107 | t.Errorf("error replacing") 108 | } 109 | } 110 | } 111 | 112 | func TestRandomInsertSequentialDelete(t *testing.T) { 113 | tree := New() 114 | n := 1000 115 | perm := rand.Perm(n) 116 | for i := 0; i < n; i++ { 117 | tree.ReplaceOrInsert(Int(perm[i])) 118 | } 119 | for i := 0; i < n; i++ { 120 | tree.Delete(Int(i)) 121 | } 122 | } 123 | 124 | func TestRandomInsertDeleteNonExistent(t *testing.T) { 125 | tree := New() 126 | n := 100 127 | perm := rand.Perm(n) 128 | for i := 0; i < n; i++ { 129 | tree.ReplaceOrInsert(Int(perm[i])) 130 | } 131 | if tree.Delete(Int(200)) != nil { 132 | t.Errorf("deleted non-existent item") 133 | } 134 | if tree.Delete(Int(-2)) != nil { 135 | t.Errorf("deleted non-existent item") 136 | } 137 | for i := 0; i < n; i++ { 138 | if u := tree.Delete(Int(i)); u == nil || u.(Int) != Int(i) { 139 | t.Errorf("delete failed") 140 | } 141 | } 142 | if tree.Delete(Int(200)) != nil { 143 | t.Errorf("deleted non-existent item") 144 | } 145 | if tree.Delete(Int(-2)) != nil { 146 | t.Errorf("deleted non-existent item") 147 | } 148 | } 149 | 150 | func TestRandomInsertPartialDeleteOrder(t *testing.T) { 151 | tree := New() 152 | n := 100 153 | perm := rand.Perm(n) 154 | for i := 0; i < n; i++ { 155 | tree.ReplaceOrInsert(Int(perm[i])) 156 | } 157 | for i := 1; i < n-1; i++ { 158 | tree.Delete(Int(i)) 159 | } 160 | j := 0 161 | tree.AscendGreaterOrEqual(Int(0), func(item Item) bool { 162 | switch j { 163 | case 0: 164 | if item.(Int) != Int(0) { 165 | t.Errorf("expecting 0") 166 | } 167 | case 1: 168 | if item.(Int) != Int(n-1) { 169 | t.Errorf("expecting %d", n-1) 170 | } 171 | } 172 | j++ 173 | return true 174 | }) 175 | } 176 | 177 | func TestRandomInsertStats(t *testing.T) { 178 | tree := New() 179 | n := 100000 180 | perm := rand.Perm(n) 181 | for i := 0; i < n; i++ { 182 | tree.ReplaceOrInsert(Int(perm[i])) 183 | } 184 | avg, _ := tree.HeightStats() 185 | expAvg := math.Log2(float64(n)) - 1.5 186 | if math.Abs(avg-expAvg) >= 2.0 { 187 | t.Errorf("too much deviation from expected average height") 188 | } 189 | } 190 | 191 | func BenchmarkInsert(b *testing.B) { 192 | tree := New() 193 | for i := 0; i < b.N; i++ { 194 | tree.ReplaceOrInsert(Int(b.N - i)) 195 | } 196 | } 197 | 198 | func BenchmarkDelete(b *testing.B) { 199 | b.StopTimer() 200 | tree := New() 201 | for i := 0; i < b.N; i++ { 202 | tree.ReplaceOrInsert(Int(b.N - i)) 203 | } 204 | b.StartTimer() 205 | for i := 0; i < b.N; i++ { 206 | tree.Delete(Int(i)) 207 | } 208 | } 209 | 210 | func BenchmarkDeleteMin(b *testing.B) { 211 | b.StopTimer() 212 | tree := New() 213 | for i := 0; i < b.N; i++ { 214 | tree.ReplaceOrInsert(Int(b.N - i)) 215 | } 216 | b.StartTimer() 217 | for i := 0; i < b.N; i++ { 218 | tree.DeleteMin() 219 | } 220 | } 221 | 222 | func TestInsertNoReplace(t *testing.T) { 223 | tree := New() 224 | n := 1000 225 | for q := 0; q < 2; q++ { 226 | perm := rand.Perm(n) 227 | for i := 0; i < n; i++ { 228 | tree.InsertNoReplace(Int(perm[i])) 229 | } 230 | } 231 | j := 0 232 | tree.AscendGreaterOrEqual(Int(0), func(item Item) bool { 233 | if item.(Int) != Int(j/2) { 234 | t.Fatalf("bad order") 235 | } 236 | j++ 237 | return true 238 | }) 239 | } 240 | 
-------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/comparer/comparer.go: -------------------------------------------------------------------------------- 1 | /* 2 | package comparer is responsible for using a FileChecksumGenerator (filechecksum) and an index 3 | to move through a file and compare it to the index, producing a FileDiffSummary 4 | */ 5 | package comparer 6 | 7 | import ( 8 | "fmt" 9 | "io" 10 | "sync/atomic" 11 | 12 | "github.com/Redundancy/go-sync/chunks" 13 | "github.com/Redundancy/go-sync/circularbuffer" 14 | "github.com/Redundancy/go-sync/filechecksum" 15 | ) 16 | 17 | const ( 18 | READ_NEXT_BYTE = iota 19 | READ_NEXT_BLOCK 20 | READ_NONE 21 | ) 22 | 23 | // If the weak Hash object satisfies this interface, then 24 | // StartFindMatchingBlocks will not allocate a circular buffer 25 | type BlockBuffer interface { 26 | Write([]byte) (int, error) 27 | // the last set of bytes of the size of the circular buffer 28 | // oldest to newest 29 | GetLastBlock() []byte 30 | } 31 | 32 | type BlockMatchResult struct { 33 | // In case of error 34 | Err error 35 | 36 | // The offset the comparison + baseOffset 37 | ComparisonOffset int64 38 | 39 | // The block from the index that it matched 40 | BlockIdx uint 41 | } 42 | 43 | type Index interface { 44 | FindWeakChecksum2(chk []byte) interface{} 45 | FindStrongChecksum2(chk []byte, weak interface{}) []chunks.ChunkChecksum 46 | } 47 | 48 | /* 49 | Iterates though comparison looking for blocks that match ones from the index 50 | it emits each block to be read from the returned channel. Callers should check for 51 | .Err != nil on the results, in which case reading will end immediately. 52 | 53 | StartFindMatchingBlocks is capable of running asyncronously 54 | on sub-sections of a larger file. When doing this, you must overlap 55 | by the block size, and use seperate checksum generators. 
56 | */ 57 | 58 | type Comparer struct { 59 | Comparisons int64 60 | WeakHashHits int64 61 | StrongHashHits int64 62 | } 63 | 64 | func (c *Comparer) StartFindMatchingBlocks( 65 | comparison io.Reader, 66 | baseOffset int64, 67 | generator *filechecksum.FileChecksumGenerator, 68 | referenceIndex Index, 69 | ) <-chan BlockMatchResult { 70 | 71 | resultStream := make(chan BlockMatchResult) 72 | 73 | go c.startFindMatchingBlocks_int( 74 | resultStream, 75 | comparison, 76 | baseOffset, 77 | generator, 78 | referenceIndex, 79 | ) 80 | 81 | return resultStream 82 | } 83 | 84 | /* 85 | TODO: When matching duplicated blocks, a channel of BlockMatchResult slices would be more efficient 86 | */ 87 | func (c *Comparer) startFindMatchingBlocks_int( 88 | results chan<- BlockMatchResult, 89 | comparison io.Reader, 90 | baseOffset int64, 91 | generator *filechecksum.FileChecksumGenerator, 92 | reference Index, 93 | ) { 94 | defer close(results) 95 | 96 | block := make([]byte, generator.BlockSize) 97 | var err error 98 | 99 | ReportErr := func(err error) { 100 | results <- BlockMatchResult{ 101 | Err: err, 102 | } 103 | } 104 | 105 | _, err = io.ReadFull(comparison, block) 106 | 107 | if err != nil { 108 | ReportErr( 109 | fmt.Errorf("Error reading first block in comparison: %v", err), 110 | ) 111 | return 112 | } 113 | 114 | generator.WeakRollingHash.SetBlock(block) 115 | singleByte := make([]byte, 1) 116 | weaksum := make([]byte, generator.WeakRollingHash.Size()) 117 | strongSum := make([]byte, 0, generator.GetStrongHash().Size()) 118 | 119 | blockMemory := circularbuffer.MakeC2Buffer(int(generator.BlockSize)) 120 | blockMemory.Write(block) 121 | 122 | strong := generator.GetStrongHash() 123 | // All the bytes 124 | i := int64(0) 125 | next := READ_NEXT_BYTE 126 | 127 | //ReadLoop: 128 | for { 129 | 130 | atomic.AddInt64(&c.Comparisons, 1) 131 | 132 | // look for a weak match 133 | generator.WeakRollingHash.GetSum(weaksum) 134 | if weakMatchList := reference.FindWeakChecksum2(weaksum); weakMatchList != nil { 135 | atomic.AddInt64(&c.WeakHashHits, 1) 136 | 137 | block = blockMemory.GetBlock() 138 | 139 | strong.Reset() 140 | strong.Write(block) 141 | strongSum = strong.Sum(strongSum) 142 | strongList := reference.FindStrongChecksum2(strongSum, weakMatchList) 143 | 144 | // clear the slice 145 | strongSum = strongSum[:0] 146 | 147 | // If there are many matches, it means that this block is 148 | // duplicated in the reference. 
149 | // since we care about finding all the blocks in the reference, 150 | // we must report all of them 151 | off := i + baseOffset 152 | for _, strongMatch := range strongList { 153 | results <- BlockMatchResult{ 154 | ComparisonOffset: off, 155 | BlockIdx: strongMatch.ChunkOffset, 156 | } 157 | } 158 | 159 | if len(strongList) > 0 { 160 | atomic.AddInt64(&c.StrongHashHits, 1) 161 | if next == READ_NONE { 162 | // found the match at the end, so exit 163 | break 164 | } 165 | // No point looking for a match that overlaps this block 166 | next = READ_NEXT_BLOCK 167 | } 168 | } 169 | 170 | var n int 171 | var readBytes []byte 172 | 173 | switch next { 174 | case READ_NEXT_BYTE: 175 | n, err = comparison.Read(singleByte) 176 | readBytes = singleByte 177 | case READ_NEXT_BLOCK: 178 | n, err = io.ReadFull(comparison, block) 179 | readBytes = block[:n] 180 | next = READ_NEXT_BYTE 181 | } 182 | 183 | if uint(n) == generator.BlockSize { 184 | generator.WeakRollingHash.SetBlock(block) 185 | blockMemory.Write(block) 186 | i += int64(n) 187 | } else if n > 0 { 188 | b_len := blockMemory.Len() 189 | blockMemory.Write(readBytes) 190 | generator.WeakRollingHash.AddAndRemoveBytes( 191 | readBytes, 192 | blockMemory.Evicted(), 193 | b_len, 194 | ) 195 | i += int64(n) 196 | } 197 | 198 | if next != READ_NONE && (err == io.EOF || err == io.ErrUnexpectedEOF) { 199 | err = io.EOF 200 | next = READ_NONE 201 | } 202 | 203 | if next == READ_NONE { 204 | if blockMemory.Empty() { 205 | break 206 | } 207 | 208 | b_len := blockMemory.Len() 209 | removedByte := blockMemory.Truncate(1) 210 | generator.WeakRollingHash.RemoveBytes(removedByte, b_len) 211 | i += 1 212 | } 213 | } 214 | 215 | if err != io.EOF { 216 | ReportErr(err) 217 | return 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/blocksources/httpblocksource_test.go: -------------------------------------------------------------------------------- 1 | package blocksources 2 | 3 | import ( 4 | "bytes" 5 | "crypto/md5" 6 | "fmt" 7 | "net" 8 | "net/http" 9 | "testing" 10 | "time" 11 | 12 | "github.com/Redundancy/go-sync/filechecksum" 13 | "github.com/Redundancy/go-sync/patcher" 14 | ) 15 | 16 | var PORT = 8000 17 | 18 | var TEST_CONTENT = []byte("This is test content used for evaluation of the unit tests") 19 | var content = bytes.NewReader(TEST_CONTENT) 20 | var LOCAL_URL = "" 21 | 22 | func handler(w http.ResponseWriter, req *http.Request) { 23 | http.ServeContent(w, req, "", time.Now(), content) 24 | } 25 | 26 | var PARTIAL_CONTENT = []byte("abcdef") 27 | var partialContent = bytes.NewReader(PARTIAL_CONTENT) 28 | 29 | func partialContentHandler(w http.ResponseWriter, req *http.Request) { 30 | http.ServeContent(w, req, "", time.Now(), partialContent) 31 | } 32 | 33 | var CORRUPT_CONTENT = []byte("sfdfsfhhrtertert sffsfsdfsdfsdf") 34 | var corruptContent = bytes.NewReader(CORRUPT_CONTENT) 35 | 36 | func corruptContentHandler(w http.ResponseWriter, req *http.Request) { 37 | http.ServeContent(w, req, "", time.Now(), corruptContent) 38 | } 39 | 40 | // set up a http server locally that will respond predictably to ranged requests 41 | // NB: Doing this will prevent deadlocks from being caught! 
42 | func init() { 43 | s := http.NewServeMux() 44 | s.HandleFunc("/", handler) 45 | s.HandleFunc("/partial", partialContentHandler) 46 | s.HandleFunc("/corrupt", corruptContentHandler) 47 | s.Handle("/404", http.NotFoundHandler()) 48 | 49 | portChan := make(chan int) 50 | 51 | go func() { 52 | var listener net.Listener 53 | var err error 54 | 55 | for { 56 | PORT++ 57 | p := fmt.Sprintf(":%v", PORT) 58 | listener, err = net.Listen("tcp", p) 59 | 60 | if err == nil { 61 | break 62 | } 63 | } 64 | portChan <- PORT 65 | http.Serve(listener, s) 66 | }() 67 | 68 | p := fmt.Sprintf(":%v", <-portChan) 69 | LOCAL_URL = "http://localhost" + p 70 | 71 | } 72 | 73 | func TestHandler(t *testing.T) { 74 | resp, err := http.Get(LOCAL_URL) 75 | 76 | if err != nil { 77 | t.Fatal(err) 78 | } 79 | 80 | if resp.StatusCode != 200 { 81 | t.Fatal(resp.Status) 82 | } 83 | } 84 | 85 | func TestHttpBlockSource(t *testing.T) { 86 | b := NewHttpBlockSource( 87 | LOCAL_URL+"/", 88 | 2, 89 | MakeNullFixedSizeResolver(4), 90 | nil, 91 | ) 92 | 93 | err := b.RequestBlocks(patcher.MissingBlockSpan{ 94 | BlockSize: 4, 95 | StartBlock: 0, 96 | EndBlock: 0, 97 | }) 98 | 99 | if err != nil { 100 | t.Fatal(err) 101 | } 102 | 103 | results := b.GetResultChannel() 104 | 105 | select { 106 | case r := <-results: 107 | if bytes.Compare(r.Data, TEST_CONTENT[:4]) != 0 { 108 | t.Errorf("Data differed from expected content: \"%v\"", string(r.Data)) 109 | } 110 | case e := <-b.EncounteredError(): 111 | t.Fatal(e) 112 | case <-time.After(time.Second): 113 | t.Fatal("Waited a second for the response, timeout.") 114 | } 115 | } 116 | 117 | func TestHttpBlockSource404(t *testing.T) { 118 | b := NewHttpBlockSource( 119 | LOCAL_URL+"/404", 120 | 2, 121 | MakeNullFixedSizeResolver(4), 122 | nil, 123 | ) 124 | 125 | b.RequestBlocks(patcher.MissingBlockSpan{ 126 | BlockSize: 4, 127 | StartBlock: 0, 128 | EndBlock: 0, 129 | }) 130 | 131 | results := b.GetResultChannel() 132 | 133 | select { 134 | case <-results: 135 | t.Fatal("Should not have gotten a result") 136 | case e := <-b.EncounteredError(): 137 | if e == nil { 138 | t.Fatal("Error was nil!") 139 | } else if _, ok := e.(URLNotFoundError); !ok { 140 | t.Errorf("Unexpected error type: %v", e) 141 | } 142 | case <-time.After(time.Second): 143 | t.Fatal("Waited a second for the response, timeout.") 144 | } 145 | } 146 | 147 | func TestHttpBlockSourceOffsetBlockRequest(t *testing.T) { 148 | b := NewHttpBlockSource( 149 | LOCAL_URL+"/", 150 | 2, 151 | MakeNullFixedSizeResolver(4), 152 | nil, 153 | ) 154 | 155 | b.RequestBlocks(patcher.MissingBlockSpan{ 156 | BlockSize: 4, 157 | StartBlock: 1, 158 | EndBlock: 3, 159 | }) 160 | 161 | select { 162 | case result := <-b.GetResultChannel(): 163 | if result.StartBlock != 1 { 164 | t.Errorf( 165 | "Unexpected result start block: %v", 166 | result.StartBlock, 167 | ) 168 | } 169 | case <-time.After(time.Second): 170 | t.Fatalf("Timeout waiting for result") 171 | } 172 | } 173 | 174 | func TestHttpBlockSourcePartialContentRequest(t *testing.T) { 175 | b := NewHttpBlockSource( 176 | LOCAL_URL+"/partial", 177 | 2, 178 | MakeFileSizedBlockResolver(4, int64(len(PARTIAL_CONTENT))), 179 | nil, 180 | ) 181 | 182 | b.RequestBlocks(patcher.MissingBlockSpan{ 183 | BlockSize: 4, 184 | StartBlock: 1, 185 | EndBlock: 1, 186 | }) 187 | 188 | select { 189 | case result := <-b.GetResultChannel(): 190 | if result.StartBlock != 1 { 191 | t.Errorf( 192 | "Unexpected result start block: %v", 193 | result.StartBlock, 194 | ) 195 | } 196 | if len(result.Data) != 2 { 197 | 
t.Errorf( 198 | "Unexpected data length: \"%v\"", 199 | string(result.Data), 200 | ) 201 | } 202 | if string(result.Data) != "ef" { 203 | t.Errorf( 204 | "Unexpected result \"%v\"", 205 | string(result.Data), 206 | ) 207 | } 208 | case err := <-b.EncounteredError(): 209 | t.Fatal(err) 210 | case <-time.After(time.Second): 211 | t.Fatalf("Timeout waiting for result") 212 | } 213 | } 214 | 215 | type SingleBlockSource []byte 216 | 217 | func (d SingleBlockSource) GetStrongChecksumForBlock(blockID int) []byte { 218 | m := md5.New() 219 | return m.Sum(d) 220 | } 221 | 222 | func TestHttpBlockSourceVerification(t *testing.T) { 223 | const BLOCK_SIZE = 4 224 | 225 | b := NewHttpBlockSource( 226 | LOCAL_URL+"/corrupt", 227 | 2, 228 | MakeNullFixedSizeResolver(BLOCK_SIZE), 229 | &filechecksum.HashVerifier{ 230 | Hash: md5.New(), 231 | BlockSize: BLOCK_SIZE, 232 | BlockChecksumGetter: SingleBlockSource(TEST_CONTENT[0:BLOCK_SIZE]), 233 | }, 234 | ) 235 | 236 | b.RequestBlocks(patcher.MissingBlockSpan{ 237 | BlockSize: BLOCK_SIZE, 238 | StartBlock: 0, 239 | EndBlock: 0, 240 | }) 241 | 242 | select { 243 | case result := <-b.GetResultChannel(): 244 | t.Fatalf("Should have thrown an error, got %v", result) 245 | case e := <-b.EncounteredError(): 246 | t.Logf("Encountered expected error: %v", e) 247 | return 248 | case <-time.After(time.Second): 249 | t.Fatalf("Timeout waiting for result") 250 | } 251 | } 252 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/index/index_test.go: -------------------------------------------------------------------------------- 1 | package index 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/Redundancy/go-sync/chunks" 7 | ) 8 | 9 | // Weak checksums must be 4 bytes 10 | var WEAK_A = []byte("aaaa") 11 | var WEAK_B = []byte("bbbb") 12 | 13 | /* 14 | ChunkOffset uint 15 | // the size of the block 16 | Size int64 17 | WeakChecksum []byte 18 | StrongChecksum []byte 19 | */ 20 | 21 | func TestMakeIndex(t *testing.T) { 22 | i := MakeChecksumIndex( 23 | []chunks.ChunkChecksum{ 24 | {ChunkOffset: 0, WeakChecksum: WEAK_A, StrongChecksum: []byte("b")}, 25 | {ChunkOffset: 1, WeakChecksum: WEAK_B, StrongChecksum: []byte("c")}, 26 | }, 27 | ) 28 | 29 | if i.Count != 2 { 30 | t.Fatalf("Wrong count on index %v", i.Count) 31 | } 32 | } 33 | 34 | func TestFindWeakInIndex(t *testing.T) { 35 | i := MakeChecksumIndex( 36 | []chunks.ChunkChecksum{ 37 | {ChunkOffset: 0, WeakChecksum: WEAK_A, StrongChecksum: []byte("b")}, 38 | {ChunkOffset: 1, WeakChecksum: WEAK_B, StrongChecksum: []byte("c")}, 39 | {ChunkOffset: 2, WeakChecksum: WEAK_B, StrongChecksum: []byte("d")}, 40 | }, 41 | ) 42 | 43 | result := i.FindWeakChecksumInIndex(WEAK_B) 44 | 45 | if result == nil { 46 | t.Error("Did not find lookfor in the index") 47 | } else if len(result) != 2 { 48 | t.Errorf("Wrong number of possible matches found: %v", len(result)) 49 | } else if result[0].ChunkOffset != 1 { 50 | t.Errorf("Found chunk had offset %v expected 1", result[0].ChunkOffset) 51 | } 52 | } 53 | 54 | func TestWeakNotInIndex(t *testing.T) { 55 | i := MakeChecksumIndex( 56 | []chunks.ChunkChecksum{ 57 | {ChunkOffset: 0, WeakChecksum: WEAK_A, StrongChecksum: []byte("b")}, 58 | {ChunkOffset: 1, WeakChecksum: WEAK_B, StrongChecksum: []byte("c")}, 59 | {ChunkOffset: 2, WeakChecksum: WEAK_B, StrongChecksum: []byte("d")}, 60 | }, 61 | ) 62 | 63 | result := i.FindWeakChecksumInIndex([]byte("afgh")) 64 | 65 | if result != nil { 66 | t.Error("Result from 
FindWeakChecksumInIndex should be nil") 67 | } 68 | 69 | result2 := i.FindWeakChecksum2([]byte("afgh")) 70 | 71 | if result2 != nil { 72 | t.Errorf("Result from FindWeakChecksum2 should be nil: %#v", result2) 73 | } 74 | } 75 | 76 | func TestWeakNotInIndex2(t *testing.T) { 77 | i := MakeChecksumIndex( 78 | []chunks.ChunkChecksum{ 79 | {ChunkOffset: 0, WeakChecksum: WEAK_A, StrongChecksum: []byte("b")}, 80 | {ChunkOffset: 1, WeakChecksum: WEAK_B, StrongChecksum: []byte("c")}, 81 | {ChunkOffset: 2, WeakChecksum: WEAK_B, StrongChecksum: []byte("d")}, 82 | }, 83 | ) 84 | 85 | result := i.FindWeakChecksumInIndex([]byte("llll")) 86 | 87 | if result != nil { 88 | t.Error("Result should be nil") 89 | } 90 | } 91 | 92 | func TestFindStrongInIndex(t *testing.T) { 93 | i := MakeChecksumIndex( 94 | []chunks.ChunkChecksum{ 95 | {ChunkOffset: 0, WeakChecksum: WEAK_A, StrongChecksum: []byte("b")}, 96 | {ChunkOffset: 1, WeakChecksum: WEAK_B, StrongChecksum: []byte("c")}, 97 | {ChunkOffset: 2, WeakChecksum: WEAK_B, StrongChecksum: []byte("d")}, 98 | }, 99 | ) 100 | 101 | // builds upon TestFindWeakInIndex 102 | result := i.FindWeakChecksumInIndex(WEAK_B) 103 | strongs := result.FindStrongChecksum([]byte("c")) 104 | 105 | if len(strongs) != 1 { 106 | t.Errorf("Incorrect number of strong checksums found: %v", len(strongs)) 107 | } else if strongs[0].ChunkOffset != 1 { 108 | t.Errorf("Wrong chunk found, had offset %v", strongs[0].ChunkOffset) 109 | } 110 | } 111 | 112 | func TestNotFoundStrongInIndexAtEnd(t *testing.T) { 113 | i := MakeChecksumIndex( 114 | []chunks.ChunkChecksum{ 115 | {ChunkOffset: 0, WeakChecksum: WEAK_A, StrongChecksum: []byte("b")}, 116 | {ChunkOffset: 1, WeakChecksum: WEAK_B, StrongChecksum: []byte("c")}, 117 | {ChunkOffset: 2, WeakChecksum: WEAK_B, StrongChecksum: []byte("d")}, 118 | }, 119 | ) 120 | 121 | // builds upon TestFindWeakInIndex 122 | result := i.FindWeakChecksumInIndex(WEAK_B) 123 | strongs := result.FindStrongChecksum([]byte("e")) 124 | 125 | if len(strongs) != 0 { 126 | t.Errorf("Incorrect number of strong checksums found: %v", strongs) 127 | } 128 | } 129 | 130 | func TestNotFoundStrongInIndexInCenter(t *testing.T) { 131 | // The strong checksum we're looking for is not found 132 | // but is < another checksum in the strong list 133 | 134 | i := MakeChecksumIndex( 135 | []chunks.ChunkChecksum{ 136 | {ChunkOffset: 0, WeakChecksum: WEAK_A, StrongChecksum: []byte("b")}, 137 | {ChunkOffset: 1, WeakChecksum: WEAK_B, StrongChecksum: []byte("c")}, 138 | {ChunkOffset: 2, WeakChecksum: WEAK_B, StrongChecksum: []byte("d")}, 139 | {ChunkOffset: 3, WeakChecksum: WEAK_B, StrongChecksum: []byte("f")}, 140 | }, 141 | ) 142 | 143 | // builds upon TestFindWeakInIndex 144 | result := i.FindWeakChecksumInIndex(WEAK_B) 145 | strongs := result.FindStrongChecksum([]byte("e")) 146 | 147 | if len(strongs) != 0 { 148 | t.Errorf("Incorrect number of strong checksums found: %v", strongs) 149 | } 150 | } 151 | 152 | func TestFindDuplicatedBlocksInIndex(t *testing.T) { 153 | i := MakeChecksumIndex( 154 | []chunks.ChunkChecksum{ 155 | {ChunkOffset: 0, WeakChecksum: WEAK_A, StrongChecksum: []byte("b")}, 156 | {ChunkOffset: 1, WeakChecksum: WEAK_B, StrongChecksum: []byte("c")}, 157 | {ChunkOffset: 3, WeakChecksum: WEAK_B, StrongChecksum: []byte("c")}, 158 | {ChunkOffset: 2, WeakChecksum: WEAK_B, StrongChecksum: []byte("d")}, 159 | }, 160 | ) 161 | 162 | // builds upon TestFindWeakInIndex 163 | result := i.FindWeakChecksumInIndex(WEAK_B) 164 | strongs := result.FindStrongChecksum([]byte("c")) 165 | 166 
| if len(strongs) != 2 { 167 | t.Fatalf("Incorrect number of strong checksums found: %v", strongs) 168 | } 169 | 170 | first := strongs[0] 171 | if first.ChunkOffset != 1 { 172 | t.Errorf("Wrong chunk found, had offset %v", first.ChunkOffset) 173 | } 174 | 175 | second := strongs[1] 176 | if second.ChunkOffset != 3 { 177 | t.Errorf("Wrong chunk found, had offset %v", second.ChunkOffset) 178 | } 179 | } 180 | 181 | func TestFindTwoDuplicatedBlocksInIndex(t *testing.T) { 182 | i := MakeChecksumIndex( 183 | []chunks.ChunkChecksum{ 184 | {ChunkOffset: 1, WeakChecksum: WEAK_B, StrongChecksum: []byte("c")}, 185 | {ChunkOffset: 2, WeakChecksum: WEAK_B, StrongChecksum: []byte("c")}, 186 | }, 187 | ) 188 | 189 | // builds upon TestFindWeakInIndex 190 | result := i.FindWeakChecksumInIndex(WEAK_B) 191 | strongs := result.FindStrongChecksum([]byte("c")) 192 | 193 | if len(strongs) != 2 { 194 | t.Fatalf("Incorrect number of strong checksums found: %v", strongs) 195 | } 196 | 197 | first := strongs[0] 198 | if first.ChunkOffset != 1 { 199 | t.Errorf("Wrong chunk found, had offset %v", first.ChunkOffset) 200 | } 201 | 202 | second := strongs[1] 203 | if second.ChunkOffset != 2 { 204 | t.Errorf("Wrong chunk found, had offset %v", second.ChunkOffset) 205 | } 206 | } 207 | -------------------------------------------------------------------------------- /src/gosync/common.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "encoding/binary" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "net/url" 11 | "os" 12 | 13 | "github.com/Redundancy/go-sync/chunks" 14 | "github.com/Redundancy/go-sync/comparer" 15 | "github.com/Redundancy/go-sync/filechecksum" 16 | "github.com/Redundancy/go-sync/index" 17 | "github.com/Redundancy/go-sync/patcher" 18 | "github.com/codegangsta/cli" 19 | ) 20 | 21 | const ( 22 | // KB - One Kilobyte 23 | KB = 1024 24 | // MB - One Megabyte 25 | MB = 1000000 26 | ) 27 | 28 | func errorWrapper(c *cli.Context, f func(*cli.Context) error) { 29 | defer func() { 30 | if p := recover(); p != nil { 31 | fmt.Fprintln(os.Stderr, p) 32 | os.Exit(1) 33 | } 34 | }() 35 | 36 | if err := f(c); err != nil { 37 | fmt.Fprintln(os.Stderr, err.Error()) 38 | os.Exit(1) 39 | } 40 | 41 | return 42 | } 43 | 44 | func openFileAndHandleError(filename string) (f *os.File) { 45 | var err error 46 | f, err = os.Open(filename) 47 | 48 | if err != nil { 49 | f = nil 50 | handleFileError(filename, err) 51 | } 52 | 53 | return 54 | } 55 | 56 | func formatFileError(filename string, err error) error { 57 | switch { 58 | case os.IsExist(err): 59 | return fmt.Errorf( 60 | "Could not open %v (already exists): %v", 61 | filename, 62 | err, 63 | ) 64 | case os.IsNotExist(err): 65 | return fmt.Errorf( 66 | "Could not find %v: %v\n", 67 | filename, 68 | err, 69 | ) 70 | case os.IsPermission(err): 71 | return fmt.Errorf( 72 | "Could not open %v (permission denied): %v\n", 73 | filename, 74 | err, 75 | ) 76 | default: 77 | return fmt.Errorf( 78 | "Unknown error opening %v: %v\n", 79 | filename, 80 | err, 81 | ) 82 | } 83 | } 84 | 85 | func handleFileError(filename string, err error) { 86 | e := formatFileError(filename, err) 87 | fmt.Fprintln(os.Stderr, e) 88 | } 89 | 90 | func getLocalOrRemoteFile(path string) (io.ReadCloser, error) { 91 | url, err := url.Parse(path) 92 | 93 | switch { 94 | case err != nil: 95 | return os.Open(path) 96 | case url.Scheme == "": 97 | return os.Open(path) 98 | default: 99 | response, err := http.Get(path) 100 | 101 
| if err != nil { 102 | return nil, err 103 | } 104 | 105 | if response.StatusCode < 200 || response.StatusCode > 299 { 106 | return nil, fmt.Errorf("Request to %v returned status: %v", path, response.Status) 107 | } 108 | 109 | return response.Body, nil 110 | } 111 | } 112 | 113 | func toPatcherFoundSpan(sl comparer.BlockSpanList, blockSize int64) []patcher.FoundBlockSpan { 114 | result := make([]patcher.FoundBlockSpan, len(sl)) 115 | 116 | for i, v := range sl { 117 | result[i].StartBlock = v.StartBlock 118 | result[i].EndBlock = v.EndBlock 119 | result[i].MatchOffset = v.ComparisonStartOffset 120 | result[i].BlockSize = blockSize 121 | } 122 | 123 | return result 124 | } 125 | 126 | func toPatcherMissingSpan(sl comparer.BlockSpanList, blockSize int64) []patcher.MissingBlockSpan { 127 | result := make([]patcher.MissingBlockSpan, len(sl)) 128 | 129 | for i, v := range sl { 130 | result[i].StartBlock = v.StartBlock 131 | result[i].EndBlock = v.EndBlock 132 | result[i].BlockSize = blockSize 133 | } 134 | 135 | return result 136 | } 137 | 138 | func writeHeaders( 139 | f *os.File, 140 | magic string, 141 | blocksize uint32, 142 | filesize int64, 143 | versions []uint16, 144 | ) (err error) { 145 | if _, err = f.WriteString(magicString); err != nil { 146 | return 147 | } 148 | 149 | for _, v := range versions { 150 | if err = binary.Write(f, binary.LittleEndian, v); err != nil { 151 | return 152 | } 153 | } 154 | 155 | if err = binary.Write(f, binary.LittleEndian, filesize); err != nil { 156 | return 157 | } 158 | 159 | err = binary.Write(f, binary.LittleEndian, blocksize) 160 | return 161 | } 162 | 163 | // reads the file headers and checks the magic string, then the semantic versioning 164 | func readHeadersAndCheck( 165 | r io.Reader, 166 | magic string, 167 | requiredMajorVersion uint16, 168 | ) ( 169 | major, minor, patch uint16, 170 | filesize int64, 171 | blocksize uint32, 172 | err error, 173 | ) { 174 | b := make([]byte, len(magicString)) 175 | 176 | if _, err = r.Read(b); err != nil { 177 | return 178 | } else if string(b) != magicString { 179 | err = errors.New( 180 | "file header does not match magic string. 
Not a valid gosync file", 181 | ) 182 | return 183 | } 184 | 185 | for _, v := range []*uint16{&major, &minor, &patch} { 186 | err = binary.Read(r, binary.LittleEndian, v) 187 | if err != nil { 188 | return 189 | } 190 | } 191 | 192 | if requiredMajorVersion != major { 193 | err = fmt.Errorf( 194 | "The major version of the gosync file (%v.%v.%v) does not match the tool (%v.%v.%v).", 195 | major, minor, patch, 196 | majorVersion, minorVersion, patchVersion, 197 | ) 198 | 199 | return 200 | } 201 | 202 | err = binary.Read(r, binary.LittleEndian, &filesize) 203 | if err != nil { 204 | return 205 | } 206 | 207 | err = binary.Read(r, binary.LittleEndian, &blocksize) 208 | return 209 | } 210 | 211 | func readIndex(r io.Reader, blocksize uint) ( 212 | i *index.ChecksumIndex, 213 | checksumLookup filechecksum.ChecksumLookup, 214 | blockCount uint, 215 | err error, 216 | ) { 217 | generator := filechecksum.NewFileChecksumGenerator(blocksize) 218 | 219 | readChunks, e := chunks.LoadChecksumsFromReader( 220 | r, 221 | generator.WeakRollingHash.Size(), 222 | generator.StrongHash.Size(), 223 | ) 224 | 225 | err = e 226 | 227 | if err != nil { 228 | return 229 | } 230 | 231 | checksumLookup = chunks.StrongChecksumGetter(readChunks) 232 | i = index.MakeChecksumIndex(readChunks) 233 | blockCount = uint(len(readChunks)) 234 | 235 | return 236 | } 237 | 238 | func multithreadedMatching( 239 | localFile *os.File, 240 | idx *index.ChecksumIndex, 241 | localFileSize, 242 | matcherCount int64, 243 | blocksize uint, 244 | ) (*comparer.MatchMerger, *comparer.Comparer) { 245 | // Note: Since not all sections of the file are equal in work 246 | // it would be better to divide things up into more sections and 247 | // pull work from a queue channel as each finish 248 | sectionSize := localFileSize / matcherCount 249 | sectionSize += int64(blocksize) - (sectionSize % int64(blocksize)) 250 | merger := &comparer.MatchMerger{} 251 | compare := &comparer.Comparer{} 252 | 253 | for i := int64(0); i < matcherCount; i++ { 254 | offset := sectionSize * i 255 | 256 | // Sections must overlap by blocksize (strictly blocksize - 1?) 257 | if i > 0 { 258 | offset -= int64(blocksize) 259 | } 260 | 261 | sectionReader := bufio.NewReaderSize( 262 | io.NewSectionReader(localFile, offset, sectionSize), 263 | MB, 264 | ) 265 | 266 | sectionGenerator := filechecksum.NewFileChecksumGenerator(uint(blocksize)) 267 | 268 | matchStream := compare.StartFindMatchingBlocks( 269 | sectionReader, offset, sectionGenerator, idx) 270 | 271 | merger.StartMergeResultStream(matchStream, int64(blocksize)) 272 | } 273 | 274 | return merger, compare 275 | } 276 | 277 | // better way to do this? 278 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/help.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "strings" 7 | "text/tabwriter" 8 | "text/template" 9 | ) 10 | 11 | // The text template for the Default help topic. 12 | // cli.go uses text/template to render templates. You can 13 | // render custom help text by setting this variable. 
14 | var AppHelpTemplate = `NAME: 15 | {{.Name}} - {{.Usage}} 16 | 17 | USAGE: 18 | {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .Flags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}} 19 | {{if .Version}} 20 | VERSION: 21 | {{.Version}} 22 | {{end}}{{if len .Authors}} 23 | AUTHOR(S): 24 | {{range .Authors}}{{ . }}{{end}} 25 | {{end}}{{if .Commands}} 26 | COMMANDS: 27 | {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} 28 | {{end}}{{end}}{{if .Flags}} 29 | GLOBAL OPTIONS: 30 | {{range .Flags}}{{.}} 31 | {{end}}{{end}}{{if .Copyright }} 32 | COPYRIGHT: 33 | {{.Copyright}} 34 | {{end}} 35 | ` 36 | 37 | // The text template for the command help topic. 38 | // cli.go uses text/template to render templates. You can 39 | // render custom help text by setting this variable. 40 | var CommandHelpTemplate = `NAME: 41 | {{.HelpName}} - {{.Usage}} 42 | 43 | USAGE: 44 | {{.HelpName}}{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Description}} 45 | 46 | DESCRIPTION: 47 | {{.Description}}{{end}}{{if .Flags}} 48 | 49 | OPTIONS: 50 | {{range .Flags}}{{.}} 51 | {{end}}{{ end }} 52 | ` 53 | 54 | // The text template for the subcommand help topic. 55 | // cli.go uses text/template to render templates. You can 56 | // render custom help text by setting this variable. 57 | var SubcommandHelpTemplate = `NAME: 58 | {{.HelpName}} - {{.Usage}} 59 | 60 | USAGE: 61 | {{.HelpName}} command{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} 62 | 63 | COMMANDS: 64 | {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} 65 | {{end}}{{if .Flags}} 66 | OPTIONS: 67 | {{range .Flags}}{{.}} 68 | {{end}}{{end}} 69 | ` 70 | 71 | var helpCommand = Command{ 72 | Name: "help", 73 | Aliases: []string{"h"}, 74 | Usage: "Shows a list of commands or help for one command", 75 | ArgsUsage: "[command]", 76 | Action: func(c *Context) { 77 | args := c.Args() 78 | if args.Present() { 79 | ShowCommandHelp(c, args.First()) 80 | } else { 81 | ShowAppHelp(c) 82 | } 83 | }, 84 | } 85 | 86 | var helpSubcommand = Command{ 87 | Name: "help", 88 | Aliases: []string{"h"}, 89 | Usage: "Shows a list of commands or help for one command", 90 | ArgsUsage: "[command]", 91 | Action: func(c *Context) { 92 | args := c.Args() 93 | if args.Present() { 94 | ShowCommandHelp(c, args.First()) 95 | } else { 96 | ShowSubcommandHelp(c) 97 | } 98 | }, 99 | } 100 | 101 | // Prints help for the App or Command 102 | type helpPrinter func(w io.Writer, templ string, data interface{}) 103 | 104 | var HelpPrinter helpPrinter = printHelp 105 | 106 | // Prints version for the App 107 | var VersionPrinter = printVersion 108 | 109 | func ShowAppHelp(c *Context) { 110 | HelpPrinter(c.App.Writer, AppHelpTemplate, c.App) 111 | } 112 | 113 | // Prints the list of subcommands as the default app completion method 114 | func DefaultAppComplete(c *Context) { 115 | for _, command := range c.App.Commands { 116 | for _, name := range command.Names() { 117 | fmt.Fprintln(c.App.Writer, name) 118 | } 119 | } 120 | } 121 | 122 | // Prints help for the given command 123 | func ShowCommandHelp(ctx *Context, command string) { 124 | // show the subcommand help for a command with subcommands 125 | if command == "" { 126 | HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) 127 | return 128 | } 129 | 130 | for _, c := range ctx.App.Commands { 131 | if 
c.HasName(command) { 132 | HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c) 133 | return 134 | } 135 | } 136 | 137 | if ctx.App.CommandNotFound != nil { 138 | ctx.App.CommandNotFound(ctx, command) 139 | } else { 140 | fmt.Fprintf(ctx.App.Writer, "No help topic for '%v'\n", command) 141 | } 142 | } 143 | 144 | // Prints help for the given subcommand 145 | func ShowSubcommandHelp(c *Context) { 146 | ShowCommandHelp(c, c.Command.Name) 147 | } 148 | 149 | // Prints the version number of the App 150 | func ShowVersion(c *Context) { 151 | VersionPrinter(c) 152 | } 153 | 154 | func printVersion(c *Context) { 155 | fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) 156 | } 157 | 158 | // Prints the lists of commands within a given context 159 | func ShowCompletions(c *Context) { 160 | a := c.App 161 | if a != nil && a.BashComplete != nil { 162 | a.BashComplete(c) 163 | } 164 | } 165 | 166 | // Prints the custom completions for a given command 167 | func ShowCommandCompletions(ctx *Context, command string) { 168 | c := ctx.App.Command(command) 169 | if c != nil && c.BashComplete != nil { 170 | c.BashComplete(ctx) 171 | } 172 | } 173 | 174 | func printHelp(out io.Writer, templ string, data interface{}) { 175 | funcMap := template.FuncMap{ 176 | "join": strings.Join, 177 | } 178 | 179 | w := tabwriter.NewWriter(out, 0, 8, 1, '\t', 0) 180 | t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) 181 | err := t.Execute(w, data) 182 | if err != nil { 183 | // If the writer is closed, t.Execute will fail, and there's nothing 184 | // we can do to recover. We could send this to os.Stderr if we need. 185 | return 186 | } 187 | w.Flush() 188 | } 189 | 190 | func checkVersion(c *Context) bool { 191 | found := false 192 | if VersionFlag.Name != "" { 193 | eachName(VersionFlag.Name, func(name string) { 194 | if c.GlobalBool(name) || c.Bool(name) { 195 | found = true 196 | } 197 | }) 198 | } 199 | return found 200 | } 201 | 202 | func checkHelp(c *Context) bool { 203 | found := false 204 | if HelpFlag.Name != "" { 205 | eachName(HelpFlag.Name, func(name string) { 206 | if c.GlobalBool(name) || c.Bool(name) { 207 | found = true 208 | } 209 | }) 210 | } 211 | return found 212 | } 213 | 214 | func checkCommandHelp(c *Context, name string) bool { 215 | if c.Bool("h") || c.Bool("help") { 216 | ShowCommandHelp(c, name) 217 | return true 218 | } 219 | 220 | return false 221 | } 222 | 223 | func checkSubcommandHelp(c *Context) bool { 224 | if c.GlobalBool("h") || c.GlobalBool("help") { 225 | ShowSubcommandHelp(c) 226 | return true 227 | } 228 | 229 | return false 230 | } 231 | 232 | func checkCompletions(c *Context) bool { 233 | if (c.GlobalBool(BashCompletionFlag.Name) || c.Bool(BashCompletionFlag.Name)) && c.App.EnableBashCompletion { 234 | ShowCompletions(c) 235 | return true 236 | } 237 | 238 | return false 239 | } 240 | 241 | func checkCommandCompletions(c *Context, name string) bool { 242 | if c.Bool(BashCompletionFlag.Name) && c.App.EnableBashCompletion { 243 | ShowCommandCompletions(c, name) 244 | return true 245 | } 246 | 247 | return false 248 | } 249 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/gosync/common.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "encoding/binary" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "net/url" 11 | "os" 12 | 13 | "github.com/Redundancy/go-sync/chunks" 14 | 
"github.com/Redundancy/go-sync/comparer" 15 | "github.com/Redundancy/go-sync/filechecksum" 16 | "github.com/Redundancy/go-sync/index" 17 | "github.com/Redundancy/go-sync/patcher" 18 | "github.com/codegangsta/cli" 19 | ) 20 | 21 | const ( 22 | // KB - One Kilobyte 23 | KB = 1024 24 | // MB - One Megabyte 25 | MB = 1000000 26 | ) 27 | 28 | func errorWrapper(c *cli.Context, f func(*cli.Context) error) { 29 | defer func() { 30 | if p := recover(); p != nil { 31 | fmt.Fprintln(os.Stderr, p) 32 | os.Exit(1) 33 | } 34 | }() 35 | 36 | if err := f(c); err != nil { 37 | fmt.Fprintln(os.Stderr, err.Error()) 38 | os.Exit(1) 39 | } 40 | 41 | return 42 | } 43 | 44 | func openFileAndHandleError(filename string) (f *os.File) { 45 | var err error 46 | f, err = os.Open(filename) 47 | 48 | if err != nil { 49 | f = nil 50 | handleFileError(filename, err) 51 | } 52 | 53 | return 54 | } 55 | 56 | func formatFileError(filename string, err error) error { 57 | switch { 58 | case os.IsExist(err): 59 | return fmt.Errorf( 60 | "Could not open %v (already exists): %v", 61 | filename, 62 | err, 63 | ) 64 | case os.IsNotExist(err): 65 | return fmt.Errorf( 66 | "Could not find %v: %v\n", 67 | filename, 68 | err, 69 | ) 70 | case os.IsPermission(err): 71 | return fmt.Errorf( 72 | "Could not open %v (permission denied): %v\n", 73 | filename, 74 | err, 75 | ) 76 | default: 77 | return fmt.Errorf( 78 | "Unknown error opening %v: %v\n", 79 | filename, 80 | err, 81 | ) 82 | } 83 | } 84 | 85 | func handleFileError(filename string, err error) { 86 | e := formatFileError(filename, err) 87 | fmt.Fprintln(os.Stderr, e) 88 | } 89 | 90 | func getLocalOrRemoteFile(path string) (io.ReadCloser, error) { 91 | url, err := url.Parse(path) 92 | 93 | switch { 94 | case err != nil: 95 | return os.Open(path) 96 | case url.Scheme == "": 97 | return os.Open(path) 98 | default: 99 | response, err := http.Get(path) 100 | 101 | if err != nil { 102 | return nil, err 103 | } 104 | 105 | if response.StatusCode < 200 || response.StatusCode > 299 { 106 | return nil, fmt.Errorf("Request to %v returned status: %v", path, response.Status) 107 | } 108 | 109 | return response.Body, nil 110 | } 111 | } 112 | 113 | func toPatcherFoundSpan(sl comparer.BlockSpanList, blockSize int64) []patcher.FoundBlockSpan { 114 | result := make([]patcher.FoundBlockSpan, len(sl)) 115 | 116 | for i, v := range sl { 117 | result[i].StartBlock = v.StartBlock 118 | result[i].EndBlock = v.EndBlock 119 | result[i].MatchOffset = v.ComparisonStartOffset 120 | result[i].BlockSize = blockSize 121 | } 122 | 123 | return result 124 | } 125 | 126 | func toPatcherMissingSpan(sl comparer.BlockSpanList, blockSize int64) []patcher.MissingBlockSpan { 127 | result := make([]patcher.MissingBlockSpan, len(sl)) 128 | 129 | for i, v := range sl { 130 | result[i].StartBlock = v.StartBlock 131 | result[i].EndBlock = v.EndBlock 132 | result[i].BlockSize = blockSize 133 | } 134 | 135 | return result 136 | } 137 | 138 | func writeHeaders( 139 | f *os.File, 140 | magic string, 141 | blocksize uint32, 142 | filesize int64, 143 | versions []uint16, 144 | ) (err error) { 145 | if _, err = f.WriteString(magicString); err != nil { 146 | return 147 | } 148 | 149 | for _, v := range versions { 150 | if err = binary.Write(f, binary.LittleEndian, v); err != nil { 151 | return 152 | } 153 | } 154 | 155 | if err = binary.Write(f, binary.LittleEndian, filesize); err != nil { 156 | return 157 | } 158 | 159 | err = binary.Write(f, binary.LittleEndian, blocksize) 160 | return 161 | } 162 | 163 | // reads the file 
headers and checks the magic string, then the semantic versioning 164 | func readHeadersAndCheck( 165 | r io.Reader, 166 | magic string, 167 | requiredMajorVersion uint16, 168 | ) ( 169 | major, minor, patch uint16, 170 | filesize int64, 171 | blocksize uint32, 172 | err error, 173 | ) { 174 | b := make([]byte, len(magicString)) 175 | 176 | if _, err = r.Read(b); err != nil { 177 | return 178 | } else if string(b) != magicString { 179 | err = errors.New( 180 | "file header does not match magic string. Not a valid gosync file", 181 | ) 182 | return 183 | } 184 | 185 | for _, v := range []*uint16{&major, &minor, &patch} { 186 | err = binary.Read(r, binary.LittleEndian, v) 187 | if err != nil { 188 | return 189 | } 190 | } 191 | 192 | if requiredMajorVersion != major { 193 | err = fmt.Errorf( 194 | "The major version of the gosync file (%v.%v.%v) does not match the tool (%v.%v.%v).", 195 | major, minor, patch, 196 | majorVersion, minorVersion, patchVersion, 197 | ) 198 | 199 | return 200 | } 201 | 202 | err = binary.Read(r, binary.LittleEndian, &filesize) 203 | if err != nil { 204 | return 205 | } 206 | 207 | err = binary.Read(r, binary.LittleEndian, &blocksize) 208 | return 209 | } 210 | 211 | func readIndex(r io.Reader, blocksize uint) ( 212 | i *index.ChecksumIndex, 213 | checksumLookup filechecksum.ChecksumLookup, 214 | blockCount uint, 215 | err error, 216 | ) { 217 | generator := filechecksum.NewFileChecksumGenerator(blocksize) 218 | 219 | readChunks, e := chunks.LoadChecksumsFromReader( 220 | r, 221 | generator.WeakRollingHash.Size(), 222 | generator.StrongHash.Size(), 223 | ) 224 | 225 | err = e 226 | 227 | if err != nil { 228 | return 229 | } 230 | 231 | checksumLookup = chunks.StrongChecksumGetter(readChunks) 232 | i = index.MakeChecksumIndex(readChunks) 233 | blockCount = uint(len(readChunks)) 234 | 235 | return 236 | } 237 | 238 | func multithreadedMatching( 239 | localFile *os.File, 240 | idx *index.ChecksumIndex, 241 | localFileSize, 242 | matcherCount int64, 243 | blocksize uint, 244 | ) (*comparer.MatchMerger, *comparer.Comparer) { 245 | // Note: Since not all sections of the file are equal in work 246 | // it would be better to divide things up into more sections and 247 | // pull work from a queue channel as each finish 248 | sectionSize := localFileSize / matcherCount 249 | sectionSize += int64(blocksize) - (sectionSize % int64(blocksize)) 250 | merger := &comparer.MatchMerger{} 251 | compare := &comparer.Comparer{} 252 | 253 | for i := int64(0); i < matcherCount; i++ { 254 | offset := sectionSize * i 255 | 256 | // Sections must overlap by blocksize (strictly blocksize - 1?) 257 | if i > 0 { 258 | offset -= int64(blocksize) 259 | } 260 | 261 | sectionReader := bufio.NewReaderSize( 262 | io.NewSectionReader(localFile, offset, sectionSize), 263 | MB, 264 | ) 265 | 266 | sectionGenerator := filechecksum.NewFileChecksumGenerator(uint(blocksize)) 267 | 268 | matchStream := compare.StartFindMatchingBlocks( 269 | sectionReader, offset, sectionGenerator, idx) 270 | 271 | merger.StartMergeResultStream(matchStream, int64(blocksize)) 272 | } 273 | 274 | return merger, compare 275 | } 276 | 277 | // better way to do this? 
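For orientation, the helpers above are building blocks that the tool's commands chain together. Below is a hedged sketch (not part of the vendored file) of one plausible way to combine them: fetch a .gosync index, validate its header, build the in-memory checksum index, and run the multithreaded matcher against a local file. The function name is hypothetical, magicString/majorVersion are the package-level constants referenced above, and error handling is abbreviated.

func loadAndMatchSketch(indexPath, localPath string) error {
	// Fetch the .gosync index from disk or over HTTP.
	indexFile, err := getLocalOrRemoteFile(indexPath)
	if err != nil {
		return err
	}
	defer indexFile.Close()

	// Check the magic string and major version, and recover the block size.
	_, _, _, _, blocksize, err := readHeadersAndCheck(indexFile, magicString, majorVersion)
	if err != nil {
		return err
	}

	// Build the searchable checksum index from the rest of the stream.
	idx, _, _, err := readIndex(bufio.NewReader(indexFile), uint(blocksize))
	if err != nil {
		return err
	}

	// Compare the local file against the index with four concurrent matchers.
	local := openFileAndHandleError(localPath)
	if local == nil {
		return errors.New("could not open local file")
	}
	defer local.Close()

	info, err := local.Stat()
	if err != nil {
		return err
	}

	merger, _ := multithreadedMatching(local, idx, info.Size(), 4, uint(blocksize))
	_ = merger // merged block spans would then feed toPatcherFoundSpan / toPatcherMissingSpan
	return nil
}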
278 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/rollsum/rollsum_32_test.go: -------------------------------------------------------------------------------- 1 | package rollsum 2 | 3 | import ( 4 | "bytes" 5 | "github.com/Redundancy/go-sync/circularbuffer" 6 | "hash" 7 | "io" 8 | "testing" 9 | ) 10 | 11 | func TestThatRollsum32SatisfiesHashInterface(t *testing.T) { 12 | var i hash.Hash = NewRollsum32(10) 13 | i.Reset() 14 | } 15 | 16 | func TestThatRollsum32SatisfiedWriterInterface(t *testing.T) { 17 | var i io.Writer = NewRollsum32(10) 18 | n, err := i.Write([]byte{1, 2, 3, 4}) 19 | 20 | if n != 4 { 21 | t.Error("Did not report writing 4 bytes") 22 | } 23 | 24 | if err != nil { 25 | t.Error(err) 26 | } 27 | } 28 | 29 | func TestThatRollsum32IsTheSameAfterBlockSizeBytes(t *testing.T) { 30 | r1 := NewRollsum32(4) 31 | r2 := NewRollsum32(4) 32 | 33 | r1.Write([]byte{1, 2, 3, 4}) 34 | 35 | r2.Write([]byte{7, 6}) 36 | r2.Write([]byte{5, 1, 2}) 37 | r2.Write([]byte{3, 4}) 38 | 39 | sum1 := r1.Sum(nil) 40 | sum2 := r2.Sum(nil) 41 | 42 | if bytes.Compare(sum1, sum2) != 0 { 43 | t.Errorf( 44 | "Rollsums are different \"%v\" vs \"%v\"", 45 | sum1, 46 | sum2, 47 | ) 48 | } 49 | } 50 | 51 | func TestThatRollsum32IsTheSameAfterBlockSizeBytesWithPartialEviction(t *testing.T) { 52 | r1 := NewRollsum32(4) 53 | r2 := NewRollsum32(4) 54 | 55 | r1.Write([]byte{1, 2, 3, 4}) 56 | 57 | r2.Write([]byte{7, 5}) 58 | r2.Write([]byte{1, 2, 3, 4}) 59 | 60 | sum1 := r1.Sum(nil) 61 | sum2 := r2.Sum(nil) 62 | 63 | if bytes.Compare(sum1, sum2) != 0 { 64 | t.Errorf( 65 | "Rollsums are different \"%v\" vs \"%v\"", 66 | sum1, 67 | sum2, 68 | ) 69 | } 70 | } 71 | 72 | func TestRegression2(t *testing.T) { 73 | const A = "The quick br" 74 | const B = "The qwik br" 75 | 76 | r1 := NewRollsum32(4) 77 | r2 := NewRollsum32(4) 78 | 79 | r1.Write([]byte(A[:4])) 80 | r1.Reset() 81 | r1.Write([]byte(A[4:8])) 82 | r1.Reset() 83 | r1.Write([]byte(A[8:12])) 84 | 85 | r2.Write([]byte(B[:4])) 86 | r2.Write([]byte(B[4:8])) 87 | for _, c := range B[8:] { 88 | r2.Write([]byte{byte(c)}) 89 | } 90 | 91 | sum1 := r1.Sum(nil) 92 | sum2 := r2.Sum(nil) 93 | 94 | if bytes.Compare(sum1, sum2) != 0 { 95 | t.Errorf( 96 | "Rollsums are different \"%v\" vs \"%v\"", 97 | sum1, 98 | sum2, 99 | ) 100 | } 101 | } 102 | 103 | func TestThatRollsum32RemovesBytesCorrectly(t *testing.T) { 104 | r1 := NewRollsum32Base(2) 105 | 106 | r1.AddByte(255) 107 | r1.AddByte(10) 108 | r1.RemoveByte(255, 2) 109 | r1.AddByte(0) 110 | r1.RemoveByte(10, 2) 111 | r1.AddByte(0) 112 | 113 | if r1.a != 0 || r1.b != 0 { 114 | t.Errorf("Values are not reset: %v %v", r1.a, r1.b) 115 | } 116 | } 117 | 118 | func TestThatRollsum32IsDifferentForDifferentInput(t *testing.T) { 119 | r1 := NewRollsum32(4) 120 | r2 := NewRollsum32(4) 121 | 122 | r1.Write([]byte{1, 2, 3, 4}) 123 | r2.Write([]byte{7, 6, 5, 1}) 124 | 125 | sum1 := r1.Sum(nil) 126 | sum2 := r2.Sum(nil) 127 | 128 | if bytes.Compare(sum1, sum2) == 0 { 129 | t.Errorf( 130 | "Rollsums should be different \"%v\" vs \"%v\"", 131 | sum1, 132 | sum2, 133 | ) 134 | } 135 | } 136 | 137 | func TestResettingTheRollsum32(t *testing.T) { 138 | r1 := NewRollsum32(4) 139 | r2 := NewRollsum32(4) 140 | 141 | r1.Write([]byte{1, 2, 3}) 142 | 143 | r2.Write([]byte{7, 6}) 144 | r2.Reset() 145 | r2.Write([]byte{1, 2, 3}) 146 | 147 | sum1 := r1.Sum(nil) 148 | sum2 := r2.Sum(nil) 149 | 150 | if bytes.Compare(sum1, sum2) != 0 { 151 | t.Errorf( 152 | "Rollsums should not be 
different \"%v\" vs \"%v\"", 153 | sum1, 154 | sum2, 155 | ) 156 | } 157 | } 158 | 159 | func TestTruncatingPartiallyFilledBufferResultsInSameState(t *testing.T) { 160 | r1 := NewRollsum32Base(4) 161 | r2 := NewRollsum32Base(4) 162 | 163 | r1.AddByte(2) 164 | sum1 := make([]byte, 4) 165 | r1.GetSum(sum1) 166 | 167 | r2.AddByte(1) 168 | r2.AddByte(2) 169 | // Removal works from the left 170 | r2.RemoveByte(1, 2) 171 | sum2 := make([]byte, 4) 172 | r2.GetSum(sum2) 173 | 174 | if bytes.Compare(sum1, sum2) != 0 { 175 | t.Errorf( 176 | "Rollsums should not be different \"%v\" vs \"%v\"", 177 | sum1, 178 | sum2, 179 | ) 180 | } 181 | } 182 | 183 | func TestThat32SumDoesNotChangeTheHashState(t *testing.T) { 184 | r1 := NewRollsum32(4) 185 | 186 | sum1 := r1.Sum([]byte{1, 2, 3}) 187 | sum2 := r1.Sum([]byte{3, 4, 5}) 188 | 189 | if bytes.Compare(sum1[3:], sum2[3:]) != 0 { 190 | t.Errorf( 191 | "Rollsums should not be different \"%v\" vs \"%v\"", 192 | sum1, 193 | sum2, 194 | ) 195 | } 196 | } 197 | 198 | func TestThat32OutputLengthMatchesSize(t *testing.T) { 199 | r1 := NewRollsum32(4) 200 | sumLength := len(r1.Sum(nil)) 201 | 202 | if sumLength != r1.Size() { 203 | t.Errorf("Unexpected length: %v vs expected %v", sumLength, r1.Size()) 204 | } 205 | } 206 | 207 | func BenchmarkRollsum32(b *testing.B) { 208 | r := NewRollsum32(100) 209 | buffer := make([]byte, 100) 210 | b.ReportAllocs() 211 | b.SetBytes(int64(len(buffer))) 212 | checksum := make([]byte, 16) 213 | 214 | b.StartTimer() 215 | for i := 0; i < b.N; i++ { 216 | r.Write(buffer) 217 | r.Sum(checksum) 218 | checksum = checksum[:0] 219 | } 220 | b.StopTimer() 221 | } 222 | 223 | func BenchmarkRollsum32_8096(b *testing.B) { 224 | r := NewRollsum32(8096) 225 | buffer := make([]byte, 8096) 226 | b.ReportAllocs() 227 | b.SetBytes(int64(len(buffer))) 228 | checksum := make([]byte, 16) 229 | 230 | b.StartTimer() 231 | for i := 0; i < b.N; i++ { 232 | r.Write(buffer) 233 | r.Sum(checksum) 234 | checksum = checksum[:0] 235 | } 236 | b.StopTimer() 237 | } 238 | 239 | func BenchmarkRollsum32Base(b *testing.B) { 240 | r := Rollsum32Base{blockSize: 100} 241 | buffer := make([]byte, 100) 242 | checksum := make([]byte, 16) 243 | b.ReportAllocs() 244 | b.SetBytes(int64(len(buffer))) 245 | 246 | b.StartTimer() 247 | for i := 0; i < b.N; i++ { 248 | r.SetBlock(buffer) 249 | r.GetSum(checksum) 250 | } 251 | b.StopTimer() 252 | 253 | } 254 | 255 | // This is the benchmark where Rollsum should beat a full MD5 for each blocksize 256 | func BenchmarkIncrementalRollsum32(b *testing.B) { 257 | r := NewRollsum32(100) 258 | buffer := make([]byte, 100) 259 | r.Write(buffer) 260 | b.SetBytes(1) 261 | 262 | b.ReportAllocs() 263 | checksum := make([]byte, 16) 264 | increment := make([]byte, 1) 265 | 266 | b.StartTimer() 267 | for i := 0; i < b.N; i++ { 268 | r.Write(increment) 269 | r.Sum(checksum) 270 | checksum = checksum[:0] 271 | } 272 | b.StopTimer() 273 | } 274 | 275 | // The C2 veersion should avoid all allocations in the main loop, and beat the pants off the 276 | // other versions 277 | func BenchmarkIncrementalRollsum32WithC2(b *testing.B) { 278 | const BLOCK_SIZE = 100 279 | r := NewRollsum32Base(BLOCK_SIZE) 280 | buffer := make([]byte, BLOCK_SIZE) 281 | b.SetBytes(1) 282 | cbuffer := circularbuffer.MakeC2Buffer(BLOCK_SIZE) 283 | 284 | r.AddBytes(buffer) 285 | cbuffer.Write(buffer) 286 | 287 | b.ReportAllocs() 288 | checksum := make([]byte, 16) 289 | increment := make([]byte, 1) 290 | 291 | b.StartTimer() 292 | for i := 0; i < b.N; i++ { 293 | 
cbuffer.Write(increment) 294 | r.AddAndRemoveBytes(increment, cbuffer.Evicted(), BLOCK_SIZE) 295 | r.GetSum(checksum) 296 | } 297 | b.StopTimer() 298 | } 299 | -------------------------------------------------------------------------------- /vendor/src/github.com/codegangsta/cli/command.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "strings" 7 | ) 8 | 9 | // Command is a subcommand for a cli.App. 10 | type Command struct { 11 | // The name of the command 12 | Name string 13 | // short name of the command. Typically one character (deprecated, use `Aliases`) 14 | ShortName string 15 | // A list of aliases for the command 16 | Aliases []string 17 | // A short description of the usage of this command 18 | Usage string 19 | // Custom text to show on USAGE section of help 20 | UsageText string 21 | // A longer explanation of how the command works 22 | Description string 23 | // A short description of the arguments of this command 24 | ArgsUsage string 25 | // The function to call when checking for bash command completions 26 | BashComplete func(context *Context) 27 | // An action to execute before any sub-subcommands are run, but after the context is ready 28 | // If a non-nil error is returned, no sub-subcommands are run 29 | Before func(context *Context) error 30 | // An action to execute after any subcommands are run, but before the subcommand has finished 31 | // It is run even if Action() panics 32 | After func(context *Context) error 33 | // The function to call when this command is invoked 34 | Action func(context *Context) 35 | // Execute this function, if an usage error occurs. This is useful for displaying customized usage error messages. 36 | // This function is able to replace the original error messages. 37 | // If this function is not set, the "Incorrect usage" is displayed and the execution is interrupted. 38 | OnUsageError func(context *Context, err error) error 39 | // List of child commands 40 | Subcommands []Command 41 | // List of flags to parse 42 | Flags []Flag 43 | // Treat all flags as normal arguments if true 44 | SkipFlagParsing bool 45 | // Boolean to hide built-in help command 46 | HideHelp bool 47 | 48 | // Full name of command for help, defaults to full command name, including parent commands. 49 | HelpName string 50 | commandNamePath []string 51 | } 52 | 53 | // Returns the full name of the command. 54 | // For subcommands this ensures that parent commands are part of the command path 55 | func (c Command) FullName() string { 56 | if c.commandNamePath == nil { 57 | return c.Name 58 | } 59 | return strings.Join(c.commandNamePath, " ") 60 | } 61 | 62 | // Invokes the command given the context, parses ctx.Args() to generate command-specific flags 63 | func (c Command) Run(ctx *Context) (err error) { 64 | if len(c.Subcommands) > 0 { 65 | return c.startApp(ctx) 66 | } 67 | 68 | if !c.HideHelp && (HelpFlag != BoolFlag{}) { 69 | // append help to flags 70 | c.Flags = append( 71 | c.Flags, 72 | HelpFlag, 73 | ) 74 | } 75 | 76 | if ctx.App.EnableBashCompletion { 77 | c.Flags = append(c.Flags, BashCompletionFlag) 78 | } 79 | 80 | set := flagSet(c.Name, c.Flags) 81 | set.SetOutput(ioutil.Discard) 82 | 83 | if !c.SkipFlagParsing { 84 | firstFlagIndex := -1 85 | terminatorIndex := -1 86 | for index, arg := range ctx.Args() { 87 | if arg == "--" { 88 | terminatorIndex = index 89 | break 90 | } else if arg == "-" { 91 | // Do nothing. A dash alone is not really a flag. 
92 | continue 93 | } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 { 94 | firstFlagIndex = index 95 | } 96 | } 97 | 98 | if firstFlagIndex > -1 { 99 | args := ctx.Args() 100 | regularArgs := make([]string, len(args[1:firstFlagIndex])) 101 | copy(regularArgs, args[1:firstFlagIndex]) 102 | 103 | var flagArgs []string 104 | if terminatorIndex > -1 { 105 | flagArgs = args[firstFlagIndex:terminatorIndex] 106 | regularArgs = append(regularArgs, args[terminatorIndex:]...) 107 | } else { 108 | flagArgs = args[firstFlagIndex:] 109 | } 110 | 111 | err = set.Parse(append(flagArgs, regularArgs...)) 112 | } else { 113 | err = set.Parse(ctx.Args().Tail()) 114 | } 115 | } else { 116 | if c.SkipFlagParsing { 117 | err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...)) 118 | } 119 | } 120 | 121 | if err != nil { 122 | if c.OnUsageError != nil { 123 | err := c.OnUsageError(ctx, err) 124 | return err 125 | } else { 126 | fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.") 127 | fmt.Fprintln(ctx.App.Writer) 128 | ShowCommandHelp(ctx, c.Name) 129 | return err 130 | } 131 | } 132 | 133 | nerr := normalizeFlags(c.Flags, set) 134 | if nerr != nil { 135 | fmt.Fprintln(ctx.App.Writer, nerr) 136 | fmt.Fprintln(ctx.App.Writer) 137 | ShowCommandHelp(ctx, c.Name) 138 | return nerr 139 | } 140 | context := NewContext(ctx.App, set, ctx) 141 | 142 | if checkCommandCompletions(context, c.Name) { 143 | return nil 144 | } 145 | 146 | if checkCommandHelp(context, c.Name) { 147 | return nil 148 | } 149 | 150 | if c.After != nil { 151 | defer func() { 152 | afterErr := c.After(context) 153 | if afterErr != nil { 154 | if err != nil { 155 | err = NewMultiError(err, afterErr) 156 | } else { 157 | err = afterErr 158 | } 159 | } 160 | }() 161 | } 162 | 163 | if c.Before != nil { 164 | err := c.Before(context) 165 | if err != nil { 166 | fmt.Fprintln(ctx.App.Writer, err) 167 | fmt.Fprintln(ctx.App.Writer) 168 | ShowCommandHelp(ctx, c.Name) 169 | return err 170 | } 171 | } 172 | 173 | context.Command = c 174 | c.Action(context) 175 | return nil 176 | } 177 | 178 | func (c Command) Names() []string { 179 | names := []string{c.Name} 180 | 181 | if c.ShortName != "" { 182 | names = append(names, c.ShortName) 183 | } 184 | 185 | return append(names, c.Aliases...) 
186 | } 187 | 188 | // Returns true if Command.Name or Command.ShortName matches given name 189 | func (c Command) HasName(name string) bool { 190 | for _, n := range c.Names() { 191 | if n == name { 192 | return true 193 | } 194 | } 195 | return false 196 | } 197 | 198 | func (c Command) startApp(ctx *Context) error { 199 | app := NewApp() 200 | 201 | // set the name and usage 202 | app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) 203 | if c.HelpName == "" { 204 | app.HelpName = c.HelpName 205 | } else { 206 | app.HelpName = app.Name 207 | } 208 | 209 | if c.Description != "" { 210 | app.Usage = c.Description 211 | } else { 212 | app.Usage = c.Usage 213 | } 214 | 215 | // set CommandNotFound 216 | app.CommandNotFound = ctx.App.CommandNotFound 217 | 218 | // set the flags and commands 219 | app.Commands = c.Subcommands 220 | app.Flags = c.Flags 221 | app.HideHelp = c.HideHelp 222 | 223 | app.Version = ctx.App.Version 224 | app.HideVersion = ctx.App.HideVersion 225 | app.Compiled = ctx.App.Compiled 226 | app.Author = ctx.App.Author 227 | app.Email = ctx.App.Email 228 | app.Writer = ctx.App.Writer 229 | 230 | // bash completion 231 | app.EnableBashCompletion = ctx.App.EnableBashCompletion 232 | if c.BashComplete != nil { 233 | app.BashComplete = c.BashComplete 234 | } 235 | 236 | // set the actions 237 | app.Before = c.Before 238 | app.After = c.After 239 | if c.Action != nil { 240 | app.Action = c.Action 241 | } else { 242 | app.Action = helpSubcommand.Action 243 | } 244 | 245 | for index, cc := range app.Commands { 246 | app.Commands[index].commandNamePath = []string{c.Name, cc.Name} 247 | } 248 | 249 | return app.RunAsSubcommand(ctx) 250 | } 251 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/blocksources/blocksourcebase_test.go: -------------------------------------------------------------------------------- 1 | package blocksources 2 | 3 | import ( 4 | "bytes" 5 | "github.com/Redundancy/go-sync/patcher" 6 | 7 | //"runtime" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | //----------------------------------------------------------------------------- 13 | type erroringRequester struct{} 14 | type testError struct{} 15 | 16 | func (e *testError) Error() string { 17 | return "test" 18 | } 19 | 20 | func (e *erroringRequester) DoRequest(startOffset int64, endOffset int64) (data []byte, err error) { 21 | return nil, &testError{} 22 | } 23 | 24 | func (e *erroringRequester) IsFatal(err error) bool { 25 | return true 26 | } 27 | 28 | //----------------------------------------------------------------------------- 29 | type FunctionRequester func(a, b int64) ([]byte, error) 30 | 31 | func (f FunctionRequester) DoRequest(startOffset int64, endOffset int64) (data []byte, err error) { 32 | return f(startOffset, endOffset) 33 | } 34 | 35 | func (f FunctionRequester) IsFatal(err error) bool { 36 | return true 37 | } 38 | 39 | //----------------------------------------------------------------------------- 40 | 41 | func init() { 42 | //if runtime.GOMAXPROCS(0) == 1 { 43 | //runtime.GOMAXPROCS(4) 44 | //} 45 | } 46 | 47 | func TestRangeSlice(t *testing.T) { 48 | a := []int{0, 1, 2, 3, 4} 49 | b := a[:len(a)-1] 50 | 51 | if len(b) != len(a)-1 { 52 | t.Errorf("b is wrong length, only supposed to remove one item: %v %v", a, b) 53 | } 54 | } 55 | 56 | func TestCreateAndCloseBlockSourceBase(t *testing.T) { 57 | b := NewBlockSourceBase(nil, nil, nil, 1, 1024) 58 | b.Close() 59 | 60 | // TODO: Race condition here. Can Close() block? 
61 | if !b.hasQuit { 62 | t.Fatal("Block source base did not exit") 63 | } 64 | } 65 | 66 | func TestErrorWatcher(t *testing.T) { 67 | e := errorWatcher{errorChannel: make(chan error)} 68 | 69 | if e.sendIfSet() != nil { 70 | t.Errorf("Channel should be nil when created") 71 | } 72 | 73 | e.setError(&testError{}) 74 | 75 | if e.sendIfSet() == nil { 76 | t.Errorf("Channel should be non-nil when error is set") 77 | } 78 | if e.Err() == nil { 79 | t.Errorf("Error should not be nil when set") 80 | } 81 | } 82 | 83 | func TestBlockSourceBaseError(t *testing.T) { 84 | b := NewBlockSourceBase( 85 | &erroringRequester{}, 86 | MakeNullFixedSizeResolver(4), 87 | nil, 88 | 1, 89 | 1024, 90 | ) 91 | defer b.Close() 92 | 93 | b.RequestBlocks(patcher.MissingBlockSpan{ 94 | BlockSize: 4, 95 | StartBlock: 1, 96 | EndBlock: 1, 97 | }) 98 | 99 | select { 100 | case <-time.After(time.Second): 101 | t.Fatal("Timed out waiting for error") 102 | case <-b.EncounteredError(): 103 | } 104 | 105 | } 106 | 107 | func TestBlockSourceRequest(t *testing.T) { 108 | expected := []byte("test") 109 | 110 | b := NewBlockSourceBase( 111 | FunctionRequester(func(start, end int64) (data []byte, err error) { 112 | return expected, nil 113 | }), 114 | MakeNullFixedSizeResolver(4), 115 | nil, 116 | 1, 117 | 1024, 118 | ) 119 | defer b.Close() 120 | 121 | b.RequestBlocks(patcher.MissingBlockSpan{ 122 | BlockSize: 4, 123 | StartBlock: 1, 124 | EndBlock: 1, 125 | }) 126 | 127 | result := <-b.GetResultChannel() 128 | 129 | if result.StartBlock != 1 { 130 | t.Errorf("Unexpected start block in result: %v", result.StartBlock) 131 | } 132 | if bytes.Compare(result.Data, expected) != 0 { 133 | t.Errorf("Unexpected data in result: %v", result.Data) 134 | } 135 | } 136 | 137 | func TestConcurrentBlockRequests(t *testing.T) { 138 | content := []byte("test") 139 | 140 | b := NewBlockSourceBase( 141 | FunctionRequester(func(start, end int64) (data []byte, err error) { 142 | return content[start:end], nil 143 | }), 144 | MakeNullFixedSizeResolver(2), 145 | nil, 146 | 2, 147 | 1024, 148 | ) 149 | defer b.Close() 150 | 151 | b.RequestBlocks(patcher.MissingBlockSpan{ 152 | BlockSize: 2, 153 | StartBlock: 0, 154 | EndBlock: 0, 155 | }) 156 | 157 | b.RequestBlocks(patcher.MissingBlockSpan{ 158 | BlockSize: 2, 159 | StartBlock: 1, 160 | EndBlock: 1, 161 | }) 162 | 163 | for i := uint(0); i < 2; i++ { 164 | select { 165 | case r := <-b.GetResultChannel(): 166 | if r.StartBlock != i { 167 | t.Errorf("Wrong start block: %v", r.StartBlock) 168 | } 169 | if bytes.Compare(r.Data, content[i*2:(i+1)*2]) != 0 { 170 | t.Errorf("Unexpected result content for result %v: %v", i+1, string(r.Data)) 171 | } 172 | case <-time.After(time.Second): 173 | t.Fatal("Timed out on request", i+1) 174 | } 175 | } 176 | } 177 | 178 | func TestOutOfOrderRequestCompletion(t *testing.T) { 179 | content := []byte("test") 180 | 181 | channeler := []chan bool{ 182 | make(chan bool), 183 | make(chan bool), 184 | } 185 | 186 | b := NewBlockSourceBase( 187 | FunctionRequester(func(start, end int64) (data []byte, err error) { 188 | // read from the channel based on the start 189 | <-(channeler[start]) 190 | return content[start:end], nil 191 | }), 192 | MakeNullFixedSizeResolver(1), 193 | nil, 194 | 2, 195 | 1024, 196 | ) 197 | defer b.Close() 198 | 199 | b.RequestBlocks(patcher.MissingBlockSpan{ 200 | BlockSize: 1, 201 | StartBlock: 0, 202 | EndBlock: 0, 203 | }) 204 | 205 | b.RequestBlocks(patcher.MissingBlockSpan{ 206 | BlockSize: 1, 207 | StartBlock: 1, 208 | EndBlock: 1, 209 | }) 
210 | 211 | // finish the second request 212 | channeler[1] <- true 213 | 214 | select { 215 | case <-b.GetResultChannel(): 216 | t.Error("Should not deliver any blocks yet") 217 | case <-time.After(time.Second): 218 | } 219 | 220 | // once the first block completes, we're ready to send both 221 | channeler[0] <- true 222 | 223 | for i := uint(0); i < 2; i++ { 224 | select { 225 | case r := <-b.GetResultChannel(): 226 | if r.StartBlock != i { 227 | t.Errorf( 228 | "Wrong start block: %v on result %v", 229 | r.StartBlock, 230 | i+1, 231 | ) 232 | } 233 | case <-time.After(time.Second): 234 | t.Fatal("Timed out on request", i+1) 235 | } 236 | } 237 | } 238 | 239 | func TestRequestCountLimiting(t *testing.T) { 240 | counter := make(chan int) 241 | waiter := make(chan bool) 242 | const ( 243 | MAX_CONCURRENCY = 2 244 | REQUESTS = 4 245 | ) 246 | call_counter := 0 247 | 248 | b := NewBlockSourceBase( 249 | FunctionRequester(func(start, end int64) (data []byte, err error) { 250 | counter <- 1 251 | call_counter += 1 252 | <-waiter 253 | counter <- -1 254 | return []byte{0, 0}, nil 255 | }), 256 | MakeNullFixedSizeResolver(1), 257 | nil, 258 | MAX_CONCURRENCY, 259 | 1024, 260 | ) 261 | defer b.Close() 262 | 263 | count := 0 264 | max := 0 265 | 266 | go func() { 267 | for { 268 | change, ok := <-counter 269 | 270 | if !ok { 271 | break 272 | } 273 | 274 | count += change 275 | 276 | if count > max { 277 | max = count 278 | } 279 | } 280 | }() 281 | 282 | for i := 0; i < REQUESTS; i++ { 283 | b.RequestBlocks(patcher.MissingBlockSpan{ 284 | BlockSize: 1, 285 | StartBlock: uint(i), 286 | EndBlock: uint(i), 287 | }) 288 | } 289 | 290 | for i := 0; i < REQUESTS; i++ { 291 | waiter <- true 292 | } 293 | 294 | close(counter) 295 | close(waiter) 296 | 297 | if max > MAX_CONCURRENCY { 298 | t.Errorf("Maximum requests in flight was greater than the requested concurrency: %v", max) 299 | } 300 | if call_counter != REQUESTS { 301 | t.Errorf("Total number of requests is not expected: %v", call_counter) 302 | } 303 | } 304 | -------------------------------------------------------------------------------- /vendor/src/github.com/Redundancy/go-sync/filechecksum/filechecksum.go: -------------------------------------------------------------------------------- 1 | /* 2 | package filechecksum provides the FileChecksumGenerator, whose main responsibility is to read a file, 3 | and generate both weak and strong checksums for every block. It is also used by the comparer, which 4 | will generate weak checksums for potential byte ranges that could match the index, and strong checksums 5 | if needed. 
6 | */ 7 | package filechecksum 8 | 9 | import ( 10 | "crypto/md5" 11 | "hash" 12 | "io" 13 | 14 | "github.com/Redundancy/go-sync/chunks" 15 | "github.com/Redundancy/go-sync/rollsum" 16 | ) 17 | 18 | // Rsync swapped to this after version 30 19 | // this is a factory function, because we don't actually want to share hash state 20 | var DefaultStrongHashGenerator = func() hash.Hash { 21 | return md5.New() 22 | } 23 | 24 | // We provide an overall hash of individual files 25 | var DefaultFileHashGenerator = func() hash.Hash { 26 | return md5.New() 27 | } 28 | 29 | // Uses all default hashes (MD5 & rollsum32) 30 | func NewFileChecksumGenerator(blocksize uint) *FileChecksumGenerator { 31 | return &FileChecksumGenerator{ 32 | BlockSize: blocksize, 33 | WeakRollingHash: rollsum.NewRollsum32Base(blocksize), 34 | //WeakRollingHash: rollsum.NewRollsum16Base(blocksize), 35 | StrongHash: DefaultStrongHashGenerator(), 36 | FileChecksumHash: DefaultFileHashGenerator(), 37 | } 38 | } 39 | 40 | type RollingHash interface { 41 | // the size of the hash output 42 | Size() int 43 | 44 | AddByte(b byte) 45 | RemoveByte(b byte, length int) 46 | 47 | AddBytes(bs []byte) 48 | RemoveBytes(bs []byte, length int) 49 | 50 | // pairs up bytes to do remove/add in the right order 51 | AddAndRemoveBytes(add []byte, remove []byte, length int) 52 | 53 | SetBlock(block []byte) 54 | 55 | GetSum(b []byte) 56 | Reset() 57 | } 58 | 59 | /* 60 | FileChecksumGenerator provides a description of what hashing functions to use to 61 | evaluate a file. Since the hashes store state, it is NOT safe to use a generator concurrently 62 | for different things. 63 | */ 64 | type FileChecksumGenerator struct { 65 | // See BlockBuffer 66 | WeakRollingHash RollingHash 67 | StrongHash hash.Hash 68 | FileChecksumHash hash.Hash 69 | BlockSize uint 70 | } 71 | 72 | // Reset all hashes to initial state 73 | func (check *FileChecksumGenerator) Reset() { 74 | check.WeakRollingHash.Reset() 75 | check.StrongHash.Reset() 76 | check.FileChecksumHash.Reset() 77 | } 78 | 79 | func (check *FileChecksumGenerator) ChecksumSize() int { 80 | return check.WeakRollingHash.Size() + check.GetStrongHash().Size() 81 | } 82 | 83 | func (check *FileChecksumGenerator) GetChecksumSizes() (int, int) { 84 | return check.WeakRollingHash.Size(), check.GetStrongHash().Size() 85 | } 86 | 87 | // Gets the Hash function for the overall file used on each block 88 | // defaults to md5 89 | func (check *FileChecksumGenerator) GetFileHash() hash.Hash { 90 | return check.FileChecksumHash 91 | } 92 | 93 | // Gets the Hash function for the strong hash used on each block 94 | // defaults to md5, but can be overridden by the generator 95 | func (check *FileChecksumGenerator) GetStrongHash() hash.Hash { 96 | return check.StrongHash 97 | } 98 | 99 | // GenerateChecksums reads each block of the input file, and outputs first the weak, then the strong checksum 100 | // to the output writer. It will return a checksum for the whole file. 101 | // Potentially speaking, this might be better producing a channel of blocks, which would remove the need for io from 102 | // a number of other places.
103 | func (check *FileChecksumGenerator) GenerateChecksums(inputFile io.Reader, output io.Writer) (fileChecksum []byte, err error) { 104 | for chunkResult := range check.StartChecksumGeneration(inputFile, 64, nil) { 105 | if chunkResult.Err != nil { 106 | return nil, chunkResult.Err 107 | } else if chunkResult.Filechecksum != nil { 108 | return chunkResult.Filechecksum, nil 109 | } 110 | 111 | for _, chunk := range chunkResult.Checksums { 112 | output.Write(chunk.WeakChecksum) 113 | output.Write(chunk.StrongChecksum) 114 | } 115 | } 116 | 117 | return nil, nil 118 | } 119 | 120 | type ChecksumResults struct { 121 | // Return multiple chunks at once for performance 122 | Checksums []chunks.ChunkChecksum 123 | // only used for the last item 124 | Filechecksum []byte 125 | // signals that this is the last item 126 | Err error 127 | } 128 | 129 | // A function or object that can compress blocks 130 | // the compression function must also write out the compressed blocks somewhere! 131 | // Compressed blocks should be independently inflatable 132 | type CompressionFunction func([]byte) (compressedSize int64, err error) 133 | 134 | func (check *FileChecksumGenerator) StartChecksumGeneration( 135 | inputFile io.Reader, 136 | blocksPerResult uint, 137 | compressionFunction CompressionFunction, 138 | ) <-chan ChecksumResults { 139 | resultChan := make(chan ChecksumResults) 140 | go check.generate(resultChan, blocksPerResult, compressionFunction, inputFile) 141 | return resultChan 142 | } 143 | 144 | func (check *FileChecksumGenerator) generate( 145 | resultChan chan ChecksumResults, 146 | blocksPerResult uint, 147 | compressionFunction CompressionFunction, 148 | inputFile io.Reader, 149 | ) { 150 | defer close(resultChan) 151 | 152 | fullChecksum := check.GetFileHash() 153 | strongHash := check.GetStrongHash() 154 | 155 | buffer := make([]byte, check.BlockSize) 156 | 157 | // ensure that the hashes are clean 158 | strongHash.Reset() 159 | fullChecksum.Reset() 160 | 161 | // We reset the hashes when done so we can reuse the generator 162 | defer check.WeakRollingHash.Reset() 163 | defer strongHash.Reset() 164 | defer fullChecksum.Reset() 165 | 166 | results := make([]chunks.ChunkChecksum, 0, blocksPerResult) 167 | 168 | i := uint(0) 169 | for { 170 | n, err := io.ReadFull(inputFile, buffer) 171 | section := buffer[:n] 172 | 173 | if n == 0 { 174 | break 175 | } 176 | 177 | // As hashes, the assumption is that they never error 178 | // additionally, we assume that the only reason not 179 | // to write a full block would be reaching the end of the file 180 | fullChecksum.Write(section) 181 | check.WeakRollingHash.SetBlock(section) 182 | strongHash.Write(section) 183 | 184 | strongChecksumValue := make([]byte, 0, strongHash.Size()) 185 | weakChecksumValue := make([]byte, check.WeakRollingHash.Size()) 186 | 187 | check.WeakRollingHash.GetSum(weakChecksumValue) 188 | strongChecksumValue = strongHash.Sum(strongChecksumValue) 189 | 190 | blockSize := int64(check.BlockSize) 191 | 192 | if compressionFunction != nil { 193 | blockSize, err = compressionFunction(section) 194 | } 195 | 196 | results = append( 197 | results, 198 | chunks.ChunkChecksum{ 199 | ChunkOffset: i, 200 | Size: blockSize, 201 | WeakChecksum: weakChecksumValue, 202 | StrongChecksum: strongChecksumValue, 203 | }, 204 | ) 205 | 206 | i++ 207 | 208 | if len(results) == cap(results) { 209 | resultChan <- ChecksumResults{ 210 | Checksums: results, 211 | } 212 | results = make([]chunks.ChunkChecksum, 0, blocksPerResult) 213 | } 214 | 215 | //
clear it again 216 | strongChecksumValue = strongChecksumValue[:0] 217 | 218 | // Reset the strong 219 | strongHash.Reset() 220 | 221 | if n != len(buffer) || err == io.EOF { 222 | break 223 | } 224 | } 225 | 226 | if len(results) > 0 { 227 | resultChan <- ChecksumResults{ 228 | Checksums: results, 229 | } 230 | } 231 | 232 | resultChan <- ChecksumResults{ 233 | Filechecksum: fullChecksum.Sum(nil), 234 | } 235 | 236 | return 237 | } 238 | --------------------------------------------------------------------------------
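To round off the checksum generator above, here is a hedged usage sketch (not part of the repository): a small standalone program that checksums an in-memory reader with a 4-byte block size. The block size and input text are arbitrary choices for illustration; the bytes collected in the buffer are the per-block weak-then-strong checksum stream that the index-reading side consumes.

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/Redundancy/go-sync/filechecksum"
)

func main() {
	// 4-byte blocks keep the example tiny; real usage picks a much larger block size.
	generator := filechecksum.NewFileChecksumGenerator(4)

	input := strings.NewReader("The quick brown fox jumps over the lazy dog")
	var checksumStream bytes.Buffer

	// GenerateChecksums writes the weak then strong checksum of every block into
	// checksumStream and returns the checksum of the whole input.
	fileSum, err := generator.GenerateChecksums(input, &checksumStream)
	if err != nil {
		panic(err)
	}

	fmt.Printf("file checksum: %x (%d index bytes)\n", fileSum, checksumStream.Len())
}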