├── .editorconfig ├── .gitignore ├── CHANGELOG.md ├── CODEOWNERS ├── Gopkg.lock ├── Gopkg.toml ├── LICENSE ├── Makefile ├── README.md ├── autofile ├── README.md ├── autofile.go ├── autofile_test.go ├── cmd │ └── logjack.go ├── group.go ├── group_test.go └── sighup_watcher.go ├── bech32 ├── bech32.go └── bech32_test.go ├── circle.yml ├── cli ├── flags │ ├── log_level.go │ └── log_level_test.go ├── helper.go ├── setup.go └── setup_test.go ├── clist ├── clist.go └── clist_test.go ├── common ├── LICENSE ├── array.go ├── async.go ├── async_test.go ├── bit_array.go ├── bit_array_test.go ├── bytes.go ├── bytes_test.go ├── byteslice.go ├── byteslice_test.go ├── cmap.go ├── cmap_test.go ├── colors.go ├── date.go ├── date_test.go ├── errors.go ├── errors_test.go ├── heap.go ├── int.go ├── int_test.go ├── io.go ├── kvpair.go ├── math.go ├── net.go ├── net_test.go ├── nil.go ├── os.go ├── os_test.go ├── random.go ├── random_test.go ├── repeat_timer.go ├── repeat_timer_test.go ├── service.go ├── service_test.go ├── string.go ├── string_test.go ├── throttle_timer.go ├── throttle_timer_test.go ├── types.pb.go ├── types.proto └── word.go ├── db ├── LICENSE.md ├── README.md ├── backend_test.go ├── c_level_db.go ├── c_level_db_test.go ├── common_test.go ├── db.go ├── db_test.go ├── debug_db.go ├── fsdb.go ├── go_level_db.go ├── go_level_db_test.go ├── mem_batch.go ├── mem_db.go ├── prefix_db.go ├── prefix_db_test.go ├── remotedb │ ├── doc.go │ ├── grpcdb │ │ ├── client.go │ │ ├── doc.go │ │ ├── example_test.go │ │ └── server.go │ ├── proto │ │ ├── defs.pb.go │ │ └── defs.proto │ ├── remotedb.go │ └── remotedb_test.go ├── types.go ├── util.go └── util_test.go ├── flowrate ├── README.md ├── flowrate.go ├── io.go ├── io_test.go └── util.go ├── glide.lock ├── glide.yaml ├── log ├── filter.go ├── filter_test.go ├── logger.go ├── nop_logger.go ├── testing_logger.go ├── tm_json_logger.go ├── tm_logger.go ├── tm_logger_test.go ├── tmfmt_logger.go ├── tmfmt_logger_test.go ├── tracing_logger.go └── tracing_logger_test.go ├── merge.sh ├── merkle ├── README.md ├── simple_map.go ├── simple_map_test.go ├── simple_proof.go ├── simple_tree.go ├── simple_tree_test.go ├── tmhash │ ├── hash.go │ └── hash_test.go └── types.go ├── test.sh ├── test ├── assert.go └── mutate.go └── version └── version.go /.editorconfig: -------------------------------------------------------------------------------- 1 | # top-most EditorConfig file 2 | root = true 3 | 4 | # Unix-style newlines with a newline ending every file 5 | [*] 6 | charset = utf-8 7 | end_of_line = lf 8 | insert_final_newline = true 9 | trim_trailing_whitespace = true 10 | 11 | [Makefile] 12 | indent_style = tab 13 | 14 | [*.sh] 15 | indent_style = tab 16 | 17 | [*.proto] 18 | indent_style = space 19 | indent_size = 2 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.sw[opqr] 2 | vendor 3 | .glide 4 | 5 | pubsub/query/fuzz_test/output 6 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @melekes @ebuchman 2 | *.md @zramsay 3 | *.rst @zramsay 4 | -------------------------------------------------------------------------------- /Gopkg.toml: -------------------------------------------------------------------------------- 1 | # Gopkg.toml example 2 | # 3 | # Refer to 
https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md 4 | # for detailed Gopkg.toml documentation. 5 | # 6 | # required = ["github.com/user/thing/cmd/thing"] 7 | # ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] 8 | # 9 | # [[constraint]] 10 | # name = "github.com/user/project" 11 | # version = "1.0.0" 12 | # 13 | # [[constraint]] 14 | # name = "github.com/user/project2" 15 | # branch = "dev" 16 | # source = "github.com/myfork/project2" 17 | # 18 | # [[override]] 19 | # name = "github.com/x/y" 20 | # version = "2.4.0" 21 | # 22 | # [prune] 23 | # non-go = false 24 | # go-tests = true 25 | # unused-packages = true 26 | 27 | 28 | [[constraint]] 29 | branch = "master" 30 | name = "github.com/fortytw2/leaktest" 31 | 32 | [[constraint]] 33 | name = "github.com/go-kit/kit" 34 | version = "0.6.0" 35 | 36 | [[constraint]] 37 | name = "github.com/go-logfmt/logfmt" 38 | version = "0.3.0" 39 | 40 | [[constraint]] 41 | name = "github.com/gogo/protobuf" 42 | version = "1.0.0" 43 | 44 | [[constraint]] 45 | branch = "master" 46 | name = "github.com/jmhodges/levigo" 47 | 48 | [[constraint]] 49 | name = "github.com/pkg/errors" 50 | version = "0.8.0" 51 | 52 | [[constraint]] 53 | name = "github.com/spf13/cobra" 54 | version = "0.0.1" 55 | 56 | [[constraint]] 57 | name = "github.com/spf13/viper" 58 | version = "1.0.0" 59 | 60 | [[constraint]] 61 | name = "github.com/stretchr/testify" 62 | version = "1.2.1" 63 | 64 | [[constraint]] 65 | name = "github.com/btcsuite/btcutil" 66 | branch ="master" 67 | [prune] 68 | go-tests = true 69 | unused-packages = true 70 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | GOTOOLS = \ 2 | github.com/golang/dep/cmd/dep \ 3 | github.com/golang/protobuf/protoc-gen-go \ 4 | github.com/square/certstrap 5 | # github.com/alecthomas/gometalinter.v2 \ 6 | 7 | GOTOOLS_CHECK = dep gometalinter.v2 protoc protoc-gen-go 8 | INCLUDE = -I=. -I=${GOPATH}/src 9 | 10 | all: check get_vendor_deps protoc grpc_dbserver build test install metalinter 11 | 12 | check: check_tools 13 | 14 | ######################################## 15 | ### Build 16 | 17 | protoc: 18 | ## If you get the following error, 19 | ## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory" 20 | ## See https://stackoverflow.com/a/25518702 21 | protoc $(INCLUDE) --go_out=plugins=grpc:. common/*.proto 22 | @echo "--> adding nolint declarations to protobuf generated files" 23 | @awk '/package common/ { print "//nolint: gas"; print; next }1' common/types.pb.go > common/types.pb.go.new 24 | @mv common/types.pb.go.new common/types.pb.go 25 | 26 | build: 27 | # Nothing to build! 28 | 29 | install: 30 | # Nothing to install! 
31 | 32 | 33 | ######################################## 34 | ### Tools & dependencies 35 | 36 | check_tools: 37 | @# https://stackoverflow.com/a/25668869 38 | @echo "Found tools: $(foreach tool,$(GOTOOLS_CHECK),\ 39 | $(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))" 40 | 41 | get_tools: 42 | @echo "--> Installing tools" 43 | go get -u -v $(GOTOOLS) 44 | # @gometalinter.v2 --install 45 | 46 | get_protoc: 47 | @# https://github.com/google/protobuf/releases 48 | curl -L https://github.com/google/protobuf/releases/download/v3.4.1/protobuf-cpp-3.4.1.tar.gz | tar xvz && \ 49 | cd protobuf-3.4.1 && \ 50 | DIST_LANG=cpp ./configure && \ 51 | make && \ 52 | make install && \ 53 | cd .. && \ 54 | rm -rf protobuf-3.4.1 55 | 56 | update_tools: 57 | @echo "--> Updating tools" 58 | @go get -u $(GOTOOLS) 59 | 60 | get_vendor_deps: 61 | @rm -rf vendor/ 62 | @echo "--> Running dep ensure" 63 | @dep ensure 64 | 65 | 66 | ######################################## 67 | ### Testing 68 | 69 | gen_certs: clean_certs 70 | ## Generating certificates for TLS testing... 71 | certstrap init --common-name "tendermint.com" --passphrase "" 72 | certstrap request-cert -ip "::" --passphrase "" 73 | certstrap sign "::" --CA "tendermint.com" --passphrase "" 74 | mv out/::.crt out/::.key db/remotedb 75 | 76 | clean_certs: 77 | ## Cleaning TLS testing certificates... 78 | rm -rf out 79 | rm -f db/remotedb/::.crt db/remotedb/::.key 80 | 81 | test: gen_certs 82 | GOCACHE=off go test -tags gcc $(shell go list ./... | grep -v vendor) 83 | make clean_certs 84 | 85 | test100: 86 | @for i in {1..100}; do make test; done 87 | 88 | 89 | ######################################## 90 | ### Formatting, linting, and vetting 91 | 92 | fmt: 93 | @go fmt ./... 94 | 95 | metalinter: 96 | @echo "==> Running linter" 97 | gometalinter.v2 --vendor --deadline=600s --disable-all \ 98 | --enable=deadcode \ 99 | --enable=goconst \ 100 | --enable=goimports \ 101 | --enable=gosimple \ 102 | --enable=ineffassign \ 103 | --enable=megacheck \ 104 | --enable=misspell \ 105 | --enable=staticcheck \ 106 | --enable=safesql \ 107 | --enable=structcheck \ 108 | --enable=unconvert \ 109 | --enable=unused \ 110 | --enable=varcheck \ 111 | --enable=vetshadow \ 112 | ./... 113 | 114 | #--enable=maligned \ 115 | #--enable=gas \ 116 | #--enable=aligncheck \ 117 | #--enable=dupl \ 118 | #--enable=errcheck \ 119 | #--enable=gocyclo \ 120 | #--enable=golint \ <== comments on anything exported 121 | #--enable=gotype \ 122 | #--enable=interfacer \ 123 | #--enable=unparam \ 124 | #--enable=vet \ 125 | 126 | metalinter_all: 127 | protoc $(INCLUDE) --lint_out=. types/*.proto 128 | gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./... 129 | 130 | 131 | # To avoid unintended conflicts with file names, always add to .PHONY 132 | # unless there is a reason not to. 133 | # https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html 134 | .PHONY: check protoc build check_tools get_tools get_protoc update_tools get_vendor_deps test fmt metalinter metalinter_all gen_certs clean_certs 135 | 136 | grpc_dbserver: 137 | protoc -I db/remotedb/proto/ db/remotedb/proto/defs.proto --go_out=plugins=grpc:db/remotedb/proto 138 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TMLIBS 2 | 3 | This repo is a home for various small packages. 4 | 5 | ## autofile 6 | 7 | Autofile is file access with automatic log rotation. 
A group of files is maintained and rotation happens 8 | when the leading file gets too big. It also provides a reader for reading from the file group. 9 | 10 | ## cli 11 | 12 | CLI wraps the `cobra` and `viper` packages and handles some common elements of building a CLI like flags and env vars for the home directory and the logger. 13 | 14 | ## clist 15 | 16 | Clist provides a linked list that is safe for concurrent access by many readers. 17 | 18 | ## common 19 | 20 | Common provides a hodgepodge of useful functions. 21 | 22 | ## db 23 | 24 | DB provides a database interface and a number of implementations, including ones using an in-memory map, the filesystem directory structure, 25 | an implementation of LevelDB in Go, and the official LevelDB in C. 26 | 27 | ## events 28 | 29 | Events is a synchronous PubSub package. 30 | 31 | ## flowrate 32 | 33 | Flowrate is a fork of https://github.com/mxk/go-flowrate that added a `SetREMA` method. 34 | 35 | ## log 36 | 37 | Log is a log package structured around key-value pairs that allows logging level to be set differently for different keys. 38 | 39 | ## merkle 40 | 41 | Merkle provides a simple static merkle tree and corresponding proofs. 42 | 43 | ## process 44 | 45 | Process is a simple utility for spawning OS processes. 46 | 47 | ## pubsub 48 | 49 | PubSub is an asynchronous PubSub package. 50 | -------------------------------------------------------------------------------- /autofile/README.md: -------------------------------------------------------------------------------- 1 | # go-autofile 2 | -------------------------------------------------------------------------------- /autofile/autofile.go: -------------------------------------------------------------------------------- 1 | package autofile 2 | 3 | import ( 4 | "os" 5 | "sync" 6 | "time" 7 | 8 | cmn "github.com/tendermint/tmlibs/common" 9 | ) 10 | 11 | /* AutoFile usage 12 | 13 | // Create/Append to ./autofile_test 14 | af, err := OpenAutoFile("autofile_test") 15 | if err != nil { 16 | panic(err) 17 | } 18 | 19 | // Stream of writes. 20 | // During this time, the file may be moved e.g. by logRotate. 21 | for i := 0; i < 60; i++ { 22 | af.Write([]byte(Fmt("LOOP(%v)", i))) 23 | time.Sleep(time.Second) 24 | } 25 | 26 | // Close the AutoFile 27 | err = af.Close() 28 | if err != nil { 29 | panic(err) 30 | } 31 | */ 32 | 33 | const autoFileOpenDuration = 1000 * time.Millisecond 34 | 35 | // Automatically closes and re-opens file for writing. 36 | // This is useful for using a log file with the logrotate tool. 37 | type AutoFile struct { 38 | ID string 39 | Path string 40 | ticker *time.Ticker 41 | mtx sync.Mutex 42 | file *os.File 43 | } 44 | 45 | func OpenAutoFile(path string) (af *AutoFile, err error) { 46 | af = &AutoFile{ 47 | ID: cmn.RandStr(12) + ":" + path, 48 | Path: path, 49 | ticker: time.NewTicker(autoFileOpenDuration), 50 | } 51 | if err = af.openFile(); err != nil { 52 | return 53 | } 54 | go af.processTicks() 55 | sighupWatchers.addAutoFile(af) 56 | return 57 | } 58 | 59 | func (af *AutoFile) Close() error { 60 | af.ticker.Stop() 61 | err := af.closeFile() 62 | sighupWatchers.removeAutoFile(af) 63 | return err 64 | } 65 | 66 | func (af *AutoFile) processTicks() { 67 | for { 68 | _, ok := <-af.ticker.C 69 | if !ok { 70 | return // Done.
71 | } 72 | af.closeFile() 73 | } 74 | } 75 | 76 | func (af *AutoFile) closeFile() (err error) { 77 | af.mtx.Lock() 78 | defer af.mtx.Unlock() 79 | 80 | file := af.file 81 | if file == nil { 82 | return nil 83 | } 84 | af.file = nil 85 | return file.Close() 86 | } 87 | 88 | func (af *AutoFile) Write(b []byte) (n int, err error) { 89 | af.mtx.Lock() 90 | defer af.mtx.Unlock() 91 | 92 | if af.file == nil { 93 | if err = af.openFile(); err != nil { 94 | return 95 | } 96 | } 97 | 98 | n, err = af.file.Write(b) 99 | return 100 | } 101 | 102 | func (af *AutoFile) Sync() error { 103 | af.mtx.Lock() 104 | defer af.mtx.Unlock() 105 | 106 | if af.file == nil { 107 | if err := af.openFile(); err != nil { 108 | return err 109 | } 110 | } 111 | return af.file.Sync() 112 | } 113 | 114 | func (af *AutoFile) openFile() error { 115 | file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) 116 | if err != nil { 117 | return err 118 | } 119 | af.file = file 120 | return nil 121 | } 122 | 123 | func (af *AutoFile) Size() (int64, error) { 124 | af.mtx.Lock() 125 | defer af.mtx.Unlock() 126 | 127 | if af.file == nil { 128 | err := af.openFile() 129 | if err != nil { 130 | if err == os.ErrNotExist { 131 | return 0, nil 132 | } 133 | return -1, err 134 | } 135 | } 136 | stat, err := af.file.Stat() 137 | if err != nil { 138 | return -1, err 139 | } 140 | return stat.Size(), nil 141 | 142 | } 143 | -------------------------------------------------------------------------------- /autofile/autofile_test.go: -------------------------------------------------------------------------------- 1 | package autofile 2 | 3 | import ( 4 | "os" 5 | "sync/atomic" 6 | "syscall" 7 | "testing" 8 | "time" 9 | 10 | cmn "github.com/tendermint/tmlibs/common" 11 | ) 12 | 13 | func TestSIGHUP(t *testing.T) { 14 | 15 | // First, create an AutoFile writing to a tempfile dir 16 | file, name := cmn.Tempfile("sighup_test") 17 | if err := file.Close(); err != nil { 18 | t.Fatalf("Error creating tempfile: %v", err) 19 | } 20 | // Here is the actual AutoFile 21 | af, err := OpenAutoFile(name) 22 | if err != nil { 23 | t.Fatalf("Error creating autofile: %v", err) 24 | } 25 | 26 | // Write to the file. 27 | _, err = af.Write([]byte("Line 1\n")) 28 | if err != nil { 29 | t.Fatalf("Error writing to autofile: %v", err) 30 | } 31 | _, err = af.Write([]byte("Line 2\n")) 32 | if err != nil { 33 | t.Fatalf("Error writing to autofile: %v", err) 34 | } 35 | 36 | // Move the file over 37 | err = os.Rename(name, name+"_old") 38 | if err != nil { 39 | t.Fatalf("Error moving autofile: %v", err) 40 | } 41 | 42 | // Send SIGHUP to self. 43 | oldSighupCounter := atomic.LoadInt32(&sighupCounter) 44 | syscall.Kill(syscall.Getpid(), syscall.SIGHUP) 45 | 46 | // Wait a bit... signals are not handled synchronously. 47 | for atomic.LoadInt32(&sighupCounter) == oldSighupCounter { 48 | time.Sleep(time.Millisecond * 10) 49 | } 50 | 51 | // Write more to the file. 
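// The SIGHUP handler closed the handle that now points at the renamed "_old" file,
// so the next Write re-opens a fresh file at the original path.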
52 | _, err = af.Write([]byte("Line 3\n")) 53 | if err != nil { 54 | t.Fatalf("Error writing to autofile: %v", err) 55 | } 56 | _, err = af.Write([]byte("Line 4\n")) 57 | if err != nil { 58 | t.Fatalf("Error writing to autofile: %v", err) 59 | } 60 | if err := af.Close(); err != nil { 61 | t.Fatalf("Error closing autofile") 62 | } 63 | 64 | // Both files should exist 65 | if body := cmn.MustReadFile(name + "_old"); string(body) != "Line 1\nLine 2\n" { 66 | t.Errorf("Unexpected body %s", body) 67 | } 68 | if body := cmn.MustReadFile(name); string(body) != "Line 3\nLine 4\n" { 69 | t.Errorf("Unexpected body %s", body) 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /autofile/cmd/logjack.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "io" 7 | "os" 8 | "strconv" 9 | "strings" 10 | 11 | auto "github.com/tendermint/tmlibs/autofile" 12 | cmn "github.com/tendermint/tmlibs/common" 13 | ) 14 | 15 | const Version = "0.0.1" 16 | const sleepSeconds = 1 // Every second 17 | const readBufferSize = 1024 // 1KB at a time 18 | 19 | // Parse command-line options 20 | func parseFlags() (headPath string, chopSize int64, limitSize int64, version bool) { 21 | var flagSet = flag.NewFlagSet(os.Args[0], flag.ExitOnError) 22 | var chopSizeStr, limitSizeStr string 23 | flagSet.StringVar(&headPath, "head", "logjack.out", "Destination (head) file.") 24 | flagSet.StringVar(&chopSizeStr, "chop", "100M", "Move file if greater than this") 25 | flagSet.StringVar(&limitSizeStr, "limit", "10G", "Only keep this much (for each specified file). Remove old files.") 26 | flagSet.BoolVar(&version, "version", false, "Version") 27 | flagSet.Parse(os.Args[1:]) 28 | chopSize = parseBytesize(chopSizeStr) 29 | limitSize = parseBytesize(limitSizeStr) 30 | return 31 | } 32 | 33 | func main() { 34 | 35 | // Read options 36 | headPath, chopSize, limitSize, version := parseFlags() 37 | if version { 38 | fmt.Printf("logjack version %v\n", Version) 39 | return 40 | } 41 | 42 | // Open Group 43 | group, err := auto.OpenGroup(headPath) 44 | if err != nil { 45 | fmt.Printf("logjack couldn't create output file %v\n", headPath) 46 | os.Exit(1) 47 | } 48 | group.SetHeadSizeLimit(chopSize) 49 | group.SetTotalSizeLimit(limitSize) 50 | err = group.Start() 51 | if err != nil { 52 | fmt.Printf("logjack couldn't start with file %v\n", headPath) 53 | os.Exit(1) 54 | } 55 | 56 | go func() { 57 | // Forever, read from stdin and write to AutoFile. 
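// Note that Read can return n > 0 together with an error (including io.EOF),
// so the buffered bytes are written and flushed before the error is handled.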
58 | buf := make([]byte, readBufferSize) 59 | for { 60 | n, err := os.Stdin.Read(buf) 61 | group.Write(buf[:n]) 62 | group.Flush() 63 | if err != nil { 64 | group.Stop() 65 | if err == io.EOF { 66 | os.Exit(0) 67 | } else { 68 | fmt.Println("logjack errored") 69 | os.Exit(1) 70 | } 71 | } 72 | } 73 | }() 74 | 75 | // Trap signal 76 | cmn.TrapSignal(func() { 77 | fmt.Println("logjack shutting down") 78 | }) 79 | } 80 | 81 | func parseBytesize(chopSize string) int64 { 82 | // Handle suffix multiplier 83 | var multiplier int64 = 1 84 | if strings.HasSuffix(chopSize, "T") { 85 | multiplier = 1024 * 1024 * 1024 * 1024 86 | chopSize = chopSize[:len(chopSize)-1] 87 | } 88 | if strings.HasSuffix(chopSize, "G") { 89 | multiplier = 1024 * 1024 * 1024 90 | chopSize = chopSize[:len(chopSize)-1] 91 | } 92 | if strings.HasSuffix(chopSize, "M") { 93 | multiplier = 1024 * 1024 94 | chopSize = chopSize[:len(chopSize)-1] 95 | } 96 | if strings.HasSuffix(chopSize, "K") { 97 | multiplier = 1024 98 | chopSize = chopSize[:len(chopSize)-1] 99 | } 100 | 101 | // Parse the numeric part 102 | chopSizeInt, err := strconv.Atoi(chopSize) 103 | if err != nil { 104 | panic(err) 105 | } 106 | 107 | return int64(chopSizeInt) * multiplier 108 | } 109 | -------------------------------------------------------------------------------- /autofile/sighup_watcher.go: -------------------------------------------------------------------------------- 1 | package autofile 2 | 3 | import ( 4 | "os" 5 | "os/signal" 6 | "sync" 7 | "sync/atomic" 8 | "syscall" 9 | ) 10 | 11 | func init() { 12 | initSighupWatcher() 13 | } 14 | 15 | var sighupWatchers *SighupWatcher 16 | var sighupCounter int32 // For testing 17 | 18 | func initSighupWatcher() { 19 | sighupWatchers = newSighupWatcher() 20 | 21 | c := make(chan os.Signal, 1) 22 | signal.Notify(c, syscall.SIGHUP) 23 | 24 | go func() { 25 | for range c { 26 | sighupWatchers.closeAll() 27 | atomic.AddInt32(&sighupCounter, 1) 28 | } 29 | }() 30 | } 31 | 32 | // Watches for SIGHUP events and notifies registered AutoFiles 33 | type SighupWatcher struct { 34 | mtx sync.Mutex 35 | autoFiles map[string]*AutoFile 36 | } 37 | 38 | func newSighupWatcher() *SighupWatcher { 39 | return &SighupWatcher{ 40 | autoFiles: make(map[string]*AutoFile, 10), 41 | } 42 | } 43 | 44 | func (w *SighupWatcher) addAutoFile(af *AutoFile) { 45 | w.mtx.Lock() 46 | w.autoFiles[af.ID] = af 47 | w.mtx.Unlock() 48 | } 49 | 50 | // If AutoFile isn't registered or was already removed, does nothing.
51 | func (w *SighupWatcher) removeAutoFile(af *AutoFile) { 52 | w.mtx.Lock() 53 | delete(w.autoFiles, af.ID) 54 | w.mtx.Unlock() 55 | } 56 | 57 | func (w *SighupWatcher) closeAll() { 58 | w.mtx.Lock() 59 | for _, af := range w.autoFiles { 60 | af.closeFile() 61 | } 62 | w.mtx.Unlock() 63 | } 64 | -------------------------------------------------------------------------------- /bech32/bech32.go: -------------------------------------------------------------------------------- 1 | package bech32 2 | 3 | import ( 4 | "github.com/btcsuite/btcutil/bech32" 5 | "github.com/pkg/errors" 6 | ) 7 | 8 | //ConvertAndEncode converts from a base64 encoded byte string to base32 encoded byte string and then to bech32 9 | func ConvertAndEncode(hrp string, data []byte) (string, error) { 10 | converted, err := bech32.ConvertBits(data, 8, 5, true) 11 | if err != nil { 12 | return "", errors.Wrap(err, "encoding bech32 failed") 13 | } 14 | return bech32.Encode(hrp, converted) 15 | 16 | } 17 | 18 | //DecodeAndConvert decodes a bech32 encoded string and converts to base64 encoded bytes 19 | func DecodeAndConvert(bech string) (string, []byte, error) { 20 | hrp, data, err := bech32.Decode(bech) 21 | if err != nil { 22 | return "", nil, errors.Wrap(err, "decoding bech32 failed") 23 | } 24 | converted, err := bech32.ConvertBits(data, 5, 8, false) 25 | if err != nil { 26 | return "", nil, errors.Wrap(err, "decoding bech32 failed") 27 | } 28 | return hrp, converted, nil 29 | } 30 | -------------------------------------------------------------------------------- /bech32/bech32_test.go: -------------------------------------------------------------------------------- 1 | package bech32_test 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha256" 6 | "testing" 7 | 8 | "github.com/tendermint/tmlibs/bech32" 9 | ) 10 | 11 | func TestEncodeAndDecode(t *testing.T) { 12 | 13 | sum := sha256.Sum256([]byte("hello world\n")) 14 | 15 | bech, err := bech32.ConvertAndEncode("shasum", sum[:]) 16 | 17 | if err != nil { 18 | t.Error(err) 19 | } 20 | hrp, data, err := bech32.DecodeAndConvert(bech) 21 | 22 | if err != nil { 23 | t.Error(err) 24 | } 25 | if hrp != "shasum" { 26 | t.Error("Invalid hrp") 27 | } 28 | if bytes.Compare(data, sum[:]) != 0 { 29 | t.Error("Invalid decode") 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /circle.yml: -------------------------------------------------------------------------------- 1 | machine: 2 | environment: 3 | GOPATH: "${HOME}/.go_workspace" 4 | PROJECT_PARENT_PATH: "$GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME" 5 | PROJECT_PATH: $GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME 6 | hosts: 7 | localhost: 127.0.0.1 8 | 9 | dependencies: 10 | override: 11 | - mkdir -p "$PROJECT_PARENT_PATH" 12 | - ln -sf "$HOME/$CIRCLE_PROJECT_REPONAME/" "$PROJECT_PATH" 13 | post: 14 | - go version 15 | 16 | test: 17 | override: 18 | - cd $PROJECT_PATH && make get_tools && make get_vendor_deps && bash ./test.sh 19 | post: 20 | - cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt 21 | - cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}" 22 | -------------------------------------------------------------------------------- /cli/flags/log_level.go: -------------------------------------------------------------------------------- 1 | package flags 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/pkg/errors" 8 | 9 | "github.com/tendermint/tmlibs/log" 10 | ) 11 | 12 | const ( 13 | defaultLogLevelKey = "*" 
14 | ) 15 | 16 | // ParseLogLevel parses complex log level - comma-separated 17 | // list of module:level pairs with an optional *:level pair (* means 18 | // all other modules). 19 | // 20 | // Example: 21 | // ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout), "info") 22 | func ParseLogLevel(lvl string, logger log.Logger, defaultLogLevelValue string) (log.Logger, error) { 23 | if lvl == "" { 24 | return nil, errors.New("Empty log level") 25 | } 26 | 27 | l := lvl 28 | 29 | // prefix simple one word levels (e.g. "info") with "*" 30 | if !strings.Contains(l, ":") { 31 | l = defaultLogLevelKey + ":" + l 32 | } 33 | 34 | options := make([]log.Option, 0) 35 | 36 | isDefaultLogLevelSet := false 37 | var option log.Option 38 | var err error 39 | 40 | list := strings.Split(l, ",") 41 | for _, item := range list { 42 | moduleAndLevel := strings.Split(item, ":") 43 | 44 | if len(moduleAndLevel) != 2 { 45 | return nil, fmt.Errorf("Expected list in a form of \"module:level\" pairs, given pair %s, list %s", item, list) 46 | } 47 | 48 | module := moduleAndLevel[0] 49 | level := moduleAndLevel[1] 50 | 51 | if module == defaultLogLevelKey { 52 | option, err = log.AllowLevel(level) 53 | if err != nil { 54 | return nil, errors.Wrap(err, fmt.Sprintf("Failed to parse default log level (pair %s, list %s)", item, l)) 55 | } 56 | options = append(options, option) 57 | isDefaultLogLevelSet = true 58 | } else { 59 | switch level { 60 | case "debug": 61 | option = log.AllowDebugWith("module", module) 62 | case "info": 63 | option = log.AllowInfoWith("module", module) 64 | case "error": 65 | option = log.AllowErrorWith("module", module) 66 | case "none": 67 | option = log.AllowNoneWith("module", module) 68 | default: 69 | return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" log level, given %s (pair %s, list %s)", level, item, list) 70 | } 71 | options = append(options, option) 72 | 73 | } 74 | } 75 | 76 | // if "*" is not provided, set default global level 77 | if !isDefaultLogLevelSet { 78 | option, err = log.AllowLevel(defaultLogLevelValue) 79 | if err != nil { 80 | return nil, err 81 | } 82 | options = append(options, option) 83 | } 84 | 85 | return log.NewFilter(logger, options...), nil 86 | } 87 | -------------------------------------------------------------------------------- /cli/flags/log_level_test.go: -------------------------------------------------------------------------------- 1 | package flags_test 2 | 3 | import ( 4 | "bytes" 5 | "strings" 6 | "testing" 7 | 8 | tmflags "github.com/tendermint/tmlibs/cli/flags" 9 | "github.com/tendermint/tmlibs/log" 10 | ) 11 | 12 | const ( 13 | defaultLogLevelValue = "info" 14 | ) 15 | 16 | func TestParseLogLevel(t *testing.T) { 17 | var buf bytes.Buffer 18 | jsonLogger := log.NewTMJSONLogger(&buf) 19 | 20 | correctLogLevels := []struct { 21 | lvl string 22 | expectedLogLines []string 23 | }{ 24 | {"mempool:error", []string{ 25 | ``, // if no default is given, assume info 26 | ``, 27 | `{"_msg":"Mesmero","level":"error","module":"mempool"}`, 28 | `{"_msg":"Mind","level":"info","module":"state"}`, // if no default is given, assume info 29 | ``}}, 30 | 31 | {"mempool:error,*:debug", []string{ 32 | `{"_msg":"Kingpin","level":"debug","module":"wire"}`, 33 | ``, 34 | `{"_msg":"Mesmero","level":"error","module":"mempool"}`, 35 | `{"_msg":"Mind","level":"info","module":"state"}`, 36 | `{"_msg":"Gideon","level":"debug"}`}}, 37 | 38 | {"*:debug,wire:none", []string{ 39 | ``, 40 | `{"_msg":"Kitty 
Pryde","level":"info","module":"mempool"}`, 41 | `{"_msg":"Mesmero","level":"error","module":"mempool"}`, 42 | `{"_msg":"Mind","level":"info","module":"state"}`, 43 | `{"_msg":"Gideon","level":"debug"}`}}, 44 | } 45 | 46 | for _, c := range correctLogLevels { 47 | logger, err := tmflags.ParseLogLevel(c.lvl, jsonLogger, defaultLogLevelValue) 48 | if err != nil { 49 | t.Fatal(err) 50 | } 51 | 52 | buf.Reset() 53 | 54 | logger.With("module", "wire").Debug("Kingpin") 55 | if have := strings.TrimSpace(buf.String()); c.expectedLogLines[0] != have { 56 | t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[0], have, c.lvl) 57 | } 58 | 59 | buf.Reset() 60 | 61 | logger.With("module", "mempool").Info("Kitty Pryde") 62 | if have := strings.TrimSpace(buf.String()); c.expectedLogLines[1] != have { 63 | t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[1], have, c.lvl) 64 | } 65 | 66 | buf.Reset() 67 | 68 | logger.With("module", "mempool").Error("Mesmero") 69 | if have := strings.TrimSpace(buf.String()); c.expectedLogLines[2] != have { 70 | t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[2], have, c.lvl) 71 | } 72 | 73 | buf.Reset() 74 | 75 | logger.With("module", "state").Info("Mind") 76 | if have := strings.TrimSpace(buf.String()); c.expectedLogLines[3] != have { 77 | t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[3], have, c.lvl) 78 | } 79 | 80 | buf.Reset() 81 | 82 | logger.Debug("Gideon") 83 | if have := strings.TrimSpace(buf.String()); c.expectedLogLines[4] != have { 84 | t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[4], have, c.lvl) 85 | } 86 | } 87 | 88 | incorrectLogLevel := []string{"some", "mempool:some", "*:some,mempool:error"} 89 | for _, lvl := range incorrectLogLevel { 90 | if _, err := tmflags.ParseLogLevel(lvl, jsonLogger, defaultLogLevelValue); err == nil { 91 | t.Fatalf("Expected %s to produce error", lvl) 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /cli/helper.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | "io/ioutil" 8 | "os" 9 | "path/filepath" 10 | ) 11 | 12 | // WriteConfigVals writes a toml file with the given values. 13 | // It returns an error if writing was impossible. 14 | func WriteConfigVals(dir string, vals map[string]string) error { 15 | data := "" 16 | for k, v := range vals { 17 | data = data + fmt.Sprintf("%s = \"%s\"\n", k, v) 18 | } 19 | cfile := filepath.Join(dir, "config.toml") 20 | return ioutil.WriteFile(cfile, []byte(data), 0666) 21 | } 22 | 23 | // RunWithArgs executes the given command with the specified command line args 24 | // and environmental variables set. 
It returns any error returned from cmd.Execute() 25 | func RunWithArgs(cmd Executable, args []string, env map[string]string) error { 26 | oargs := os.Args 27 | oenv := map[string]string{} 28 | // defer returns the environment back to normal 29 | defer func() { 30 | os.Args = oargs 31 | for k, v := range oenv { 32 | os.Setenv(k, v) 33 | } 34 | }() 35 | 36 | // set the args and env how we want them 37 | os.Args = args 38 | for k, v := range env { 39 | // backup old value if there, to restore at end 40 | oenv[k] = os.Getenv(k) 41 | err := os.Setenv(k, v) 42 | if err != nil { 43 | return err 44 | } 45 | } 46 | 47 | // and finally run the command 48 | return cmd.Execute() 49 | } 50 | 51 | // RunCaptureWithArgs executes the given command with the specified command 52 | // line args and environmental variables set. It returns string fields 53 | // representing output written to stdout and stderr, additionally any error 54 | // from cmd.Execute() is also returned 55 | func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) { 56 | oldout, olderr := os.Stdout, os.Stderr // keep backup of the real stdout 57 | rOut, wOut, _ := os.Pipe() 58 | rErr, wErr, _ := os.Pipe() 59 | os.Stdout, os.Stderr = wOut, wErr 60 | defer func() { 61 | os.Stdout, os.Stderr = oldout, olderr // restoring the real stdout 62 | }() 63 | 64 | // copy the output in a separate goroutine so printing can't block indefinitely 65 | copyStd := func(reader *os.File) *(chan string) { 66 | stdC := make(chan string) 67 | go func() { 68 | var buf bytes.Buffer 69 | // io.Copy will end when we call reader.Close() below 70 | io.Copy(&buf, reader) 71 | stdC <- buf.String() 72 | }() 73 | return &stdC 74 | } 75 | outC := copyStd(rOut) 76 | errC := copyStd(rErr) 77 | 78 | // now run the command 79 | err = RunWithArgs(cmd, args, env) 80 | 81 | // and grab the stdout to return 82 | wOut.Close() 83 | wErr.Close() 84 | stdout = <-*outC 85 | stderr = <-*errC 86 | return stdout, stderr, err 87 | } 88 | -------------------------------------------------------------------------------- /cli/setup.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | 9 | "github.com/pkg/errors" 10 | "github.com/spf13/cobra" 11 | "github.com/spf13/viper" 12 | ) 13 | 14 | const ( 15 | HomeFlag = "home" 16 | TraceFlag = "trace" 17 | OutputFlag = "output" 18 | EncodingFlag = "encoding" 19 | ) 20 | 21 | // Executable is the minimal interface to *corba.Command, so we can 22 | // wrap if desired before the test 23 | type Executable interface { 24 | Execute() error 25 | } 26 | 27 | // PrepareBaseCmd is meant for tendermint and other servers 28 | func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor { 29 | cobra.OnInitialize(func() { initEnv(envPrefix) }) 30 | cmd.PersistentFlags().StringP(HomeFlag, "", defaultHome, "directory for config and data") 31 | cmd.PersistentFlags().Bool(TraceFlag, false, "print out full stack trace on errors") 32 | cmd.PersistentPreRunE = concatCobraCmdFuncs(bindFlagsLoadViper, cmd.PersistentPreRunE) 33 | return Executor{cmd, os.Exit} 34 | } 35 | 36 | // PrepareMainCmd is meant for client side libs that want some more flags 37 | // 38 | // This adds --encoding (hex, btc, base64) and --output (text, json) to 39 | // the command. These only really make sense in interactive commands. 
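//
// A minimal usage sketch (illustrative only; rootCmd, the "TM" prefix and the
// home path below are placeholder values, not part of this package):
//
//	executor := PrepareMainCmd(rootCmd, "TM", os.ExpandEnv("$HOME/.mycli"))
//	err := executor.Execute() // by default calls os.Exit with a non-zero code on error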
40 | func PrepareMainCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor { 41 | cmd.PersistentFlags().StringP(EncodingFlag, "e", "hex", "Binary encoding (hex|b64|btc)") 42 | cmd.PersistentFlags().StringP(OutputFlag, "o", "text", "Output format (text|json)") 43 | cmd.PersistentPreRunE = concatCobraCmdFuncs(validateOutput, cmd.PersistentPreRunE) 44 | return PrepareBaseCmd(cmd, envPrefix, defaultHome) 45 | } 46 | 47 | // initEnv sets to use ENV variables if set. 48 | func initEnv(prefix string) { 49 | copyEnvVars(prefix) 50 | 51 | // env variables with TM prefix (eg. TM_ROOT) 52 | viper.SetEnvPrefix(prefix) 53 | viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")) 54 | viper.AutomaticEnv() 55 | } 56 | 57 | // This copies all variables like TMROOT to TM_ROOT, 58 | // so we can support both formats for the user 59 | func copyEnvVars(prefix string) { 60 | prefix = strings.ToUpper(prefix) 61 | ps := prefix + "_" 62 | for _, e := range os.Environ() { 63 | kv := strings.SplitN(e, "=", 2) 64 | if len(kv) == 2 { 65 | k, v := kv[0], kv[1] 66 | if strings.HasPrefix(k, prefix) && !strings.HasPrefix(k, ps) { 67 | k2 := strings.Replace(k, prefix, ps, 1) 68 | os.Setenv(k2, v) 69 | } 70 | } 71 | } 72 | } 73 | 74 | // Executor wraps the cobra Command with a nicer Execute method 75 | type Executor struct { 76 | *cobra.Command 77 | Exit func(int) // this is os.Exit by default, override in tests 78 | } 79 | 80 | type ExitCoder interface { 81 | ExitCode() int 82 | } 83 | 84 | // execute adds all child commands to the root command sets flags appropriately. 85 | // This is called by main.main(). It only needs to happen once to the rootCmd. 86 | func (e Executor) Execute() error { 87 | e.SilenceUsage = true 88 | e.SilenceErrors = true 89 | err := e.Command.Execute() 90 | if err != nil { 91 | if viper.GetBool(TraceFlag) { 92 | fmt.Fprintf(os.Stderr, "ERROR: %+v\n", err) 93 | } else { 94 | fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) 95 | } 96 | 97 | // return error code 1 by default, can override it with a special error type 98 | exitCode := 1 99 | if ec, ok := err.(ExitCoder); ok { 100 | exitCode = ec.ExitCode() 101 | } 102 | e.Exit(exitCode) 103 | } 104 | return err 105 | } 106 | 107 | type cobraCmdFunc func(cmd *cobra.Command, args []string) error 108 | 109 | // Returns a single function that calls each argument function in sequence 110 | // RunE, PreRunE, PersistentPreRunE, etc. all have this same signature 111 | func concatCobraCmdFuncs(fs ...cobraCmdFunc) cobraCmdFunc { 112 | return func(cmd *cobra.Command, args []string) error { 113 | for _, f := range fs { 114 | if f != nil { 115 | if err := f(cmd, args); err != nil { 116 | return err 117 | } 118 | } 119 | } 120 | return nil 121 | } 122 | } 123 | 124 | // Bind all flags and read the config into viper 125 | func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { 126 | // cmd.Flags() includes flags from this command and all persistent flags from the parent 127 | if err := viper.BindPFlags(cmd.Flags()); err != nil { 128 | return err 129 | } 130 | 131 | homeDir := viper.GetString(HomeFlag) 132 | viper.Set(HomeFlag, homeDir) 133 | viper.SetConfigName("config") // name of config file (without extension) 134 | viper.AddConfigPath(homeDir) // search root directory 135 | viper.AddConfigPath(filepath.Join(homeDir, "config")) // search root directory /config 136 | 137 | // If a config file is found, read it in. 
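// A missing config file is not an error; any other failure reading it is returned.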
138 | if err := viper.ReadInConfig(); err == nil { 139 | // stderr, so if we redirect output to json file, this doesn't appear 140 | // fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed()) 141 | } else if _, ok := err.(viper.ConfigFileNotFoundError); !ok { 142 | // ignore not found error, return other errors 143 | return err 144 | } 145 | return nil 146 | } 147 | 148 | func validateOutput(cmd *cobra.Command, args []string) error { 149 | // validate output format 150 | output := viper.GetString(OutputFlag) 151 | switch output { 152 | case "text", "json": 153 | default: 154 | return errors.Errorf("Unsupported output format: %s", output) 155 | } 156 | return nil 157 | } 158 | -------------------------------------------------------------------------------- /common/array.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | func Arr(items ...interface{}) []interface{} { 4 | return items 5 | } 6 | -------------------------------------------------------------------------------- /common/async.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "sync/atomic" 5 | ) 6 | 7 | //---------------------------------------- 8 | // Task 9 | 10 | // val: the value returned after task execution. 11 | // err: the error returned during task completion. 12 | // abort: tells Parallel to return, whether or not all tasks have completed. 13 | type Task func(i int) (val interface{}, err error, abort bool) 14 | 15 | type TaskResult struct { 16 | Value interface{} 17 | Error error 18 | } 19 | 20 | type TaskResultCh <-chan TaskResult 21 | 22 | type taskResultOK struct { 23 | TaskResult 24 | OK bool 25 | } 26 | 27 | type TaskResultSet struct { 28 | chz []TaskResultCh 29 | results []taskResultOK 30 | } 31 | 32 | func newTaskResultSet(chz []TaskResultCh) *TaskResultSet { 33 | return &TaskResultSet{ 34 | chz: chz, 35 | results: make([]taskResultOK, len(chz)), 36 | } 37 | } 38 | 39 | func (trs *TaskResultSet) Channels() []TaskResultCh { 40 | return trs.chz 41 | } 42 | 43 | func (trs *TaskResultSet) LatestResult(index int) (TaskResult, bool) { 44 | if len(trs.results) <= index { 45 | return TaskResult{}, false 46 | } 47 | resultOK := trs.results[index] 48 | return resultOK.TaskResult, resultOK.OK 49 | } 50 | 51 | // NOTE: Not concurrency safe. 52 | // Writes results to trs.results without waiting for all tasks to complete. 53 | func (trs *TaskResultSet) Reap() *TaskResultSet { 54 | for i := 0; i < len(trs.results); i++ { 55 | var trch = trs.chz[i] 56 | select { 57 | case result, ok := <-trch: 58 | if ok { 59 | // Write result. 60 | trs.results[i] = taskResultOK{ 61 | TaskResult: result, 62 | OK: true, 63 | } 64 | } else { 65 | // We already wrote it. 66 | } 67 | default: 68 | // Do nothing. 69 | } 70 | } 71 | return trs 72 | } 73 | 74 | // NOTE: Not concurrency safe. 75 | // Like Reap() but waits until all tasks have returned or panic'd. 76 | func (trs *TaskResultSet) Wait() *TaskResultSet { 77 | for i := 0; i < len(trs.results); i++ { 78 | var trch = trs.chz[i] 79 | select { 80 | case result, ok := <-trch: 81 | if ok { 82 | // Write result. 83 | trs.results[i] = taskResultOK{ 84 | TaskResult: result, 85 | OK: true, 86 | } 87 | } else { 88 | // We already wrote it. 89 | } 90 | } 91 | } 92 | return trs 93 | } 94 | 95 | // Returns the firstmost (by task index) error as 96 | // discovered by all previous Reap() calls. 
97 | func (trs *TaskResultSet) FirstValue() interface{} { 98 | for _, result := range trs.results { 99 | if result.Value != nil { 100 | return result.Value 101 | } 102 | } 103 | return nil 104 | } 105 | 106 | // Returns the firstmost (by task index) error as 107 | // discovered by all previous Reap() calls. 108 | func (trs *TaskResultSet) FirstError() error { 109 | for _, result := range trs.results { 110 | if result.Error != nil { 111 | return result.Error 112 | } 113 | } 114 | return nil 115 | } 116 | 117 | //---------------------------------------- 118 | // Parallel 119 | 120 | // Run tasks in parallel, with ability to abort early. 121 | // Returns ok=false iff any of the tasks returned abort=true. 122 | // NOTE: Do not implement quit features here. Instead, provide convenient 123 | // concurrent quit-like primitives, passed implicitly via Task closures. (e.g. 124 | // it's not Parallel's concern how you quit/abort your tasks). 125 | func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) { 126 | var taskResultChz = make([]TaskResultCh, len(tasks)) // To return. 127 | var taskDoneCh = make(chan bool, len(tasks)) // A "wait group" channel, early abort if any true received. 128 | var numPanics = new(int32) // Keep track of panics to set ok=false later. 129 | ok = true // We will set it to false iff any tasks panic'd or returned abort. 130 | 131 | // Start all tasks in parallel in separate goroutines. 132 | // When the task is complete, it will appear in the 133 | // respective taskResultCh (associated by task index). 134 | for i, task := range tasks { 135 | var taskResultCh = make(chan TaskResult, 1) // Capacity for 1 result. 136 | taskResultChz[i] = taskResultCh 137 | go func(i int, task Task, taskResultCh chan TaskResult) { 138 | // Recovery 139 | defer func() { 140 | if pnk := recover(); pnk != nil { 141 | atomic.AddInt32(numPanics, 1) 142 | // Send panic to taskResultCh. 143 | taskResultCh <- TaskResult{nil, ErrorWrap(pnk, "Panic in task")} 144 | // Closing taskResultCh lets trs.Wait() work. 145 | close(taskResultCh) 146 | // Decrement waitgroup. 147 | taskDoneCh <- false 148 | } 149 | }() 150 | // Run the task. 151 | var val, err, abort = task(i) 152 | // Send val/err to taskResultCh. 153 | // NOTE: Below this line, nothing must panic/ 154 | taskResultCh <- TaskResult{val, err} 155 | // Closing taskResultCh lets trs.Wait() work. 156 | close(taskResultCh) 157 | // Decrement waitgroup. 158 | taskDoneCh <- abort 159 | }(i, task, taskResultCh) 160 | } 161 | 162 | // Wait until all tasks are done, or until abort. 163 | // DONE_LOOP: 164 | for i := 0; i < len(tasks); i++ { 165 | abort := <-taskDoneCh 166 | if abort { 167 | ok = false 168 | break 169 | } 170 | } 171 | 172 | // Ok is also false if there were any panics. 173 | // We must do this check here (after DONE_LOOP). 174 | ok = ok && (atomic.LoadInt32(numPanics) == 0) 175 | 176 | return newTaskResultSet(taskResultChz).Reap(), ok 177 | } 178 | -------------------------------------------------------------------------------- /common/async_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "sync/atomic" 7 | "testing" 8 | "time" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestParallel(t *testing.T) { 14 | 15 | // Create tasks. 
16 | var counter = new(int32) 17 | var tasks = make([]Task, 100*1000) 18 | for i := 0; i < len(tasks); i++ { 19 | tasks[i] = func(i int) (res interface{}, err error, abort bool) { 20 | atomic.AddInt32(counter, 1) 21 | return -1 * i, nil, false 22 | } 23 | } 24 | 25 | // Run in parallel. 26 | var trs, ok = Parallel(tasks...) 27 | assert.True(t, ok) 28 | 29 | // Verify. 30 | assert.Equal(t, int(*counter), len(tasks), "Each task should have incremented the counter already") 31 | var failedTasks int 32 | for i := 0; i < len(tasks); i++ { 33 | taskResult, ok := trs.LatestResult(i) 34 | if !ok { 35 | assert.Fail(t, "Task #%v did not complete.", i) 36 | failedTasks++ 37 | } else if taskResult.Error != nil { 38 | assert.Fail(t, "Task should not have errored but got %v", taskResult.Error) 39 | failedTasks++ 40 | } else if !assert.Equal(t, -1*i, taskResult.Value.(int)) { 41 | assert.Fail(t, "Task should have returned %v but got %v", -1*i, taskResult.Value.(int)) 42 | failedTasks++ 43 | } else { 44 | // Good! 45 | } 46 | } 47 | assert.Equal(t, failedTasks, 0, "No task should have failed") 48 | assert.Nil(t, trs.FirstError(), "There should be no errors") 49 | assert.Equal(t, 0, trs.FirstValue(), "First value should be 0") 50 | } 51 | 52 | func TestParallelAbort(t *testing.T) { 53 | 54 | var flow1 = make(chan struct{}, 1) 55 | var flow2 = make(chan struct{}, 1) 56 | var flow3 = make(chan struct{}, 1) // Cap must be > 0 to prevent blocking. 57 | var flow4 = make(chan struct{}, 1) 58 | 59 | // Create tasks. 60 | var tasks = []Task{ 61 | func(i int) (res interface{}, err error, abort bool) { 62 | assert.Equal(t, i, 0) 63 | flow1 <- struct{}{} 64 | return 0, nil, false 65 | }, 66 | func(i int) (res interface{}, err error, abort bool) { 67 | assert.Equal(t, i, 1) 68 | flow2 <- <-flow1 69 | return 1, errors.New("some error"), false 70 | }, 71 | func(i int) (res interface{}, err error, abort bool) { 72 | assert.Equal(t, i, 2) 73 | flow3 <- <-flow2 74 | return 2, nil, true 75 | }, 76 | func(i int) (res interface{}, err error, abort bool) { 77 | assert.Equal(t, i, 3) 78 | <-flow4 79 | return 3, nil, false 80 | }, 81 | } 82 | 83 | // Run in parallel. 84 | var taskResultSet, ok = Parallel(tasks...) 85 | assert.False(t, ok, "ok should be false since we aborted task #2.") 86 | 87 | // Verify task #3. 88 | // Initially taskResultSet.chz[3] sends nothing since flow4 didn't send. 89 | waitTimeout(t, taskResultSet.chz[3], "Task #3") 90 | 91 | // Now let the last task (#3) complete after abort. 92 | flow4 <- <-flow3 93 | 94 | // Wait until all tasks have returned or panic'd. 95 | taskResultSet.Wait() 96 | 97 | // Verify task #0, #1, #2. 98 | checkResult(t, taskResultSet, 0, 0, nil, nil) 99 | checkResult(t, taskResultSet, 1, 1, errors.New("some error"), nil) 100 | checkResult(t, taskResultSet, 2, 2, nil, nil) 101 | checkResult(t, taskResultSet, 3, 3, nil, nil) 102 | } 103 | 104 | func TestParallelRecover(t *testing.T) { 105 | 106 | // Create tasks. 107 | var tasks = []Task{ 108 | func(i int) (res interface{}, err error, abort bool) { 109 | return 0, nil, false 110 | }, 111 | func(i int) (res interface{}, err error, abort bool) { 112 | return 1, errors.New("some error"), false 113 | }, 114 | func(i int) (res interface{}, err error, abort bool) { 115 | panic(2) 116 | }, 117 | } 118 | 119 | // Run in parallel. 120 | var taskResultSet, ok = Parallel(tasks...) 121 | assert.False(t, ok, "ok should be false since we panic'd in task #2.") 122 | 123 | // Verify task #0, #1, #2. 
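// Task #2 panicked, so its Value is nil; checkResult's last argument asserts that
// the recovered panic payload (2) is carried by the wrapped Error.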
124 | checkResult(t, taskResultSet, 0, 0, nil, nil) 125 | checkResult(t, taskResultSet, 1, 1, errors.New("some error"), nil) 126 | checkResult(t, taskResultSet, 2, nil, nil, 2) 127 | } 128 | 129 | // Wait for result 130 | func checkResult(t *testing.T, taskResultSet *TaskResultSet, index int, val interface{}, err error, pnk interface{}) { 131 | taskResult, ok := taskResultSet.LatestResult(index) 132 | taskName := fmt.Sprintf("Task #%v", index) 133 | assert.True(t, ok, "TaskResultCh unexpectedly closed for %v", taskName) 134 | assert.Equal(t, val, taskResult.Value, taskName) 135 | if err != nil { 136 | assert.Equal(t, err, taskResult.Error, taskName) 137 | } else if pnk != nil { 138 | assert.Equal(t, pnk, taskResult.Error.(Error).Data(), taskName) 139 | } else { 140 | assert.Nil(t, taskResult.Error, taskName) 141 | } 142 | } 143 | 144 | // Wait for timeout (no result) 145 | func waitTimeout(t *testing.T, taskResultCh TaskResultCh, taskName string) { 146 | select { 147 | case _, ok := <-taskResultCh: 148 | if !ok { 149 | assert.Fail(t, "TaskResultCh unexpectedly closed (%v)", taskName) 150 | } else { 151 | assert.Fail(t, "TaskResultCh unexpectedly returned for %v", taskName) 152 | } 153 | case <-time.After(1 * time.Second): // TODO use deterministic time? 154 | // Good! 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /common/bytes.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | "strings" 7 | ) 8 | 9 | // The main purpose of HexBytes is to enable HEX-encoding for json/encoding. 10 | type HexBytes []byte 11 | 12 | // Marshal needed for protobuf compatibility 13 | func (bz HexBytes) Marshal() ([]byte, error) { 14 | return bz, nil 15 | } 16 | 17 | // Unmarshal needed for protobuf compatibility 18 | func (bz *HexBytes) Unmarshal(data []byte) error { 19 | *bz = data 20 | return nil 21 | } 22 | 23 | // This is the point of Bytes. 24 | func (bz HexBytes) MarshalJSON() ([]byte, error) { 25 | s := strings.ToUpper(hex.EncodeToString(bz)) 26 | jbz := make([]byte, len(s)+2) 27 | jbz[0] = '"' 28 | copy(jbz[1:], []byte(s)) 29 | jbz[len(jbz)-1] = '"' 30 | return jbz, nil 31 | } 32 | 33 | // This is the point of Bytes. 34 | func (bz *HexBytes) UnmarshalJSON(data []byte) error { 35 | if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { 36 | return fmt.Errorf("Invalid hex string: %s", data) 37 | } 38 | bz2, err := hex.DecodeString(string(data[1 : len(data)-1])) 39 | if err != nil { 40 | return err 41 | } 42 | *bz = bz2 43 | return nil 44 | } 45 | 46 | // Allow it to fulfill various interfaces in light-client, etc... 47 | func (bz HexBytes) Bytes() []byte { 48 | return bz 49 | } 50 | 51 | func (bz HexBytes) String() string { 52 | return strings.ToUpper(hex.EncodeToString(bz)) 53 | } 54 | 55 | func (bz HexBytes) Format(s fmt.State, verb rune) { 56 | switch verb { 57 | case 'p': 58 | s.Write([]byte(fmt.Sprintf("%p", bz))) 59 | default: 60 | s.Write([]byte(fmt.Sprintf("%X", []byte(bz)))) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /common/bytes_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | // This is a trivial test for protobuf compatibility. 
12 | func TestMarshal(t *testing.T) { 13 | bz := []byte("hello world") 14 | dataB := HexBytes(bz) 15 | bz2, err := dataB.Marshal() 16 | assert.Nil(t, err) 17 | assert.Equal(t, bz, bz2) 18 | 19 | var dataB2 HexBytes 20 | err = (&dataB2).Unmarshal(bz) 21 | assert.Nil(t, err) 22 | assert.Equal(t, dataB, dataB2) 23 | } 24 | 25 | // Test that the hex encoding works. 26 | func TestJSONMarshal(t *testing.T) { 27 | 28 | type TestStruct struct { 29 | B1 []byte 30 | B2 HexBytes 31 | } 32 | 33 | cases := []struct { 34 | input []byte 35 | expected string 36 | }{ 37 | {[]byte(``), `{"B1":"","B2":""}`}, 38 | {[]byte(`a`), `{"B1":"YQ==","B2":"61"}`}, 39 | {[]byte(`abc`), `{"B1":"YWJj","B2":"616263"}`}, 40 | } 41 | 42 | for i, tc := range cases { 43 | t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) { 44 | ts := TestStruct{B1: tc.input, B2: tc.input} 45 | 46 | // Test that it marshals correctly to JSON. 47 | jsonBytes, err := json.Marshal(ts) 48 | if err != nil { 49 | t.Fatal(err) 50 | } 51 | assert.Equal(t, string(jsonBytes), tc.expected) 52 | 53 | // TODO do fuzz testing to ensure that unmarshal fails 54 | 55 | // Test that unmarshaling works correctly. 56 | ts2 := TestStruct{} 57 | err = json.Unmarshal(jsonBytes, &ts2) 58 | if err != nil { 59 | t.Fatal(err) 60 | } 61 | assert.Equal(t, ts2.B1, tc.input) 62 | assert.Equal(t, ts2.B2, HexBytes(tc.input)) 63 | }) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /common/byteslice.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "bytes" 5 | ) 6 | 7 | // Fingerprint returns the first 6 bytes of a byte slice. 8 | // If the slice is less than 6 bytes, the fingerprint 9 | // contains trailing zeroes. 
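// For example, Fingerprint([]byte{1, 2}) returns []byte{1, 2, 0, 0, 0, 0}.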
10 | func Fingerprint(slice []byte) []byte { 11 | fingerprint := make([]byte, 6) 12 | copy(fingerprint, slice) 13 | return fingerprint 14 | } 15 | 16 | func IsZeros(slice []byte) bool { 17 | for _, byt := range slice { 18 | if byt != byte(0) { 19 | return false 20 | } 21 | } 22 | return true 23 | } 24 | 25 | func RightPadBytes(slice []byte, l int) []byte { 26 | if l < len(slice) { 27 | return slice 28 | } 29 | padded := make([]byte, l) 30 | copy(padded[0:len(slice)], slice) 31 | return padded 32 | } 33 | 34 | func LeftPadBytes(slice []byte, l int) []byte { 35 | if l < len(slice) { 36 | return slice 37 | } 38 | padded := make([]byte, l) 39 | copy(padded[l-len(slice):], slice) 40 | return padded 41 | } 42 | 43 | func TrimmedString(b []byte) string { 44 | trimSet := string([]byte{0}) 45 | return string(bytes.TrimLeft(b, trimSet)) 46 | 47 | } 48 | 49 | // PrefixEndBytes returns the end byteslice for a noninclusive range 50 | // that would include all byte slices for which the input is the prefix 51 | func PrefixEndBytes(prefix []byte) []byte { 52 | if prefix == nil { 53 | return nil 54 | } 55 | 56 | end := make([]byte, len(prefix)) 57 | copy(end, prefix) 58 | finished := false 59 | 60 | for !finished { 61 | if end[len(end)-1] != byte(255) { 62 | end[len(end)-1]++ 63 | finished = true 64 | } else { 65 | end = end[:len(end)-1] 66 | if len(end) == 0 { 67 | end = nil 68 | finished = true 69 | } 70 | } 71 | } 72 | return end 73 | } 74 | -------------------------------------------------------------------------------- /common/byteslice_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestPrefixEndBytes(t *testing.T) { 10 | assert := assert.New(t) 11 | 12 | var testCases = []struct { 13 | prefix []byte 14 | expected []byte 15 | }{ 16 | {[]byte{byte(55), byte(255), byte(255), byte(0)}, []byte{byte(55), byte(255), byte(255), byte(1)}}, 17 | {[]byte{byte(55), byte(255), byte(255), byte(15)}, []byte{byte(55), byte(255), byte(255), byte(16)}}, 18 | {[]byte{byte(55), byte(200), byte(255)}, []byte{byte(55), byte(201)}}, 19 | {[]byte{byte(55), byte(255), byte(255)}, []byte{byte(56)}}, 20 | {[]byte{byte(255), byte(255), byte(255)}, nil}, 21 | {nil, nil}, 22 | } 23 | 24 | for _, test := range testCases { 25 | end := PrefixEndBytes(test.prefix) 26 | assert.Equal(test.expected, end) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /common/cmap.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import "sync" 4 | 5 | // CMap is a goroutine-safe map 6 | type CMap struct { 7 | m map[string]interface{} 8 | l sync.Mutex 9 | } 10 | 11 | func NewCMap() *CMap { 12 | return &CMap{ 13 | m: make(map[string]interface{}), 14 | } 15 | } 16 | 17 | func (cm *CMap) Set(key string, value interface{}) { 18 | cm.l.Lock() 19 | defer cm.l.Unlock() 20 | cm.m[key] = value 21 | } 22 | 23 | func (cm *CMap) Get(key string) interface{} { 24 | cm.l.Lock() 25 | defer cm.l.Unlock() 26 | return cm.m[key] 27 | } 28 | 29 | func (cm *CMap) Has(key string) bool { 30 | cm.l.Lock() 31 | defer cm.l.Unlock() 32 | _, ok := cm.m[key] 33 | return ok 34 | } 35 | 36 | func (cm *CMap) Delete(key string) { 37 | cm.l.Lock() 38 | defer cm.l.Unlock() 39 | delete(cm.m, key) 40 | } 41 | 42 | func (cm *CMap) Size() int { 43 | cm.l.Lock() 44 | defer cm.l.Unlock() 45 | return len(cm.m) 46 | } 47 | 48 | 
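/* CMap usage (an illustrative sketch in the style of the AutoFile usage comment;
   the key and value below are arbitrary examples):

cm := NewCMap()
cm.Set("height", 10)
if cm.Has("height") {
	h := cm.Get("height").(int) // values are interface{}, so type-assert on read
	_ = h
}
cm.Delete("height")

Every method acquires the internal mutex, so a single CMap can be shared between
goroutines without additional locking.
*/
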
func (cm *CMap) Clear() { 49 | cm.l.Lock() 50 | defer cm.l.Unlock() 51 | cm.m = make(map[string]interface{}) 52 | } 53 | 54 | func (cm *CMap) Keys() []string { 55 | cm.l.Lock() 56 | defer cm.l.Unlock() 57 | 58 | keys := []string{} 59 | for k := range cm.m { 60 | keys = append(keys, k) 61 | } 62 | return keys 63 | } 64 | 65 | func (cm *CMap) Values() []interface{} { 66 | cm.l.Lock() 67 | defer cm.l.Unlock() 68 | items := []interface{}{} 69 | for _, v := range cm.m { 70 | items = append(items, v) 71 | } 72 | return items 73 | } 74 | -------------------------------------------------------------------------------- /common/cmap_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestIterateKeysWithValues(t *testing.T) { 12 | cmap := NewCMap() 13 | 14 | for i := 1; i <= 10; i++ { 15 | cmap.Set(fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i)) 16 | } 17 | 18 | // Testing size 19 | assert.Equal(t, 10, cmap.Size()) 20 | assert.Equal(t, 10, len(cmap.Keys())) 21 | assert.Equal(t, 10, len(cmap.Values())) 22 | 23 | // Iterating Keys, checking for matching Value 24 | for _, key := range cmap.Keys() { 25 | val := strings.Replace(key, "key", "value", -1) 26 | assert.Equal(t, val, cmap.Get(key)) 27 | } 28 | 29 | // Test if all keys are within []Keys() 30 | keys := cmap.Keys() 31 | for i := 1; i <= 10; i++ { 32 | assert.Contains(t, keys, fmt.Sprintf("key%d", i), "cmap.Keys() should contain key") 33 | } 34 | 35 | // Delete 1 Key 36 | cmap.Delete("key1") 37 | 38 | assert.NotEqual(t, len(keys), len(cmap.Keys()), "[]keys and []Keys() should not be equal, they are copies, one item was removed") 39 | } 40 | 41 | func TestContains(t *testing.T) { 42 | cmap := NewCMap() 43 | 44 | cmap.Set("key1", "value1") 45 | 46 | // Test for known values 47 | assert.True(t, cmap.Has("key1")) 48 | assert.Equal(t, "value1", cmap.Get("key1")) 49 | 50 | // Test for unknown values 51 | assert.False(t, cmap.Has("key2")) 52 | assert.Nil(t, cmap.Get("key2")) 53 | } 54 | -------------------------------------------------------------------------------- /common/colors.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | const ( 9 | ANSIReset = "\x1b[0m" 10 | ANSIBright = "\x1b[1m" 11 | ANSIDim = "\x1b[2m" 12 | ANSIUnderscore = "\x1b[4m" 13 | ANSIBlink = "\x1b[5m" 14 | ANSIReverse = "\x1b[7m" 15 | ANSIHidden = "\x1b[8m" 16 | 17 | ANSIFgBlack = "\x1b[30m" 18 | ANSIFgRed = "\x1b[31m" 19 | ANSIFgGreen = "\x1b[32m" 20 | ANSIFgYellow = "\x1b[33m" 21 | ANSIFgBlue = "\x1b[34m" 22 | ANSIFgMagenta = "\x1b[35m" 23 | ANSIFgCyan = "\x1b[36m" 24 | ANSIFgWhite = "\x1b[37m" 25 | 26 | ANSIBgBlack = "\x1b[40m" 27 | ANSIBgRed = "\x1b[41m" 28 | ANSIBgGreen = "\x1b[42m" 29 | ANSIBgYellow = "\x1b[43m" 30 | ANSIBgBlue = "\x1b[44m" 31 | ANSIBgMagenta = "\x1b[45m" 32 | ANSIBgCyan = "\x1b[46m" 33 | ANSIBgWhite = "\x1b[47m" 34 | ) 35 | 36 | // color the string s with color 'color' 37 | // unless s is already colored 38 | func treat(s string, color string) string { 39 | if len(s) > 2 && s[:2] == "\x1b[" { 40 | return s 41 | } 42 | return color + s + ANSIReset 43 | } 44 | 45 | func treatAll(color string, args ...interface{}) string { 46 | var parts []string 47 | for _, arg := range args { 48 | parts = append(parts, treat(fmt.Sprintf("%v", arg), color)) 49 | } 50 | return 
strings.Join(parts, "") 51 | } 52 | 53 | func Black(args ...interface{}) string { 54 | return treatAll(ANSIFgBlack, args...) 55 | } 56 | 57 | func Red(args ...interface{}) string { 58 | return treatAll(ANSIFgRed, args...) 59 | } 60 | 61 | func Green(args ...interface{}) string { 62 | return treatAll(ANSIFgGreen, args...) 63 | } 64 | 65 | func Yellow(args ...interface{}) string { 66 | return treatAll(ANSIFgYellow, args...) 67 | } 68 | 69 | func Blue(args ...interface{}) string { 70 | return treatAll(ANSIFgBlue, args...) 71 | } 72 | 73 | func Magenta(args ...interface{}) string { 74 | return treatAll(ANSIFgMagenta, args...) 75 | } 76 | 77 | func Cyan(args ...interface{}) string { 78 | return treatAll(ANSIFgCyan, args...) 79 | } 80 | 81 | func White(args ...interface{}) string { 82 | return treatAll(ANSIFgWhite, args...) 83 | } 84 | 85 | func ColoredBytes(data []byte, textColor, bytesColor func(...interface{}) string) string { 86 | s := "" 87 | for _, b := range data { 88 | if 0x21 <= b && b < 0x7F { 89 | s += textColor(string(b)) 90 | } else { 91 | s += bytesColor(Fmt("%02X", b)) 92 | } 93 | } 94 | return s 95 | } 96 | -------------------------------------------------------------------------------- /common/date.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "strings" 5 | "time" 6 | 7 | "github.com/pkg/errors" 8 | ) 9 | 10 | // TimeLayout helps to parse a date string of the format YYYY-MM-DD 11 | // Intended to be used with the following function: 12 | // time.Parse(TimeLayout, date) 13 | var TimeLayout = "2006-01-02" //this represents YYYY-MM-DD 14 | 15 | // ParseDateRange parses a date range string of the format start:end 16 | // where the start and end date are of the format YYYY-MM-DD. 
17 | // The parsed dates are time.Time and will return the zero time for 18 | // unbounded dates, ex: 19 | // unbounded start: :2000-12-31 20 | // unbounded end: 2000-12-31: 21 | func ParseDateRange(dateRange string) (startDate, endDate time.Time, err error) { 22 | dates := strings.Split(dateRange, ":") 23 | if len(dates) != 2 { 24 | err = errors.New("bad date range, must be in format date:date") 25 | return 26 | } 27 | parseDate := func(date string) (out time.Time, err error) { 28 | if len(date) == 0 { 29 | return 30 | } 31 | out, err = time.Parse(TimeLayout, date) 32 | return 33 | } 34 | startDate, err = parseDate(dates[0]) 35 | if err != nil { 36 | return 37 | } 38 | endDate, err = parseDate(dates[1]) 39 | if err != nil { 40 | return 41 | } 42 | return 43 | } 44 | -------------------------------------------------------------------------------- /common/date_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | var ( 11 | date = time.Date(2015, time.Month(12), 31, 0, 0, 0, 0, time.UTC) 12 | date2 = time.Date(2016, time.Month(12), 31, 0, 0, 0, 0, time.UTC) 13 | zero time.Time 14 | ) 15 | 16 | func TestParseDateRange(t *testing.T) { 17 | assert := assert.New(t) 18 | 19 | var testDates = []struct { 20 | dateStr string 21 | start time.Time 22 | end time.Time 23 | errNil bool 24 | }{ 25 | {"2015-12-31:2016-12-31", date, date2, true}, 26 | {"2015-12-31:", date, zero, true}, 27 | {":2016-12-31", zero, date2, true}, 28 | {"2016-12-31", zero, zero, false}, 29 | {"2016-31-12:", zero, zero, false}, 30 | {":2016-31-12", zero, zero, false}, 31 | } 32 | 33 | for _, test := range testDates { 34 | start, end, err := ParseDateRange(test.dateStr) 35 | if test.errNil { 36 | assert.Nil(err) 37 | testPtr := func(want, have time.Time) { 38 | assert.True(have.Equal(want)) 39 | } 40 | testPtr(test.start, start) 41 | testPtr(test.end, end) 42 | } else { 43 | assert.NotNil(err) 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /common/errors_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | fmt "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestErrorPanic(t *testing.T) { 11 | type pnk struct { 12 | msg string 13 | } 14 | 15 | capturePanic := func() (err Error) { 16 | defer func() { 17 | if r := recover(); r != nil { 18 | err = ErrorWrap(r, "This is the message in ErrorWrap(r, message).") 19 | } 20 | return 21 | }() 22 | panic(pnk{"something"}) 23 | return nil 24 | } 25 | 26 | var err = capturePanic() 27 | 28 | assert.Equal(t, pnk{"something"}, err.Data()) 29 | assert.Equal(t, "Error{{something}}", fmt.Sprintf("%v", err)) 30 | assert.Contains(t, fmt.Sprintf("%#v", err), "This is the message in ErrorWrap(r, message).") 31 | assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") 32 | } 33 | 34 | func TestErrorWrapSomething(t *testing.T) { 35 | 36 | var err = ErrorWrap("something", "formatter%v%v", 0, 1) 37 | 38 | assert.Equal(t, "something", err.Data()) 39 | assert.Equal(t, "Error{something}", fmt.Sprintf("%v", err)) 40 | assert.Regexp(t, `formatter01\n`, fmt.Sprintf("%#v", err)) 41 | assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") 42 | } 43 | 44 | func TestErrorWrapNothing(t *testing.T) { 45 | 46 | var err = ErrorWrap(nil, "formatter%v%v", 0, 1) 47 | 48 | 
assert.Equal(t, 49 | FmtError{"formatter%v%v", []interface{}{0, 1}}, 50 | err.Data()) 51 | assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) 52 | assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) 53 | assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") 54 | } 55 | 56 | func TestErrorNewError(t *testing.T) { 57 | 58 | var err = NewError("formatter%v%v", 0, 1) 59 | 60 | assert.Equal(t, 61 | FmtError{"formatter%v%v", []interface{}{0, 1}}, 62 | err.Data()) 63 | assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) 64 | assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) 65 | assert.NotContains(t, fmt.Sprintf("%#v", err), "Stack Trace") 66 | } 67 | 68 | func TestErrorNewErrorWithStacktrace(t *testing.T) { 69 | 70 | var err = NewError("formatter%v%v", 0, 1).Stacktrace() 71 | 72 | assert.Equal(t, 73 | FmtError{"formatter%v%v", []interface{}{0, 1}}, 74 | err.Data()) 75 | assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) 76 | assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) 77 | assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") 78 | } 79 | 80 | func TestErrorNewErrorWithTrace(t *testing.T) { 81 | 82 | var err = NewError("formatter%v%v", 0, 1) 83 | err.Trace(0, "trace %v", 1) 84 | err.Trace(0, "trace %v", 2) 85 | err.Trace(0, "trace %v", 3) 86 | 87 | assert.Equal(t, 88 | FmtError{"formatter%v%v", []interface{}{0, 1}}, 89 | err.Data()) 90 | assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) 91 | assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) 92 | dump := fmt.Sprintf("%#v", err) 93 | assert.NotContains(t, dump, "Stack Trace") 94 | assert.Regexp(t, `common/errors_test\.go:[0-9]+ - trace 1`, dump) 95 | assert.Regexp(t, `common/errors_test\.go:[0-9]+ - trace 2`, dump) 96 | assert.Regexp(t, `common/errors_test\.go:[0-9]+ - trace 3`, dump) 97 | } 98 | 99 | func TestErrorWrapError(t *testing.T) { 100 | var err1 error = NewError("my message") 101 | var err2 error = ErrorWrap(err1, "another message") 102 | assert.Equal(t, err1, err2) 103 | } 104 | -------------------------------------------------------------------------------- /common/heap.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "bytes" 5 | "container/heap" 6 | ) 7 | 8 | /* 9 | Example usage: 10 | 11 | ``` 12 | h := NewHeap() 13 | 14 | h.Push("msg1", 1) 15 | h.Push("msg3", 3) 16 | h.Push("msg2", 2) 17 | 18 | fmt.Println(h.Pop()) // msg1 19 | fmt.Println(h.Pop()) // msg2 20 | fmt.Println(h.Pop()) // msg3 21 | ``` 22 | */ 23 | type Heap struct { 24 | pq priorityQueue 25 | } 26 | 27 | func NewHeap() *Heap { 28 | return &Heap{pq: make([]*pqItem, 0)} 29 | } 30 | 31 | func (h *Heap) Len() int64 { 32 | return int64(len(h.pq)) 33 | } 34 | 35 | func (h *Heap) Push(value interface{}, priority int) { 36 | heap.Push(&h.pq, &pqItem{value: value, priority: cmpInt(priority)}) 37 | } 38 | 39 | func (h *Heap) PushBytes(value interface{}, priority []byte) { 40 | heap.Push(&h.pq, &pqItem{value: value, priority: cmpBytes(priority)}) 41 | } 42 | 43 | func (h *Heap) PushComparable(value interface{}, priority Comparable) { 44 | heap.Push(&h.pq, &pqItem{value: value, priority: priority}) 45 | } 46 | 47 | func (h *Heap) Peek() interface{} { 48 | if 
len(h.pq) == 0 { 49 | return nil 50 | } 51 | return h.pq[0].value 52 | } 53 | 54 | func (h *Heap) Update(value interface{}, priority Comparable) { 55 | h.pq.Update(h.pq[0], value, priority) 56 | } 57 | 58 | func (h *Heap) Pop() interface{} { 59 | item := heap.Pop(&h.pq).(*pqItem) 60 | return item.value 61 | } 62 | 63 | //----------------------------------------------------------------------------- 64 | // From: http://golang.org/pkg/container/heap/#example__priorityQueue 65 | 66 | type pqItem struct { 67 | value interface{} 68 | priority Comparable 69 | index int 70 | } 71 | 72 | type priorityQueue []*pqItem 73 | 74 | func (pq priorityQueue) Len() int { return len(pq) } 75 | 76 | func (pq priorityQueue) Less(i, j int) bool { 77 | return pq[i].priority.Less(pq[j].priority) 78 | } 79 | 80 | func (pq priorityQueue) Swap(i, j int) { 81 | pq[i], pq[j] = pq[j], pq[i] 82 | pq[i].index = i 83 | pq[j].index = j 84 | } 85 | 86 | func (pq *priorityQueue) Push(x interface{}) { 87 | n := len(*pq) 88 | item := x.(*pqItem) 89 | item.index = n 90 | *pq = append(*pq, item) 91 | } 92 | 93 | func (pq *priorityQueue) Pop() interface{} { 94 | old := *pq 95 | n := len(old) 96 | item := old[n-1] 97 | item.index = -1 // for safety 98 | *pq = old[0 : n-1] 99 | return item 100 | } 101 | 102 | func (pq *priorityQueue) Update(item *pqItem, value interface{}, priority Comparable) { 103 | item.value = value 104 | item.priority = priority 105 | heap.Fix(pq, item.index) 106 | } 107 | 108 | //-------------------------------------------------------------------------------- 109 | // Comparable 110 | 111 | type Comparable interface { 112 | Less(o interface{}) bool 113 | } 114 | 115 | type cmpInt int 116 | 117 | func (i cmpInt) Less(o interface{}) bool { 118 | return int(i) < int(o.(cmpInt)) 119 | } 120 | 121 | type cmpBytes []byte 122 | 123 | func (bz cmpBytes) Less(o interface{}) bool { 124 | return bytes.Compare([]byte(bz), []byte(o.(cmpBytes))) < 0 125 | } 126 | -------------------------------------------------------------------------------- /common/int.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "encoding/binary" 5 | "sort" 6 | ) 7 | 8 | // Sort for []uint64 9 | 10 | type Uint64Slice []uint64 11 | 12 | func (p Uint64Slice) Len() int { return len(p) } 13 | func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } 14 | func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } 15 | func (p Uint64Slice) Sort() { sort.Sort(p) } 16 | 17 | func SearchUint64s(a []uint64, x uint64) int { 18 | return sort.Search(len(a), func(i int) bool { return a[i] >= x }) 19 | } 20 | 21 | func (p Uint64Slice) Search(x uint64) int { return SearchUint64s(p, x) } 22 | 23 | //-------------------------------------------------------------------------------- 24 | 25 | func PutUint64LE(dest []byte, i uint64) { 26 | binary.LittleEndian.PutUint64(dest, i) 27 | } 28 | 29 | func GetUint64LE(src []byte) uint64 { 30 | return binary.LittleEndian.Uint64(src) 31 | } 32 | 33 | func PutUint64BE(dest []byte, i uint64) { 34 | binary.BigEndian.PutUint64(dest, i) 35 | } 36 | 37 | func GetUint64BE(src []byte) uint64 { 38 | return binary.BigEndian.Uint64(src) 39 | } 40 | 41 | func PutInt64LE(dest []byte, i int64) { 42 | binary.LittleEndian.PutUint64(dest, uint64(i)) 43 | } 44 | 45 | func GetInt64LE(src []byte) int64 { 46 | return int64(binary.LittleEndian.Uint64(src)) 47 | } 48 | 49 | func PutInt64BE(dest []byte, i int64) { 50 | binary.BigEndian.PutUint64(dest, uint64(i)) 51 | 
} 52 | 53 | func GetInt64BE(src []byte) int64 { 54 | return int64(binary.BigEndian.Uint64(src)) 55 | } 56 | 57 | // IntInSlice returns true if a is found in the list. 58 | func IntInSlice(a int, list []int) bool { 59 | for _, b := range list { 60 | if b == a { 61 | return true 62 | } 63 | } 64 | return false 65 | } 66 | -------------------------------------------------------------------------------- /common/int_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestIntInSlice(t *testing.T) { 10 | assert.True(t, IntInSlice(1, []int{1, 2, 3})) 11 | assert.False(t, IntInSlice(4, []int{1, 2, 3})) 12 | assert.True(t, IntInSlice(0, []int{0})) 13 | assert.False(t, IntInSlice(0, []int{})) 14 | } 15 | -------------------------------------------------------------------------------- /common/io.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "io" 7 | ) 8 | 9 | type PrefixedReader struct { 10 | Prefix []byte 11 | reader io.Reader 12 | } 13 | 14 | func NewPrefixedReader(prefix []byte, reader io.Reader) *PrefixedReader { 15 | return &PrefixedReader{prefix, reader} 16 | } 17 | 18 | func (pr *PrefixedReader) Read(p []byte) (n int, err error) { 19 | if len(pr.Prefix) > 0 { 20 | read := copy(p, pr.Prefix) 21 | pr.Prefix = pr.Prefix[read:] 22 | return read, nil 23 | } 24 | return pr.reader.Read(p) 25 | } 26 | 27 | // NOTE: Not goroutine safe 28 | type BufferCloser struct { 29 | bytes.Buffer 30 | Closed bool 31 | } 32 | 33 | func NewBufferCloser(buf []byte) *BufferCloser { 34 | return &BufferCloser{ 35 | *bytes.NewBuffer(buf), 36 | false, 37 | } 38 | } 39 | 40 | func (bc *BufferCloser) Close() error { 41 | if bc.Closed { 42 | return errors.New("BufferCloser already closed") 43 | } 44 | bc.Closed = true 45 | return nil 46 | } 47 | 48 | func (bc *BufferCloser) Write(p []byte) (n int, err error) { 49 | if bc.Closed { 50 | return 0, errors.New("Cannot write to closed BufferCloser") 51 | } 52 | return bc.Buffer.Write(p) 53 | } 54 | 55 | func (bc *BufferCloser) WriteByte(c byte) error { 56 | if bc.Closed { 57 | return errors.New("Cannot write to closed BufferCloser") 58 | } 59 | return bc.Buffer.WriteByte(c) 60 | } 61 | 62 | func (bc *BufferCloser) WriteRune(r rune) (n int, err error) { 63 | if bc.Closed { 64 | return 0, errors.New("Cannot write to closed BufferCloser") 65 | } 66 | return bc.Buffer.WriteRune(r) 67 | } 68 | 69 | func (bc *BufferCloser) WriteString(s string) (n int, err error) { 70 | if bc.Closed { 71 | return 0, errors.New("Cannot write to closed BufferCloser") 72 | } 73 | return bc.Buffer.WriteString(s) 74 | } 75 | -------------------------------------------------------------------------------- /common/kvpair.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "bytes" 5 | "sort" 6 | ) 7 | 8 | //---------------------------------------- 9 | // KVPair 10 | 11 | /* 12 | Defined in types.proto 13 | 14 | type KVPair struct { 15 | Key []byte 16 | Value []byte 17 | } 18 | */ 19 | 20 | type KVPairs []KVPair 21 | 22 | // Sorting 23 | func (kvs KVPairs) Len() int { return len(kvs) } 24 | func (kvs KVPairs) Less(i, j int) bool { 25 | switch bytes.Compare(kvs[i].Key, kvs[j].Key) { 26 | case -1: 27 | return true 28 | case 0: 29 | return bytes.Compare(kvs[i].Value, kvs[j].Value) < 0 30 | case 
1: 31 | return false 32 | default: 33 | panic("invalid comparison result") 34 | } 35 | } 36 | func (kvs KVPairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } 37 | func (kvs KVPairs) Sort() { sort.Sort(kvs) } 38 | 39 | //---------------------------------------- 40 | // KI64Pair 41 | 42 | /* 43 | Defined in types.proto 44 | type KI64Pair struct { 45 | Key []byte 46 | Value int64 47 | } 48 | */ 49 | 50 | type KI64Pairs []KI64Pair 51 | 52 | // Sorting 53 | func (kvs KI64Pairs) Len() int { return len(kvs) } 54 | func (kvs KI64Pairs) Less(i, j int) bool { 55 | switch bytes.Compare(kvs[i].Key, kvs[j].Key) { 56 | case -1: 57 | return true 58 | case 0: 59 | return kvs[i].Value < kvs[j].Value 60 | case 1: 61 | return false 62 | default: 63 | panic("invalid comparison result") 64 | } 65 | } 66 | func (kvs KI64Pairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } 67 | func (kvs KI64Pairs) Sort() { sort.Sort(kvs) } 68 | -------------------------------------------------------------------------------- /common/math.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | func MaxInt8(a, b int8) int8 { 4 | if a > b { 5 | return a 6 | } 7 | return b 8 | } 9 | 10 | func MaxUint8(a, b uint8) uint8 { 11 | if a > b { 12 | return a 13 | } 14 | return b 15 | } 16 | 17 | func MaxInt16(a, b int16) int16 { 18 | if a > b { 19 | return a 20 | } 21 | return b 22 | } 23 | 24 | func MaxUint16(a, b uint16) uint16 { 25 | if a > b { 26 | return a 27 | } 28 | return b 29 | } 30 | 31 | func MaxInt32(a, b int32) int32 { 32 | if a > b { 33 | return a 34 | } 35 | return b 36 | } 37 | 38 | func MaxUint32(a, b uint32) uint32 { 39 | if a > b { 40 | return a 41 | } 42 | return b 43 | } 44 | 45 | func MaxInt64(a, b int64) int64 { 46 | if a > b { 47 | return a 48 | } 49 | return b 50 | } 51 | 52 | func MaxUint64(a, b uint64) uint64 { 53 | if a > b { 54 | return a 55 | } 56 | return b 57 | } 58 | 59 | func MaxInt(a, b int) int { 60 | if a > b { 61 | return a 62 | } 63 | return b 64 | } 65 | 66 | func MaxUint(a, b uint) uint { 67 | if a > b { 68 | return a 69 | } 70 | return b 71 | } 72 | 73 | //----------------------------------------------------------------------------- 74 | 75 | func MinInt8(a, b int8) int8 { 76 | if a < b { 77 | return a 78 | } 79 | return b 80 | } 81 | 82 | func MinUint8(a, b uint8) uint8 { 83 | if a < b { 84 | return a 85 | } 86 | return b 87 | } 88 | 89 | func MinInt16(a, b int16) int16 { 90 | if a < b { 91 | return a 92 | } 93 | return b 94 | } 95 | 96 | func MinUint16(a, b uint16) uint16 { 97 | if a < b { 98 | return a 99 | } 100 | return b 101 | } 102 | 103 | func MinInt32(a, b int32) int32 { 104 | if a < b { 105 | return a 106 | } 107 | return b 108 | } 109 | 110 | func MinUint32(a, b uint32) uint32 { 111 | if a < b { 112 | return a 113 | } 114 | return b 115 | } 116 | 117 | func MinInt64(a, b int64) int64 { 118 | if a < b { 119 | return a 120 | } 121 | return b 122 | } 123 | 124 | func MinUint64(a, b uint64) uint64 { 125 | if a < b { 126 | return a 127 | } 128 | return b 129 | } 130 | 131 | func MinInt(a, b int) int { 132 | if a < b { 133 | return a 134 | } 135 | return b 136 | } 137 | 138 | func MinUint(a, b uint) uint { 139 | if a < b { 140 | return a 141 | } 142 | return b 143 | } 144 | 145 | //----------------------------------------------------------------------------- 146 | 147 | func ExpUint64(a, b uint64) uint64 { 148 | accum := uint64(1) 149 | for b > 0 { 150 | if b&1 == 1 { 151 | accum *= a 152 | } 153 | a *= a 154 | b >>= 1 155 | } 
156 | return accum 157 | } 158 | -------------------------------------------------------------------------------- /common/net.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "net" 5 | "strings" 6 | ) 7 | 8 | // Connect dials the given address and returns a net.Conn. The protoAddr argument should be prefixed with the protocol, 9 | // eg. "tcp://127.0.0.1:8080" or "unix:///tmp/test.sock" 10 | func Connect(protoAddr string) (net.Conn, error) { 11 | proto, address := ProtocolAndAddress(protoAddr) 12 | conn, err := net.Dial(proto, address) 13 | return conn, err 14 | } 15 | 16 | // ProtocolAndAddress splits an address into the protocol and address components. 17 | // For instance, "tcp://127.0.0.1:8080" will be split into "tcp" and "127.0.0.1:8080". 18 | // If the address has no protocol prefix, the default is "tcp". 19 | func ProtocolAndAddress(listenAddr string) (string, string) { 20 | protocol, address := "tcp", listenAddr 21 | parts := strings.SplitN(address, "://", 2) 22 | if len(parts) == 2 { 23 | protocol, address = parts[0], parts[1] 24 | } 25 | return protocol, address 26 | } 27 | -------------------------------------------------------------------------------- /common/net_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestProtocolAndAddress(t *testing.T) { 10 | 11 | cases := []struct { 12 | fullAddr string 13 | proto string 14 | addr string 15 | }{ 16 | { 17 | "tcp://mydomain:80", 18 | "tcp", 19 | "mydomain:80", 20 | }, 21 | { 22 | "mydomain:80", 23 | "tcp", 24 | "mydomain:80", 25 | }, 26 | { 27 | "unix://mydomain:80", 28 | "unix", 29 | "mydomain:80", 30 | }, 31 | } 32 | 33 | for _, c := range cases { 34 | proto, addr := ProtocolAndAddress(c.fullAddr) 35 | assert.Equal(t, proto, c.proto) 36 | assert.Equal(t, addr, c.addr) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /common/nil.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import "reflect" 4 | 5 | // Go lacks a simple and safe way to see if something is a typed nil. 6 | // See: 7 | // - https://dave.cheney.net/2017/08/09/typed-nils-in-go-2 8 | // - https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I/discussion 9 | // - https://github.com/golang/go/issues/21538 10 | func IsTypedNil(o interface{}) bool { 11 | rv := reflect.ValueOf(o) 12 | switch rv.Kind() { 13 | case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice: 14 | return rv.IsNil() 15 | default: 16 | return false 17 | } 18 | } 19 | 20 | // Returns true if it has zero length. 21 | func IsEmpty(o interface{}) bool { 22 | rv := reflect.ValueOf(o) 23 | switch rv.Kind() { 24 | case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: 25 | return rv.Len() == 0 26 | default: 27 | return false 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /common/os.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "io" 7 | "io/ioutil" 8 | "os" 9 | "os/exec" 10 | "os/signal" 11 | "path/filepath" 12 | "strings" 13 | "syscall" 14 | ) 15 | 16 | var gopath string 17 | 18 | // GoPath returns GOPATH env variable value. 
If it is not set, this function 19 | // will try to call `go env GOPATH` subcommand. 20 | func GoPath() string { 21 | if gopath != "" { 22 | return gopath 23 | } 24 | 25 | path := os.Getenv("GOPATH") 26 | if len(path) == 0 { 27 | goCmd := exec.Command("go", "env", "GOPATH") 28 | out, err := goCmd.Output() 29 | if err != nil { 30 | panic(fmt.Sprintf("failed to determine gopath: %v", err)) 31 | } 32 | path = string(out) 33 | } 34 | gopath = path 35 | return path 36 | } 37 | 38 | // TrapSignal catches the SIGTERM and executes cb function. After that it exits 39 | // with code 1. 40 | func TrapSignal(cb func()) { 41 | c := make(chan os.Signal, 1) 42 | signal.Notify(c, os.Interrupt, syscall.SIGTERM) 43 | go func() { 44 | for sig := range c { 45 | fmt.Printf("captured %v, exiting...\n", sig) 46 | if cb != nil { 47 | cb() 48 | } 49 | os.Exit(1) 50 | } 51 | }() 52 | select {} 53 | } 54 | 55 | // Kill the running process by sending itself SIGTERM. 56 | func Kill() error { 57 | p, err := os.FindProcess(os.Getpid()) 58 | if err != nil { 59 | return err 60 | } 61 | return p.Signal(syscall.SIGTERM) 62 | } 63 | 64 | func Exit(s string) { 65 | fmt.Printf(s + "\n") 66 | os.Exit(1) 67 | } 68 | 69 | func EnsureDir(dir string, mode os.FileMode) error { 70 | if _, err := os.Stat(dir); os.IsNotExist(err) { 71 | err := os.MkdirAll(dir, mode) 72 | if err != nil { 73 | return fmt.Errorf("Could not create directory %v. %v", dir, err) 74 | } 75 | } 76 | return nil 77 | } 78 | 79 | func IsDirEmpty(name string) (bool, error) { 80 | f, err := os.Open(name) 81 | if err != nil { 82 | if os.IsNotExist(err) { 83 | return true, err 84 | } 85 | // Otherwise perhaps a permission 86 | // error or some other error. 87 | return false, err 88 | } 89 | defer f.Close() 90 | 91 | _, err = f.Readdirnames(1) // Or f.Readdir(1) 92 | if err == io.EOF { 93 | return true, nil 94 | } 95 | return false, err // Either not empty or error, suits both cases 96 | } 97 | 98 | func FileExists(filePath string) bool { 99 | _, err := os.Stat(filePath) 100 | return !os.IsNotExist(err) 101 | } 102 | 103 | func ReadFile(filePath string) ([]byte, error) { 104 | return ioutil.ReadFile(filePath) 105 | } 106 | 107 | func MustReadFile(filePath string) []byte { 108 | fileBytes, err := ioutil.ReadFile(filePath) 109 | if err != nil { 110 | Exit(Fmt("MustReadFile failed: %v", err)) 111 | return nil 112 | } 113 | return fileBytes 114 | } 115 | 116 | func WriteFile(filePath string, contents []byte, mode os.FileMode) error { 117 | return ioutil.WriteFile(filePath, contents, mode) 118 | } 119 | 120 | func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { 121 | err := WriteFile(filePath, contents, mode) 122 | if err != nil { 123 | Exit(Fmt("MustWriteFile failed: %v", err)) 124 | } 125 | } 126 | 127 | // WriteFileAtomic creates a temporary file with data and the perm given and 128 | // swaps it atomically with filename if successful. 129 | func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error { 130 | var ( 131 | dir = filepath.Dir(filename) 132 | tempFile = filepath.Join(dir, "write-file-atomic-"+RandStr(32)) 133 | // Override in case it does exist, create in case it doesn't and force kernel 134 | // flush, which still leaves the potential of lingering disk cache. 135 | flag = os.O_WRONLY | os.O_CREATE | os.O_SYNC | os.O_TRUNC 136 | ) 137 | 138 | f, err := os.OpenFile(tempFile, flag, perm) 139 | if err != nil { 140 | return err 141 | } 142 | // Clean up in any case. Defer stacking order is last-in-first-out. 
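// Editor's note (added comment): defers run last-in-first-out, so f.Close()
// executes before os.Remove(f.Name()). After a successful os.Rename below the
// temporary path no longer exists and the deferred Remove is a harmless no-op;
// it only deletes the temp file when the write or the rename failed.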
143 | defer os.Remove(f.Name()) 144 | defer f.Close() 145 | 146 | if n, err := f.Write(data); err != nil { 147 | return err 148 | } else if n < len(data) { 149 | return io.ErrShortWrite 150 | } 151 | // Close the file before renaming it, otherwise it will cause "The process 152 | // cannot access the file because it is being used by another process." on windows. 153 | f.Close() 154 | 155 | return os.Rename(f.Name(), filename) 156 | } 157 | 158 | //-------------------------------------------------------------------------------- 159 | 160 | func Tempfile(prefix string) (*os.File, string) { 161 | file, err := ioutil.TempFile("", prefix) 162 | if err != nil { 163 | PanicCrisis(err) 164 | } 165 | return file, file.Name() 166 | } 167 | 168 | func Tempdir(prefix string) (*os.File, string) { 169 | tempDir := os.TempDir() + "/" + prefix + RandStr(12) 170 | err := EnsureDir(tempDir, 0700) 171 | if err != nil { 172 | panic(Fmt("Error creating temp dir: %v", err)) 173 | } 174 | dir, err := os.Open(tempDir) 175 | if err != nil { 176 | panic(Fmt("Error opening temp dir: %v", err)) 177 | } 178 | return dir, tempDir 179 | } 180 | 181 | //-------------------------------------------------------------------------------- 182 | 183 | func Prompt(prompt string, defaultValue string) (string, error) { 184 | fmt.Print(prompt) 185 | reader := bufio.NewReader(os.Stdin) 186 | line, err := reader.ReadString('\n') 187 | if err != nil { 188 | return defaultValue, err 189 | } 190 | line = strings.TrimSpace(line) 191 | if line == "" { 192 | return defaultValue, nil 193 | } 194 | return line, nil 195 | } 196 | -------------------------------------------------------------------------------- /common/os_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "bytes" 5 | "io/ioutil" 6 | "math/rand" 7 | "os" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | func TestWriteFileAtomic(t *testing.T) { 13 | var ( 14 | seed = rand.New(rand.NewSource(time.Now().UnixNano())) 15 | data = []byte(RandStr(seed.Intn(2048))) 16 | old = RandBytes(seed.Intn(2048)) 17 | perm os.FileMode = 0600 18 | ) 19 | 20 | f, err := ioutil.TempFile("/tmp", "write-atomic-test-") 21 | if err != nil { 22 | t.Fatal(err) 23 | } 24 | defer os.Remove(f.Name()) 25 | 26 | if err = ioutil.WriteFile(f.Name(), old, 0664); err != nil { 27 | t.Fatal(err) 28 | } 29 | 30 | if err = WriteFileAtomic(f.Name(), data, perm); err != nil { 31 | t.Fatal(err) 32 | } 33 | 34 | rData, err := ioutil.ReadFile(f.Name()) 35 | if err != nil { 36 | t.Fatal(err) 37 | } 38 | 39 | if !bytes.Equal(data, rData) { 40 | t.Fatalf("data mismatch: %v != %v", data, rData) 41 | } 42 | 43 | stat, err := os.Stat(f.Name()) 44 | if err != nil { 45 | t.Fatal(err) 46 | } 47 | 48 | if have, want := stat.Mode().Perm(), perm; have != want { 49 | t.Errorf("have %v, want %v", have, want) 50 | } 51 | } 52 | 53 | func TestGoPath(t *testing.T) { 54 | // restore original gopath upon exit 55 | path := os.Getenv("GOPATH") 56 | defer func() { 57 | _ = os.Setenv("GOPATH", path) 58 | }() 59 | 60 | err := os.Setenv("GOPATH", "~/testgopath") 61 | if err != nil { 62 | t.Fatal(err) 63 | } 64 | path = GoPath() 65 | if path != "~/testgopath" { 66 | t.Fatalf("should get GOPATH env var value, got %v", path) 67 | } 68 | os.Unsetenv("GOPATH") 69 | 70 | path = GoPath() 71 | if path != "~/testgopath" { 72 | t.Fatalf("subsequent calls should return the same value, got %v", path) 73 | } 74 | } 75 | 76 | func TestGoPathWithoutEnvVar(t *testing.T) { 77 | // restore 
original gopath upon exit 78 | path := os.Getenv("GOPATH") 79 | defer func() { 80 | _ = os.Setenv("GOPATH", path) 81 | }() 82 | 83 | os.Unsetenv("GOPATH") 84 | // reset cache 85 | gopath = "" 86 | 87 | path = GoPath() 88 | if path == "" || path == "~/testgopath" { 89 | t.Fatalf("should get nonempty result of calling go env GOPATH, got %v", path) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /common/random_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | mrand "math/rand" 8 | "sync" 9 | "testing" 10 | "time" 11 | 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | func TestRandStr(t *testing.T) { 16 | l := 243 17 | s := RandStr(l) 18 | assert.Equal(t, l, len(s)) 19 | } 20 | 21 | func TestRandBytes(t *testing.T) { 22 | l := 243 23 | b := RandBytes(l) 24 | assert.Equal(t, l, len(b)) 25 | } 26 | 27 | func TestRandIntn(t *testing.T) { 28 | n := 243 29 | for i := 0; i < 100; i++ { 30 | x := RandIntn(n) 31 | assert.True(t, x < n) 32 | } 33 | } 34 | 35 | // Test to make sure that we never call math.rand(). 36 | // We do this by ensuring that outputs are deterministic. 37 | func TestDeterminism(t *testing.T) { 38 | var firstOutput string 39 | 40 | // Set math/rand's seed for the sake of debugging this test. 41 | // (It isn't strictly necessary). 42 | mrand.Seed(1) 43 | 44 | for i := 0; i < 100; i++ { 45 | output := testThemAll() 46 | if i == 0 { 47 | firstOutput = output 48 | } else { 49 | if firstOutput != output { 50 | t.Errorf("Run #%d's output was different from first run.\nfirst: %v\nlast: %v", 51 | i, firstOutput, output) 52 | } 53 | } 54 | } 55 | } 56 | 57 | func testThemAll() string { 58 | 59 | // Such determinism. 60 | grand.reset(1) 61 | 62 | // Use it. 
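// Editor's note (added comment): grand.reset(1) above reseeds the package's
// global PRNG, so everything written to the buffer below is a pure function of
// that seed; TestDeterminism compares the resulting string across 100 runs.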
63 | out := new(bytes.Buffer) 64 | perm := RandPerm(10) 65 | blob, _ := json.Marshal(perm) 66 | fmt.Fprintf(out, "perm: %s\n", blob) 67 | fmt.Fprintf(out, "randInt: %d\n", RandInt()) 68 | fmt.Fprintf(out, "randUint: %d\n", RandUint()) 69 | fmt.Fprintf(out, "randIntn: %d\n", RandIntn(97)) 70 | fmt.Fprintf(out, "randInt31: %d\n", RandInt31()) 71 | fmt.Fprintf(out, "randInt32: %d\n", RandInt32()) 72 | fmt.Fprintf(out, "randInt63: %d\n", RandInt63()) 73 | fmt.Fprintf(out, "randInt64: %d\n", RandInt64()) 74 | fmt.Fprintf(out, "randUint32: %d\n", RandUint32()) 75 | fmt.Fprintf(out, "randUint64: %d\n", RandUint64()) 76 | fmt.Fprintf(out, "randUint16Exp: %d\n", RandUint16Exp()) 77 | fmt.Fprintf(out, "randUint32Exp: %d\n", RandUint32Exp()) 78 | fmt.Fprintf(out, "randUint64Exp: %d\n", RandUint64Exp()) 79 | return out.String() 80 | } 81 | 82 | func TestRngConcurrencySafety(t *testing.T) { 83 | var wg sync.WaitGroup 84 | for i := 0; i < 100; i++ { 85 | wg.Add(1) 86 | go func() { 87 | defer wg.Done() 88 | 89 | _ = RandUint64() 90 | <-time.After(time.Millisecond * time.Duration(RandIntn(100))) 91 | _ = RandPerm(3) 92 | }() 93 | } 94 | wg.Wait() 95 | } 96 | 97 | func BenchmarkRandBytes10B(b *testing.B) { 98 | benchmarkRandBytes(b, 10) 99 | } 100 | func BenchmarkRandBytes100B(b *testing.B) { 101 | benchmarkRandBytes(b, 100) 102 | } 103 | func BenchmarkRandBytes1KiB(b *testing.B) { 104 | benchmarkRandBytes(b, 1024) 105 | } 106 | func BenchmarkRandBytes10KiB(b *testing.B) { 107 | benchmarkRandBytes(b, 10*1024) 108 | } 109 | func BenchmarkRandBytes100KiB(b *testing.B) { 110 | benchmarkRandBytes(b, 100*1024) 111 | } 112 | func BenchmarkRandBytes1MiB(b *testing.B) { 113 | benchmarkRandBytes(b, 1024*1024) 114 | } 115 | 116 | func benchmarkRandBytes(b *testing.B, n int) { 117 | for i := 0; i < b.N; i++ { 118 | _ = RandBytes(n) 119 | } 120 | b.ReportAllocs() 121 | } 122 | -------------------------------------------------------------------------------- /common/repeat_timer_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "math/rand" 5 | "sync" 6 | "testing" 7 | "time" 8 | 9 | "github.com/fortytw2/leaktest" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestDefaultTicker(t *testing.T) { 14 | ticker := defaultTickerMaker(time.Millisecond * 10) 15 | <-ticker.Chan() 16 | ticker.Stop() 17 | } 18 | 19 | func TestRepeatTimer(t *testing.T) { 20 | 21 | ch := make(chan time.Time, 100) 22 | mtx := new(sync.Mutex) 23 | 24 | // tick() fires from start to end 25 | // (exclusive) in milliseconds with incr. 26 | // It locks on mtx, so subsequent calls 27 | // run in series. 28 | tick := func(startMs, endMs, incrMs time.Duration) { 29 | mtx.Lock() 30 | go func() { 31 | for tMs := startMs; tMs < endMs; tMs += incrMs { 32 | lt := time.Time{} 33 | lt = lt.Add(tMs * time.Millisecond) 34 | ch <- lt 35 | } 36 | mtx.Unlock() 37 | }() 38 | } 39 | 40 | // tock consumes Ticker.Chan() events and checks them against the ms in "timesMs". 41 | tock := func(t *testing.T, rt *RepeatTimer, timesMs []int64) { 42 | 43 | // Check against timesMs. 44 | for _, timeMs := range timesMs { 45 | tyme := <-rt.Chan() 46 | sinceMs := tyme.Sub(time.Time{}) / time.Millisecond 47 | assert.Equal(t, timeMs, int64(sinceMs)) 48 | } 49 | 50 | // TODO detect number of running 51 | // goroutines to ensure that 52 | // no other times will fire. 53 | // See https://github.com/tendermint/tmlibs/issues/120. 
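// Editor's note (added comment): the sleep below gives any stray tick time to
// arrive; the non-blocking select that follows then asserts that rt.Chan()
// stayed empty, i.e. that no unexpected events fired.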
54 | time.Sleep(time.Millisecond * 100) 55 | done := true 56 | select { 57 | case <-rt.Chan(): 58 | done = false 59 | default: 60 | } 61 | assert.True(t, done) 62 | } 63 | 64 | tm := NewLogicalTickerMaker(ch) 65 | rt := NewRepeatTimerWithTickerMaker("bar", time.Second, tm) 66 | 67 | /* NOTE: Useful for debugging deadlocks... 68 | go func() { 69 | time.Sleep(time.Second * 3) 70 | trace := make([]byte, 102400) 71 | count := runtime.Stack(trace, true) 72 | fmt.Printf("Stack of %d bytes: %s\n", count, trace) 73 | }() 74 | */ 75 | 76 | tick(0, 1000, 10) 77 | tock(t, rt, []int64{}) 78 | tick(1000, 2000, 10) 79 | tock(t, rt, []int64{1000}) 80 | tick(2005, 5000, 10) 81 | tock(t, rt, []int64{2005, 3005, 4005}) 82 | tick(5001, 5999, 1) 83 | // Read 5005 instead of 5001 because 84 | // it's 1 second greater than 4005. 85 | tock(t, rt, []int64{5005}) 86 | tick(6000, 7005, 1) 87 | tock(t, rt, []int64{6005}) 88 | tick(7033, 8032, 1) 89 | tock(t, rt, []int64{7033}) 90 | 91 | // After a reset, nothing happens 92 | // until two ticks are received. 93 | rt.Reset() 94 | tock(t, rt, []int64{}) 95 | tick(8040, 8041, 1) 96 | tock(t, rt, []int64{}) 97 | tick(9555, 9556, 1) 98 | tock(t, rt, []int64{9555}) 99 | 100 | // After a stop, nothing more is sent. 101 | rt.Stop() 102 | tock(t, rt, []int64{}) 103 | 104 | // Another stop panics. 105 | assert.Panics(t, func() { rt.Stop() }) 106 | } 107 | 108 | func TestRepeatTimerReset(t *testing.T) { 109 | // check that we are not leaking any go-routines 110 | defer leaktest.Check(t)() 111 | 112 | timer := NewRepeatTimer("test", 20*time.Millisecond) 113 | defer timer.Stop() 114 | 115 | // test we don't receive tick before duration ms. 116 | select { 117 | case <-timer.Chan(): 118 | t.Fatal("did not expect to receive tick") 119 | default: 120 | } 121 | 122 | timer.Reset() 123 | 124 | // test we receive tick after Reset is called 125 | select { 126 | case <-timer.Chan(): 127 | // all good 128 | case <-time.After(40 * time.Millisecond): 129 | t.Fatal("expected to receive tick after reset") 130 | } 131 | 132 | // just random calls 133 | for i := 0; i < 100; i++ { 134 | time.Sleep(time.Duration(rand.Intn(40)) * time.Millisecond) 135 | timer.Reset() 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /common/service_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | type testService struct { 11 | BaseService 12 | } 13 | 14 | func (testService) OnReset() error { 15 | return nil 16 | } 17 | 18 | func TestBaseServiceWait(t *testing.T) { 19 | ts := &testService{} 20 | ts.BaseService = *NewBaseService(nil, "TestService", ts) 21 | ts.Start() 22 | 23 | waitFinished := make(chan struct{}) 24 | go func() { 25 | ts.Wait() 26 | waitFinished <- struct{}{} 27 | }() 28 | 29 | go ts.Stop() 30 | 31 | select { 32 | case <-waitFinished: 33 | // all good 34 | case <-time.After(100 * time.Millisecond): 35 | t.Fatal("expected Wait() to finish within 100 ms.") 36 | } 37 | } 38 | 39 | func TestBaseServiceReset(t *testing.T) { 40 | ts := &testService{} 41 | ts.BaseService = *NewBaseService(nil, "TestService", ts) 42 | ts.Start() 43 | 44 | err := ts.Reset() 45 | require.Error(t, err, "expected cant reset service error") 46 | 47 | ts.Stop() 48 | 49 | err = ts.Reset() 50 | require.NoError(t, err) 51 | 52 | err = ts.Start() 53 | require.NoError(t, err) 54 | } 55 | 
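The service tests above construct a BaseService by embedding it and passing the concrete type back into NewBaseService. A minimal sketch of that pattern is shown below; the myService name and the no-op hook bodies are hypothetical, and OnStart/OnStop are assumed to be the standard hooks invoked by Start()/Stop(), since service.go itself is not part of this excerpt.

	type myService struct {
		BaseService
	}

	func (s *myService) OnStart() error { return nil } // invoked once by Start()
	func (s *myService) OnStop()        {}             // invoked once by Stop()

	func newMyService() *myService {
		s := &myService{}
		s.BaseService = *NewBaseService(nil, "myService", s)
		return s
	}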
-------------------------------------------------------------------------------- /common/string.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | "strings" 7 | ) 8 | 9 | // Like fmt.Sprintf, but skips formatting if args are empty. 10 | var Fmt = func(format string, a ...interface{}) string { 11 | if len(a) == 0 { 12 | return format 13 | } 14 | return fmt.Sprintf(format, a...) 15 | } 16 | 17 | // IsHex returns true for non-empty hex-string prefixed with "0x" 18 | func IsHex(s string) bool { 19 | if len(s) > 2 && strings.EqualFold(s[:2], "0x") { 20 | _, err := hex.DecodeString(s[2:]) 21 | return err == nil 22 | } 23 | return false 24 | } 25 | 26 | // StripHex returns hex string without leading "0x" 27 | func StripHex(s string) string { 28 | if IsHex(s) { 29 | return s[2:] 30 | } 31 | return s 32 | } 33 | 34 | // StringInSlice returns true if a is found the list. 35 | func StringInSlice(a string, list []string) bool { 36 | for _, b := range list { 37 | if b == a { 38 | return true 39 | } 40 | } 41 | return false 42 | } 43 | 44 | // SplitAndTrim slices s into all subslices separated by sep and returns a 45 | // slice of the string s with all leading and trailing Unicode code points 46 | // contained in cutset removed. If sep is empty, SplitAndTrim splits after each 47 | // UTF-8 sequence. First part is equivalent to strings.SplitN with a count of 48 | // -1. 49 | func SplitAndTrim(s, sep, cutset string) []string { 50 | if s == "" { 51 | return []string{} 52 | } 53 | 54 | spl := strings.Split(s, sep) 55 | for i := 0; i < len(spl); i++ { 56 | spl[i] = strings.Trim(spl[i], cutset) 57 | } 58 | return spl 59 | } 60 | 61 | // Returns true if s is a non-empty printable non-tab ascii character. 62 | func IsASCIIText(s string) bool { 63 | if len(s) == 0 { 64 | return false 65 | } 66 | for _, b := range []byte(s) { 67 | if 32 <= b && b <= 126 { 68 | // good 69 | } else { 70 | return false 71 | } 72 | } 73 | return true 74 | } 75 | 76 | // NOTE: Assumes that s is ASCII as per IsASCIIText(), otherwise panics. 
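// Editor's illustration (not part of the original file): spaces are dropped
// and all other printable ASCII is kept, so ASCIITrim(" a b ") == "ab"; any
// byte outside 32..126 triggers the panic, as the test below demonstrates.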
77 | func ASCIITrim(s string) string { 78 | r := make([]byte, 0, len(s)) 79 | for _, b := range []byte(s) { 80 | if b == 32 { 81 | continue // skip space 82 | } else if 32 < b && b <= 126 { 83 | r = append(r, b) 84 | } else { 85 | panic(fmt.Sprintf("non-ASCII (non-tab) char 0x%X", b)) 86 | } 87 | } 88 | return string(r) 89 | } 90 | -------------------------------------------------------------------------------- /common/string_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestStringInSlice(t *testing.T) { 10 | assert.True(t, StringInSlice("a", []string{"a", "b", "c"})) 11 | assert.False(t, StringInSlice("d", []string{"a", "b", "c"})) 12 | assert.True(t, StringInSlice("", []string{""})) 13 | assert.False(t, StringInSlice("", []string{})) 14 | } 15 | 16 | func TestIsHex(t *testing.T) { 17 | notHex := []string{ 18 | "", " ", "a", "x", "0", "0x", "0X", "0x ", "0X ", "0X a", 19 | "0xf ", "0x f", "0xp", "0x-", 20 | "0xf", "0XBED", "0xF", "0xbed", // Odd lengths 21 | } 22 | for _, v := range notHex { 23 | assert.False(t, IsHex(v), "%q is not hex", v) 24 | } 25 | hex := []string{ 26 | "0x00", "0x0a", "0x0F", "0xFFFFFF", "0Xdeadbeef", "0x0BED", 27 | "0X12", "0X0A", 28 | } 29 | for _, v := range hex { 30 | assert.True(t, IsHex(v), "%q is hex", v) 31 | } 32 | } 33 | 34 | func TestSplitAndTrim(t *testing.T) { 35 | testCases := []struct { 36 | s string 37 | sep string 38 | cutset string 39 | expected []string 40 | }{ 41 | {"a,b,c", ",", " ", []string{"a", "b", "c"}}, 42 | {" a , b , c ", ",", " ", []string{"a", "b", "c"}}, 43 | {" a, b, c ", ",", " ", []string{"a", "b", "c"}}, 44 | {" , ", ",", " ", []string{"", ""}}, 45 | {" ", ",", " ", []string{""}}, 46 | } 47 | 48 | for _, tc := range testCases { 49 | assert.Equal(t, tc.expected, SplitAndTrim(tc.s, tc.sep, tc.cutset), "%s", tc.s) 50 | } 51 | } 52 | 53 | func TestIsASCIIText(t *testing.T) { 54 | notASCIIText := []string{ 55 | "", "\xC2", "\xC2\xA2", "\xFF", "\x80", "\xF0", "\n", "\t", 56 | } 57 | for _, v := range notASCIIText { 58 | assert.False(t, IsHex(v), "%q is not ascii-text", v) 59 | } 60 | asciiText := []string{ 61 | " ", ".", "x", "$", "_", "abcdefg;", "-", "0x00", "0", "123", 62 | } 63 | for _, v := range asciiText { 64 | assert.True(t, IsASCIIText(v), "%q is ascii-text", v) 65 | } 66 | } 67 | 68 | func TestASCIITrim(t *testing.T) { 69 | assert.Equal(t, ASCIITrim(" "), "") 70 | assert.Equal(t, ASCIITrim(" a"), "a") 71 | assert.Equal(t, ASCIITrim("a "), "a") 72 | assert.Equal(t, ASCIITrim(" a "), "a") 73 | assert.Panics(t, func() { ASCIITrim("\xC2\xA2") }) 74 | } 75 | -------------------------------------------------------------------------------- /common/throttle_timer.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | ) 7 | 8 | /* 9 | ThrottleTimer fires an event at most "dur" after each .Set() call. 10 | If a short burst of .Set() calls happens, ThrottleTimer fires once. 11 | If a long continuous burst of .Set() calls happens, ThrottleTimer fires 12 | at most once every "dur". 
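Example (editor's sketch, added for illustration; it mirrors throttle_timer_test.go):

	t := NewThrottleTimer("foo", 50*time.Millisecond)
	t.Set()  // a rapid burst of Set calls still yields a single fire
	<-t.Ch   // at most one event per 50ms window
	t.Stop()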
13 | */ 14 | type ThrottleTimer struct { 15 | Name string 16 | Ch chan struct{} 17 | quit chan struct{} 18 | dur time.Duration 19 | 20 | mtx sync.Mutex 21 | timer *time.Timer 22 | isSet bool 23 | } 24 | 25 | func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { 26 | var ch = make(chan struct{}) 27 | var quit = make(chan struct{}) 28 | var t = &ThrottleTimer{Name: name, Ch: ch, dur: dur, quit: quit} 29 | t.mtx.Lock() 30 | t.timer = time.AfterFunc(dur, t.fireRoutine) 31 | t.mtx.Unlock() 32 | t.timer.Stop() 33 | return t 34 | } 35 | 36 | func (t *ThrottleTimer) fireRoutine() { 37 | t.mtx.Lock() 38 | defer t.mtx.Unlock() 39 | select { 40 | case t.Ch <- struct{}{}: 41 | t.isSet = false 42 | case <-t.quit: 43 | // do nothing 44 | default: 45 | t.timer.Reset(t.dur) 46 | } 47 | } 48 | 49 | func (t *ThrottleTimer) Set() { 50 | t.mtx.Lock() 51 | defer t.mtx.Unlock() 52 | if !t.isSet { 53 | t.isSet = true 54 | t.timer.Reset(t.dur) 55 | } 56 | } 57 | 58 | func (t *ThrottleTimer) Unset() { 59 | t.mtx.Lock() 60 | defer t.mtx.Unlock() 61 | t.isSet = false 62 | t.timer.Stop() 63 | } 64 | 65 | // For ease of .Stop()'ing services before .Start()'ing them, 66 | // we ignore .Stop()'s on nil ThrottleTimers 67 | func (t *ThrottleTimer) Stop() bool { 68 | if t == nil { 69 | return false 70 | } 71 | close(t.quit) 72 | t.mtx.Lock() 73 | defer t.mtx.Unlock() 74 | return t.timer.Stop() 75 | } 76 | -------------------------------------------------------------------------------- /common/throttle_timer_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "sync" 5 | "testing" 6 | "time" 7 | 8 | // make govet noshadow happy... 9 | asrt "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | type thCounter struct { 13 | input chan struct{} 14 | mtx sync.Mutex 15 | count int 16 | } 17 | 18 | func (c *thCounter) Increment() { 19 | c.mtx.Lock() 20 | c.count++ 21 | c.mtx.Unlock() 22 | } 23 | 24 | func (c *thCounter) Count() int { 25 | c.mtx.Lock() 26 | val := c.count 27 | c.mtx.Unlock() 28 | return val 29 | } 30 | 31 | // Read should run in a go-routine and 32 | // updates count by one every time a packet comes in 33 | func (c *thCounter) Read() { 34 | for range c.input { 35 | c.Increment() 36 | } 37 | } 38 | 39 | func TestThrottle(test *testing.T) { 40 | assert := asrt.New(test) 41 | 42 | ms := 50 43 | delay := time.Duration(ms) * time.Millisecond 44 | longwait := time.Duration(2) * delay 45 | t := NewThrottleTimer("foo", delay) 46 | 47 | // start at 0 48 | c := &thCounter{input: t.Ch} 49 | assert.Equal(0, c.Count()) 50 | go c.Read() 51 | 52 | // waiting does nothing 53 | time.Sleep(longwait) 54 | assert.Equal(0, c.Count()) 55 | 56 | // send one event adds one 57 | t.Set() 58 | time.Sleep(longwait) 59 | assert.Equal(1, c.Count()) 60 | 61 | // send a burst adds one 62 | for i := 0; i < 5; i++ { 63 | t.Set() 64 | } 65 | time.Sleep(longwait) 66 | assert.Equal(2, c.Count()) 67 | 68 | // send 12, over 2 delay sections, adds 3 69 | short := time.Duration(ms/5) * time.Millisecond 70 | for i := 0; i < 13; i++ { 71 | t.Set() 72 | time.Sleep(short) 73 | } 74 | time.Sleep(longwait) 75 | assert.Equal(5, c.Count()) 76 | 77 | close(t.Ch) 78 | } 79 | -------------------------------------------------------------------------------- /common/types.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 
2 | // source: common/types.proto 3 | 4 | /* 5 | Package common is a generated protocol buffer package. 6 | 7 | It is generated from these files: 8 | common/types.proto 9 | 10 | It has these top-level messages: 11 | KVPair 12 | KI64Pair 13 | */ 14 | //nolint: gas 15 | package common 16 | 17 | import proto "github.com/golang/protobuf/proto" 18 | import fmt "fmt" 19 | import math "math" 20 | 21 | // Reference imports to suppress errors if they are not otherwise used. 22 | var _ = proto.Marshal 23 | var _ = fmt.Errorf 24 | var _ = math.Inf 25 | 26 | // This is a compile-time assertion to ensure that this generated file 27 | // is compatible with the proto package it is being compiled against. 28 | // A compilation error at this line likely means your copy of the 29 | // proto package needs to be updated. 30 | const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package 31 | 32 | // Define these here for compatibility but use tmlibs/common.KVPair. 33 | type KVPair struct { 34 | Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` 35 | Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` 36 | } 37 | 38 | func (m *KVPair) Reset() { *m = KVPair{} } 39 | func (m *KVPair) String() string { return proto.CompactTextString(m) } 40 | func (*KVPair) ProtoMessage() {} 41 | func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } 42 | 43 | func (m *KVPair) GetKey() []byte { 44 | if m != nil { 45 | return m.Key 46 | } 47 | return nil 48 | } 49 | 50 | func (m *KVPair) GetValue() []byte { 51 | if m != nil { 52 | return m.Value 53 | } 54 | return nil 55 | } 56 | 57 | // Define these here for compatibility but use tmlibs/common.KI64Pair. 58 | type KI64Pair struct { 59 | Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` 60 | Value int64 `protobuf:"varint,2,opt,name=value" json:"value,omitempty"` 61 | } 62 | 63 | func (m *KI64Pair) Reset() { *m = KI64Pair{} } 64 | func (m *KI64Pair) String() string { return proto.CompactTextString(m) } 65 | func (*KI64Pair) ProtoMessage() {} 66 | func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } 67 | 68 | func (m *KI64Pair) GetKey() []byte { 69 | if m != nil { 70 | return m.Key 71 | } 72 | return nil 73 | } 74 | 75 | func (m *KI64Pair) GetValue() int64 { 76 | if m != nil { 77 | return m.Value 78 | } 79 | return 0 80 | } 81 | 82 | func init() { 83 | proto.RegisterType((*KVPair)(nil), "common.KVPair") 84 | proto.RegisterType((*KI64Pair)(nil), "common.KI64Pair") 85 | } 86 | 87 | func init() { proto.RegisterFile("common/types.proto", fileDescriptor0) } 88 | 89 | var fileDescriptor0 = []byte{ 90 | // 107 bytes of a gzipped FileDescriptorProto 91 | 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0xce, 0xcf, 0xcd, 92 | 0xcd, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 93 | 0x83, 0x88, 0x29, 0x19, 0x70, 0xb1, 0x79, 0x87, 0x05, 0x24, 0x66, 0x16, 0x09, 0x09, 0x70, 0x31, 94 | 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 95 | 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, 0x4c, 0x60, 0x31, 0x08, 0x47, 0xc9, 0x88, 0x8b, 0xc3, 0xdb, 96 | 0xd3, 0xcc, 0x84, 0x18, 0x3d, 0xcc, 0x50, 0x3d, 0x49, 0x6c, 0x60, 0x4b, 0x8d, 0x01, 0x01, 0x00, 97 | 0x00, 0xff, 0xff, 0xd8, 0xf1, 0xc3, 0x8c, 0x8a, 0x00, 0x00, 0x00, 98 | } 99 | -------------------------------------------------------------------------------- /common/types.proto: 
-------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package common; 3 | 4 | //---------------------------------------- 5 | // Abstract types 6 | 7 | // Define these here for compatibility but use tmlibs/common.KVPair. 8 | message KVPair { 9 | bytes key = 1; 10 | bytes value = 2; 11 | } 12 | 13 | // Define these here for compatibility but use tmlibs/common.KI64Pair. 14 | message KI64Pair { 15 | bytes key = 1; 16 | int64 value = 2; 17 | } 18 | -------------------------------------------------------------------------------- /common/word.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "bytes" 5 | "sort" 6 | ) 7 | 8 | var ( 9 | Zero256 = Word256{0} 10 | One256 = Word256{1} 11 | ) 12 | 13 | type Word256 [32]byte 14 | 15 | func (w Word256) String() string { return string(w[:]) } 16 | func (w Word256) TrimmedString() string { return TrimmedString(w.Bytes()) } 17 | func (w Word256) Copy() Word256 { return w } 18 | func (w Word256) Bytes() []byte { return w[:] } // copied. 19 | func (w Word256) Prefix(n int) []byte { return w[:n] } 20 | func (w Word256) Postfix(n int) []byte { return w[32-n:] } 21 | func (w Word256) IsZero() bool { 22 | accum := byte(0) 23 | for _, byt := range w { 24 | accum |= byt 25 | } 26 | return accum == 0 27 | } 28 | func (w Word256) Compare(other Word256) int { 29 | return bytes.Compare(w[:], other[:]) 30 | } 31 | 32 | func Uint64ToWord256(i uint64) Word256 { 33 | buf := [8]byte{} 34 | PutUint64BE(buf[:], i) 35 | return LeftPadWord256(buf[:]) 36 | } 37 | 38 | func Int64ToWord256(i int64) Word256 { 39 | buf := [8]byte{} 40 | PutInt64BE(buf[:], i) 41 | return LeftPadWord256(buf[:]) 42 | } 43 | 44 | func RightPadWord256(bz []byte) (word Word256) { 45 | copy(word[:], bz) 46 | return 47 | } 48 | 49 | func LeftPadWord256(bz []byte) (word Word256) { 50 | copy(word[32-len(bz):], bz) 51 | return 52 | } 53 | 54 | func Uint64FromWord256(word Word256) uint64 { 55 | buf := word.Postfix(8) 56 | return GetUint64BE(buf) 57 | } 58 | 59 | func Int64FromWord256(word Word256) int64 { 60 | buf := word.Postfix(8) 61 | return GetInt64BE(buf) 62 | } 63 | 64 | //------------------------------------- 65 | 66 | type Tuple256 struct { 67 | First Word256 68 | Second Word256 69 | } 70 | 71 | func (tuple Tuple256) Compare(other Tuple256) int { 72 | firstCompare := tuple.First.Compare(other.First) 73 | if firstCompare == 0 { 74 | return tuple.Second.Compare(other.Second) 75 | } 76 | return firstCompare 77 | } 78 | 79 | func Tuple256Split(t Tuple256) (Word256, Word256) { 80 | return t.First, t.Second 81 | } 82 | 83 | type Tuple256Slice []Tuple256 84 | 85 | func (p Tuple256Slice) Len() int { return len(p) } 86 | func (p Tuple256Slice) Less(i, j int) bool { 87 | return p[i].Compare(p[j]) < 0 88 | } 89 | func (p Tuple256Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } 90 | func (p Tuple256Slice) Sort() { sort.Sort(p) } 91 | -------------------------------------------------------------------------------- /db/LICENSE.md: -------------------------------------------------------------------------------- 1 | Tendermint Go-DB Copyright (C) 2015 All in Bits, Inc 2 | 3 | Released under the Apache2.0 license 4 | -------------------------------------------------------------------------------- /db/README.md: -------------------------------------------------------------------------------- 1 | TODO: syndtr/goleveldb should be replaced with actual LevelDB instance 2 | 
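As a quick illustration of the Word256 helpers from common/word.go above, here is an editor's sketch (not repository code), written as a Go example test and assuming it lives alongside package common:

	package common

	import "fmt"

	func ExampleWord256() {
		w := Uint64ToWord256(42) // the value lands big-endian in the last 8 bytes
		fmt.Println(Uint64FromWord256(w), w.IsZero(), Zero256.IsZero())
		// Output: 42 false true
	}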
-------------------------------------------------------------------------------- /db/c_level_db_test.go: -------------------------------------------------------------------------------- 1 | // +build gcc 2 | 3 | package db 4 | 5 | import ( 6 | "bytes" 7 | "fmt" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | cmn "github.com/tendermint/tmlibs/common" 12 | ) 13 | 14 | func BenchmarkRandomReadsWrites2(b *testing.B) { 15 | b.StopTimer() 16 | 17 | numItems := int64(1000000) 18 | internal := map[int64]int64{} 19 | for i := 0; i < int(numItems); i++ { 20 | internal[int64(i)] = int64(0) 21 | } 22 | db, err := NewCLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "") 23 | if err != nil { 24 | b.Fatal(err.Error()) 25 | return 26 | } 27 | 28 | fmt.Println("ok, starting") 29 | b.StartTimer() 30 | 31 | for i := 0; i < b.N; i++ { 32 | // Write something 33 | { 34 | idx := (int64(cmn.RandInt()) % numItems) 35 | internal[idx] += 1 36 | val := internal[idx] 37 | idxBytes := int642Bytes(int64(idx)) 38 | valBytes := int642Bytes(int64(val)) 39 | //fmt.Printf("Set %X -> %X\n", idxBytes, valBytes) 40 | db.Set( 41 | idxBytes, 42 | valBytes, 43 | ) 44 | } 45 | // Read something 46 | { 47 | idx := (int64(cmn.RandInt()) % numItems) 48 | val := internal[idx] 49 | idxBytes := int642Bytes(int64(idx)) 50 | valBytes := db.Get(idxBytes) 51 | //fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) 52 | if val == 0 { 53 | if !bytes.Equal(valBytes, nil) { 54 | b.Errorf("Expected %v for %v, got %X", 55 | nil, idx, valBytes) 56 | break 57 | } 58 | } else { 59 | if len(valBytes) != 8 { 60 | b.Errorf("Expected length 8 for %v, got %X", 61 | idx, valBytes) 62 | break 63 | } 64 | valGot := bytes2Int64(valBytes) 65 | if val != valGot { 66 | b.Errorf("Expected %v for %v, got %v", 67 | val, idx, valGot) 68 | break 69 | } 70 | } 71 | } 72 | } 73 | 74 | db.Close() 75 | } 76 | 77 | /* 78 | func int642Bytes(i int64) []byte { 79 | buf := make([]byte, 8) 80 | binary.BigEndian.PutUint64(buf, uint64(i)) 81 | return buf 82 | } 83 | 84 | func bytes2Int64(buf []byte) int64 { 85 | return int64(binary.BigEndian.Uint64(buf)) 86 | } 87 | */ 88 | 89 | func TestCLevelDBBackend(t *testing.T) { 90 | name := cmn.Fmt("test_%x", cmn.RandStr(12)) 91 | db := NewDB(name, LevelDBBackend, "") 92 | defer cleanupDBDir("", name) 93 | 94 | _, ok := db.(*CLevelDB) 95 | assert.True(t, ok) 96 | } 97 | -------------------------------------------------------------------------------- /db/common_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | cmn "github.com/tendermint/tmlibs/common" 11 | ) 12 | 13 | //---------------------------------------- 14 | // Helper functions. 
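// The helpers below wrap testify assertions so the per-backend iterator
// tests (db_test.go, util_test.go) stay short. A typical, purely
// illustrative call sequence:
//
//	db := newTempDB(t, MemDBBackend)
//	db.SetSync(bz("a"), bz("1"))
//	itr := db.Iterator(nil, nil)
//	checkValid(t, itr, true)
//	checkItem(t, itr, bz("a"), bz("1"))
//	checkNext(t, itr, false)
//	checkInvalid(t, itr)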
15 | 16 | func checkValue(t *testing.T, db DB, key []byte, valueWanted []byte) { 17 | valueGot := db.Get(key) 18 | assert.Equal(t, valueWanted, valueGot) 19 | } 20 | 21 | func checkValid(t *testing.T, itr Iterator, expected bool) { 22 | valid := itr.Valid() 23 | require.Equal(t, expected, valid) 24 | } 25 | 26 | func checkNext(t *testing.T, itr Iterator, expected bool) { 27 | itr.Next() 28 | valid := itr.Valid() 29 | require.Equal(t, expected, valid) 30 | } 31 | 32 | func checkNextPanics(t *testing.T, itr Iterator) { 33 | assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't") 34 | } 35 | 36 | func checkDomain(t *testing.T, itr Iterator, start, end []byte) { 37 | ds, de := itr.Domain() 38 | assert.Equal(t, start, ds, "checkDomain domain start incorrect") 39 | assert.Equal(t, end, de, "checkDomain domain end incorrect") 40 | } 41 | 42 | func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) { 43 | k, v := itr.Key(), itr.Value() 44 | assert.Exactly(t, key, k) 45 | assert.Exactly(t, value, v) 46 | } 47 | 48 | func checkInvalid(t *testing.T, itr Iterator) { 49 | checkValid(t, itr, false) 50 | checkKeyPanics(t, itr) 51 | checkValuePanics(t, itr) 52 | checkNextPanics(t, itr) 53 | } 54 | 55 | func checkKeyPanics(t *testing.T, itr Iterator) { 56 | assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't") 57 | } 58 | 59 | func checkValuePanics(t *testing.T, itr Iterator) { 60 | assert.Panics(t, func() { itr.Value() }, "checkValuePanics expected panic but didn't") 61 | } 62 | 63 | func newTempDB(t *testing.T, backend DBBackendType) (db DB) { 64 | dir, dirname := cmn.Tempdir("db_common_test") 65 | db = NewDB("testdb", backend, dirname) 66 | dir.Close() 67 | return db 68 | } 69 | 70 | //---------------------------------------- 71 | // mockDB 72 | 73 | // NOTE: not actually goroutine safe. 74 | // If you want something goroutine safe, maybe you just want a MemDB.
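// mockDB only counts how many times each DB method is called; the batch
// tests in db_test.go assert on these counters to verify which NoLock /
// NoLockSync variants a memBatch.Write or WriteSync ends up invoking.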
75 | type mockDB struct { 76 | mtx sync.Mutex 77 | calls map[string]int 78 | } 79 | 80 | func newMockDB() *mockDB { 81 | return &mockDB{ 82 | calls: make(map[string]int), 83 | } 84 | } 85 | 86 | func (mdb *mockDB) Mutex() *sync.Mutex { 87 | return &(mdb.mtx) 88 | } 89 | 90 | func (mdb *mockDB) Get([]byte) []byte { 91 | mdb.calls["Get"]++ 92 | return nil 93 | } 94 | 95 | func (mdb *mockDB) Has([]byte) bool { 96 | mdb.calls["Has"]++ 97 | return false 98 | } 99 | 100 | func (mdb *mockDB) Set([]byte, []byte) { 101 | mdb.calls["Set"]++ 102 | } 103 | 104 | func (mdb *mockDB) SetSync([]byte, []byte) { 105 | mdb.calls["SetSync"]++ 106 | } 107 | 108 | func (mdb *mockDB) SetNoLock([]byte, []byte) { 109 | mdb.calls["SetNoLock"]++ 110 | } 111 | 112 | func (mdb *mockDB) SetNoLockSync([]byte, []byte) { 113 | mdb.calls["SetNoLockSync"]++ 114 | } 115 | 116 | func (mdb *mockDB) Delete([]byte) { 117 | mdb.calls["Delete"]++ 118 | } 119 | 120 | func (mdb *mockDB) DeleteSync([]byte) { 121 | mdb.calls["DeleteSync"]++ 122 | } 123 | 124 | func (mdb *mockDB) DeleteNoLock([]byte) { 125 | mdb.calls["DeleteNoLock"]++ 126 | } 127 | 128 | func (mdb *mockDB) DeleteNoLockSync([]byte) { 129 | mdb.calls["DeleteNoLockSync"]++ 130 | } 131 | 132 | func (mdb *mockDB) Iterator(start, end []byte) Iterator { 133 | mdb.calls["Iterator"]++ 134 | return &mockIterator{} 135 | } 136 | 137 | func (mdb *mockDB) ReverseIterator(start, end []byte) Iterator { 138 | mdb.calls["ReverseIterator"]++ 139 | return &mockIterator{} 140 | } 141 | 142 | func (mdb *mockDB) Close() { 143 | mdb.calls["Close"]++ 144 | } 145 | 146 | func (mdb *mockDB) NewBatch() Batch { 147 | mdb.calls["NewBatch"]++ 148 | return &memBatch{db: mdb} 149 | } 150 | 151 | func (mdb *mockDB) Print() { 152 | mdb.calls["Print"]++ 153 | fmt.Printf("mockDB{%v}", mdb.Stats()) 154 | } 155 | 156 | func (mdb *mockDB) Stats() map[string]string { 157 | mdb.calls["Stats"]++ 158 | 159 | res := make(map[string]string) 160 | for key, count := range mdb.calls { 161 | res[key] = fmt.Sprintf("%d", count) 162 | } 163 | return res 164 | } 165 | 166 | //---------------------------------------- 167 | // mockIterator 168 | 169 | type mockIterator struct{} 170 | 171 | func (mockIterator) Domain() (start []byte, end []byte) { 172 | return nil, nil 173 | } 174 | 175 | func (mockIterator) Valid() bool { 176 | return false 177 | } 178 | 179 | func (mockIterator) Next() { 180 | } 181 | 182 | func (mockIterator) Key() []byte { 183 | return nil 184 | } 185 | 186 | func (mockIterator) Value() []byte { 187 | return nil 188 | } 189 | 190 | func (mockIterator) Close() { 191 | } 192 | -------------------------------------------------------------------------------- /db/db.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import "fmt" 4 | 5 | //---------------------------------------- 6 | // Main entry 7 | 8 | type DBBackendType string 9 | 10 | const ( 11 | LevelDBBackend DBBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc 12 | CLevelDBBackend DBBackendType = "cleveldb" 13 | GoLevelDBBackend DBBackendType = "goleveldb" 14 | MemDBBackend DBBackendType = "memdb" 15 | FSDBBackend DBBackendType = "fsdb" // using the filesystem naively 16 | ) 17 | 18 | type dbCreator func(name string, dir string) (DB, error) 19 | 20 | var backends = map[DBBackendType]dbCreator{} 21 | 22 | func registerDBCreator(backend DBBackendType, creator dbCreator, force bool) { 23 | _, ok := backends[backend] 24 | if !force && ok { 25 | return 26 | } 27 | 
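	// Either nothing is registered for this backend yet, or force was set:
	// (re)register the creator.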
backends[backend] = creator 28 | } 29 | 30 | func NewDB(name string, backend DBBackendType, dir string) DB { 31 | db, err := backends[backend](name, dir) 32 | if err != nil { 33 | panic(fmt.Sprintf("Error initializing DB: %v", err)) 34 | } 35 | return db 36 | } 37 | -------------------------------------------------------------------------------- /db/db_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestDBIteratorSingleKey(t *testing.T) { 11 | for backend := range backends { 12 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 13 | db := newTempDB(t, backend) 14 | db.SetSync(bz("1"), bz("value_1")) 15 | itr := db.Iterator(nil, nil) 16 | 17 | checkValid(t, itr, true) 18 | checkNext(t, itr, false) 19 | checkValid(t, itr, false) 20 | checkNextPanics(t, itr) 21 | 22 | // Once invalid... 23 | checkInvalid(t, itr) 24 | }) 25 | } 26 | } 27 | 28 | func TestDBIteratorTwoKeys(t *testing.T) { 29 | for backend := range backends { 30 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 31 | db := newTempDB(t, backend) 32 | db.SetSync(bz("1"), bz("value_1")) 33 | db.SetSync(bz("2"), bz("value_1")) 34 | 35 | { // Fail by calling Next too much 36 | itr := db.Iterator(nil, nil) 37 | checkValid(t, itr, true) 38 | 39 | checkNext(t, itr, true) 40 | checkValid(t, itr, true) 41 | 42 | checkNext(t, itr, false) 43 | checkValid(t, itr, false) 44 | 45 | checkNextPanics(t, itr) 46 | 47 | // Once invalid... 48 | checkInvalid(t, itr) 49 | } 50 | }) 51 | } 52 | } 53 | 54 | func TestDBIteratorMany(t *testing.T) { 55 | for backend := range backends { 56 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 57 | db := newTempDB(t, backend) 58 | 59 | keys := make([][]byte, 100) 60 | for i := 0; i < 100; i++ { 61 | keys[i] = []byte{byte(i)} 62 | } 63 | 64 | value := []byte{5} 65 | for _, k := range keys { 66 | db.Set(k, value) 67 | } 68 | 69 | itr := db.Iterator(nil, nil) 70 | defer itr.Close() 71 | for ; itr.Valid(); itr.Next() { 72 | assert.Equal(t, db.Get(itr.Key()), itr.Value()) 73 | } 74 | }) 75 | } 76 | } 77 | 78 | func TestDBIteratorEmpty(t *testing.T) { 79 | for backend := range backends { 80 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 81 | db := newTempDB(t, backend) 82 | itr := db.Iterator(nil, nil) 83 | 84 | checkInvalid(t, itr) 85 | }) 86 | } 87 | } 88 | 89 | func TestDBIteratorEmptyBeginAfter(t *testing.T) { 90 | for backend := range backends { 91 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 92 | db := newTempDB(t, backend) 93 | itr := db.Iterator(bz("1"), nil) 94 | 95 | checkInvalid(t, itr) 96 | }) 97 | } 98 | } 99 | 100 | func TestDBIteratorNonemptyBeginAfter(t *testing.T) { 101 | for backend := range backends { 102 | t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { 103 | db := newTempDB(t, backend) 104 | db.SetSync(bz("1"), bz("value_1")) 105 | itr := db.Iterator(bz("2"), nil) 106 | 107 | checkInvalid(t, itr) 108 | }) 109 | } 110 | } 111 | 112 | func TestDBBatchWrite1(t *testing.T) { 113 | mdb := newMockDB() 114 | ddb := NewDebugDB(t.Name(), mdb) 115 | batch := ddb.NewBatch() 116 | 117 | batch.Set(bz("1"), bz("1")) 118 | batch.Set(bz("2"), bz("2")) 119 | batch.Delete(bz("3")) 120 | batch.Set(bz("4"), bz("4")) 121 | batch.Write() 122 | 123 | assert.Equal(t, 0, mdb.calls["Set"]) 124 | assert.Equal(t, 0, mdb.calls["SetSync"]) 125 | assert.Equal(t, 3, 
mdb.calls["SetNoLock"]) 126 | assert.Equal(t, 0, mdb.calls["SetNoLockSync"]) 127 | assert.Equal(t, 0, mdb.calls["Delete"]) 128 | assert.Equal(t, 0, mdb.calls["DeleteSync"]) 129 | assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) 130 | assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) 131 | } 132 | 133 | func TestDBBatchWrite2(t *testing.T) { 134 | mdb := newMockDB() 135 | ddb := NewDebugDB(t.Name(), mdb) 136 | batch := ddb.NewBatch() 137 | 138 | batch.Set(bz("1"), bz("1")) 139 | batch.Set(bz("2"), bz("2")) 140 | batch.Set(bz("4"), bz("4")) 141 | batch.Delete(bz("3")) 142 | batch.Write() 143 | 144 | assert.Equal(t, 0, mdb.calls["Set"]) 145 | assert.Equal(t, 0, mdb.calls["SetSync"]) 146 | assert.Equal(t, 3, mdb.calls["SetNoLock"]) 147 | assert.Equal(t, 0, mdb.calls["SetNoLockSync"]) 148 | assert.Equal(t, 0, mdb.calls["Delete"]) 149 | assert.Equal(t, 0, mdb.calls["DeleteSync"]) 150 | assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) 151 | assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) 152 | } 153 | 154 | func TestDBBatchWriteSync1(t *testing.T) { 155 | mdb := newMockDB() 156 | ddb := NewDebugDB(t.Name(), mdb) 157 | batch := ddb.NewBatch() 158 | 159 | batch.Set(bz("1"), bz("1")) 160 | batch.Set(bz("2"), bz("2")) 161 | batch.Delete(bz("3")) 162 | batch.Set(bz("4"), bz("4")) 163 | batch.WriteSync() 164 | 165 | assert.Equal(t, 0, mdb.calls["Set"]) 166 | assert.Equal(t, 0, mdb.calls["SetSync"]) 167 | assert.Equal(t, 2, mdb.calls["SetNoLock"]) 168 | assert.Equal(t, 1, mdb.calls["SetNoLockSync"]) 169 | assert.Equal(t, 0, mdb.calls["Delete"]) 170 | assert.Equal(t, 0, mdb.calls["DeleteSync"]) 171 | assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) 172 | assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) 173 | } 174 | 175 | func TestDBBatchWriteSync2(t *testing.T) { 176 | mdb := newMockDB() 177 | ddb := NewDebugDB(t.Name(), mdb) 178 | batch := ddb.NewBatch() 179 | 180 | batch.Set(bz("1"), bz("1")) 181 | batch.Set(bz("2"), bz("2")) 182 | batch.Set(bz("4"), bz("4")) 183 | batch.Delete(bz("3")) 184 | batch.WriteSync() 185 | 186 | assert.Equal(t, 0, mdb.calls["Set"]) 187 | assert.Equal(t, 0, mdb.calls["SetSync"]) 188 | assert.Equal(t, 3, mdb.calls["SetNoLock"]) 189 | assert.Equal(t, 0, mdb.calls["SetNoLockSync"]) 190 | assert.Equal(t, 0, mdb.calls["Delete"]) 191 | assert.Equal(t, 0, mdb.calls["DeleteSync"]) 192 | assert.Equal(t, 0, mdb.calls["DeleteNoLock"]) 193 | assert.Equal(t, 1, mdb.calls["DeleteNoLockSync"]) 194 | } 195 | -------------------------------------------------------------------------------- /db/go_level_db_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "fmt" 7 | "testing" 8 | 9 | cmn "github.com/tendermint/tmlibs/common" 10 | ) 11 | 12 | func BenchmarkRandomReadsWrites(b *testing.B) { 13 | b.StopTimer() 14 | 15 | numItems := int64(1000000) 16 | internal := map[int64]int64{} 17 | for i := 0; i < int(numItems); i++ { 18 | internal[int64(i)] = int64(0) 19 | } 20 | db, err := NewGoLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "") 21 | if err != nil { 22 | b.Fatal(err.Error()) 23 | return 24 | } 25 | 26 | fmt.Println("ok, starting") 27 | b.StartTimer() 28 | 29 | for i := 0; i < b.N; i++ { 30 | // Write something 31 | { 32 | idx := (int64(cmn.RandInt()) % numItems) 33 | internal[idx]++ 34 | val := internal[idx] 35 | idxBytes := int642Bytes(int64(idx)) 36 | valBytes := int642Bytes(int64(val)) 37 | //fmt.Printf("Set %X -> %X\n", idxBytes, valBytes) 38 | db.Set( 39 | idxBytes, 40 | valBytes, 41 | ) 
42 | } 43 | // Read something 44 | { 45 | idx := (int64(cmn.RandInt()) % numItems) 46 | val := internal[idx] 47 | idxBytes := int642Bytes(int64(idx)) 48 | valBytes := db.Get(idxBytes) 49 | //fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) 50 | if val == 0 { 51 | if !bytes.Equal(valBytes, nil) { 52 | b.Errorf("Expected %v for %v, got %X", 53 | nil, idx, valBytes) 54 | break 55 | } 56 | } else { 57 | if len(valBytes) != 8 { 58 | b.Errorf("Expected length 8 for %v, got %X", 59 | idx, valBytes) 60 | break 61 | } 62 | valGot := bytes2Int64(valBytes) 63 | if val != valGot { 64 | b.Errorf("Expected %v for %v, got %v", 65 | val, idx, valGot) 66 | break 67 | } 68 | } 69 | } 70 | } 71 | 72 | db.Close() 73 | } 74 | 75 | func int642Bytes(i int64) []byte { 76 | buf := make([]byte, 8) 77 | binary.BigEndian.PutUint64(buf, uint64(i)) 78 | return buf 79 | } 80 | 81 | func bytes2Int64(buf []byte) int64 { 82 | return int64(binary.BigEndian.Uint64(buf)) 83 | } 84 | -------------------------------------------------------------------------------- /db/mem_batch.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | type atomicSetDeleter interface { 8 | Mutex() *sync.Mutex 9 | SetNoLock(key, value []byte) 10 | SetNoLockSync(key, value []byte) 11 | DeleteNoLock(key []byte) 12 | DeleteNoLockSync(key []byte) 13 | } 14 | 15 | type memBatch struct { 16 | db atomicSetDeleter 17 | ops []operation 18 | } 19 | 20 | type opType int 21 | 22 | const ( 23 | opTypeSet opType = 1 24 | opTypeDelete opType = 2 25 | ) 26 | 27 | type operation struct { 28 | opType 29 | key []byte 30 | value []byte 31 | } 32 | 33 | func (mBatch *memBatch) Set(key, value []byte) { 34 | mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value}) 35 | } 36 | 37 | func (mBatch *memBatch) Delete(key []byte) { 38 | mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil}) 39 | } 40 | 41 | func (mBatch *memBatch) Write() { 42 | mBatch.write(false) 43 | } 44 | 45 | func (mBatch *memBatch) WriteSync() { 46 | mBatch.write(true) 47 | } 48 | 49 | func (mBatch *memBatch) write(doSync bool) { 50 | if mtx := mBatch.db.Mutex(); mtx != nil { 51 | mtx.Lock() 52 | defer mtx.Unlock() 53 | } 54 | 55 | for i, op := range mBatch.ops { 56 | if doSync && i == (len(mBatch.ops)-1) { 57 | switch op.opType { 58 | case opTypeSet: 59 | mBatch.db.SetNoLockSync(op.key, op.value) 60 | case opTypeDelete: 61 | mBatch.db.DeleteNoLockSync(op.key) 62 | } 63 | break // we're done. 64 | } 65 | switch op.opType { 66 | case opTypeSet: 67 | mBatch.db.SetNoLock(op.key, op.value) 68 | case opTypeDelete: 69 | mBatch.db.DeleteNoLock(op.key) 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /db/mem_db.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | "sync" 7 | ) 8 | 9 | func init() { 10 | registerDBCreator(MemDBBackend, func(name string, dir string) (DB, error) { 11 | return NewMemDB(), nil 12 | }, false) 13 | } 14 | 15 | var _ DB = (*MemDB)(nil) 16 | 17 | type MemDB struct { 18 | mtx sync.Mutex 19 | db map[string][]byte 20 | } 21 | 22 | func NewMemDB() *MemDB { 23 | database := &MemDB{ 24 | db: make(map[string][]byte), 25 | } 26 | return database 27 | } 28 | 29 | // Implements atomicSetDeleter. 30 | func (db *MemDB) Mutex() *sync.Mutex { 31 | return &(db.mtx) 32 | } 33 | 34 | // Implements DB. 
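// Get returns nil when the key is absent; a nil key is normalized to
// []byte{} via nonNilBytes (see types.go).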
35 | func (db *MemDB) Get(key []byte) []byte { 36 | db.mtx.Lock() 37 | defer db.mtx.Unlock() 38 | key = nonNilBytes(key) 39 | 40 | value := db.db[string(key)] 41 | return value 42 | } 43 | 44 | // Implements DB. 45 | func (db *MemDB) Has(key []byte) bool { 46 | db.mtx.Lock() 47 | defer db.mtx.Unlock() 48 | key = nonNilBytes(key) 49 | 50 | _, ok := db.db[string(key)] 51 | return ok 52 | } 53 | 54 | // Implements DB. 55 | func (db *MemDB) Set(key []byte, value []byte) { 56 | db.mtx.Lock() 57 | defer db.mtx.Unlock() 58 | 59 | db.SetNoLock(key, value) 60 | } 61 | 62 | // Implements DB. 63 | func (db *MemDB) SetSync(key []byte, value []byte) { 64 | db.mtx.Lock() 65 | defer db.mtx.Unlock() 66 | 67 | db.SetNoLock(key, value) 68 | } 69 | 70 | // Implements atomicSetDeleter. 71 | func (db *MemDB) SetNoLock(key []byte, value []byte) { 72 | db.SetNoLockSync(key, value) 73 | } 74 | 75 | // Implements atomicSetDeleter. 76 | func (db *MemDB) SetNoLockSync(key []byte, value []byte) { 77 | key = nonNilBytes(key) 78 | value = nonNilBytes(value) 79 | 80 | db.db[string(key)] = value 81 | } 82 | 83 | // Implements DB. 84 | func (db *MemDB) Delete(key []byte) { 85 | db.mtx.Lock() 86 | defer db.mtx.Unlock() 87 | 88 | db.DeleteNoLock(key) 89 | } 90 | 91 | // Implements DB. 92 | func (db *MemDB) DeleteSync(key []byte) { 93 | db.mtx.Lock() 94 | defer db.mtx.Unlock() 95 | 96 | db.DeleteNoLock(key) 97 | } 98 | 99 | // Implements atomicSetDeleter. 100 | func (db *MemDB) DeleteNoLock(key []byte) { 101 | db.DeleteNoLockSync(key) 102 | } 103 | 104 | // Implements atomicSetDeleter. 105 | func (db *MemDB) DeleteNoLockSync(key []byte) { 106 | key = nonNilBytes(key) 107 | 108 | delete(db.db, string(key)) 109 | } 110 | 111 | // Implements DB. 112 | func (db *MemDB) Close() { 113 | // Close is a noop since for an in-memory 114 | // database, we don't have a destination 115 | // to flush contents to nor do we want 116 | // any data loss on invoking Close() 117 | // See the discussion in https://github.com/tendermint/tmlibs/pull/56 118 | } 119 | 120 | // Implements DB. 121 | func (db *MemDB) Print() { 122 | db.mtx.Lock() 123 | defer db.mtx.Unlock() 124 | 125 | for key, value := range db.db { 126 | fmt.Printf("[%X]:\t[%X]\n", []byte(key), value) 127 | } 128 | } 129 | 130 | // Implements DB. 131 | func (db *MemDB) Stats() map[string]string { 132 | db.mtx.Lock() 133 | defer db.mtx.Unlock() 134 | 135 | stats := make(map[string]string) 136 | stats["database.type"] = "memDB" 137 | stats["database.size"] = fmt.Sprintf("%d", len(db.db)) 138 | return stats 139 | } 140 | 141 | // Implements DB. 142 | func (db *MemDB) NewBatch() Batch { 143 | db.mtx.Lock() 144 | defer db.mtx.Unlock() 145 | 146 | return &memBatch{db, nil} 147 | } 148 | 149 | //---------------------------------------- 150 | // Iterator 151 | 152 | // Implements DB. 153 | func (db *MemDB) Iterator(start, end []byte) Iterator { 154 | db.mtx.Lock() 155 | defer db.mtx.Unlock() 156 | 157 | keys := db.getSortedKeys(start, end, false) 158 | return newMemDBIterator(db, keys, start, end) 159 | } 160 | 161 | // Implements DB. 162 | func (db *MemDB) ReverseIterator(start, end []byte) Iterator { 163 | db.mtx.Lock() 164 | defer db.mtx.Unlock() 165 | 166 | keys := db.getSortedKeys(start, end, true) 167 | return newMemDBIterator(db, keys, start, end) 168 | } 169 | 170 | // We need a copy of all of the keys. 171 | // Not the best, but probably not a bottleneck depending. 
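// The key set is snapshotted into a sorted slice when the iterator is
// created (see getSortedKeys below); Value() still reads through to the
// live DB.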
172 | type memDBIterator struct { 173 | db DB 174 | cur int 175 | keys []string 176 | start []byte 177 | end []byte 178 | } 179 | 180 | var _ Iterator = (*memDBIterator)(nil) 181 | 182 | // Keys is expected to be in reverse order for reverse iterators. 183 | func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator { 184 | return &memDBIterator{ 185 | db: db, 186 | cur: 0, 187 | keys: keys, 188 | start: start, 189 | end: end, 190 | } 191 | } 192 | 193 | // Implements Iterator. 194 | func (itr *memDBIterator) Domain() ([]byte, []byte) { 195 | return itr.start, itr.end 196 | } 197 | 198 | // Implements Iterator. 199 | func (itr *memDBIterator) Valid() bool { 200 | return 0 <= itr.cur && itr.cur < len(itr.keys) 201 | } 202 | 203 | // Implements Iterator. 204 | func (itr *memDBIterator) Next() { 205 | itr.assertIsValid() 206 | itr.cur++ 207 | } 208 | 209 | // Implements Iterator. 210 | func (itr *memDBIterator) Key() []byte { 211 | itr.assertIsValid() 212 | return []byte(itr.keys[itr.cur]) 213 | } 214 | 215 | // Implements Iterator. 216 | func (itr *memDBIterator) Value() []byte { 217 | itr.assertIsValid() 218 | key := []byte(itr.keys[itr.cur]) 219 | return itr.db.Get(key) 220 | } 221 | 222 | // Implements Iterator. 223 | func (itr *memDBIterator) Close() { 224 | itr.keys = nil 225 | itr.db = nil 226 | } 227 | 228 | func (itr *memDBIterator) assertIsValid() { 229 | if !itr.Valid() { 230 | panic("memDBIterator is invalid") 231 | } 232 | } 233 | 234 | //---------------------------------------- 235 | // Misc. 236 | 237 | func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { 238 | keys := []string{} 239 | for key := range db.db { 240 | inDomain := IsKeyInDomain([]byte(key), start, end, reverse) 241 | if inDomain { 242 | keys = append(keys, key) 243 | } 244 | } 245 | sort.Strings(keys) 246 | if reverse { 247 | nkeys := len(keys) 248 | for i := 0; i < nkeys/2; i++ { 249 | temp := keys[i] 250 | keys[i] = keys[nkeys-i-1] 251 | keys[nkeys-i-1] = temp 252 | } 253 | } 254 | return keys 255 | } 256 | -------------------------------------------------------------------------------- /db/prefix_db_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import "testing" 4 | 5 | func mockDBWithStuff() DB { 6 | db := NewMemDB() 7 | // Under "key" prefix 8 | db.Set(bz("key"), bz("value")) 9 | db.Set(bz("key1"), bz("value1")) 10 | db.Set(bz("key2"), bz("value2")) 11 | db.Set(bz("key3"), bz("value3")) 12 | db.Set(bz("something"), bz("else")) 13 | db.Set(bz(""), bz("")) 14 | db.Set(bz("k"), bz("val")) 15 | db.Set(bz("ke"), bz("valu")) 16 | db.Set(bz("kee"), bz("valuu")) 17 | return db 18 | } 19 | 20 | func TestPrefixDBSimple(t *testing.T) { 21 | db := mockDBWithStuff() 22 | pdb := NewPrefixDB(db, bz("key")) 23 | 24 | checkValue(t, pdb, bz("key"), nil) 25 | checkValue(t, pdb, bz(""), bz("value")) 26 | checkValue(t, pdb, bz("key1"), nil) 27 | checkValue(t, pdb, bz("1"), bz("value1")) 28 | checkValue(t, pdb, bz("key2"), nil) 29 | checkValue(t, pdb, bz("2"), bz("value2")) 30 | checkValue(t, pdb, bz("key3"), nil) 31 | checkValue(t, pdb, bz("3"), bz("value3")) 32 | checkValue(t, pdb, bz("something"), nil) 33 | checkValue(t, pdb, bz("k"), nil) 34 | checkValue(t, pdb, bz("ke"), nil) 35 | checkValue(t, pdb, bz("kee"), nil) 36 | } 37 | 38 | func TestPrefixDBIterator1(t *testing.T) { 39 | db := mockDBWithStuff() 40 | pdb := NewPrefixDB(db, bz("key")) 41 | 42 | itr := pdb.Iterator(nil, nil) 43 | checkDomain(t, itr, nil, nil) 44 | 
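	// The "key" prefix is stripped by the PrefixDB, so the bare "key" entry
	// from mockDBWithStuff surfaces here as the empty key.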
checkItem(t, itr, bz(""), bz("value")) 45 | checkNext(t, itr, true) 46 | checkItem(t, itr, bz("1"), bz("value1")) 47 | checkNext(t, itr, true) 48 | checkItem(t, itr, bz("2"), bz("value2")) 49 | checkNext(t, itr, true) 50 | checkItem(t, itr, bz("3"), bz("value3")) 51 | checkNext(t, itr, false) 52 | checkInvalid(t, itr) 53 | itr.Close() 54 | } 55 | 56 | func TestPrefixDBIterator2(t *testing.T) { 57 | db := mockDBWithStuff() 58 | pdb := NewPrefixDB(db, bz("key")) 59 | 60 | itr := pdb.Iterator(nil, bz("")) 61 | checkDomain(t, itr, nil, bz("")) 62 | checkInvalid(t, itr) 63 | itr.Close() 64 | } 65 | 66 | func TestPrefixDBIterator3(t *testing.T) { 67 | db := mockDBWithStuff() 68 | pdb := NewPrefixDB(db, bz("key")) 69 | 70 | itr := pdb.Iterator(bz(""), nil) 71 | checkDomain(t, itr, bz(""), nil) 72 | checkItem(t, itr, bz(""), bz("value")) 73 | checkNext(t, itr, true) 74 | checkItem(t, itr, bz("1"), bz("value1")) 75 | checkNext(t, itr, true) 76 | checkItem(t, itr, bz("2"), bz("value2")) 77 | checkNext(t, itr, true) 78 | checkItem(t, itr, bz("3"), bz("value3")) 79 | checkNext(t, itr, false) 80 | checkInvalid(t, itr) 81 | itr.Close() 82 | } 83 | 84 | func TestPrefixDBIterator4(t *testing.T) { 85 | db := mockDBWithStuff() 86 | pdb := NewPrefixDB(db, bz("key")) 87 | 88 | itr := pdb.Iterator(bz(""), bz("")) 89 | checkDomain(t, itr, bz(""), bz("")) 90 | checkInvalid(t, itr) 91 | itr.Close() 92 | } 93 | 94 | func TestPrefixDBReverseIterator1(t *testing.T) { 95 | db := mockDBWithStuff() 96 | pdb := NewPrefixDB(db, bz("key")) 97 | 98 | itr := pdb.ReverseIterator(nil, nil) 99 | checkDomain(t, itr, nil, nil) 100 | checkItem(t, itr, bz("3"), bz("value3")) 101 | checkNext(t, itr, true) 102 | checkItem(t, itr, bz("2"), bz("value2")) 103 | checkNext(t, itr, true) 104 | checkItem(t, itr, bz("1"), bz("value1")) 105 | checkNext(t, itr, true) 106 | checkItem(t, itr, bz(""), bz("value")) 107 | checkNext(t, itr, false) 108 | checkInvalid(t, itr) 109 | itr.Close() 110 | } 111 | 112 | func TestPrefixDBReverseIterator2(t *testing.T) { 113 | db := mockDBWithStuff() 114 | pdb := NewPrefixDB(db, bz("key")) 115 | 116 | itr := pdb.ReverseIterator(nil, bz("")) 117 | checkDomain(t, itr, nil, bz("")) 118 | checkItem(t, itr, bz("3"), bz("value3")) 119 | checkNext(t, itr, true) 120 | checkItem(t, itr, bz("2"), bz("value2")) 121 | checkNext(t, itr, true) 122 | checkItem(t, itr, bz("1"), bz("value1")) 123 | checkNext(t, itr, false) 124 | checkInvalid(t, itr) 125 | itr.Close() 126 | } 127 | 128 | func TestPrefixDBReverseIterator3(t *testing.T) { 129 | db := mockDBWithStuff() 130 | pdb := NewPrefixDB(db, bz("key")) 131 | 132 | itr := pdb.ReverseIterator(bz(""), nil) 133 | checkDomain(t, itr, bz(""), nil) 134 | checkItem(t, itr, bz(""), bz("value")) 135 | checkNext(t, itr, false) 136 | checkInvalid(t, itr) 137 | itr.Close() 138 | } 139 | 140 | func TestPrefixDBReverseIterator4(t *testing.T) { 141 | db := mockDBWithStuff() 142 | pdb := NewPrefixDB(db, bz("key")) 143 | 144 | itr := pdb.ReverseIterator(bz(""), bz("")) 145 | checkInvalid(t, itr) 146 | itr.Close() 147 | } 148 | -------------------------------------------------------------------------------- /db/remotedb/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | remotedb is a package for connecting to distributed Tendermint db.DB 3 | instances. The purpose is to detach difficult deployments such as 4 | CLevelDB that requires gcc or perhaps for databases that require 5 | custom configurations such as extra disk space. 
It also eases 6 | the burden and cost of deploying database dependencies for 7 | Tendermint developers. Most importantly, it is built on the 8 | high-performance gRPC transport. 9 | 10 | remotedb's RemoteDB implements db.DB, so it can be used just 11 | like other databases. One just has to explicitly connect to the 12 | remote database with a client setup such as: 13 | 14 | client, err := remotedb.NewInsecure(addr) 15 | // Make sure to invoke InitRemote! 16 | if err := client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"}); err != nil { 17 | log.Fatalf("Failed to initialize the remote db") 18 | } 19 | 20 | client.Set(key1, value) 21 | client.SetSync(k2, v2) 22 | 23 | client.Delete(k1) 24 | gv2 := client.Get(k1) 25 | 26 | for itr := client.Iterator(k1, k9); itr.Valid(); itr.Next() { 27 | ik, iv := itr.Key(), itr.Value() 28 | ds, de := itr.Domain() 29 | } 30 | 31 | stats := client.Stats() 32 | 33 | if !client.Has(dk1) { 34 | client.SetSync(dk1, dv1) 35 | } 36 | */ 37 | package remotedb 38 | -------------------------------------------------------------------------------- /db/remotedb/grpcdb/client.go: -------------------------------------------------------------------------------- 1 | package grpcdb 2 | 3 | import ( 4 | "google.golang.org/grpc" 5 | "google.golang.org/grpc/credentials" 6 | 7 | protodb "github.com/tendermint/tmlibs/db/remotedb/proto" 8 | ) 9 | 10 | // Security defines how the client will talk to the gRPC server. 11 | type Security uint 12 | 13 | const ( 14 | Insecure Security = iota 15 | Secure 16 | ) 17 | 18 | // NewClient creates a gRPC client connected to the bound gRPC server at serverAddr. 19 | // The connection is secured with TLS using the certificate at serverCert. 20 | func NewClient(serverAddr, serverCert string) (protodb.DBClient, error) { 21 | creds, err := credentials.NewClientTLSFromFile(serverCert, "") 22 | if err != nil { 23 | return nil, err 24 | } 25 | cc, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds)) 26 | if err != nil { 27 | return nil, err 28 | } 29 | return protodb.NewDBClient(cc), nil 30 | } 31 | -------------------------------------------------------------------------------- /db/remotedb/grpcdb/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | grpcdb exposes Tendermint's db.DB instances over the gRPC transport, 3 | decoupling local db.DB usage from applications so that databases can 4 | be served over a network in a highly performant manner. 5 | 6 | grpcdb allows users to initialize a database server much like 7 | they would locally and invoke the respective db.DB methods. 8 | 9 | Most users shouldn't use this package, but should instead use 10 | remotedb.
Only the lower level users and database server deployers 11 | should use it, for functionality such as: 12 | 13 | ln, err := net.Listen("tcp", "0.0.0.0:0") 14 | srv := grpcdb.NewServer() 15 | defer srv.Stop() 16 | go func() { 17 | if err := srv.Serve(ln); err != nil { 18 | t.Fatalf("BindServer: %v", err) 19 | } 20 | }() 21 | 22 | or 23 | addr := ":8998" 24 | cert := "server.crt" 25 | key := "server.key" 26 | go func() { 27 | if err := grpcdb.ListenAndServe(addr, cert, key); err != nil { 28 | log.Fatalf("BindServer: %v", err) 29 | } 30 | }() 31 | */ 32 | package grpcdb 33 | -------------------------------------------------------------------------------- /db/remotedb/grpcdb/example_test.go: -------------------------------------------------------------------------------- 1 | package grpcdb_test 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "log" 7 | 8 | grpcdb "github.com/tendermint/tmlibs/db/remotedb/grpcdb" 9 | protodb "github.com/tendermint/tmlibs/db/remotedb/proto" 10 | ) 11 | 12 | func Example() { 13 | addr := ":8998" 14 | cert := "server.crt" 15 | key := "server.key" 16 | go func() { 17 | if err := grpcdb.ListenAndServe(addr, cert, key); err != nil { 18 | log.Fatalf("BindServer: %v", err) 19 | } 20 | }() 21 | 22 | client, err := grpcdb.NewClient(addr, cert) 23 | if err != nil { 24 | log.Fatalf("Failed to create grpcDB client: %v", err) 25 | } 26 | 27 | ctx := context.Background() 28 | // 1. Initialize the DB 29 | in := &protodb.Init{ 30 | Type: "leveldb", 31 | Name: "grpc-uno-test", 32 | Dir: ".", 33 | } 34 | if _, err := client.Init(ctx, in); err != nil { 35 | log.Fatalf("Init error: %v", err) 36 | } 37 | 38 | // 2. Now it can be used! 39 | query1 := &protodb.Entity{Key: []byte("Project"), Value: []byte("Tmlibs-on-gRPC")} 40 | if _, err := client.SetSync(ctx, query1); err != nil { 41 | log.Fatalf("SetSync err: %v", err) 42 | } 43 | 44 | query2 := &protodb.Entity{Key: []byte("Project")} 45 | read, err := client.Get(ctx, query2) 46 | if err != nil { 47 | log.Fatalf("Get err: %v", err) 48 | } 49 | if g, w := read.Value, []byte("Tmlibs-on-gRPC"); !bytes.Equal(g, w) { 50 | log.Fatalf("got= (%q ==> % X)\nwant=(%q ==> % X)", g, g, w, w) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /db/remotedb/grpcdb/server.go: -------------------------------------------------------------------------------- 1 | package grpcdb 2 | 3 | import ( 4 | "context" 5 | "net" 6 | "sync" 7 | "time" 8 | 9 | "google.golang.org/grpc" 10 | "google.golang.org/grpc/credentials" 11 | 12 | "github.com/tendermint/tmlibs/db" 13 | protodb "github.com/tendermint/tmlibs/db/remotedb/proto" 14 | ) 15 | 16 | // ListenAndServe is a blocking function that sets up a gRPC based 17 | // server at the address supplied, with the gRPC options passed in. 18 | // Normally in usage, invoke it in a goroutine like you would for http.ListenAndServe. 19 | func ListenAndServe(addr, cert, key string, opts ...grpc.ServerOption) error { 20 | ln, err := net.Listen("tcp", addr) 21 | if err != nil { 22 | return err 23 | } 24 | srv, err := NewServer(cert, key, opts...) 25 | if err != nil { 26 | return err 27 | } 28 | return srv.Serve(ln) 29 | } 30 | 31 | func NewServer(cert, key string, opts ...grpc.ServerOption) (*grpc.Server, error) { 32 | creds, err := credentials.NewServerTLSFromFile(cert, key) 33 | if err != nil { 34 | return nil, err 35 | } 36 | opts = append(opts, grpc.Creds(creds)) 37 | srv := grpc.NewServer(opts...) 
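	// Register this package's server type as the DB service implementation
	// on the freshly created gRPC server.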
38 | protodb.RegisterDBServer(srv, new(server)) 39 | return srv, nil 40 | } 41 | 42 | type server struct { 43 | mu sync.Mutex 44 | db db.DB 45 | } 46 | 47 | var _ protodb.DBServer = (*server)(nil) 48 | 49 | // Init initializes the server's database. Only one type of database 50 | // can be initialized per server. 51 | // 52 | // Dir is the directory on the file system in which the DB will be stored(if backed by disk) (TODO: remove) 53 | // 54 | // Name is representative filesystem entry's basepath 55 | // 56 | // Type can be either one of: 57 | // * cleveldb (if built with gcc enabled) 58 | // * fsdb 59 | // * memdB 60 | // * leveldb 61 | // See https://godoc.org/github.com/tendermint/tmlibs/db#DBBackendType 62 | func (s *server) Init(ctx context.Context, in *protodb.Init) (*protodb.Entity, error) { 63 | s.mu.Lock() 64 | defer s.mu.Unlock() 65 | 66 | s.db = db.NewDB(in.Name, db.DBBackendType(in.Type), in.Dir) 67 | return &protodb.Entity{CreatedAt: time.Now().Unix()}, nil 68 | } 69 | 70 | func (s *server) Delete(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { 71 | s.db.Delete(in.Key) 72 | return nothing, nil 73 | } 74 | 75 | var nothing = new(protodb.Nothing) 76 | 77 | func (s *server) DeleteSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { 78 | s.db.DeleteSync(in.Key) 79 | return nothing, nil 80 | } 81 | 82 | func (s *server) Get(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) { 83 | value := s.db.Get(in.Key) 84 | return &protodb.Entity{Value: value}, nil 85 | } 86 | 87 | func (s *server) GetStream(ds protodb.DB_GetStreamServer) error { 88 | // Receive routine 89 | responsesChan := make(chan *protodb.Entity) 90 | go func() { 91 | defer close(responsesChan) 92 | ctx := context.Background() 93 | for { 94 | in, err := ds.Recv() 95 | if err != nil { 96 | responsesChan <- &protodb.Entity{Err: err.Error()} 97 | return 98 | } 99 | out, err := s.Get(ctx, in) 100 | if err != nil { 101 | if out == nil { 102 | out = new(protodb.Entity) 103 | out.Key = in.Key 104 | } 105 | out.Err = err.Error() 106 | responsesChan <- out 107 | return 108 | } 109 | 110 | // Otherwise continue on 111 | responsesChan <- out 112 | } 113 | }() 114 | 115 | // Send routine, block until we return 116 | for out := range responsesChan { 117 | if err := ds.Send(out); err != nil { 118 | return err 119 | } 120 | } 121 | return nil 122 | } 123 | 124 | func (s *server) Has(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) { 125 | exists := s.db.Has(in.Key) 126 | return &protodb.Entity{Exists: exists}, nil 127 | } 128 | 129 | func (s *server) Set(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { 130 | s.db.Set(in.Key, in.Value) 131 | return nothing, nil 132 | } 133 | 134 | func (s *server) SetSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { 135 | s.db.SetSync(in.Key, in.Value) 136 | return nothing, nil 137 | } 138 | 139 | func (s *server) Iterator(query *protodb.Entity, dis protodb.DB_IteratorServer) error { 140 | it := s.db.Iterator(query.Start, query.End) 141 | return s.handleIterator(it, dis.Send) 142 | } 143 | 144 | func (s *server) handleIterator(it db.Iterator, sendFunc func(*protodb.Iterator) error) error { 145 | for it.Valid() { 146 | start, end := it.Domain() 147 | out := &protodb.Iterator{ 148 | Domain: &protodb.Domain{Start: start, End: end}, 149 | Valid: it.Valid(), 150 | Key: it.Key(), 151 | Value: it.Value(), 152 | } 153 | if err := sendFunc(out); err != nil { 154 | return err 155 | } 156 | 157 | // 
Finally move the iterator forward 158 | it.Next() 159 | } 160 | return nil 161 | } 162 | 163 | func (s *server) ReverseIterator(query *protodb.Entity, dis protodb.DB_ReverseIteratorServer) error { 164 | it := s.db.ReverseIterator(query.Start, query.End) 165 | return s.handleIterator(it, dis.Send) 166 | } 167 | 168 | func (s *server) Stats(context.Context, *protodb.Nothing) (*protodb.Stats, error) { 169 | stats := s.db.Stats() 170 | return &protodb.Stats{Data: stats, TimeAt: time.Now().Unix()}, nil 171 | } 172 | 173 | func (s *server) BatchWrite(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) { 174 | return s.batchWrite(c, b, false) 175 | } 176 | 177 | func (s *server) BatchWriteSync(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) { 178 | return s.batchWrite(c, b, true) 179 | } 180 | 181 | func (s *server) batchWrite(c context.Context, b *protodb.Batch, sync bool) (*protodb.Nothing, error) { 182 | bat := s.db.NewBatch() 183 | for _, op := range b.Ops { 184 | switch op.Type { 185 | case protodb.Operation_SET: 186 | bat.Set(op.Entity.Key, op.Entity.Value) 187 | case protodb.Operation_DELETE: 188 | bat.Delete(op.Entity.Key) 189 | } 190 | } 191 | if sync { 192 | bat.WriteSync() 193 | } else { 194 | bat.Write() 195 | } 196 | return nothing, nil 197 | } 198 | -------------------------------------------------------------------------------- /db/remotedb/proto/defs.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package protodb; 4 | 5 | message Batch { 6 | repeated Operation ops = 1; 7 | } 8 | 9 | message Operation { 10 | Entity entity = 1; 11 | enum Type { 12 | SET = 0; 13 | DELETE = 1; 14 | } 15 | Type type = 2; 16 | } 17 | 18 | message Entity { 19 | int32 id = 1; 20 | bytes key = 2; 21 | bytes value = 3; 22 | bool exists = 4; 23 | bytes start = 5; 24 | bytes end = 6; 25 | string err = 7; 26 | int64 created_at = 8; 27 | } 28 | 29 | message Nothing { 30 | } 31 | 32 | message Domain { 33 | bytes start = 1; 34 | bytes end = 2; 35 | } 36 | 37 | message Iterator { 38 | Domain domain = 1; 39 | bool valid = 2; 40 | bytes key = 3; 41 | bytes value = 4; 42 | } 43 | 44 | message Stats { 45 | map data = 1; 46 | int64 time_at = 2; 47 | } 48 | 49 | message Init { 50 | string Type = 1; 51 | string Name = 2; 52 | string Dir = 3; 53 | } 54 | 55 | service DB { 56 | rpc init(Init) returns (Entity) {} 57 | rpc get(Entity) returns (Entity) {} 58 | rpc getStream(stream Entity) returns (stream Entity) {} 59 | 60 | rpc has(Entity) returns (Entity) {} 61 | rpc set(Entity) returns (Nothing) {} 62 | rpc setSync(Entity) returns (Nothing) {} 63 | rpc delete(Entity) returns (Nothing) {} 64 | rpc deleteSync(Entity) returns (Nothing) {} 65 | rpc iterator(Entity) returns (stream Iterator) {} 66 | rpc reverseIterator(Entity) returns (stream Iterator) {} 67 | // rpc print(Nothing) returns (Entity) {} 68 | rpc stats(Nothing) returns (Stats) {} 69 | rpc batchWrite(Batch) returns (Nothing) {} 70 | rpc batchWriteSync(Batch) returns (Nothing) {} 71 | } 72 | -------------------------------------------------------------------------------- /db/remotedb/remotedb_test.go: -------------------------------------------------------------------------------- 1 | package remotedb_test 2 | 3 | import ( 4 | "net" 5 | "os" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/require" 9 | 10 | "github.com/tendermint/tmlibs/db/remotedb" 11 | "github.com/tendermint/tmlibs/db/remotedb/grpcdb" 12 | ) 13 | 14 | func TestRemoteDB(t *testing.T) { 15 | cert := 
"::.crt" 16 | key := "::.key" 17 | ln, err := net.Listen("tcp", "0.0.0.0:0") 18 | require.Nil(t, err, "expecting a port to have been assigned on which we can listen") 19 | srv, err := grpcdb.NewServer(cert, key) 20 | require.Nil(t, err) 21 | defer srv.Stop() 22 | go func() { 23 | if err := srv.Serve(ln); err != nil { 24 | t.Fatalf("BindServer: %v", err) 25 | } 26 | }() 27 | 28 | client, err := remotedb.NewRemoteDB(ln.Addr().String(), cert) 29 | require.Nil(t, err, "expecting a successful client creation") 30 | dbName := "test-remote-db" 31 | require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "leveldb"})) 32 | defer func() { 33 | err := os.RemoveAll(dbName + ".db") 34 | if err != nil { 35 | panic(err) 36 | } 37 | }() 38 | 39 | k1 := []byte("key-1") 40 | v1 := client.Get(k1) 41 | require.Equal(t, 0, len(v1), "expecting no key1 to have been stored, got %X (%s)", v1, v1) 42 | vv1 := []byte("value-1") 43 | client.Set(k1, vv1) 44 | gv1 := client.Get(k1) 45 | require.Equal(t, gv1, vv1) 46 | 47 | // Simple iteration 48 | itr := client.Iterator(nil, nil) 49 | itr.Next() 50 | require.Equal(t, itr.Key(), []byte("key-1")) 51 | require.Equal(t, itr.Value(), []byte("value-1")) 52 | require.Panics(t, itr.Next) 53 | itr.Close() 54 | 55 | // Set some more keys 56 | k2 := []byte("key-2") 57 | v2 := []byte("value-2") 58 | client.SetSync(k2, v2) 59 | has := client.Has(k2) 60 | require.True(t, has) 61 | gv2 := client.Get(k2) 62 | require.Equal(t, gv2, v2) 63 | 64 | // More iteration 65 | itr = client.Iterator(nil, nil) 66 | itr.Next() 67 | require.Equal(t, itr.Key(), []byte("key-1")) 68 | require.Equal(t, itr.Value(), []byte("value-1")) 69 | itr.Next() 70 | require.Equal(t, itr.Key(), []byte("key-2")) 71 | require.Equal(t, itr.Value(), []byte("value-2")) 72 | require.Panics(t, itr.Next) 73 | itr.Close() 74 | 75 | // Deletion 76 | client.Delete(k1) 77 | client.DeleteSync(k2) 78 | gv1 = client.Get(k1) 79 | gv2 = client.Get(k2) 80 | require.Equal(t, len(gv2), 0, "after deletion, not expecting the key to exist anymore") 81 | require.Equal(t, len(gv1), 0, "after deletion, not expecting the key to exist anymore") 82 | 83 | // Batch tests - set 84 | k3 := []byte("key-3") 85 | k4 := []byte("key-4") 86 | k5 := []byte("key-5") 87 | v3 := []byte("value-3") 88 | v4 := []byte("value-4") 89 | v5 := []byte("value-5") 90 | bat := client.NewBatch() 91 | bat.Set(k3, v3) 92 | bat.Set(k4, v4) 93 | rv3 := client.Get(k3) 94 | require.Equal(t, 0, len(rv3), "expecting no k3 to have been stored") 95 | rv4 := client.Get(k4) 96 | require.Equal(t, 0, len(rv4), "expecting no k4 to have been stored") 97 | bat.Write() 98 | rv3 = client.Get(k3) 99 | require.Equal(t, rv3, v3, "expecting k3 to have been stored") 100 | rv4 = client.Get(k4) 101 | require.Equal(t, rv4, v4, "expecting k4 to have been stored") 102 | 103 | // Batch tests - deletion 104 | bat = client.NewBatch() 105 | bat.Delete(k4) 106 | bat.Delete(k3) 107 | bat.WriteSync() 108 | rv3 = client.Get(k3) 109 | require.Equal(t, 0, len(rv3), "expecting k3 to have been deleted") 110 | rv4 = client.Get(k4) 111 | require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted") 112 | 113 | // Batch tests - set and delete 114 | bat = client.NewBatch() 115 | bat.Set(k4, v4) 116 | bat.Set(k5, v5) 117 | bat.Delete(k4) 118 | bat.WriteSync() 119 | rv4 = client.Get(k4) 120 | require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted") 121 | rv5 := client.Get(k5) 122 | require.Equal(t, rv5, v5, "expecting k5 to have been stored") 123 | } 124 | 
-------------------------------------------------------------------------------- /db/types.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | // DBs are goroutine safe. 4 | type DB interface { 5 | 6 | // Get returns nil iff key doesn't exist. 7 | // A nil key is interpreted as an empty byteslice. 8 | // CONTRACT: key, value readonly []byte 9 | Get([]byte) []byte 10 | 11 | // Has checks if a key exists. 12 | // A nil key is interpreted as an empty byteslice. 13 | // CONTRACT: key, value readonly []byte 14 | Has(key []byte) bool 15 | 16 | // Set sets the key. 17 | // A nil key is interpreted as an empty byteslice. 18 | // CONTRACT: key, value readonly []byte 19 | Set([]byte, []byte) 20 | SetSync([]byte, []byte) 21 | 22 | // Delete deletes the key. 23 | // A nil key is interpreted as an empty byteslice. 24 | // CONTRACT: key readonly []byte 25 | Delete([]byte) 26 | DeleteSync([]byte) 27 | 28 | // Iterate over a domain of keys in ascending order. End is exclusive. 29 | // Start must be less than end, or the Iterator is invalid. 30 | // A nil start is interpreted as an empty byteslice. 31 | // If end is nil, iterates up to the last item (inclusive). 32 | // CONTRACT: No writes may happen within a domain while an iterator exists over it. 33 | // CONTRACT: start, end readonly []byte 34 | Iterator(start, end []byte) Iterator 35 | 36 | // Iterate over a domain of keys in descending order. End is exclusive. 37 | // Start must be greater than end, or the Iterator is invalid. 38 | // If start is nil, iterates from the last/greatest item (inclusive). 39 | // If end is nil, iterates up to the first/least item (inclusive). 40 | // CONTRACT: No writes may happen within a domain while an iterator exists over it. 41 | // CONTRACT: start, end readonly []byte 42 | ReverseIterator(start, end []byte) Iterator 43 | 44 | // Closes the connection. 45 | Close() 46 | 47 | // Creates a batch for atomic updates. 48 | NewBatch() Batch 49 | 50 | // For debugging 51 | Print() 52 | 53 | // Stats returns a map of property values for all keys and the size of the cache. 54 | Stats() map[string]string 55 | } 56 | 57 | //---------------------------------------- 58 | // Batch 59 | 60 | type Batch interface { 61 | SetDeleter 62 | Write() 63 | WriteSync() 64 | } 65 | 66 | type SetDeleter interface { 67 | Set(key, value []byte) // CONTRACT: key, value readonly []byte 68 | Delete(key []byte) // CONTRACT: key readonly []byte 69 | } 70 | 71 | //---------------------------------------- 72 | // Iterator 73 | 74 | /* 75 | Usage: 76 | 77 | var itr Iterator = ... 78 | defer itr.Close() 79 | 80 | for ; itr.Valid(); itr.Next() { 81 | k, v := itr.Key(); itr.Value() 82 | // ... 83 | } 84 | */ 85 | type Iterator interface { 86 | 87 | // The start & end (exclusive) limits to iterate over. 88 | // If end < start, then the Iterator goes in reverse order. 89 | // 90 | // A domain of ([]byte{12, 13}, []byte{12, 14}) will iterate 91 | // over anything with the prefix []byte{12, 13}. 92 | // 93 | // The smallest key is the empty byte array []byte{} - see BeginningKey(). 94 | // The largest key is the nil byte array []byte(nil) - see EndingKey(). 95 | // CONTRACT: start, end readonly []byte 96 | Domain() (start []byte, end []byte) 97 | 98 | // Valid returns whether the current position is valid. 99 | // Once invalid, an Iterator is forever invalid. 100 | Valid() bool 101 | 102 | // Next moves the iterator to the next sequential key in the database, as 103 | // defined by order of iteration. 
104 | // 105 | // If Valid returns false, this method will panic. 106 | Next() 107 | 108 | // Key returns the key of the cursor. 109 | // If Valid returns false, this method will panic. 110 | // CONTRACT: key readonly []byte 111 | Key() (key []byte) 112 | 113 | // Value returns the value of the cursor. 114 | // If Valid returns false, this method will panic. 115 | // CONTRACT: value readonly []byte 116 | Value() (value []byte) 117 | 118 | // Close releases the Iterator. 119 | Close() 120 | } 121 | 122 | // For testing convenience. 123 | func bz(s string) []byte { 124 | return []byte(s) 125 | } 126 | 127 | // We defensively turn nil keys or values into []byte{} for 128 | // most operations. 129 | func nonNilBytes(bz []byte) []byte { 130 | if bz == nil { 131 | return []byte{} 132 | } 133 | return bz 134 | } 135 | -------------------------------------------------------------------------------- /db/util.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "bytes" 5 | ) 6 | 7 | func cp(bz []byte) (ret []byte) { 8 | ret = make([]byte, len(bz)) 9 | copy(ret, bz) 10 | return ret 11 | } 12 | 13 | // Returns a slice of the same length (big endian) 14 | // except incremented by one. 15 | // Returns nil on overflow (e.g. if bz bytes are all 0xFF) 16 | // CONTRACT: len(bz) > 0 17 | func cpIncr(bz []byte) (ret []byte) { 18 | if len(bz) == 0 { 19 | panic("cpIncr expects non-zero bz length") 20 | } 21 | ret = cp(bz) 22 | for i := len(bz) - 1; i >= 0; i-- { 23 | if ret[i] < byte(0xFF) { 24 | ret[i]++ 25 | return 26 | } 27 | ret[i] = byte(0x00) 28 | if i == 0 { 29 | // Overflow 30 | return nil 31 | } 32 | } 33 | return nil 34 | } 35 | 36 | // Returns a slice of the same length (big endian) 37 | // except decremented by one. 38 | // Returns nil on underflow (e.g. if bz bytes are all 0x00) 39 | // CONTRACT: len(bz) > 0 40 | func cpDecr(bz []byte) (ret []byte) { 41 | if len(bz) == 0 { 42 | panic("cpDecr expects non-zero bz length") 43 | } 44 | ret = cp(bz) 45 | for i := len(bz) - 1; i >= 0; i-- { 46 | if ret[i] > byte(0x00) { 47 | ret[i]-- 48 | return 49 | } 50 | ret[i] = byte(0xFF) 51 | if i == 0 { 52 | // Underflow 53 | return nil 54 | } 55 | } 56 | return nil 57 | } 58 | 59 | // See DB interface documentation for more information. 60 | func IsKeyInDomain(key, start, end []byte, isReverse bool) bool { 61 | if !isReverse { 62 | if bytes.Compare(key, start) < 0 { 63 | return false 64 | } 65 | if end != nil && bytes.Compare(end, key) <= 0 { 66 | return false 67 | } 68 | return true 69 | } else { 70 | if start != nil && bytes.Compare(start, key) < 0 { 71 | return false 72 | } 73 | if end != nil && bytes.Compare(key, end) <= 0 { 74 | return false 75 | } 76 | return true 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /db/util_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | // Empty iterator for empty db. 9 | func TestPrefixIteratorNoMatchNil(t *testing.T) { 10 | for backend := range backends { 11 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 12 | db := newTempDB(t, backend) 13 | itr := IteratePrefix(db, []byte("2")) 14 | 15 | checkInvalid(t, itr) 16 | }) 17 | } 18 | } 19 | 20 | // Empty iterator for db populated after iterator created. 
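// (The SetSync below lands outside the "2" prefix, so this iterator has
// nothing to surface regardless of backend snapshot semantics.)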
21 | func TestPrefixIteratorNoMatch1(t *testing.T) { 22 | for backend := range backends { 23 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 24 | db := newTempDB(t, backend) 25 | itr := IteratePrefix(db, []byte("2")) 26 | db.SetSync(bz("1"), bz("value_1")) 27 | 28 | checkInvalid(t, itr) 29 | }) 30 | } 31 | } 32 | 33 | // Empty iterator for prefix starting after db entry. 34 | func TestPrefixIteratorNoMatch2(t *testing.T) { 35 | for backend := range backends { 36 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 37 | db := newTempDB(t, backend) 38 | db.SetSync(bz("3"), bz("value_3")) 39 | itr := IteratePrefix(db, []byte("4")) 40 | 41 | checkInvalid(t, itr) 42 | }) 43 | } 44 | } 45 | 46 | // Iterator with single val for db with single val, starting from that val. 47 | func TestPrefixIteratorMatch1(t *testing.T) { 48 | for backend := range backends { 49 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 50 | db := newTempDB(t, backend) 51 | db.SetSync(bz("2"), bz("value_2")) 52 | itr := IteratePrefix(db, bz("2")) 53 | 54 | checkValid(t, itr, true) 55 | checkItem(t, itr, bz("2"), bz("value_2")) 56 | checkNext(t, itr, false) 57 | 58 | // Once invalid... 59 | checkInvalid(t, itr) 60 | }) 61 | } 62 | } 63 | 64 | // Iterator with prefix iterates over everything with same prefix. 65 | func TestPrefixIteratorMatches1N(t *testing.T) { 66 | for backend := range backends { 67 | t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { 68 | db := newTempDB(t, backend) 69 | 70 | // prefixed 71 | db.SetSync(bz("a/1"), bz("value_1")) 72 | db.SetSync(bz("a/3"), bz("value_3")) 73 | 74 | // not 75 | db.SetSync(bz("b/3"), bz("value_3")) 76 | db.SetSync(bz("a-3"), bz("value_3")) 77 | db.SetSync(bz("a.3"), bz("value_3")) 78 | db.SetSync(bz("abcdefg"), bz("value_3")) 79 | itr := IteratePrefix(db, bz("a/")) 80 | 81 | checkValid(t, itr, true) 82 | checkItem(t, itr, bz("a/1"), bz("value_1")) 83 | checkNext(t, itr, true) 84 | checkItem(t, itr, bz("a/3"), bz("value_3")) 85 | 86 | // Bad! 87 | checkNext(t, itr, false) 88 | 89 | //Once invalid... 90 | checkInvalid(t, itr) 91 | }) 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /flowrate/README.md: -------------------------------------------------------------------------------- 1 | Data Flow Rate Control 2 | ====================== 3 | 4 | To download and install this package run: 5 | 6 | go get github.com/mxk/go-flowrate/flowrate 7 | 8 | The documentation is available at: 9 | 10 | http://godoc.org/github.com/mxk/go-flowrate/flowrate 11 | -------------------------------------------------------------------------------- /flowrate/io.go: -------------------------------------------------------------------------------- 1 | // 2 | // Written by Maxim Khitrov (November 2012) 3 | // 4 | 5 | package flowrate 6 | 7 | import ( 8 | "errors" 9 | "io" 10 | ) 11 | 12 | // ErrLimit is returned by the Writer when a non-blocking write is short due to 13 | // the transfer rate limit. 14 | var ErrLimit = errors.New("flowrate: flow rate limit exceeded") 15 | 16 | // Limiter is implemented by the Reader and Writer to provide a consistent 17 | // interface for monitoring and controlling data transfer. 
18 | type Limiter interface { 19 | Done() int64 20 | Status() Status 21 | SetTransferSize(bytes int64) 22 | SetLimit(new int64) (old int64) 23 | SetBlocking(new bool) (old bool) 24 | } 25 | 26 | // Reader implements io.ReadCloser with a restriction on the rate of data 27 | // transfer. 28 | type Reader struct { 29 | io.Reader // Data source 30 | *Monitor // Flow control monitor 31 | 32 | limit int64 // Rate limit in bytes per second (unlimited when <= 0) 33 | block bool // What to do when no new bytes can be read due to the limit 34 | } 35 | 36 | // NewReader restricts all Read operations on r to limit bytes per second. 37 | func NewReader(r io.Reader, limit int64) *Reader { 38 | return &Reader{r, New(0, 0), limit, true} 39 | } 40 | 41 | // Read reads up to len(p) bytes into p without exceeding the current transfer 42 | // rate limit. It returns (0, nil) immediately if r is non-blocking and no new 43 | // bytes can be read at this time. 44 | func (r *Reader) Read(p []byte) (n int, err error) { 45 | p = p[:r.Limit(len(p), r.limit, r.block)] 46 | if len(p) > 0 { 47 | n, err = r.IO(r.Reader.Read(p)) 48 | } 49 | return 50 | } 51 | 52 | // SetLimit changes the transfer rate limit to new bytes per second and returns 53 | // the previous setting. 54 | func (r *Reader) SetLimit(new int64) (old int64) { 55 | old, r.limit = r.limit, new 56 | return 57 | } 58 | 59 | // SetBlocking changes the blocking behavior and returns the previous setting. A 60 | // Read call on a non-blocking reader returns immediately if no additional bytes 61 | // may be read at this time due to the rate limit. 62 | func (r *Reader) SetBlocking(new bool) (old bool) { 63 | old, r.block = r.block, new 64 | return 65 | } 66 | 67 | // Close closes the underlying reader if it implements the io.Closer interface. 68 | func (r *Reader) Close() error { 69 | defer r.Done() 70 | if c, ok := r.Reader.(io.Closer); ok { 71 | return c.Close() 72 | } 73 | return nil 74 | } 75 | 76 | // Writer implements io.WriteCloser with a restriction on the rate of data 77 | // transfer. 78 | type Writer struct { 79 | io.Writer // Data destination 80 | *Monitor // Flow control monitor 81 | 82 | limit int64 // Rate limit in bytes per second (unlimited when <= 0) 83 | block bool // What to do when no new bytes can be written due to the limit 84 | } 85 | 86 | // NewWriter restricts all Write operations on w to limit bytes per second. The 87 | // transfer rate and the default blocking behavior (true) can be changed 88 | // directly on the returned *Writer. 89 | func NewWriter(w io.Writer, limit int64) *Writer { 90 | return &Writer{w, New(0, 0), limit, true} 91 | } 92 | 93 | // Write writes len(p) bytes from p to the underlying data stream without 94 | // exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is 95 | // non-blocking and no additional bytes can be written at this time. 96 | func (w *Writer) Write(p []byte) (n int, err error) { 97 | var c int 98 | for len(p) > 0 && err == nil { 99 | s := p[:w.Limit(len(p), w.limit, w.block)] 100 | if len(s) > 0 { 101 | c, err = w.IO(w.Writer.Write(s)) 102 | } else { 103 | return n, ErrLimit 104 | } 105 | p = p[c:] 106 | n += c 107 | } 108 | return 109 | } 110 | 111 | // SetLimit changes the transfer rate limit to new bytes per second and returns 112 | // the previous setting. 113 | func (w *Writer) SetLimit(new int64) (old int64) { 114 | old, w.limit = w.limit, new 115 | return 116 | } 117 | 118 | // SetBlocking changes the blocking behavior and returns the previous setting. 
A 119 | // Write call on a non-blocking writer returns as soon as no additional bytes 120 | // may be written at this time due to the rate limit. 121 | func (w *Writer) SetBlocking(new bool) (old bool) { 122 | old, w.block = w.block, new 123 | return 124 | } 125 | 126 | // Close closes the underlying writer if it implements the io.Closer interface. 127 | func (w *Writer) Close() error { 128 | defer w.Done() 129 | if c, ok := w.Writer.(io.Closer); ok { 130 | return c.Close() 131 | } 132 | return nil 133 | } 134 | -------------------------------------------------------------------------------- /flowrate/util.go: -------------------------------------------------------------------------------- 1 | // 2 | // Written by Maxim Khitrov (November 2012) 3 | // 4 | 5 | package flowrate 6 | 7 | import ( 8 | "math" 9 | "strconv" 10 | "time" 11 | ) 12 | 13 | // clockRate is the resolution and precision of clock(). 14 | const clockRate = 20 * time.Millisecond 15 | 16 | // czero is the process start time rounded down to the nearest clockRate 17 | // increment. 18 | var czero = time.Now().Round(clockRate) 19 | 20 | // clock returns a low resolution timestamp relative to the process start time. 21 | func clock() time.Duration { 22 | return time.Now().Round(clockRate).Sub(czero) 23 | } 24 | 25 | // clockToTime converts a clock() timestamp to an absolute time.Time value. 26 | func clockToTime(c time.Duration) time.Time { 27 | return czero.Add(c) 28 | } 29 | 30 | // clockRound returns d rounded to the nearest clockRate increment. 31 | func clockRound(d time.Duration) time.Duration { 32 | return (d + clockRate>>1) / clockRate * clockRate 33 | } 34 | 35 | // round returns x rounded to the nearest int64 (non-negative values only). 36 | func round(x float64) int64 { 37 | if _, frac := math.Modf(x); frac >= 0.5 { 38 | return int64(math.Ceil(x)) 39 | } 40 | return int64(math.Floor(x)) 41 | } 42 | 43 | // Percent represents a percentage in increments of 1/1000th of a percent. 44 | type Percent uint32 45 | 46 | // percentOf calculates what percent of the total is x. 47 | func percentOf(x, total float64) Percent { 48 | if x < 0 || total <= 0 { 49 | return 0 50 | } else if p := round(x / total * 1e5); p <= math.MaxUint32 { 51 | return Percent(p) 52 | } 53 | return Percent(math.MaxUint32) 54 | } 55 | 56 | func (p Percent) Float() float64 { 57 | return float64(p) * 1e-3 58 | } 59 | 60 | func (p Percent) String() string { 61 | var buf [12]byte 62 | b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10) 63 | n := len(b) 64 | b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10) 65 | b[n] = '.' 
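// The 1000+remainder append above always produces four digits, so fractional
// leading zeros are preserved; its extra leading '1' lands at index n and is
// overwritten by the '.' (e.g. p == 12045 prints as "12.045%").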
66 | return string(append(b, '%')) 67 | } 68 | -------------------------------------------------------------------------------- /glide.lock: -------------------------------------------------------------------------------- 1 | hash: 98752078f39da926f655268b3b143f713d64edd379fc9fcb1210d9d8aa7ab4e0 2 | updated: 2018-02-03T01:28:00.221548057-05:00 3 | imports: 4 | - name: github.com/fsnotify/fsnotify 5 | version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9 6 | - name: github.com/go-kit/kit 7 | version: 4dc7be5d2d12881735283bcab7352178e190fc71 8 | subpackages: 9 | - log 10 | - log/level 11 | - log/term 12 | - name: github.com/go-logfmt/logfmt 13 | version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 14 | - name: github.com/go-stack/stack 15 | version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf 16 | - name: github.com/gogo/protobuf 17 | version: 1adfc126b41513cc696b209667c8656ea7aac67c 18 | subpackages: 19 | - gogoproto 20 | - proto 21 | - protoc-gen-gogo/descriptor 22 | - name: github.com/golang/snappy 23 | version: 553a641470496b2327abcac10b36396bd98e45c9 24 | - name: github.com/hashicorp/hcl 25 | version: 23c074d0eceb2b8a5bfdbb271ab780cde70f05a8 26 | subpackages: 27 | - hcl/ast 28 | - hcl/parser 29 | - hcl/scanner 30 | - hcl/strconv 31 | - hcl/token 32 | - json/parser 33 | - json/scanner 34 | - json/token 35 | - name: github.com/inconshreveable/mousetrap 36 | version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 37 | - name: github.com/jmhodges/levigo 38 | version: c42d9e0ca023e2198120196f842701bb4c55d7b9 39 | - name: github.com/kr/logfmt 40 | version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 41 | - name: github.com/magiconair/properties 42 | version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 43 | - name: github.com/mitchellh/mapstructure 44 | version: b4575eea38cca1123ec2dc90c26529b5c5acfcff 45 | - name: github.com/pelletier/go-toml 46 | version: acdc4509485b587f5e675510c4f2c63e90ff68a8 47 | - name: github.com/pkg/errors 48 | version: 645ef00459ed84a119197bfb8d8205042c6df63d 49 | - name: github.com/spf13/afero 50 | version: bb8f1927f2a9d3ab41c9340aa034f6b803f4359c 51 | subpackages: 52 | - mem 53 | - name: github.com/spf13/cast 54 | version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4 55 | - name: github.com/spf13/cobra 56 | version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b 57 | - name: github.com/spf13/jwalterweatherman 58 | version: 7c0cea34c8ece3fbeb2b27ab9b59511d360fb394 59 | - name: github.com/spf13/pflag 60 | version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f 61 | - name: github.com/spf13/viper 62 | version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 63 | - name: github.com/syndtr/goleveldb 64 | version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 65 | subpackages: 66 | - leveldb 67 | - leveldb/cache 68 | - leveldb/comparer 69 | - leveldb/errors 70 | - leveldb/filter 71 | - leveldb/iterator 72 | - leveldb/journal 73 | - leveldb/memdb 74 | - leveldb/opt 75 | - leveldb/storage 76 | - leveldb/table 77 | - leveldb/util 78 | - name: golang.org/x/crypto 79 | version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 80 | subpackages: 81 | - ripemd160 82 | - name: golang.org/x/sys 83 | version: 37707fdb30a5b38865cfb95e5aab41707daec7fd 84 | subpackages: 85 | - unix 86 | - name: golang.org/x/text 87 | version: c01e4764d870b77f8abe5096ee19ad20d80e8075 88 | subpackages: 89 | - transform 90 | - unicode/norm 91 | - name: gopkg.in/yaml.v2 92 | version: d670f9405373e636a5a2765eea47fac0c9bc91a4 93 | testImports: 94 | - name: github.com/davecgh/go-spew 95 | version: 346938d642f2ec3594ed81d874461961cd0faa76 96 | subpackages: 97 | - 
spew 98 | - name: github.com/fortytw2/leaktest 99 | version: 3b724c3d7b8729a35bf4e577f71653aec6e53513 100 | - name: github.com/pmezard/go-difflib 101 | version: d8ed2627bdf02c080bf22230dbb337003b7aba2d 102 | subpackages: 103 | - difflib 104 | - name: github.com/stretchr/testify 105 | version: 12b6f73e6084dad08a7c6e575284b177ecafbc71 106 | subpackages: 107 | - assert 108 | - require 109 | -------------------------------------------------------------------------------- /glide.yaml: -------------------------------------------------------------------------------- 1 | package: github.com/tendermint/tmlibs 2 | import: 3 | - package: github.com/go-kit/kit 4 | version: ^0.6.0 5 | subpackages: 6 | - log 7 | - log/level 8 | - log/term 9 | - package: github.com/go-logfmt/logfmt 10 | version: ^0.3.0 11 | - package: github.com/gogo/protobuf 12 | version: ^1.0.0 13 | subpackages: 14 | - gogoproto 15 | - proto 16 | - package: github.com/jmhodges/levigo 17 | - package: github.com/pkg/errors 18 | version: ^0.8.0 19 | - package: github.com/spf13/cobra 20 | version: ^0.0.1 21 | - package: github.com/spf13/viper 22 | version: ^1.0.0 23 | - package: github.com/syndtr/goleveldb 24 | subpackages: 25 | - leveldb 26 | - leveldb/errors 27 | - leveldb/iterator 28 | - leveldb/opt 29 | - package: golang.org/x/crypto 30 | subpackages: 31 | - ripemd160 32 | testImport: 33 | - package: github.com/stretchr/testify 34 | version: ^1.2.1 35 | subpackages: 36 | - assert 37 | - require 38 | - package: github.com/fortytw2/leaktest 39 | -------------------------------------------------------------------------------- /log/filter.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import "fmt" 4 | 5 | type level byte 6 | 7 | const ( 8 | levelDebug level = 1 << iota 9 | levelInfo 10 | levelError 11 | ) 12 | 13 | type filter struct { 14 | next Logger 15 | allowed level // XOR'd levels for default case 16 | allowedKeyvals map[keyval]level // When key-value match, use this level 17 | } 18 | 19 | type keyval struct { 20 | key interface{} 21 | value interface{} 22 | } 23 | 24 | // NewFilter wraps next and implements filtering. See the commentary on the 25 | // Option functions for a detailed description of how to configure levels. If 26 | // no options are provided, all leveled log events created with Debug, Info or 27 | // Error helper methods are squelched. 28 | func NewFilter(next Logger, options ...Option) Logger { 29 | l := &filter{ 30 | next: next, 31 | allowedKeyvals: make(map[keyval]level), 32 | } 33 | for _, option := range options { 34 | option(l) 35 | } 36 | return l 37 | } 38 | 39 | func (l *filter) Info(msg string, keyvals ...interface{}) { 40 | levelAllowed := l.allowed&levelInfo != 0 41 | if !levelAllowed { 42 | return 43 | } 44 | l.next.Info(msg, keyvals...) 45 | } 46 | 47 | func (l *filter) Debug(msg string, keyvals ...interface{}) { 48 | levelAllowed := l.allowed&levelDebug != 0 49 | if !levelAllowed { 50 | return 51 | } 52 | l.next.Debug(msg, keyvals...) 53 | } 54 | 55 | func (l *filter) Error(msg string, keyvals ...interface{}) { 56 | levelAllowed := l.allowed&levelError != 0 57 | if !levelAllowed { 58 | return 59 | } 60 | l.next.Error(msg, keyvals...) 61 | } 62 | 63 | // With implements Logger by constructing a new filter with a keyvals appended 64 | // to the logger. 65 | // 66 | // If custom level was set for a keyval pair using one of the 67 | // Allow*With methods, it is used as the logger's level. 
68 | // 69 | // Examples: 70 | // logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto")) 71 | // logger.With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto" 72 | // 73 | // logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam")) 74 | // logger.With("module", "crypto", "user", "Sam").Info("Hello") # returns nil 75 | // 76 | // logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam")) 77 | // logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto user=Sam" 78 | func (l *filter) With(keyvals ...interface{}) Logger { 79 | for i := len(keyvals) - 2; i >= 0; i -= 2 { 80 | for kv, allowed := range l.allowedKeyvals { 81 | if keyvals[i] == kv.key && keyvals[i+1] == kv.value { 82 | return &filter{next: l.next.With(keyvals...), allowed: allowed, allowedKeyvals: l.allowedKeyvals} 83 | } 84 | } 85 | } 86 | return &filter{next: l.next.With(keyvals...), allowed: l.allowed, allowedKeyvals: l.allowedKeyvals} 87 | } 88 | 89 | //-------------------------------------------------------------------------------- 90 | 91 | // Option sets a parameter for the filter. 92 | type Option func(*filter) 93 | 94 | // AllowLevel returns an option for the given level or error if no option exist 95 | // for such level. 96 | func AllowLevel(lvl string) (Option, error) { 97 | switch lvl { 98 | case "debug": 99 | return AllowDebug(), nil 100 | case "info": 101 | return AllowInfo(), nil 102 | case "error": 103 | return AllowError(), nil 104 | case "none": 105 | return AllowNone(), nil 106 | default: 107 | return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" level, given %s", lvl) 108 | } 109 | } 110 | 111 | // AllowAll is an alias for AllowDebug. 112 | func AllowAll() Option { 113 | return AllowDebug() 114 | } 115 | 116 | // AllowDebug allows error, info and debug level log events to pass. 117 | func AllowDebug() Option { 118 | return allowed(levelError | levelInfo | levelDebug) 119 | } 120 | 121 | // AllowInfo allows error and info level log events to pass. 122 | func AllowInfo() Option { 123 | return allowed(levelError | levelInfo) 124 | } 125 | 126 | // AllowError allows only error level log events to pass. 127 | func AllowError() Option { 128 | return allowed(levelError) 129 | } 130 | 131 | // AllowNone allows no leveled log events to pass. 132 | func AllowNone() Option { 133 | return allowed(0) 134 | } 135 | 136 | func allowed(allowed level) Option { 137 | return func(l *filter) { l.allowed = allowed } 138 | } 139 | 140 | // AllowDebugWith allows error, info and debug level log events to pass for a specific key value pair. 141 | func AllowDebugWith(key interface{}, value interface{}) Option { 142 | return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo | levelDebug } 143 | } 144 | 145 | // AllowInfoWith allows error and info level log events to pass for a specific key value pair. 146 | func AllowInfoWith(key interface{}, value interface{}) Option { 147 | return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo } 148 | } 149 | 150 | // AllowErrorWith allows only error level log events to pass for a specific key value pair. 
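// For example (illustrative, mirroring the With examples above):
//
//	logger = log.NewFilter(logger, log.AllowInfo(), log.AllowErrorWith("module", "p2p"))
//	logger.With("module", "p2p").Info("Hello") // dropped: only errors pass for module=p2p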
151 | func AllowErrorWith(key interface{}, value interface{}) Option { 152 | return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError } 153 | } 154 | 155 | // AllowNoneWith allows no leveled log events to pass for a specific key value pair. 156 | func AllowNoneWith(key interface{}, value interface{}) Option { 157 | return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = 0 } 158 | } 159 | -------------------------------------------------------------------------------- /log/filter_test.go: -------------------------------------------------------------------------------- 1 | package log_test 2 | 3 | import ( 4 | "bytes" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/tendermint/tmlibs/log" 9 | ) 10 | 11 | func TestVariousLevels(t *testing.T) { 12 | testCases := []struct { 13 | name string 14 | allowed log.Option 15 | want string 16 | }{ 17 | { 18 | "AllowAll", 19 | log.AllowAll(), 20 | strings.Join([]string{ 21 | `{"_msg":"here","level":"debug","this is":"debug log"}`, 22 | `{"_msg":"here","level":"info","this is":"info log"}`, 23 | `{"_msg":"here","level":"error","this is":"error log"}`, 24 | }, "\n"), 25 | }, 26 | { 27 | "AllowDebug", 28 | log.AllowDebug(), 29 | strings.Join([]string{ 30 | `{"_msg":"here","level":"debug","this is":"debug log"}`, 31 | `{"_msg":"here","level":"info","this is":"info log"}`, 32 | `{"_msg":"here","level":"error","this is":"error log"}`, 33 | }, "\n"), 34 | }, 35 | { 36 | "AllowInfo", 37 | log.AllowInfo(), 38 | strings.Join([]string{ 39 | `{"_msg":"here","level":"info","this is":"info log"}`, 40 | `{"_msg":"here","level":"error","this is":"error log"}`, 41 | }, "\n"), 42 | }, 43 | { 44 | "AllowError", 45 | log.AllowError(), 46 | strings.Join([]string{ 47 | `{"_msg":"here","level":"error","this is":"error log"}`, 48 | }, "\n"), 49 | }, 50 | { 51 | "AllowNone", 52 | log.AllowNone(), 53 | ``, 54 | }, 55 | } 56 | 57 | for _, tc := range testCases { 58 | t.Run(tc.name, func(t *testing.T) { 59 | var buf bytes.Buffer 60 | logger := log.NewFilter(log.NewTMJSONLogger(&buf), tc.allowed) 61 | 62 | logger.Debug("here", "this is", "debug log") 63 | logger.Info("here", "this is", "info log") 64 | logger.Error("here", "this is", "error log") 65 | 66 | if want, have := tc.want, strings.TrimSpace(buf.String()); want != have { 67 | t.Errorf("\nwant:\n%s\nhave:\n%s", want, have) 68 | } 69 | }) 70 | } 71 | } 72 | 73 | func TestLevelContext(t *testing.T) { 74 | var buf bytes.Buffer 75 | 76 | logger := log.NewTMJSONLogger(&buf) 77 | logger = log.NewFilter(logger, log.AllowError()) 78 | logger = logger.With("context", "value") 79 | 80 | logger.Error("foo", "bar", "baz") 81 | if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"error"}`, strings.TrimSpace(buf.String()); want != have { 82 | t.Errorf("\nwant '%s'\nhave '%s'", want, have) 83 | } 84 | 85 | buf.Reset() 86 | logger.Info("foo", "bar", "baz") 87 | if want, have := ``, strings.TrimSpace(buf.String()); want != have { 88 | t.Errorf("\nwant '%s'\nhave '%s'", want, have) 89 | } 90 | } 91 | 92 | func TestVariousAllowWith(t *testing.T) { 93 | var buf bytes.Buffer 94 | 95 | logger := log.NewTMJSONLogger(&buf) 96 | 97 | logger1 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value")) 98 | logger1.With("context", "value").Info("foo", "bar", "baz") 99 | if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info"}`, strings.TrimSpace(buf.String()); want != have { 100 | t.Errorf("\nwant '%s'\nhave '%s'", want, have) 101 | } 102 | 103 | buf.Reset() 104 | 105 
| logger2 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value"), log.AllowNoneWith("user", "Sam")) 106 | logger2.With("context", "value", "user", "Sam").Info("foo", "bar", "baz") 107 | if want, have := ``, strings.TrimSpace(buf.String()); want != have { 108 | t.Errorf("\nwant '%s'\nhave '%s'", want, have) 109 | } 110 | 111 | buf.Reset() 112 | 113 | logger3 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value"), log.AllowNoneWith("user", "Sam")) 114 | logger3.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz") 115 | if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`, strings.TrimSpace(buf.String()); want != have { 116 | t.Errorf("\nwant '%s'\nhave '%s'", want, have) 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /log/logger.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "io" 5 | 6 | kitlog "github.com/go-kit/kit/log" 7 | ) 8 | 9 | // Logger is what any Tendermint library should take. 10 | type Logger interface { 11 | Debug(msg string, keyvals ...interface{}) 12 | Info(msg string, keyvals ...interface{}) 13 | Error(msg string, keyvals ...interface{}) 14 | 15 | With(keyvals ...interface{}) Logger 16 | } 17 | 18 | // NewSyncWriter returns a new writer that is safe for concurrent use by 19 | // multiple goroutines. Writes to the returned writer are passed on to w. If 20 | // another write is already in progress, the calling goroutine blocks until 21 | // the writer is available. 22 | // 23 | // If w implements the following interface, so does the returned writer. 24 | // 25 | // interface { 26 | // Fd() uintptr 27 | // } 28 | func NewSyncWriter(w io.Writer) io.Writer { 29 | return kitlog.NewSyncWriter(w) 30 | } 31 | -------------------------------------------------------------------------------- /log/nop_logger.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | type nopLogger struct{} 4 | 5 | // Interface assertions 6 | var _ Logger = (*nopLogger)(nil) 7 | 8 | // NewNopLogger returns a logger that doesn't do anything. 9 | func NewNopLogger() Logger { return &nopLogger{} } 10 | 11 | func (nopLogger) Info(string, ...interface{}) {} 12 | func (nopLogger) Debug(string, ...interface{}) {} 13 | func (nopLogger) Error(string, ...interface{}) {} 14 | 15 | func (l *nopLogger) With(...interface{}) Logger { 16 | return l 17 | } 18 | -------------------------------------------------------------------------------- /log/testing_logger.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/go-kit/kit/log/term" 8 | ) 9 | 10 | var ( 11 | // reuse the same logger across all tests 12 | _testingLogger Logger 13 | ) 14 | 15 | // TestingLogger returns a TMLogger which writes to STDOUT if testing being run 16 | // with the verbose (-v) flag, NopLogger otherwise. 17 | // 18 | // Note that the call to TestingLogger() must be made 19 | // inside a test (not in the init func) because 20 | // verbose flag only set at the time of testing. 
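// Illustrative call site (must run inside a test body, per the note above):
//
//	func TestFoo(t *testing.T) {
//		logger := log.TestingLogger()
//		logger.Info("setup done", "test", "TestFoo")
//	}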
21 | func TestingLogger() Logger { 22 | if _testingLogger != nil { 23 | return _testingLogger 24 | } 25 | 26 | if testing.Verbose() { 27 | _testingLogger = NewTMLogger(NewSyncWriter(os.Stdout)) 28 | } else { 29 | _testingLogger = NewNopLogger() 30 | } 31 | 32 | return _testingLogger 33 | } 34 | 35 | // TestingLoggerWithColorFn allow you to provide your own color function. See 36 | // TestingLogger for documentation. 37 | func TestingLoggerWithColorFn(colorFn func(keyvals ...interface{}) term.FgBgColor) Logger { 38 | if _testingLogger != nil { 39 | return _testingLogger 40 | } 41 | 42 | if testing.Verbose() { 43 | _testingLogger = NewTMLoggerWithColorFn(NewSyncWriter(os.Stdout), colorFn) 44 | } else { 45 | _testingLogger = NewNopLogger() 46 | } 47 | 48 | return _testingLogger 49 | } 50 | -------------------------------------------------------------------------------- /log/tm_json_logger.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "io" 5 | 6 | kitlog "github.com/go-kit/kit/log" 7 | ) 8 | 9 | // NewTMJSONLogger returns a Logger that encodes keyvals to the Writer as a 10 | // single JSON object. Each log event produces no more than one call to 11 | // w.Write. The passed Writer must be safe for concurrent use by multiple 12 | // goroutines if the returned Logger will be used concurrently. 13 | func NewTMJSONLogger(w io.Writer) Logger { 14 | return &tmLogger{kitlog.NewJSONLogger(w)} 15 | } 16 | -------------------------------------------------------------------------------- /log/tm_logger.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | 7 | kitlog "github.com/go-kit/kit/log" 8 | kitlevel "github.com/go-kit/kit/log/level" 9 | "github.com/go-kit/kit/log/term" 10 | ) 11 | 12 | const ( 13 | msgKey = "_msg" // "_" prefixed to avoid collisions 14 | moduleKey = "module" 15 | ) 16 | 17 | type tmLogger struct { 18 | srcLogger kitlog.Logger 19 | } 20 | 21 | // Interface assertions 22 | var _ Logger = (*tmLogger)(nil) 23 | 24 | // NewTMTermLogger returns a logger that encodes msg and keyvals to the Writer 25 | // using go-kit's log as an underlying logger and our custom formatter. Note 26 | // that underlying logger could be swapped with something else. 27 | func NewTMLogger(w io.Writer) Logger { 28 | // Color by level value 29 | colorFn := func(keyvals ...interface{}) term.FgBgColor { 30 | if keyvals[0] != kitlevel.Key() { 31 | panic(fmt.Sprintf("expected level key to be first, got %v", keyvals[0])) 32 | } 33 | switch keyvals[1].(kitlevel.Value).String() { 34 | case "debug": 35 | return term.FgBgColor{Fg: term.DarkGray} 36 | case "error": 37 | return term.FgBgColor{Fg: term.Red} 38 | default: 39 | return term.FgBgColor{} 40 | } 41 | } 42 | 43 | return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} 44 | } 45 | 46 | // NewTMLoggerWithColorFn allows you to provide your own color function. See 47 | // NewTMLogger for documentation. 48 | func NewTMLoggerWithColorFn(w io.Writer, colorFn func(keyvals ...interface{}) term.FgBgColor) Logger { 49 | return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} 50 | } 51 | 52 | // Info logs a message at level Info. 
53 | func (l *tmLogger) Info(msg string, keyvals ...interface{}) { 54 | lWithLevel := kitlevel.Info(l.srcLogger) 55 | if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { 56 | errLogger := kitlevel.Error(l.srcLogger) 57 | kitlog.With(errLogger, msgKey, msg).Log("err", err) 58 | } 59 | } 60 | 61 | // Debug logs a message at level Debug. 62 | func (l *tmLogger) Debug(msg string, keyvals ...interface{}) { 63 | lWithLevel := kitlevel.Debug(l.srcLogger) 64 | if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { 65 | errLogger := kitlevel.Error(l.srcLogger) 66 | kitlog.With(errLogger, msgKey, msg).Log("err", err) 67 | } 68 | } 69 | 70 | // Error logs a message at level Error. 71 | func (l *tmLogger) Error(msg string, keyvals ...interface{}) { 72 | lWithLevel := kitlevel.Error(l.srcLogger) 73 | lWithMsg := kitlog.With(lWithLevel, msgKey, msg) 74 | if err := lWithMsg.Log(keyvals...); err != nil { 75 | lWithMsg.Log("err", err) 76 | } 77 | } 78 | 79 | // With returns a new contextual logger with keyvals prepended to those passed 80 | // to calls to Info, Debug or Error. 81 | func (l *tmLogger) With(keyvals ...interface{}) Logger { 82 | return &tmLogger{kitlog.With(l.srcLogger, keyvals...)} 83 | } 84 | -------------------------------------------------------------------------------- /log/tm_logger_test.go: -------------------------------------------------------------------------------- 1 | package log_test 2 | 3 | import ( 4 | "bytes" 5 | "io/ioutil" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/go-logfmt/logfmt" 10 | "github.com/tendermint/tmlibs/log" 11 | ) 12 | 13 | func TestLoggerLogsItsErrors(t *testing.T) { 14 | var buf bytes.Buffer 15 | 16 | logger := log.NewTMLogger(&buf) 17 | logger.Info("foo", "baz baz", "bar") 18 | msg := strings.TrimSpace(buf.String()) 19 | if !strings.Contains(msg, logfmt.ErrInvalidKey.Error()) { 20 | t.Errorf("Expected logger msg to contain ErrInvalidKey, got %s", msg) 21 | } 22 | } 23 | 24 | func BenchmarkTMLoggerSimple(b *testing.B) { 25 | benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), baseInfoMessage) 26 | } 27 | 28 | func BenchmarkTMLoggerContextual(b *testing.B) { 29 | benchmarkRunner(b, log.NewTMLogger(ioutil.Discard), withInfoMessage) 30 | } 31 | 32 | func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) { 33 | lc := logger.With("common_key", "common_value") 34 | b.ReportAllocs() 35 | b.ResetTimer() 36 | for i := 0; i < b.N; i++ { 37 | f(lc) 38 | } 39 | } 40 | 41 | var ( 42 | baseInfoMessage = func(logger log.Logger) { logger.Info("foo_message", "foo_key", "foo_value") } 43 | withInfoMessage = func(logger log.Logger) { logger.With("a", "b").Info("c", "d", "f") } 44 | ) 45 | -------------------------------------------------------------------------------- /log/tmfmt_logger.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | "sync" 8 | "time" 9 | 10 | kitlog "github.com/go-kit/kit/log" 11 | kitlevel "github.com/go-kit/kit/log/level" 12 | "github.com/go-logfmt/logfmt" 13 | ) 14 | 15 | type tmfmtEncoder struct { 16 | *logfmt.Encoder 17 | buf bytes.Buffer 18 | } 19 | 20 | func (l *tmfmtEncoder) Reset() { 21 | l.Encoder.Reset() 22 | l.buf.Reset() 23 | } 24 | 25 | var tmfmtEncoderPool = sync.Pool{ 26 | New: func() interface{} { 27 | var enc tmfmtEncoder 28 | enc.Encoder = logfmt.NewEncoder(&enc.buf) 29 | return &enc 30 | }, 31 | } 32 | 33 | type tmfmtLogger struct { 34 | w io.Writer 35 | } 36 | 37 | 
// NewTMFmtLogger returns a logger that encodes keyvals to the Writer in 38 | // Tendermint custom format. Note complex types (structs, maps, slices) 39 | // formatted as "%+v". 40 | // 41 | // Each log event produces no more than one call to w.Write. 42 | // The passed Writer must be safe for concurrent use by multiple goroutines if 43 | // the returned Logger will be used concurrently. 44 | func NewTMFmtLogger(w io.Writer) kitlog.Logger { 45 | return &tmfmtLogger{w} 46 | } 47 | 48 | func (l tmfmtLogger) Log(keyvals ...interface{}) error { 49 | enc := tmfmtEncoderPool.Get().(*tmfmtEncoder) 50 | enc.Reset() 51 | defer tmfmtEncoderPool.Put(enc) 52 | 53 | const unknown = "unknown" 54 | lvl := "none" 55 | msg := unknown 56 | module := unknown 57 | 58 | // indexes of keys to skip while encoding later 59 | excludeIndexes := make([]int, 0) 60 | 61 | for i := 0; i < len(keyvals)-1; i += 2 { 62 | // Extract level 63 | if keyvals[i] == kitlevel.Key() { 64 | excludeIndexes = append(excludeIndexes, i) 65 | switch keyvals[i+1].(type) { 66 | case string: 67 | lvl = keyvals[i+1].(string) 68 | case kitlevel.Value: 69 | lvl = keyvals[i+1].(kitlevel.Value).String() 70 | default: 71 | panic(fmt.Sprintf("level value of unknown type %T", keyvals[i+1])) 72 | } 73 | // and message 74 | } else if keyvals[i] == msgKey { 75 | excludeIndexes = append(excludeIndexes, i) 76 | msg = keyvals[i+1].(string) 77 | // and module (could be multiple keyvals; if such case last keyvalue wins) 78 | } else if keyvals[i] == moduleKey { 79 | excludeIndexes = append(excludeIndexes, i) 80 | module = keyvals[i+1].(string) 81 | } 82 | } 83 | 84 | // Form a custom Tendermint line 85 | // 86 | // Example: 87 | // D[05-02|11:06:44.322] Stopping AddrBook (ignoring: already stopped) 88 | // 89 | // Description: 90 | // D - first character of the level, uppercase (ASCII only) 91 | // [05-02|11:06:44.322] - our time format (see https://golang.org/src/time/format.go) 92 | // Stopping ... - message 93 | enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().UTC().Format("01-02|15:04:05.000"), msg)) 94 | 95 | if module != unknown { 96 | enc.buf.WriteString("module=" + module + " ") 97 | } 98 | 99 | KeyvalueLoop: 100 | for i := 0; i < len(keyvals)-1; i += 2 { 101 | for _, j := range excludeIndexes { 102 | if i == j { 103 | continue KeyvalueLoop 104 | } 105 | } 106 | 107 | err := enc.EncodeKeyval(keyvals[i], keyvals[i+1]) 108 | if err == logfmt.ErrUnsupportedValueType { 109 | enc.EncodeKeyval(keyvals[i], fmt.Sprintf("%+v", keyvals[i+1])) 110 | } else if err != nil { 111 | return err 112 | } 113 | } 114 | 115 | // Add newline to the end of the buffer 116 | if err := enc.EndRecord(); err != nil { 117 | return err 118 | } 119 | 120 | // The Logger interface requires implementations to be safe for concurrent 121 | // use by multiple goroutines. For this implementation that means making 122 | // only one call to l.w.Write() for each call to Log. 
123 | if _, err := l.w.Write(enc.buf.Bytes()); err != nil { 124 | return err 125 | } 126 | return nil 127 | } 128 | -------------------------------------------------------------------------------- /log/tmfmt_logger_test.go: -------------------------------------------------------------------------------- 1 | package log_test 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "io/ioutil" 7 | "math" 8 | "regexp" 9 | "testing" 10 | 11 | kitlog "github.com/go-kit/kit/log" 12 | "github.com/stretchr/testify/assert" 13 | "github.com/tendermint/tmlibs/log" 14 | ) 15 | 16 | func TestTMFmtLogger(t *testing.T) { 17 | t.Parallel() 18 | buf := &bytes.Buffer{} 19 | logger := log.NewTMFmtLogger(buf) 20 | 21 | if err := logger.Log("hello", "world"); err != nil { 22 | t.Fatal(err) 23 | } 24 | assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ hello=world\n$`), buf.String()) 25 | 26 | buf.Reset() 27 | if err := logger.Log("a", 1, "err", errors.New("error")); err != nil { 28 | t.Fatal(err) 29 | } 30 | assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ a=1 err=error\n$`), buf.String()) 31 | 32 | buf.Reset() 33 | if err := logger.Log("std_map", map[int]int{1: 2}, "my_map", mymap{0: 0}); err != nil { 34 | t.Fatal(err) 35 | } 36 | assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ std_map=map\[1:2\] my_map=special_behavior\n$`), buf.String()) 37 | 38 | buf.Reset() 39 | if err := logger.Log("level", "error"); err != nil { 40 | t.Fatal(err) 41 | } 42 | assert.Regexp(t, regexp.MustCompile(`E\[.+\] unknown \s+\n$`), buf.String()) 43 | 44 | buf.Reset() 45 | if err := logger.Log("_msg", "Hello"); err != nil { 46 | t.Fatal(err) 47 | } 48 | assert.Regexp(t, regexp.MustCompile(`N\[.+\] Hello \s+\n$`), buf.String()) 49 | 50 | buf.Reset() 51 | if err := logger.Log("module", "main", "module", "crypto", "module", "wire"); err != nil { 52 | t.Fatal(err) 53 | } 54 | assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+module=wire\s+\n$`), buf.String()) 55 | } 56 | 57 | func BenchmarkTMFmtLoggerSimple(b *testing.B) { 58 | benchmarkRunnerKitlog(b, log.NewTMFmtLogger(ioutil.Discard), baseMessage) 59 | } 60 | 61 | func BenchmarkTMFmtLoggerContextual(b *testing.B) { 62 | benchmarkRunnerKitlog(b, log.NewTMFmtLogger(ioutil.Discard), withMessage) 63 | } 64 | 65 | func TestTMFmtLoggerConcurrency(t *testing.T) { 66 | t.Parallel() 67 | testConcurrency(t, log.NewTMFmtLogger(ioutil.Discard), 10000) 68 | } 69 | 70 | func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Logger)) { 71 | lc := kitlog.With(logger, "common_key", "common_value") 72 | b.ReportAllocs() 73 | b.ResetTimer() 74 | for i := 0; i < b.N; i++ { 75 | f(lc) 76 | } 77 | } 78 | 79 | var ( 80 | baseMessage = func(logger kitlog.Logger) { logger.Log("foo_key", "foo_value") } 81 | withMessage = func(logger kitlog.Logger) { kitlog.With(logger, "a", "b").Log("d", "f") } 82 | ) 83 | 84 | // These test are designed to be run with the race detector. 
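// testConcurrency below fans out roughly sqrt(total) goroutines, each logging
// total/n entries through the shared logger, so `go test -race` can flag any
// unsynchronized writes in the underlying Writer.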
85 | 86 | func testConcurrency(t *testing.T, logger kitlog.Logger, total int) { 87 | n := int(math.Sqrt(float64(total))) 88 | share := total / n 89 | 90 | errC := make(chan error, n) 91 | 92 | for i := 0; i < n; i++ { 93 | go func() { 94 | errC <- spam(logger, share) 95 | }() 96 | } 97 | 98 | for i := 0; i < n; i++ { 99 | err := <-errC 100 | if err != nil { 101 | t.Fatalf("concurrent logging error: %v", err) 102 | } 103 | } 104 | } 105 | 106 | func spam(logger kitlog.Logger, count int) error { 107 | for i := 0; i < count; i++ { 108 | err := logger.Log("key", i) 109 | if err != nil { 110 | return err 111 | } 112 | } 113 | return nil 114 | } 115 | 116 | type mymap map[int]int 117 | 118 | func (m mymap) String() string { return "special_behavior" } 119 | -------------------------------------------------------------------------------- /log/tracing_logger.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/pkg/errors" 7 | ) 8 | 9 | // NewTracingLogger enables tracing by wrapping all errors (if they 10 | // implement stackTracer interface) in tracedError. 11 | // 12 | // All errors returned by https://github.com/pkg/errors implement stackTracer 13 | // interface. 14 | // 15 | // For debugging purposes only as it doubles the amount of allocations. 16 | func NewTracingLogger(next Logger) Logger { 17 | return &tracingLogger{ 18 | next: next, 19 | } 20 | } 21 | 22 | type stackTracer interface { 23 | error 24 | StackTrace() errors.StackTrace 25 | } 26 | 27 | type tracingLogger struct { 28 | next Logger 29 | } 30 | 31 | func (l *tracingLogger) Info(msg string, keyvals ...interface{}) { 32 | l.next.Info(msg, formatErrors(keyvals)...) 33 | } 34 | 35 | func (l *tracingLogger) Debug(msg string, keyvals ...interface{}) { 36 | l.next.Debug(msg, formatErrors(keyvals)...) 37 | } 38 | 39 | func (l *tracingLogger) Error(msg string, keyvals ...interface{}) { 40 | l.next.Error(msg, formatErrors(keyvals)...) 41 | } 42 | 43 | func (l *tracingLogger) With(keyvals ...interface{}) Logger { 44 | return &tracingLogger{next: l.next.With(formatErrors(keyvals)...)} 45 | } 46 | 47 | func formatErrors(keyvals []interface{}) []interface{} { 48 | newKeyvals := make([]interface{}, len(keyvals)) 49 | copy(newKeyvals, keyvals) 50 | for i := 0; i < len(newKeyvals)-1; i += 2 { 51 | if err, ok := newKeyvals[i+1].(stackTracer); ok { 52 | newKeyvals[i+1] = tracedError{err} 53 | } 54 | } 55 | return newKeyvals 56 | } 57 | 58 | // tracedError wraps a stackTracer and just makes the Error() result 59 | // always return a full stack trace. 
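// In practice this means an error produced by github.com/pkg/errors is logged
// via fmt.Sprintf("%+v", err), i.e. message plus stack frames, rather than
// just its message.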
60 | type tracedError struct { 61 | wrapped stackTracer 62 | } 63 | 64 | var _ stackTracer = tracedError{} 65 | 66 | func (t tracedError) StackTrace() errors.StackTrace { 67 | return t.wrapped.StackTrace() 68 | } 69 | 70 | func (t tracedError) Cause() error { 71 | return t.wrapped 72 | } 73 | 74 | func (t tracedError) Error() string { 75 | return fmt.Sprintf("%+v", t.wrapped) 76 | } 77 | -------------------------------------------------------------------------------- /log/tracing_logger_test.go: -------------------------------------------------------------------------------- 1 | package log_test 2 | 3 | import ( 4 | "bytes" 5 | stderr "errors" 6 | "fmt" 7 | "strings" 8 | "testing" 9 | 10 | "github.com/pkg/errors" 11 | "github.com/tendermint/tmlibs/log" 12 | ) 13 | 14 | func TestTracingLogger(t *testing.T) { 15 | var buf bytes.Buffer 16 | 17 | logger := log.NewTMJSONLogger(&buf) 18 | 19 | logger1 := log.NewTracingLogger(logger) 20 | err1 := errors.New("Courage is grace under pressure.") 21 | err2 := errors.New("It does not matter how slowly you go, so long as you do not stop.") 22 | logger1.With("err1", err1).Info("foo", "err2", err2) 23 | have := strings.Replace(strings.Replace(strings.TrimSpace(buf.String()), "\\n", "", -1), "\\t", "", -1) 24 | if want := strings.Replace(strings.Replace(`{"_msg":"foo","err1":"`+fmt.Sprintf("%+v", err1)+`","err2":"`+fmt.Sprintf("%+v", err2)+`","level":"info"}`, "\t", "", -1), "\n", "", -1); want != have { 25 | t.Errorf("\nwant '%s'\nhave '%s'", want, have) 26 | } 27 | 28 | buf.Reset() 29 | 30 | logger.With("err1", stderr.New("Opportunities don't happen. You create them.")).Info("foo", "err2", stderr.New("Once you choose hope, anything's possible.")) 31 | if want, have := `{"_msg":"foo","err1":"Opportunities don't happen. You create them.","err2":"Once you choose hope, anything's possible.","level":"info"}`, strings.TrimSpace(buf.String()); want != have { 32 | t.Errorf("\nwant '%s'\nhave '%s'", want, have) 33 | } 34 | 35 | buf.Reset() 36 | 37 | logger.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz") 38 | if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`, strings.TrimSpace(buf.String()); want != have { 39 | t.Errorf("\nwant '%s'\nhave '%s'", want, have) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /merge.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | set -e 3 | 4 | # NOTE: go-alert depends on go-common 5 | 6 | REPOS=("autofile" "clist" "db" "events" "flowrate" "logger" "process") 7 | 8 | mkdir common 9 | git mv *.go common 10 | git mv LICENSE common 11 | 12 | git commit -m "move all files to common/ to begin repo merge" 13 | 14 | for repo in "${REPOS[@]}"; do 15 | # add and fetch the repo 16 | git remote add -f "$repo" "https://github.com/tendermint/go-${repo}" 17 | 18 | # merge master and move into subdir 19 | git merge "$repo/master" --no-edit 20 | 21 | if [[ "$repo" != "flowrate" ]]; then 22 | mkdir "$repo" 23 | git mv *.go "$repo/" 24 | fi 25 | 26 | set +e # these might not exist 27 | git mv *.md "$repo/" 28 | git mv README "$repo/README.md" 29 | git mv Makefile "$repo/Makefile" 30 | git rm LICENSE 31 | set -e 32 | 33 | # commit 34 | git commit -m "merge go-${repo}" 35 | 36 | git remote rm "$repo" 37 | done 38 | 39 | go get github.com/ebuchman/got 40 | got replace "tendermint/go-common" "tendermint/go-common/common" 41 | for repo in "${REPOS[@]}"; do 42 | 43 | if [[ "$repo" != "flowrate" ]]; then 44 | got replace "tendermint/go-${repo}" "tendermint/go-common/${repo}" 45 | else 46 | got replace "tendermint/go-${repo}/flowrate" "tendermint/go-common/flowrate" 47 | fi 48 | done 49 | 50 | git add -u 51 | git commit -m "update import paths" 52 | 53 | # TODO: change any paths in non-Go files 54 | # TODO: add license 55 | -------------------------------------------------------------------------------- /merkle/README.md: -------------------------------------------------------------------------------- 1 | ## Simple Merkle Tree 2 | 3 | For smaller static data structures that don't require immutable snapshots or mutability; 4 | for instance the transactions and validation signatures of a block can be hashed using this simple merkle tree logic. 5 | -------------------------------------------------------------------------------- /merkle/simple_map.go: -------------------------------------------------------------------------------- 1 | package merkle 2 | 3 | import ( 4 | cmn "github.com/tendermint/tmlibs/common" 5 | "github.com/tendermint/tmlibs/merkle/tmhash" 6 | ) 7 | 8 | type SimpleMap struct { 9 | kvs cmn.KVPairs 10 | sorted bool 11 | } 12 | 13 | func NewSimpleMap() *SimpleMap { 14 | return &SimpleMap{ 15 | kvs: nil, 16 | sorted: false, 17 | } 18 | } 19 | 20 | func (sm *SimpleMap) Set(key string, value Hasher) { 21 | sm.sorted = false 22 | 23 | // Hash the key to blind it... why not? 24 | khash := SimpleHashFromBytes([]byte(key)) 25 | 26 | // And the value is hashed too, so you can 27 | // check for equality with a cached value (say) 28 | // and make a determination to fetch or not. 29 | vhash := value.Hash() 30 | 31 | sm.kvs = append(sm.kvs, cmn.KVPair{ 32 | Key: khash, 33 | Value: vhash, 34 | }) 35 | } 36 | 37 | // Merkle root hash of items sorted by key 38 | // (UNSTABLE: and by value too if duplicate key). 39 | func (sm *SimpleMap) Hash() []byte { 40 | sm.Sort() 41 | return hashKVPairs(sm.kvs) 42 | } 43 | 44 | func (sm *SimpleMap) Sort() { 45 | if sm.sorted { 46 | return 47 | } 48 | sm.kvs.Sort() 49 | sm.sorted = true 50 | } 51 | 52 | // Returns a copy of sorted KVPairs. 53 | func (sm *SimpleMap) KVPairs() cmn.KVPairs { 54 | sm.Sort() 55 | kvs := make(cmn.KVPairs, len(sm.kvs)) 56 | copy(kvs, sm.kvs) 57 | return kvs 58 | } 59 | 60 | //---------------------------------------- 61 | 62 | // A local extension to KVPair that can be hashed. 
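// Hash below is length-prefixed: encodeByteSlice writes uvarint(len(Key)) and
// Key, then uvarint(len(Value)) and Value, into a tmhash (SHA-256 truncated
// to 20 bytes).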
63 | type KVPair cmn.KVPair 64 | 65 | func (kv KVPair) Hash() []byte { 66 | hasher := tmhash.New() 67 | err := encodeByteSlice(hasher, kv.Key) 68 | if err != nil { 69 | panic(err) 70 | } 71 | err = encodeByteSlice(hasher, kv.Value) 72 | if err != nil { 73 | panic(err) 74 | } 75 | return hasher.Sum(nil) 76 | } 77 | 78 | func hashKVPairs(kvs cmn.KVPairs) []byte { 79 | kvsH := make([]Hasher, 0, len(kvs)) 80 | for _, kvp := range kvs { 81 | kvsH = append(kvsH, KVPair(kvp)) 82 | } 83 | return SimpleHashFromHashers(kvsH) 84 | } 85 | -------------------------------------------------------------------------------- /merkle/simple_map_test.go: -------------------------------------------------------------------------------- 1 | package merkle 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | type strHasher string 11 | 12 | func (str strHasher) Hash() []byte { 13 | return SimpleHashFromBytes([]byte(str)) 14 | } 15 | 16 | func TestSimpleMap(t *testing.T) { 17 | { 18 | db := NewSimpleMap() 19 | db.Set("key1", strHasher("value1")) 20 | assert.Equal(t, "3dafc06a52039d029be57c75c9d16356a4256ef4", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") 21 | } 22 | { 23 | db := NewSimpleMap() 24 | db.Set("key1", strHasher("value2")) 25 | assert.Equal(t, "03eb5cfdff646bc4e80fec844e72fd248a1c6b2c", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") 26 | } 27 | { 28 | db := NewSimpleMap() 29 | db.Set("key1", strHasher("value1")) 30 | db.Set("key2", strHasher("value2")) 31 | assert.Equal(t, "acc3971eab8513171cc90ce8b74f368c38f9657d", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") 32 | } 33 | { 34 | db := NewSimpleMap() 35 | db.Set("key2", strHasher("value2")) // NOTE: out of order 36 | db.Set("key1", strHasher("value1")) 37 | assert.Equal(t, "acc3971eab8513171cc90ce8b74f368c38f9657d", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") 38 | } 39 | { 40 | db := NewSimpleMap() 41 | db.Set("key1", strHasher("value1")) 42 | db.Set("key2", strHasher("value2")) 43 | db.Set("key3", strHasher("value3")) 44 | assert.Equal(t, "0cd117ad14e6cd22edcd9aa0d84d7063b54b862f", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") 45 | } 46 | { 47 | db := NewSimpleMap() 48 | db.Set("key2", strHasher("value2")) // NOTE: out of order 49 | db.Set("key1", strHasher("value1")) 50 | db.Set("key3", strHasher("value3")) 51 | assert.Equal(t, "0cd117ad14e6cd22edcd9aa0d84d7063b54b862f", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /merkle/simple_proof.go: -------------------------------------------------------------------------------- 1 | package merkle 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | ) 7 | 8 | type SimpleProof struct { 9 | Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. 10 | } 11 | 12 | // proofs[0] is the proof for items[0]. 
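// Typical round trip (as exercised in simple_tree_test.go):
//
//	rootHash, proofs := SimpleProofsFromHashers(items)
//	ok := proofs[i].Verify(i, len(items), items[i].Hash(), rootHash)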
13 | func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleProof) { 14 | trails, rootSPN := trailsFromHashers(items) 15 | rootHash = rootSPN.Hash 16 | proofs = make([]*SimpleProof, len(items)) 17 | for i, trail := range trails { 18 | proofs[i] = &SimpleProof{ 19 | Aunts: trail.FlattenAunts(), 20 | } 21 | } 22 | return 23 | } 24 | 25 | func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs []*SimpleProof) { 26 | sm := NewSimpleMap() 27 | for k, v := range m { 28 | sm.Set(k, v) 29 | } 30 | sm.Sort() 31 | kvs := sm.kvs 32 | kvsH := make([]Hasher, 0, len(kvs)) 33 | for _, kvp := range kvs { 34 | kvsH = append(kvsH, KVPair(kvp)) 35 | } 36 | return SimpleProofsFromHashers(kvsH) 37 | } 38 | 39 | // Verify that leafHash is a leaf hash of the simple-merkle-tree 40 | // which hashes to rootHash. 41 | func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool { 42 | computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts) 43 | return computedHash != nil && bytes.Equal(computedHash, rootHash) 44 | } 45 | 46 | func (sp *SimpleProof) String() string { 47 | return sp.StringIndented("") 48 | } 49 | 50 | func (sp *SimpleProof) StringIndented(indent string) string { 51 | return fmt.Sprintf(`SimpleProof{ 52 | %s Aunts: %X 53 | %s}`, 54 | indent, sp.Aunts, 55 | indent) 56 | } 57 | 58 | // Use the leafHash and innerHashes to get the root merkle hash. 59 | // If the length of the innerHashes slice isn't exactly correct, the result is nil. 60 | // Recursive impl. 61 | func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][]byte) []byte { 62 | if index >= total || index < 0 || total <= 0 { 63 | return nil 64 | } 65 | switch total { 66 | case 0: 67 | panic("Cannot call computeHashFromAunts() with 0 total") 68 | case 1: 69 | if len(innerHashes) != 0 { 70 | return nil 71 | } 72 | return leafHash 73 | default: 74 | if len(innerHashes) == 0 { 75 | return nil 76 | } 77 | numLeft := (total + 1) / 2 78 | if index < numLeft { 79 | leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) 80 | if leftHash == nil { 81 | return nil 82 | } 83 | return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) 84 | } 85 | rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) 86 | if rightHash == nil { 87 | return nil 88 | } 89 | return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) 90 | } 91 | } 92 | 93 | // Helper structure to construct merkle proof. 94 | // The node and the tree is thrown away afterwards. 95 | // Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil. 96 | // node.Parent.Hash = hash(node.Hash, node.Right.Hash) or 97 | // hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child. 98 | type SimpleProofNode struct { 99 | Hash []byte 100 | Parent *SimpleProofNode 101 | Left *SimpleProofNode // Left sibling (only one of Left,Right is set) 102 | Right *SimpleProofNode // Right sibling (only one of Left,Right is set) 103 | } 104 | 105 | // Starting from a leaf SimpleProofNode, FlattenAunts() will return 106 | // the inner hashes for the item corresponding to the leaf. 107 | func (spn *SimpleProofNode) FlattenAunts() [][]byte { 108 | // Nonrecursive impl. 
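// Walk from the leaf toward the root, collecting the sibling ("aunt") hash at
// each level; the root has neither Left nor Right set, which ends the loop.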
109 | innerHashes := [][]byte{} 110 | for spn != nil { 111 | if spn.Left != nil { 112 | innerHashes = append(innerHashes, spn.Left.Hash) 113 | } else if spn.Right != nil { 114 | innerHashes = append(innerHashes, spn.Right.Hash) 115 | } else { 116 | break 117 | } 118 | spn = spn.Parent 119 | } 120 | return innerHashes 121 | } 122 | 123 | // trails[0].Hash is the leaf hash for items[0]. 124 | // trails[i].Parent.Parent....Parent == root for all i. 125 | func trailsFromHashers(items []Hasher) (trails []*SimpleProofNode, root *SimpleProofNode) { 126 | // Recursive impl. 127 | switch len(items) { 128 | case 0: 129 | return nil, nil 130 | case 1: 131 | trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil} 132 | return []*SimpleProofNode{trail}, trail 133 | default: 134 | lefts, leftRoot := trailsFromHashers(items[:(len(items)+1)/2]) 135 | rights, rightRoot := trailsFromHashers(items[(len(items)+1)/2:]) 136 | rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) 137 | root := &SimpleProofNode{rootHash, nil, nil, nil} 138 | leftRoot.Parent = root 139 | leftRoot.Right = rightRoot 140 | rightRoot.Parent = root 141 | rightRoot.Left = leftRoot 142 | return append(lefts, rights...), root 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /merkle/simple_tree.go: -------------------------------------------------------------------------------- 1 | /* 2 | Computes a deterministic minimal height merkle tree hash. 3 | If the number of items is not a power of two, some leaves 4 | will be at different levels. Tries to keep both sides of 5 | the tree the same size, but the left may be one greater. 6 | 7 | Use this for short deterministic trees, such as the validator list. 8 | For larger datasets, use IAVLTree. 9 | 10 | * 11 | / \ 12 | / \ 13 | / \ 14 | / \ 15 | * * 16 | / \ / \ 17 | / \ / \ 18 | / \ / \ 19 | * * * h6 20 | / \ / \ / \ 21 | h0 h1 h2 h3 h4 h5 22 | 23 | */ 24 | 25 | package merkle 26 | 27 | import ( 28 | "github.com/tendermint/tmlibs/merkle/tmhash" 29 | ) 30 | 31 | func SimpleHashFromTwoHashes(left []byte, right []byte) []byte { 32 | var hasher = tmhash.New() 33 | err := encodeByteSlice(hasher, left) 34 | if err != nil { 35 | panic(err) 36 | } 37 | err = encodeByteSlice(hasher, right) 38 | if err != nil { 39 | panic(err) 40 | } 41 | return hasher.Sum(nil) 42 | } 43 | 44 | func SimpleHashFromHashes(hashes [][]byte) []byte { 45 | // Recursive impl. 46 | switch len(hashes) { 47 | case 0: 48 | return nil 49 | case 1: 50 | return hashes[0] 51 | default: 52 | left := SimpleHashFromHashes(hashes[:(len(hashes)+1)/2]) 53 | right := SimpleHashFromHashes(hashes[(len(hashes)+1)/2:]) 54 | return SimpleHashFromTwoHashes(left, right) 55 | } 56 | } 57 | 58 | // NOTE: Do not implement this, use SimpleHashFromByteslices instead. 59 | // type Byteser interface { Bytes() []byte } 60 | // func SimpleHashFromBytesers(items []Byteser) []byte { ... 
} 61 | 62 | func SimpleHashFromByteslices(bzs [][]byte) []byte { 63 | hashes := make([][]byte, len(bzs)) 64 | for i, bz := range bzs { 65 | hashes[i] = SimpleHashFromBytes(bz) 66 | } 67 | return SimpleHashFromHashes(hashes) 68 | } 69 | 70 | func SimpleHashFromBytes(bz []byte) []byte { 71 | hasher := tmhash.New() 72 | hasher.Write(bz) 73 | return hasher.Sum(nil) 74 | } 75 | 76 | func SimpleHashFromHashers(items []Hasher) []byte { 77 | hashes := make([][]byte, len(items)) 78 | for i, item := range items { 79 | hash := item.Hash() 80 | hashes[i] = hash 81 | } 82 | return SimpleHashFromHashes(hashes) 83 | } 84 | 85 | func SimpleHashFromMap(m map[string]Hasher) []byte { 86 | sm := NewSimpleMap() 87 | for k, v := range m { 88 | sm.Set(k, v) 89 | } 90 | return sm.Hash() 91 | } 92 | -------------------------------------------------------------------------------- /merkle/simple_tree_test.go: -------------------------------------------------------------------------------- 1 | package merkle 2 | 3 | import ( 4 | "bytes" 5 | 6 | cmn "github.com/tendermint/tmlibs/common" 7 | . "github.com/tendermint/tmlibs/test" 8 | 9 | "testing" 10 | ) 11 | 12 | type testItem []byte 13 | 14 | func (tI testItem) Hash() []byte { 15 | return []byte(tI) 16 | } 17 | 18 | func TestSimpleProof(t *testing.T) { 19 | 20 | total := 100 21 | 22 | items := make([]Hasher, total) 23 | for i := 0; i < total; i++ { 24 | items[i] = testItem(cmn.RandBytes(32)) 25 | } 26 | 27 | rootHash := SimpleHashFromHashers(items) 28 | 29 | rootHash2, proofs := SimpleProofsFromHashers(items) 30 | 31 | if !bytes.Equal(rootHash, rootHash2) { 32 | t.Errorf("Unmatched root hashes: %X vs %X", rootHash, rootHash2) 33 | } 34 | 35 | // For each item, check the trail. 36 | for i, item := range items { 37 | itemHash := item.Hash() 38 | proof := proofs[i] 39 | 40 | // Verify success 41 | ok := proof.Verify(i, total, itemHash, rootHash) 42 | if !ok { 43 | t.Errorf("Verification failed for index %v.", i) 44 | } 45 | 46 | // Wrong item index should make it fail 47 | { 48 | ok = proof.Verify((i+1)%total, total, itemHash, rootHash) 49 | if ok { 50 | t.Errorf("Expected verification to fail for wrong index %v.", i) 51 | } 52 | } 53 | 54 | // Trail too long should make it fail 55 | origAunts := proof.Aunts 56 | proof.Aunts = append(proof.Aunts, cmn.RandBytes(32)) 57 | { 58 | ok = proof.Verify(i, total, itemHash, rootHash) 59 | if ok { 60 | t.Errorf("Expected verification to fail for wrong trail length.") 61 | } 62 | } 63 | proof.Aunts = origAunts 64 | 65 | // Trail too short should make it fail 66 | proof.Aunts = proof.Aunts[0 : len(proof.Aunts)-1] 67 | { 68 | ok = proof.Verify(i, total, itemHash, rootHash) 69 | if ok { 70 | t.Errorf("Expected verification to fail for wrong trail length.") 71 | } 72 | } 73 | proof.Aunts = origAunts 74 | 75 | // Mutating the itemHash should make it fail. 76 | ok = proof.Verify(i, total, MutateByteSlice(itemHash), rootHash) 77 | if ok { 78 | t.Errorf("Expected verification to fail for mutated leaf hash") 79 | } 80 | 81 | // Mutating the rootHash should make it fail. 
82 | ok = proof.Verify(i, total, itemHash, MutateByteSlice(rootHash)) 83 | if ok { 84 | t.Errorf("Expected verification to fail for mutated root hash") 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /merkle/tmhash/hash.go: -------------------------------------------------------------------------------- 1 | package tmhash 2 | 3 | import ( 4 | "crypto/sha256" 5 | "hash" 6 | ) 7 | 8 | var ( 9 | Size = 20 10 | BlockSize = sha256.BlockSize 11 | ) 12 | 13 | type sha256trunc struct { 14 | sha256 hash.Hash 15 | } 16 | 17 | func (h sha256trunc) Write(p []byte) (n int, err error) { 18 | return h.sha256.Write(p) 19 | } 20 | func (h sha256trunc) Sum(b []byte) []byte { 21 | shasum := h.sha256.Sum(b) 22 | return shasum[:Size] 23 | } 24 | 25 | func (h sha256trunc) Reset() { 26 | h.sha256.Reset() 27 | } 28 | 29 | func (h sha256trunc) Size() int { 30 | return Size 31 | } 32 | 33 | func (h sha256trunc) BlockSize() int { 34 | return h.sha256.BlockSize() 35 | } 36 | 37 | func New() hash.Hash { 38 | return sha256trunc{ 39 | sha256: sha256.New(), 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /merkle/tmhash/hash_test.go: -------------------------------------------------------------------------------- 1 | package tmhash_test 2 | 3 | import ( 4 | "crypto/sha256" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/tendermint/tmlibs/merkle/tmhash" 9 | ) 10 | 11 | func TestHash(t *testing.T) { 12 | testVector := []byte("abc") 13 | hasher := tmhash.New() 14 | hasher.Write(testVector) 15 | bz := hasher.Sum(nil) 16 | 17 | hasher = sha256.New() 18 | hasher.Write(testVector) 19 | bz2 := hasher.Sum(nil) 20 | bz2 = bz2[:20] 21 | 22 | assert.Equal(t, bz, bz2) 23 | } 24 | -------------------------------------------------------------------------------- /merkle/types.go: -------------------------------------------------------------------------------- 1 | package merkle 2 | 3 | import ( 4 | "encoding/binary" 5 | "io" 6 | ) 7 | 8 | type Tree interface { 9 | Size() (size int) 10 | Height() (height int8) 11 | Has(key []byte) (has bool) 12 | Proof(key []byte) (value []byte, proof []byte, exists bool) // TODO make it return an index 13 | Get(key []byte) (index int, value []byte, exists bool) 14 | GetByIndex(index int) (key []byte, value []byte) 15 | Set(key []byte, value []byte) (updated bool) 16 | Remove(key []byte) (value []byte, removed bool) 17 | HashWithCount() (hash []byte, count int) 18 | Hash() (hash []byte) 19 | Save() (hash []byte) 20 | Load(hash []byte) 21 | Copy() Tree 22 | Iterate(func(key []byte, value []byte) (stop bool)) (stopped bool) 23 | IterateRange(start []byte, end []byte, ascending bool, fx func(key []byte, value []byte) (stop bool)) (stopped bool) 24 | } 25 | 26 | type Hasher interface { 27 | Hash() []byte 28 | } 29 | 30 | //----------------------------------------------------------------------- 31 | // NOTE: these are duplicated from go-amino so we dont need go-amino as a dep 32 | 33 | func encodeByteSlice(w io.Writer, bz []byte) (err error) { 34 | err = encodeUvarint(w, uint64(len(bz))) 35 | if err != nil { 36 | return 37 | } 38 | _, err = w.Write(bz) 39 | return 40 | } 41 | 42 | func encodeUvarint(w io.Writer, i uint64) (err error) { 43 | var buf [10]byte 44 | n := binary.PutUvarint(buf[:], i) 45 | _, err = w.Write(buf[0:n]) 46 | return 47 | } 48 | -------------------------------------------------------------------------------- /test.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # run the linter 5 | # make metalinter_test 6 | 7 | # setup certs 8 | make gen_certs 9 | 10 | # run the unit tests with coverage 11 | echo "" > coverage.txt 12 | for d in $(go list ./... | grep -v vendor); do 13 | go test -race -coverprofile=profile.out -covermode=atomic "$d" 14 | if [ -f profile.out ]; then 15 | cat profile.out >> coverage.txt 16 | rm profile.out 17 | fi 18 | done 19 | 20 | # cleanup certs 21 | make clean_certs 22 | -------------------------------------------------------------------------------- /test/assert.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func AssertPanics(t *testing.T, msg string, f func()) { 8 | defer func() { 9 | if err := recover(); err == nil { 10 | t.Errorf("Should have panic'd, but didn't: %v", msg) 11 | } 12 | }() 13 | f() 14 | } 15 | -------------------------------------------------------------------------------- /test/mutate.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | cmn "github.com/tendermint/tmlibs/common" 5 | ) 6 | 7 | // Contract: !bytes.Equal(input, output) && len(input) >= len(output) 8 | func MutateByteSlice(bytez []byte) []byte { 9 | // If bytez is empty, panic 10 | if len(bytez) == 0 { 11 | panic("Cannot mutate an empty bytez") 12 | } 13 | 14 | // Copy bytez 15 | mBytez := make([]byte, len(bytez)) 16 | copy(mBytez, bytez) 17 | bytez = mBytez 18 | 19 | // Try a random mutation 20 | switch cmn.RandInt() % 2 { 21 | case 0: // Mutate a single byte 22 | bytez[cmn.RandInt()%len(bytez)] += byte(cmn.RandInt()%255 + 1) 23 | case 1: // Remove an arbitrary byte 24 | pos := cmn.RandInt() % len(bytez) 25 | bytez = append(bytez[:pos], bytez[pos+1:]...) 26 | } 27 | return bytez 28 | } 29 | -------------------------------------------------------------------------------- /version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | const Version = "0.9.0" 4 | --------------------------------------------------------------------------------